diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b67f3b4dd29..3e0becdcb3b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## Current develop +### Added (new features/APIs/variables/...) +- [[PR 877]](https://github.com/parthenon-hpc-lab/parthenon/pull/877) Add flat sparse packs +- [[PR 868]](https://github.com/parthenon-hpc-lab/parthenon/pull/868) Add block-local face, edge, and nodal fields and allow for packing + ### Changed (changing behavior/API/variables/...) ### Fixed (not changing behavior/API/variables/...) @@ -15,7 +19,6 @@ Date: 2023-05-26 ### Added (new features/APIs/variables/...) -- [[PR 868]](https://github.com/parthenon-hpc-lab/parthenon/pull/868) Add block-local face, edge, and nodal fields and allow for packing - [[PR 830]](https://github.com/parthenon-hpc-lab/parthenon/pull/830) Add particle output - [[PR 840]](https://github.com/parthenon-hpc-lab/parthenon/pull/840) Generalized integrators infrastructure in a backwards compatible way - [[PR 810]](https://github.com/parthenon-hpc-lab/parthenon/pull/810) Add suport for Ascent in-situ visualization diff --git a/doc/sphinx/src/interface/sparse.rst b/doc/sphinx/src/interface/sparse.rst index 108d06601eaf..babc50e087c1 100644 --- a/doc/sphinx/src/interface/sparse.rst +++ b/doc/sphinx/src/interface/sparse.rst @@ -137,6 +137,16 @@ simulation. The set of possible operations for a sparse field are: ``lroberts36/merge-sparse-with-jdolence-sparse`` that is being used in ``Riot``. This should probably be brought into ``develop``, since the “sparse sparse pack” access pattern is probably not desirable. +- *Flatten outer indices:* Most common packs over ``MeshData`` produce + a 5-dimensional data structure, where the slowest two moving indices + are over ``MeshBlock``\ s then tensor/vector components of + ``Variable``\ s. However, it is sometimes desirable to construct a + 4-dimensional data structure where the slowest moving index ranges + over all variables and all blocks. 
So for example if we had a system + with 9 blocks and 5 variables per block, the slowest moving index + would be of size 45. This can be enabled for ``SparsePack``s with the + ``GetFlat`` series of factory functions or by passing the optional + ``flat`` boolean into the constructor. In comparison to a sparse field, a dense field only requires the operation *Access*. diff --git a/src/interface/sparse_pack.hpp b/src/interface/sparse_pack.hpp index 0a3389d2ae42..b38be745a10d 100644 --- a/src/interface/sparse_pack.hpp +++ b/src/interface/sparse_pack.hpp @@ -125,10 +125,10 @@ class SparsePack : public SparsePackBase { // The pack will be created and accessible on the device template static SparsePack Get(T *pmd, const std::vector &flags = {}, - bool fluxes = false, bool coarse = false) { + bool fluxes = false, bool coarse = false, bool flatten = false) { const impl::PackDescriptor desc(std::vector{Ts::name()...}, std::vector{Ts::regex()...}, flags, fluxes, - coarse); + coarse, flatten); return SparsePack(SparsePackBase::GetPack(pmd, desc)); } @@ -143,9 +143,9 @@ class SparsePack : public SparsePackBase { template static std::tuple Get(T *pmd, const VAR_VEC &vars, const std::vector &flags = {}, - bool fluxes = false, bool coarse = false) { + bool fluxes = false, bool coarse = false, bool flatten = false) { static_assert(sizeof...(Ts) == 0, "Cannot create a string/type hybrid pack"); - impl::PackDescriptor desc(vars, flags, fluxes, coarse); + impl::PackDescriptor desc(vars, flags, fluxes, coarse, flatten); return {SparsePack(SparsePackBase::GetPack(pmd, desc)), SparsePackBase::GetIdxMap(desc)}; } @@ -155,7 +155,7 @@ class SparsePack : public SparsePackBase { static SparsePack GetWithFluxes(T *pmd, const std::vector &flags = {}) { const bool coarse = false; const bool fluxes = true; - return Get(pmd, flags, fluxes, coarse); + return Get(pmd, AddFlag_(flags), fluxes, coarse); } template static std::tuple GetWithFluxes(T *pmd, const VAR_VEC &vars, const std::vector &flags = 
{}) { const bool coarse = false; const bool fluxes = true; - Get(pmd, vars, flags, fluxes, coarse); + Get(pmd, vars, AddFlag_(flags), fluxes, coarse); } template @@ -183,21 +183,79 @@ class SparsePack : public SparsePackBase { Get(pmd, vars, flags, fluxes, coarse); } + template + static SparsePack GetFlat(T *pmd, const std::vector &flags = {}) { + const bool coarse = false; + const bool fluxes = false; + const bool flatten = true; + return Get(pmd, flags, fluxes, coarse, flatten); + } + + template + static std::tuple + GetFlat(T *pmd, const VAR_VEC &vars, const std::vector &flags = {}) { + const bool coarse = false; + const bool fluxes = false; + const bool flatten = true; + return Get(pmd, vars, flags, fluxes, coarse, flatten); + } + + template + static SparsePack GetFlatWithFluxes(T *pmd, + const std::vector &flags = {}) { + const bool coarse = false; + const bool fluxes = true; + const bool flatten = true; + return Get(pmd, AddFlag_(flags), fluxes, coarse, flatten); + } + + template + static std::tuple + GetFlatWithFluxes(T *pmd, const VAR_VEC &vars, + const std::vector &flags = {}) { + const bool coarse = false; + const bool fluxes = true; + const bool flatten = true; + return Get(pmd, vars, AddFlag_(flags), fluxes, coarse, flatten); + } + + template + static SparsePack GetFlatWithCoarse(T *pmd, + const std::vector &flags = {}) { + const bool coarse = true; + const bool fluxes = false; + const bool flatten = true; + return Get(pmd, flags, fluxes, coarse, flatten); + } + + template + static std::tuple + GetFlatWithCoarse(T *pmd, const VAR_VEC &vars, + const std::vector &flags = {}) { + const bool coarse = true; + const bool fluxes = false; + const bool flatten = true; + return Get(pmd, vars, flags, fluxes, coarse, flatten); + } + // Methods for getting parts of the shape of the pack KOKKOS_FORCEINLINE_FUNCTION int GetNBlocks() const { return nblocks_; } KOKKOS_FORCEINLINE_FUNCTION - int GetNDim() const { return ndim_; } + int GetMaxNumberOfVars() const { return 
pack_.extent_int(2); } + // Returns the total number of vars/components in pack KOKKOS_FORCEINLINE_FUNCTION - int GetMaxNumberOfVars() const { return pack_.extent_int(2); } + int GetSize() const { return size_; } KOKKOS_INLINE_FUNCTION - const Coordinates_t &GetCoordinates(const int b) const { return coords_(b)(); } + const Coordinates_t &GetCoordinates(const int b = 0) const { return coords_(b)(); } // Bound overloads - KOKKOS_INLINE_FUNCTION int GetLowerBound(const int b) const { return 0; } + KOKKOS_INLINE_FUNCTION int GetLowerBound(const int b) const { + return (flat_ && (b > 0)) ? (bounds_(1, b - 1, nvar_) + 1) : 0; + } KOKKOS_INLINE_FUNCTION int GetUpperBound(const int b) const { return bounds_(1, b, nvar_); @@ -226,7 +284,9 @@ class SparsePack : public SparsePackBase { } // Host Bound overloads - KOKKOS_INLINE_FUNCTION int GetLowerBoundHost(const int b) const { return 0; } + KOKKOS_INLINE_FUNCTION int GetLowerBoundHost(const int b) const { + return (flat_ && (b > 0)) ? (bounds_h_(1, b - 1, nvar_) + 1) : 0; + } KOKKOS_INLINE_FUNCTION int GetUpperBoundHost(const int b) const { return bounds_h_(1, b, nvar_); @@ -268,6 +328,7 @@ class SparsePack : public SparsePackBase { KOKKOS_INLINE_FUNCTION auto &operator()(const int b, const TE el, PackIdx idx) const { static_assert(sizeof...(Ts) == 0); + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); const int n = bounds_(0, b, idx.VariableIdx()) + idx.Offset(); return pack_(static_cast(el) % 3, b, n); } @@ -283,14 +344,22 @@ class SparsePack : public SparsePackBase { template ::value)> KOKKOS_INLINE_FUNCTION auto &operator()(const int b, const TIn &t) const { + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); const int vidx = GetLowerBound(b, t) + t.idx; return pack_(0, b, vidx); } - KOKKOS_INLINE_FUNCTION Real &operator()(const int b, const int idx, const int k, - const int j, const int i) const { + KOKKOS_INLINE_FUNCTION + Real &operator()(const int b, const int idx, 
const int k, const int j, + const int i) const { + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); return pack_(0, b, idx)(k, j, i); } + KOKKOS_INLINE_FUNCTION + Real &operator()(int idx, const int k, const int j, const int i) const { + PARTHENON_DEBUG_REQUIRE(flat_, "Accessor valid only for flat packs"); + return pack_(0, 0, idx)(k, j, i); + } KOKKOS_INLINE_FUNCTION Real &operator()(const int b, const TE el, const int idx, const int k, const int j, const int i) const { @@ -300,6 +369,7 @@ class SparsePack : public SparsePackBase { KOKKOS_INLINE_FUNCTION Real &operator()(const int b, PackIdx idx, const int k, const int j, const int i) const { static_assert(sizeof...(Ts) == 0, "Cannot create a string/type hybrid pack"); + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); const int n = bounds_(0, b, idx.VariableIdx()) + idx.Offset(); return pack_(0, b, n)(k, j, i); } @@ -314,6 +384,7 @@ class SparsePack : public SparsePackBase { template ::value)> KOKKOS_INLINE_FUNCTION Real &operator()(const int b, const TIn &t, const int k, const int j, const int i) const { + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); const int vidx = GetLowerBound(b, t) + t.idx; return pack_(0, b, vidx)(k, j, i); } @@ -328,6 +399,7 @@ class SparsePack : public SparsePackBase { // flux() overloads KOKKOS_INLINE_FUNCTION auto &flux(const int b, const int dir, const int idx) const { + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); PARTHENON_DEBUG_REQUIRE(dir > 0 && dir < 4 && with_fluxes_, "Bad input to flux call"); return pack_(dir, b, idx); } @@ -335,14 +407,23 @@ class SparsePack : public SparsePackBase { KOKKOS_INLINE_FUNCTION Real &flux(const int b, const int dir, const int idx, const int k, const int j, const int i) const { + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); PARTHENON_DEBUG_REQUIRE(dir > 0 && dir < 4 && with_fluxes_, "Bad input to flux call"); 
return pack_(dir, b, idx)(k, j, i); } + KOKKOS_INLINE_FUNCTION + Real &flux(const int dir, const int idx, const int k, const int j, const int i) const { + PARTHENON_DEBUG_REQUIRE(flat_, "Accessor must only be used for flat packs"); + PARTHENON_DEBUG_REQUIRE(dir > 0 && dir < 4 && with_fluxes_, "Bad input to flux call"); + return pack_(dir, 0, idx)(k, j, i); + } + KOKKOS_INLINE_FUNCTION Real &flux(const int b, const int dir, PackIdx idx, const int k, const int j, const int i) const { static_assert(sizeof...(Ts) == 0, "Cannot create a string/type hybrid pack"); + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); PARTHENON_DEBUG_REQUIRE(dir > 0 && dir < 4 && with_fluxes_, "Bad input to flux call"); const int n = bounds_(0, b, idx.VariableIdx()) + idx.Offset(); return pack_(dir, b, n)(k, j, i); @@ -351,10 +432,27 @@ class SparsePack : public SparsePackBase { template ::value)> KOKKOS_INLINE_FUNCTION Real &flux(const int b, const int dir, const TIn &t, const int k, const int j, const int i) const { + PARTHENON_DEBUG_REQUIRE(!flat_, "Accessor cannot be used for flat packs"); PARTHENON_DEBUG_REQUIRE(dir > 0 && dir < 4 && with_fluxes_, "Bad input to flux call"); const int vidx = GetLowerBound(b, t) + t.idx; return pack_(dir, b, vidx)(k, j, i); } + + private: + // Must make a copy of the vector, since input vector is const, + // and it may not even be an lvalue. 
+ static std::vector AddFlag_(const std::vector &flags, + MetadataFlag mf = Metadata::WithFluxes) { + if (std::find(flags.begin(), flags.end(), mf) == flags.end()) { + std::vector out; + out.reserve(flags.size() + 1); + out.insert(out.begin(), flags.begin(), flags.end()); + out.push_back(mf); + return out; + } else { + return flags; + } + } }; } // namespace parthenon diff --git a/src/interface/sparse_pack_base.cpp b/src/interface/sparse_pack_base.cpp index 8e2071e91a65..3869f2aad27a 100644 --- a/src/interface/sparse_pack_base.cpp +++ b/src/interface/sparse_pack_base.cpp @@ -85,50 +85,66 @@ SparsePackBase SparsePackBase::Build(T *pmd, const PackDescriptor &desc) { pack.with_fluxes_ = desc.with_fluxes; pack.coarse_ = desc.coarse; pack.nvar_ = desc.vars.size(); + pack.flat_ = desc.flat; // Count up the size of the array that is required int max_size = 0; int nblocks = 0; - int ndim = 3; + int size = 0; // local var used to compute size/block + pack.size_ = 0; // total size of pack ForEachBlock(pmd, [&](int b, mbd_t *pmbd) { - int size = 0; + if (!desc.flat) { + size = 0; + } nblocks++; for (auto &pv : pmbd->GetVariableVector()) { for (int i = 0; i < nvar; ++i) { if (desc.IncludeVariable(i, pv)) { if (pv->IsAllocated()) { - size += pv->GetDim(6) * pv->GetDim(5) * pv->GetDim(4); - ndim = (pv->GetDim(1) > 1 ? 1 : 0) + (pv->GetDim(2) > 1 ? 1 : 0) + - (pv->GetDim(3) > 1 ? 1 : 0); + int prod = pv->GetDim(6) * pv->GetDim(5) * pv->GetDim(4); + size += prod; // max size/block (or total size for flat) + pack.size_ += prod; // total ragged size } } } } max_size = std::max(size, max_size); }); - pack.nblocks_ = nblocks; + pack.nblocks_ = desc.flat ? 
1 : nblocks; // Allocate the views int leading_dim = 1; if (desc.with_fluxes) leading_dim += 3; - pack.pack_ = pack_t("data_ptr", leading_dim, nblocks, max_size); + pack.pack_ = pack_t("data_ptr", leading_dim, pack.nblocks_, max_size); auto pack_h = Kokkos::create_mirror_view(pack.pack_); + // For non-flat packs, shape of pack is type x block x var x k x j x i + // where type here might be a flux. + // For flat packs, shape is type x (some var on some block) x k x j x 1 + // in the latter case, coords indexes into the some var on some + // block. Bounds provides the start and end index of a var in a block in the flat array. // Size is nvar + 1 to store the maximum idx for easy access pack.bounds_ = bounds_t("bounds", 2, nblocks, nvar + 1); pack.bounds_h_ = Kokkos::create_mirror_view(pack.bounds_); - pack.coords_ = coords_t("coords", nblocks); + pack.coords_ = coords_t("coords", desc.flat ? max_size : nblocks); auto coords_h = Kokkos::create_mirror_view(pack.coords_); // Fill the views - ForEachBlock(pmd, [&](int b, mbd_t *pmbd) { - int idx = 0; - coords_h(b) = pmbd->GetBlockPointer()->coords_device; + int idx = 0; + ForEachBlock(pmd, [&](int block, mbd_t *pmbd) { + int b = 0; + if (!desc.flat) { + idx = 0; + b = block; + // JMM: This line could be unified with the coords_h line below, + // but it would imply unnecessary copies in the case of non-flat + // packs. 
+ coords_h(b) = pmbd->GetBlockPointer()->coords_device; + } for (int i = 0; i < nvar; ++i) { - pack.bounds_h_(0, b, i) = idx; - + pack.bounds_h_(0, block, i) = idx; for (auto &pv : pmbd->GetVariableVector()) { if (desc.IncludeVariable(i, pv)) { if (pv->IsAllocated()) { @@ -152,28 +168,19 @@ SparsePackBase SparsePackBase::Build(T *pmd, const PackDescriptor &desc) { } else { pack_h(0, b, idx) = pv->data.Get(0, t, u, v); } - PARTHENON_REQUIRE( - pack_h(0, b, idx).size() > 0, - "Seems like this variable might not actually be allocated."); if (desc.with_fluxes && pv->IsSet(Metadata::WithFluxes)) { pack_h(1, b, idx) = pv->flux[X1DIR].Get(0, t, u, v); - PARTHENON_REQUIRE(pack_h(1, b, idx).size() == - pack_h(0, b, idx).size(), - "Different size fluxes."); - if (ndim > 1) { - pack_h(2, b, idx) = pv->flux[X2DIR].Get(0, t, u, v); - PARTHENON_REQUIRE(pack_h(2, b, idx).size() == - pack_h(0, b, idx).size(), - "Different size fluxes."); - } - if (ndim > 2) { - pack_h(3, b, idx) = pv->flux[X3DIR].Get(0, t, u, v); - PARTHENON_REQUIRE(pack_h(3, b, idx).size() == - pack_h(0, b, idx).size(), - "Different size fluxes."); - } + pack_h(2, b, idx) = pv->flux[X2DIR].Get(0, t, u, v); + pack_h(3, b, idx) = pv->flux[X3DIR].Get(0, t, u, v); } } + PARTHENON_REQUIRE( + pack_h(0, b, idx).size() > 0, + "Seems like this variable might not actually be allocated."); + + if (desc.flat) { + coords_h(idx) = pmbd->GetBlockPointer()->coords_device; + } idx++; } } @@ -181,24 +188,21 @@ SparsePackBase SparsePackBase::Build(T *pmd, const PackDescriptor &desc) { } } } - - pack.bounds_h_(1, b, i) = idx - 1; - - if (pack.bounds_h_(1, b, i) < pack.bounds_h_(0, b, i)) { + pack.bounds_h_(1, block, i) = idx - 1; + if (pack.bounds_h_(1, block, i) < pack.bounds_h_(0, block, i)) { // Did not find any allocated variables meeting our criteria - pack.bounds_h_(0, b, i) = -1; + pack.bounds_h_(0, block, i) = -1; // Make the upper bound more negative so a for loop won't iterate once - pack.bounds_h_(1, b, i) = -2; + 
pack.bounds_h_(1, block, i) = -2; } } // Record the maximum for easy access - pack.bounds_h_(1, b, nvar) = idx - 1; + pack.bounds_h_(1, block, nvar) = idx - 1; }); Kokkos::deep_copy(pack.pack_, pack_h); Kokkos::deep_copy(pack.bounds_, pack.bounds_h_); Kokkos::deep_copy(pack.coords_, coords_h); - pack.ndim_ = ndim; pack.dims_[1] = pack.nblocks_; pack.dims_[2] = -1; // Not allowed to ask for the ragged dimension anyway pack.dims_[3] = pack_h(0, 0, 0).extent_int(0); @@ -219,8 +223,6 @@ SparsePackBase &SparsePackCache::Get(T *pmd, const PackDescriptor &desc) { std::string ident = GetIdentifier(desc); if (pack_map.count(ident) > 0) { auto &pack = pack_map[ident].first; - if (desc.with_fluxes != pack.with_fluxes_) return BuildAndAdd(pmd, desc, ident); - if (desc.coarse != pack.coarse_) return BuildAndAdd(pmd, desc, ident); auto alloc_status_in = SparsePackBase::GetAllocStatus(pmd, desc); auto &alloc_status = pack_map[ident].second; if (alloc_status.size() != alloc_status_in.size()) @@ -259,6 +261,10 @@ std::string SparsePackCache::GetIdentifier(const PackDescriptor &desc) const { identifier += "____"; for (int i = 0; i < desc.vars.size(); ++i) identifier += desc.vars[i] + std::to_string(desc.use_regex[i]); + identifier += "____"; + identifier += std::to_string(desc.with_fluxes); + identifier += std::to_string(desc.coarse); + identifier += std::to_string(desc.flat); return identifier; } diff --git a/src/interface/sparse_pack_base.hpp b/src/interface/sparse_pack_base.hpp index 3b1d1f7dbb9b..2ac23775891d 100644 --- a/src/interface/sparse_pack_base.hpp +++ b/src/interface/sparse_pack_base.hpp @@ -40,9 +40,10 @@ using SparsePackIdxMap = std::unordered_map; namespace impl { struct PackDescriptor { PackDescriptor(const std::vector &vars, const std::vector &use_regex, - const std::vector &flags, bool with_fluxes, bool coarse) + const std::vector &flags, bool with_fluxes, bool coarse, + bool flat = false) : vars(vars), use_regex(use_regex), flags(flags), with_fluxes(with_fluxes), 
- coarse(coarse) { + coarse(coarse), flat(flat) { PARTHENON_REQUIRE(use_regex.size() == vars.size(), "Must have a regex flag for each variable."); PARTHENON_REQUIRE(!(with_fluxes && coarse), @@ -52,8 +53,9 @@ struct PackDescriptor { } PackDescriptor(const std::vector> &vars_in, - const std::vector &flags, bool with_fluxes, bool coarse) - : flags(flags), with_fluxes(with_fluxes), coarse(coarse) { + const std::vector &flags, bool with_fluxes, bool coarse, + bool flat = false) + : flags(flags), with_fluxes(with_fluxes), coarse(coarse), flat(flat) { for (auto var : vars_in) { vars.push_back(var.first); use_regex.push_back(var.second); @@ -65,9 +67,10 @@ struct PackDescriptor { } PackDescriptor(const std::vector &vars_in, - const std::vector &flags, bool with_fluxes, bool coarse) + const std::vector &flags, bool with_fluxes, bool coarse, + bool flat = false) : vars(vars_in), use_regex(vars_in.size(), false), flags(flags), - with_fluxes(with_fluxes), coarse(coarse) { + with_fluxes(with_fluxes), coarse(coarse), flat(flat) { PARTHENON_REQUIRE(!(with_fluxes && coarse), "Probably shouldn't be making a coarse pack with fine fluxes."); for (const auto &var : vars) @@ -98,6 +101,7 @@ struct PackDescriptor { std::vector flags; bool with_fluxes; bool coarse; + bool flat; }; } // namespace impl @@ -152,10 +156,11 @@ class SparsePackBase { bool with_fluxes_; bool coarse_; + bool flat_; int nblocks_; - int ndim_; int dims_[6]; int nvar_; + int size_; }; // Object for cacheing sparse packs in MeshData and MeshBlockData objects. 
This diff --git a/tst/unit/test_sparse_pack.cpp b/tst/unit/test_sparse_pack.cpp index 5f2bf59857db..e8800b7664c6 100644 --- a/tst/unit/test_sparse_pack.cpp +++ b/tst/unit/test_sparse_pack.cpp @@ -124,6 +124,16 @@ TEST_CASE("Test behavior of sparse packs", "[SparsePack]") { // Deallocate a variable on an arbitrary block block_list[2]->DeallocateSparse("v3"); + THEN("A sparse pack can be loaded on this data and reports the bounds for block 2 " + "appropriately.") { + auto pack = + parthenon::SparsePack::Get(&mesh_data, {Metadata::WithFluxes}); + int lo = pack.GetLowerBoundHost(2); + int hi = pack.GetUpperBoundHost(2); + REQUIRE(lo == 0); // lo = 0 because always start at 0 on a block + REQUIRE(hi == 0); // hi is scalar. Only one value. + } + THEN("A sparse pack correctly loads this data and can be read from v3 on all " "blocks") { // Create a pack use type variables @@ -167,6 +177,29 @@ TEST_CASE("Test behavior of sparse packs", "[SparsePack]") { REQUIRE(nwrong == 0); } + THEN("A flattened sparse pack can correctly load this data in a unified outer " + "index space") { + using parthenon::variable_names::any; + auto sparse_pack = parthenon::SparsePack::GetFlatWithFluxes(&mesh_data); + REQUIRE(sparse_pack.GetNBlocks() == 1); + // v3 is deallocated on one block. + REQUIRE(sparse_pack.GetMaxNumberOfVars() == 5 * NBLOCKS - 3); + REQUIRE(sparse_pack.GetLowerBoundHost(0) == 0); + // upper bound is inclusive + REQUIRE(sparse_pack.GetUpperBoundHost(0) == 5 - 1); + REQUIRE(sparse_pack.GetSize() == 5 * NBLOCKS - 3); + AND_THEN("A flattened sparse pack starting with v3 has sensible lower/upper " + "bounds on the block where we deallocate") { + auto pack = parthenon::SparsePack::GetFlatWithFluxes(&mesh_data); + int lo = pack.GetLowerBoundHost(2); + int hi = pack.GetUpperBoundHost(2); + REQUIRE(lo == 4 - 1 + 4 + 1); // lo = index in flat pack where block 2 starts. + // v3 and v5 = 4 total var components + REQUIRE(hi == lo); // hi = index in flat pack where block 2 ends. 
Only v3 + // present, so only 1 var + } + } + THEN("A sparse pack correctly loads this data and can be read from v3 on a single " "block") { auto sparse_pack =