From d05cd23a405d525f4c7a261036ba09d468cc9df4 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Sat, 20 Jul 2024 18:05:39 -0700 Subject: [PATCH 1/5] Added basic Hot Archive BucketList structure --- src/bucket/BucketApplicator.cpp | 2 +- src/bucket/BucketList.cpp | 99 ++++----- src/bucket/BucketList.h | 71 ++++--- src/bucket/BucketListSnapshot.cpp | 11 +- src/bucket/BucketListSnapshot.h | 2 +- src/bucket/BucketManager.h | 21 +- src/bucket/BucketManagerImpl.cpp | 162 ++++++++++----- src/bucket/BucketManagerImpl.h | 33 +-- src/bucket/BucketSnapshotManager.h | 3 +- src/bucket/FutureBucket.cpp | 6 +- src/bucket/test/BucketIndexTests.cpp | 2 +- src/bucket/test/BucketListTests.cpp | 188 +++++++++--------- src/bucket/test/BucketManagerTests.cpp | 64 +++--- src/bucket/test/BucketTestUtils.cpp | 9 +- src/bucket/test/BucketTestUtils.h | 2 +- src/catchup/ApplyBucketsWork.cpp | 19 +- src/catchup/ApplyBucketsWork.h | 2 +- src/catchup/ApplyBufferedLedgersWork.cpp | 2 +- src/catchup/ApplyCheckpointWork.cpp | 2 +- src/catchup/AssumeStateWork.cpp | 2 +- src/catchup/DownloadApplyTxsWork.cpp | 2 +- src/herder/test/UpgradesTests.cpp | 18 +- src/history/HistoryArchive.cpp | 12 +- src/history/HistoryArchive.h | 4 +- src/history/HistoryManager.h | 2 +- src/history/HistoryManagerImpl.cpp | 4 +- src/history/StateSnapshot.cpp | 2 +- src/history/test/HistoryTests.cpp | 4 +- src/history/test/HistoryTestsUtils.cpp | 10 +- src/history/test/HistoryTestsUtils.h | 3 +- .../BucketListIsConsistentWithDatabase.cpp | 4 +- src/invariant/InvariantManagerImpl.cpp | 13 +- ...ucketListIsConsistentWithDatabaseTests.cpp | 40 ++-- src/ledger/LedgerManagerImpl.cpp | 8 +- src/ledger/NetworkConfig.cpp | 8 +- src/ledger/test/LedgerTxnTests.cpp | 4 +- src/main/ApplicationUtils.cpp | 4 +- src/main/Config.cpp | 4 +- src/main/test/ApplicationUtilsTests.cpp | 7 +- src/simulation/CoreTests.cpp | 4 +- src/test/TestUtils.cpp | 6 +- 41 files changed, 491 insertions(+), 374 deletions(-) diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp index 7c739aa6f2..8d20003ec7 100644 --- a/src/bucket/BucketApplicator.cpp +++ b/src/bucket/BucketApplicator.cpp @@ -167,7 +167,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) // The last level can have live entries, but at that point we // know that they are actually init entries because the earliest // state of all entries is init, so we mark them as such here - if (mLevel == BucketList::kNumLevels - 1 && + if (mLevel == BucketListBase::kNumLevels - 1 && e.type() == LIVEENTRY) { ltx->createWithoutLoading(e.liveEntry()); diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketList.cpp index aa3820dd47..2844ec0522 100644 --- a/src/bucket/BucketList.cpp +++ b/src/bucket/BucketList.cpp @@ -80,8 +80,12 @@ BucketLevel::setCurr(std::shared_ptr b) mCurr = b; } +BucketListBase::~BucketListBase() +{ +} + bool -BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) +BucketListBase::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) { if (level != 0) @@ -89,7 +93,7 @@ BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) // Round down the current ledger to when the merge was started, and // re-start the merge via prepare, mimicking the logic in `addBatch` auto mergeStartLedger = - roundDown(ledger, BucketList::levelHalf(level - 1)); + roundDown(ledger, BucketListBase::levelHalf(level - 1)); // Subtle: We're "preparing the next state" of this level's mCurr, which // is *either* mCurr merged with snap, or else just snap (if mCurr is @@ 
-168,7 +172,7 @@ BucketLevel::prepare(Application& app, uint32_t currLedger, // If more than one absorb is pending at the same time, we have a logic // error in our caller (and all hell will break loose). releaseAssert(!mNextCurr.isMerging()); - auto curr = BucketList::shouldMergeWithEmptyCurr(currLedger, mLevel) + auto curr = BucketListBase::shouldMergeWithEmptyCurr(currLedger, mLevel) ? std::make_shared() : mCurr; @@ -222,7 +226,7 @@ BucketListDepth::operator uint32_t() const // levelSize(9) = 1048576=0x100000 // levelSize(10) = 4194304=0x400000 uint32_t -BucketList::levelSize(uint32_t level) +BucketListBase::levelSize(uint32_t level) { releaseAssert(level < kNumLevels); return 1UL << (2 * (level + 1)); @@ -244,13 +248,13 @@ BucketList::levelSize(uint32_t level) // levelHalf(9) = 524288=0x080000 // levelHalf(10) = 2097152=0x200000 uint32_t -BucketList::levelHalf(uint32_t level) +BucketListBase::levelHalf(uint32_t level) { return levelSize(level) >> 1; } uint32_t -BucketList::sizeOfCurr(uint32_t ledger, uint32_t level) +BucketListBase::sizeOfCurr(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -261,7 +265,7 @@ BucketList::sizeOfCurr(uint32_t ledger, uint32_t level) auto const size = levelSize(level); auto const half = levelHalf(level); - if (level != BucketList::kNumLevels - 1 && roundDown(ledger, half) != 0) + if (level != BucketListBase::kNumLevels - 1 && roundDown(ledger, half) != 0) { uint32_t const sizeDelta = 1UL << (2 * level - 1); if (roundDown(ledger, half) == ledger || @@ -298,11 +302,11 @@ BucketList::sizeOfCurr(uint32_t ledger, uint32_t level) } uint32_t -BucketList::sizeOfSnap(uint32_t ledger, uint32_t level) +BucketListBase::sizeOfSnap(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); - if (level == BucketList::kNumLevels - 1) + if (level == BucketListBase::kNumLevels - 1) { return 0; } @@ -324,7 +328,7 @@ BucketList::sizeOfSnap(uint32_t ledger, uint32_t level) } uint32_t -BucketList::oldestLedgerInCurr(uint32_t ledger, uint32_t level) +BucketListBase::oldestLedgerInCurr(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -344,7 +348,7 @@ BucketList::oldestLedgerInCurr(uint32_t ledger, uint32_t level) } uint32_t -BucketList::oldestLedgerInSnap(uint32_t ledger, uint32_t level) +BucketListBase::oldestLedgerInSnap(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -363,7 +367,7 @@ BucketList::oldestLedgerInSnap(uint32_t ledger, uint32_t level) } uint256 -BucketList::getHash() const +BucketListBase::getHash() const { ZoneScoped; SHA256 hsh; @@ -394,7 +398,7 @@ BucketList::getHash() const // clang-format on bool -BucketList::levelShouldSpill(uint32_t ledger, uint32_t level) +BucketListBase::levelShouldSpill(uint32_t ledger, uint32_t level) { if (level == kNumLevels - 1) { @@ -412,7 +416,7 @@ BucketList::levelShouldSpill(uint32_t ledger, uint32_t level) // incoming_spill_frequency(i) = 2^(2i - 1) for i > 0 // incoming_spill_frequency(0) = 1 uint32_t -BucketList::bucketUpdatePeriod(uint32_t level, bool isCurr) +BucketListBase::bucketUpdatePeriod(uint32_t level, bool isCurr) { if (!isCurr) { @@ -430,19 +434,19 @@ BucketList::bucketUpdatePeriod(uint32_t level, bool isCurr) } bool -BucketList::keepDeadEntries(uint32_t level) +BucketListBase::keepDeadEntries(uint32_t level) { - return level < BucketList::kNumLevels - 1; + return level < BucketListBase::kNumLevels - 1; } BucketLevel const& 
-BucketList::getLevel(uint32_t i) const +BucketListBase::getLevel(uint32_t i) const { return mLevels.at(i); } BucketLevel& -BucketList::getLevel(uint32_t i) +BucketListBase::getLevel(uint32_t i) { return mLevels.at(i); } @@ -463,7 +467,7 @@ BucketList::resolveAllFutures() #endif void -BucketList::resolveAnyReadyFutures() +BucketListBase::resolveAnyReadyFutures() { ZoneScoped; for (auto& level : mLevels) @@ -476,7 +480,7 @@ BucketList::resolveAnyReadyFutures() } bool -BucketList::futuresAllResolved(uint32_t maxLevel) const +BucketListBase::futuresAllResolved(uint32_t maxLevel) const { ZoneScoped; releaseAssert(maxLevel < mLevels.size()); @@ -492,7 +496,7 @@ BucketList::futuresAllResolved(uint32_t maxLevel) const } uint32_t -BucketList::getMaxMergeLevel(uint32_t currLedger) const +BucketListBase::getMaxMergeLevel(uint32_t currLedger) const { uint32_t i = 0; for (; i < static_cast(mLevels.size()) - 1; ++i) @@ -506,7 +510,7 @@ BucketList::getMaxMergeLevel(uint32_t currLedger) const } uint64_t -BucketList::getSize() const +BucketListBase::getSize() const { uint64_t sum = 0; for (auto const& lev : mLevels) @@ -526,11 +530,11 @@ BucketList::getSize() const } void -BucketList::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +BucketListBase::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { ZoneScoped; releaseAssert(currLedger > 0); @@ -666,9 +670,9 @@ BucketList::sumBucketEntryCounters() const } void -BucketList::updateStartingEvictionIterator(EvictionIterator& iter, - uint32_t firstScanLevel, - uint32_t ledgerSeq) +LiveBucketList::updateStartingEvictionIterator(EvictionIterator& iter, + uint32_t firstScanLevel, + uint32_t ledgerSeq) { // Check if an upgrade has changed the starting scan level to below the // current iterator level @@ -691,8 +695,8 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter, { // Check if bucket received an incoming spill releaseAssert(iter.bucketListLevel != 0); - if (BucketList::levelShouldSpill(ledgerSeq - 1, - iter.bucketListLevel - 1)) + if (BucketListBase::levelShouldSpill(ledgerSeq - 1, + iter.bucketListLevel - 1)) { // If Bucket changed, reset to start of bucket iter.bucketFileOffset = 0; @@ -700,7 +704,8 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter, } else { - if (BucketList::levelShouldSpill(ledgerSeq - 1, iter.bucketListLevel)) + if (BucketListBase::levelShouldSpill(ledgerSeq - 1, + iter.bucketListLevel)) { // If Bucket changed, reset to start of bucket iter.bucketFileOffset = 0; @@ -709,7 +714,7 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter, } bool -BucketList::updateEvictionIterAndRecordStats( +LiveBucketList::updateEvictionIterAndRecordStats( EvictionIterator& iter, EvictionIterator startIter, uint32_t configFirstScanLevel, uint32_t ledgerSeq, std::shared_ptr stats, EvictionCounters& counters) @@ -752,10 +757,10 @@ BucketList::updateEvictionIterAndRecordStats( } void -BucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, - uint32_t scanSize, - std::shared_ptr b, - EvictionCounters& counters) +LiveBucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, + uint32_t scanSize, + std::shared_ptr b, + EvictionCounters& counters) { // Check to see if we can finish scanning the new bucket before it 
// receives an update @@ -773,10 +778,10 @@ BucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, // eviction cycle. If a node joins the network mid cycle, metrics will be // nullopt and be initialized at the start of the next cycle. void -BucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, - uint32_t ledgerSeq, - EvictionCounters& counters, - std::shared_ptr stats) +LiveBucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, + uint32_t ledgerSeq, + EvictionCounters& counters, + std::shared_ptr stats) { releaseAssert(stats); @@ -822,8 +827,8 @@ BucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, } void -BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, - uint32_t ledger) +BucketListBase::restartMerges(Application& app, uint32_t maxProtocolVersion, + uint32_t ledger) { ZoneScoped; for (uint32_t i = 0; i < static_cast(mLevels.size()); i++) @@ -885,7 +890,7 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, // Round down the current ledger to when the merge was started, and // re-start the merge via prepare, mimicking the logic in `addBatch` auto mergeStartLedger = - roundDown(ledger, BucketList::levelHalf(i - 1)); + roundDown(ledger, BucketListBase::levelHalf(i - 1)); level.prepare( app, mergeStartLedger, version, snap, /* shadows= */ {}, !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING); @@ -893,9 +898,9 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, } } -BucketListDepth BucketList::kNumLevels = 11; +BucketListDepth BucketListBase::kNumLevels = 11; -BucketList::BucketList() +BucketListBase::BucketListBase() { for (uint32_t i = 0; i < kNumLevels; ++i) { diff --git a/src/bucket/BucketList.h b/src/bucket/BucketList.h index 5599132f4c..284eb86b38 100644 --- a/src/bucket/BucketList.h +++ b/src/bucket/BucketList.h @@ -381,7 +381,7 @@ class BucketLevel }; // NOTE: The access specifications for this class have been carefully chosen to -// make it so BucketList::kNumLevels can only be modified from +// make it so BucketListBase::kNumLevels can only be modified from // BucketListDepthModifier -- not even BucketList can modify it. Please // use care when modifying this class. class BucketListDepth @@ -398,11 +398,15 @@ class BucketListDepth friend class testutil::BucketListDepthModifier; }; -class BucketList +class BucketListBase { + protected: std::vector mLevels; public: + // Trivial pure virtual destructor to make this an abstract class + virtual ~BucketListBase() = 0; + // Number of bucket levels in the bucketlist. Every bucketlist in the system // will have this many levels and it effectively gets wired-in to the // protocol. Careful about changing it. @@ -444,7 +448,7 @@ class BucketList // Create a new BucketList with every `kNumLevels` levels, each with // an empty bucket in `curr` and `snap`. - BucketList(); + BucketListBase(); // Return level `i` of the BucketList. BucketLevel const& getLevel(uint32_t i) const; @@ -457,29 +461,6 @@ class BucketList // of the concatenation of the hashes of the `curr` and `snap` buckets. Hash getHash() const; - // Reset Eviction Iterator position if an incoming spill or upgrade has - // invalidated the previous position - static void updateStartingEvictionIterator(EvictionIterator& iter, - uint32_t firstScanLevel, - uint32_t ledgerSeq); - - // Update eviction iter and record stats after scanning a region in one - // bucket. 
Returns true if scan has looped back to startIter, false
-    // otherwise.
-    static bool updateEvictionIterAndRecordStats(
-        EvictionIterator& iter, EvictionIterator startIter,
-        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
-        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
-
-    static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
-                                           uint32_t scanSize,
-                                           std::shared_ptr<Bucket const> b,
-                                           EvictionCounters& counters);
-
-    void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
-                               uint32_t ledgerSeq, EvictionCounters& counters,
-                               std::shared_ptr<EvictionStatistics> stats);
-
     // Restart any merges that might be running on background worker threads,
     // merging buckets between levels. This needs to be called after forcing a
     // BucketList to adopt a new state, either at application restart or when
@@ -532,4 +513,42 @@
         std::vector<LedgerKey> const& deadEntries);
     BucketEntryCounters sumBucketEntryCounters() const;
 };
+
+class LiveBucketList : public BucketListBase
+{
+  public:
+    // Reset Eviction Iterator position if an incoming spill or upgrade has
+    // invalidated the previous position
+    static void updateStartingEvictionIterator(EvictionIterator& iter,
+                                               uint32_t firstScanLevel,
+                                               uint32_t ledgerSeq);
+
+    // Update eviction iter and record stats after scanning a region in one
+    // bucket. Returns true if scan has looped back to startIter, false
+    // otherwise.
+    static bool updateEvictionIterAndRecordStats(
+        EvictionIterator& iter, EvictionIterator startIter,
+        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
+        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
+
+    static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
+                                           uint32_t scanSize,
+                                           std::shared_ptr<Bucket const> b,
+                                           EvictionCounters& counters);
+
+    void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
+                               uint32_t ledgerSeq, EvictionCounters& counters,
+                               std::shared_ptr<EvictionStatistics> stats);
+};
+
+class HotArchiveBucketList : public BucketListBase
+{
+  private:
+    // For now, this class is identical to LiveBucketList. Later PRs will add
+    // additional functionality.
+
+    // Merge result future
+    // This should be the result of merging this entire list into a single file.
+    // The MerkleBucketList is then initialized with this result.
+};
 }
diff --git a/src/bucket/BucketListSnapshot.cpp b/src/bucket/BucketListSnapshot.cpp
index 5d26fd8296..a5e2b15ff0 100644
--- a/src/bucket/BucketListSnapshot.cpp
+++ b/src/bucket/BucketListSnapshot.cpp
@@ -4,6 +4,7 @@
 #include "bucket/BucketListSnapshot.h"
 #include "bucket/BucketInputIterator.h"
+#include "bucket/BucketList.h"
 #include "crypto/SecretKey.h" // IWYU pragma: keep
 #include "ledger/LedgerTxn.h"
@@ -13,13 +14,13 @@
 namespace stellar
 {
-BucketListSnapshot::BucketListSnapshot(BucketList const& bl,
+BucketListSnapshot::BucketListSnapshot(LiveBucketList const& bl,
                                        LedgerHeader header)
     : mHeader(std::move(header))
 {
     releaseAssert(threadIsMain());
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
     {
         auto const& level = bl.getLevel(i);
         mLevels.emplace_back(BucketLevelSnapshot(level));
     }
@@ -151,7 +152,7 @@ SearchableBucketListSnapshot::scanForEviction(
         return iter.isCurrBucket ? 
level.curr : level.snap; }; - BucketList::updateStartingEvictionIterator( + LiveBucketList::updateStartingEvictionIterator( evictionIter, sas.startingEvictionScanLevel, ledgerSeq); EvictionResult result(sas); @@ -161,7 +162,7 @@ SearchableBucketListSnapshot::scanForEviction( for (;;) { auto const& b = getBucketFromIter(evictionIter); - BucketList::checkIfEvictionScanIsStuck( + LiveBucketList::checkIfEvictionScanIsStuck( evictionIter, sas.evictionScanSize, b.getRawBucket(), counters); // If we scan scanSize before hitting bucket EOF, exit early @@ -172,7 +173,7 @@ SearchableBucketListSnapshot::scanForEviction( } // If we return back to the Bucket we started at, exit - if (BucketList::updateEvictionIterAndRecordStats( + if (LiveBucketList::updateEvictionIterAndRecordStats( evictionIter, startIter, sas.startingEvictionScanLevel, ledgerSeq, stats, counters)) { diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshot.h index ea14869f3a..0397883411 100644 --- a/src/bucket/BucketListSnapshot.h +++ b/src/bucket/BucketListSnapshot.h @@ -34,7 +34,7 @@ class BucketListSnapshot : public NonMovable LedgerHeader const mHeader; public: - BucketListSnapshot(BucketList const& bl, LedgerHeader hhe); + BucketListSnapshot(LiveBucketList const& bl, LedgerHeader hhe); // Only allow copies via constructor BucketListSnapshot(BucketListSnapshot const& snapshot); diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index a64bd9181f..6d352ccc34 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -26,7 +26,8 @@ namespace stellar class AbstractLedgerTxn; class Application; class BasicWork; -class BucketList; +class LiveBucketList; +class HotArchiveBucketList; class BucketSnapshotManager; class Config; class SearchableBucketListSnapshot; @@ -192,7 +193,8 @@ class BucketManager : NonMovableOrCopyable virtual std::string const& getTmpDir() = 0; virtual TmpDirManager& getTmpDirManager() = 0; virtual std::string const& getBucketDir() const = 0; - virtual BucketList& getBucketList() = 0; + virtual LiveBucketList& getLiveBucketList() = 0; + virtual HotArchiveBucketList& getHotArchiveBucketList() = 0; virtual BucketSnapshotManager& getBucketSnapshotManager() const = 0; virtual bool renameBucketDirFile(std::filesystem::path const& src, std::filesystem::path const& dst) = 0; @@ -267,10 +269,15 @@ class BucketManager : NonMovableOrCopyable // be given separate init (created) and live (updated) entry vectors. The // `header` value should be taken from the ledger at which this batch is // being added. - virtual void addBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) = 0; + virtual void addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) = 0; + virtual void + addArchivalBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& deadEntries) = 0; // Update the given LedgerHeader's bucketListHash to reflect the current // state of the bucket list. @@ -300,7 +307,7 @@ class BucketManager : NonMovableOrCopyable #ifdef BUILD_TESTS // Install a fake/assumed ledger version and bucket list hash to use in next - // call to addBatch and snapshotLedger. This interface exists only for + // call to addLiveBatch and snapshotLedger. This interface exists only for // testing in a specific type of history replay. 
virtual void setNextCloseVersionAndHashForTesting(uint32_t protocolVers,
                                                   uint256 const& hash) = 0;
diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManagerImpl.cpp
index 3fbe228b64..8007ee357a 100644
--- a/src/bucket/BucketManagerImpl.cpp
+++ b/src/bucket/BucketManagerImpl.cpp
@@ -12,6 +12,7 @@
 #include "bucket/BucketSnapshotManager.h"
 #include "crypto/BLAKE2.h"
 #include "crypto/Hex.h"
+#include "crypto/SHA.h"
 #include "history/HistoryManager.h"
 #include "historywork/VerifyBucketWork.h"
 #include "ledger/LedgerManager.h"
@@ -23,6 +24,7 @@
 #include "util/GlobalChecks.h"
 #include "util/LogSlowExecution.h"
 #include "util/Logging.h"
+#include "util/ProtocolVersion.h"
 #include "util/TmpDir.h"
 #include "util/types.h"
 #include "xdr/Stellar-ledger.h"
@@ -124,13 +126,15 @@ BucketManagerImpl::initialize()
     if (mConfig.MODE_ENABLES_BUCKETLIST)
     {
-        mBucketList = std::make_unique<BucketList>();
+        mLiveBucketList = std::make_unique<LiveBucketList>();
+        mHotArchiveBucketList = std::make_unique<HotArchiveBucketList>();
 
         if (mConfig.isUsingBucketListDB())
         {
+            // TODO: Archival BucketList snapshot
             mSnapshotManager = std::make_unique<BucketSnapshotManager>(
                 mApp,
-                std::make_unique<BucketListSnapshot>(*mBucketList,
+                std::make_unique<BucketListSnapshot>(*mLiveBucketList,
                                                      LedgerHeader()),
                 mConfig.QUERY_SNAPSHOT_LEDGERS);
         }
@@ -177,14 +181,20 @@ EvictionCounters::EvictionCounters(Application& app)
 
 BucketManagerImpl::BucketManagerImpl(Application& app)
     : mApp(app)
-    , mBucketList(nullptr)
+    , mLiveBucketList(nullptr)
+    , mHotArchiveBucketList(nullptr)
     , mSnapshotManager(nullptr)
     , mTmpDirManager(nullptr)
     , mWorkDir(nullptr)
     , mLockedBucketDir(nullptr)
-    , mBucketObjectInsertBatch(app.getMetrics().NewMeter(
-          {"bucket", "batch", "objectsadded"}, "object"))
-    , mBucketAddBatch(app.getMetrics().NewTimer({"bucket", "batch", "addtime"}))
+    , mBucketLiveObjectInsertBatch(app.getMetrics().NewMeter(
+          {"bucket", "batch", "objectsadded"}, "object"))
+    , mBucketArchiveObjectInsertBatch(app.getMetrics().NewMeter(
+          {"bucket", "batch-archive", "objectsadded"}, "object"))
+    , mBucketAddLiveBatch(
+          app.getMetrics().NewTimer({"bucket", "batch", "addtime"}))
+    , mBucketAddArchiveBatch(
+          app.getMetrics().NewTimer({"bucket", "batch-archive", "addtime"}))
     , mBucketSnapMerge(app.getMetrics().NewTimer({"bucket", "snap", "merge"}))
     , mSharedBucketsSize(
           app.getMetrics().NewCounter({"bucket", "memory", "shared"}))
@@ -192,8 +202,10 @@ BucketManagerImpl::BucketManagerImpl(Application& app)
       {"bucketlistDB", "bloom", "misses"}, "bloom"))
     , mBucketListDBBloomLookups(app.getMetrics().NewMeter(
          {"bucketlistDB", "bloom", "lookups"}, "bloom"))
-    , mBucketListSizeCounter(
-          app.getMetrics().NewCounter({"bucketlist", "size", "bytes"}))
+    , mLiveBucketListSizeCounter(
+          app.getMetrics().NewCounter({"bucketlist", "size", "bytes"}))
+    , mArchiveBucketListSizeCounter(
+          app.getMetrics().NewCounter({"bucketlist-archive", "size", "bytes"}))
     , mBucketListEvictionCounters(app)
     , mEvictionStatistics(std::make_shared<EvictionStatistics>())
     // Minimal DB is stored in the buckets dir, so delete it only when
@@ -340,11 +352,18 @@ BucketManagerImpl::deleteTmpDirAndUnlockBucketDir()
     }
 }
 
-BucketList&
-BucketManagerImpl::getBucketList()
+LiveBucketList&
+BucketManagerImpl::getLiveBucketList()
 {
     releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST);
-    return *mBucketList;
+    return *mLiveBucketList;
+}
+
+HotArchiveBucketList&
+BucketManagerImpl::getHotArchiveBucketList()
+{
+    releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST);
+    return *mHotArchiveBucketList;
 }
 
 BucketSnapshotManager&
@@ -709,31 +728,36 @@ BucketManagerImpl::getBucketListReferencedBuckets() const
         return referenced;
     }
 
-    // retain current bucket list
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
-    {
-        auto const& 
level = mBucketList->getLevel(i); - auto rit = referenced.emplace(level.getCurr()->getHash()); - if (rit.second) + auto processBucketList = [&](auto const& bl) { + // retain current bucket list + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { - CLOG_TRACE(Bucket, "{} referenced by bucket list", - binToHex(*rit.first)); - } - rit = referenced.emplace(level.getSnap()->getHash()); - if (rit.second) - { - CLOG_TRACE(Bucket, "{} referenced by bucket list", - binToHex(*rit.first)); - } - for (auto const& h : level.getNext().getHashes()) - { - rit = referenced.emplace(hexToBin256(h)); + auto const& level = bl->getLevel(i); + auto rit = referenced.emplace(level.getCurr()->getHash()); if (rit.second) { - CLOG_TRACE(Bucket, "{} referenced by bucket list", h); + CLOG_TRACE(Bucket, "{} referenced by bucket list", + binToHex(*rit.first)); + } + rit = referenced.emplace(level.getSnap()->getHash()); + if (rit.second) + { + CLOG_TRACE(Bucket, "{} referenced by bucket list", + binToHex(*rit.first)); + } + for (auto const& h : level.getNext().getHashes()) + { + rit = referenced.emplace(hexToBin256(h)); + if (rit.second) + { + CLOG_TRACE(Bucket, "{} referenced by bucket list", h); + } } } - } + }; + + processBucketList(mLiveBucketList); + processBucketList(mHotArchiveBucketList); return referenced; } @@ -913,10 +937,10 @@ BucketManagerImpl::forgetUnreferencedBuckets() } void -BucketManagerImpl::addBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { ZoneScoped; releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); @@ -926,12 +950,13 @@ BucketManagerImpl::addBatch(Application& app, LedgerHeader header, header.ledgerVersion = mFakeTestProtocolVersion; } #endif - auto timer = mBucketAddBatch.TimeScope(); - mBucketObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() + + auto timer = mBucketAddLiveBatch.TimeScope(); + mBucketLiveObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() + deadEntries.size()); - mBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, + mLiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, liveEntries, deadEntries); - mBucketListSizeCounter.set_count(mBucketList->getSize()); + + mLiveBucketListSizeCounter.set_count(mLiveBucketList->getSize()); if (app.getConfig().isUsingBucketListDB()) { @@ -939,6 +964,32 @@ BucketManagerImpl::addBatch(Application& app, LedgerHeader header, } } +// TODO: Fix interface to match addLiveBatch +void +BucketManagerImpl::addArchivalBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& deadEntries) +{ + ZoneScoped; + releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); +#ifdef BUILD_TESTS + if (mUseFakeTestValuesForNextClose) + { + currLedgerProtocol = mFakeTestProtocolVersion; + } +#endif + auto timer = mBucketAddArchiveBatch.TimeScope(); + mBucketArchiveObjectInsertBatch.Mark(initEntries.size() + + deadEntries.size()); + + // Hot archive should never modify an existing entry, so there are never + // live entries + mHotArchiveBucketList->addBatch(app, currLedger, currLedgerProtocol, + initEntries, {}, deadEntries); + mArchiveBucketListSizeCounter.set_count(mHotArchiveBucketList->getSize()); +} + #ifdef BUILD_TESTS void 
BucketManagerImpl::setNextCloseVersionAndHashForTesting(uint32_t protocolVers, @@ -976,7 +1027,18 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) Hash hash; if (mConfig.MODE_ENABLES_BUCKETLIST) { - hash = mBucketList->getHash(); + if (protocolVersionStartsFrom(currentHeader.ledgerVersion, + ProtocolVersion::V_21)) + { + SHA256 hasher; + hasher.add(mLiveBucketList->getHash()); + hasher.add(mHotArchiveBucketList->getHash()); + hash = hasher.finish(); + } + else + { + hash = mLiveBucketList->getHash(); + } } currentHeader.bucketListHash = hash; @@ -1010,7 +1072,7 @@ BucketManagerImpl::scanForEvictionLegacy(AbstractLedgerTxn& ltx, ZoneScoped; releaseAssert(protocolVersionStartsFrom(ltx.getHeader().ledgerVersion, SOROBAN_PROTOCOL_VERSION)); - mBucketList->scanForEvictionLegacy( + mLiveBucketList->scanForEvictionLegacy( mApp, ltx, ledgerSeq, mBucketListEvictionCounters, mEvictionStatistics); } @@ -1178,14 +1240,14 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, ZoneScoped; releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { auto curr = getBucketByHash(hexToBin256(has.currentBuckets.at(i).curr)); auto snap = getBucketByHash(hexToBin256(has.currentBuckets.at(i).snap)); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while assuming " - "saved BucketList state"); + "saved live BucketList state"); } auto const& nextFuture = has.currentBuckets.at(i).next; @@ -1196,8 +1258,9 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, getBucketByHash(hexToBin256(nextFuture.getOutputHash())); if (!nextBucket) { - throw std::runtime_error("Missing future bucket files while " - "assuming saved BucketList state"); + throw std::runtime_error( + "Missing future bucket files while " + "assuming saved live BucketList state"); } } @@ -1213,14 +1276,15 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, } } - mBucketList->getLevel(i).setCurr(curr); - mBucketList->getLevel(i).setSnap(snap); - mBucketList->getLevel(i).setNext(nextFuture); + mLiveBucketList->getLevel(i).setCurr(curr); + mLiveBucketList->getLevel(i).setSnap(snap); + mLiveBucketList->getLevel(i).setNext(nextFuture); } if (restartMerges) { - mBucketList->restartMerges(mApp, maxProtocolVersion, has.currentLedger); + mLiveBucketList->restartMerges(mApp, maxProtocolVersion, + has.currentLedger); } cleanupStaleFiles(); } @@ -1294,7 +1358,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) std::map ledgerMap; std::vector> hashes; - for (uint32_t i = BucketList::kNumLevels; i > 0; --i) + for (uint32_t i = BucketListBase::kNumLevels; i > 0; --i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i - 1); hashes.emplace_back(hexToBin256(hsb.snap), @@ -1471,7 +1535,7 @@ BucketManagerImpl::visitLedgerEntries( UnorderedSet deletedEntries; std::vector> hashes; - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr), diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h index a1d2569ae1..927de0e418 100644 --- a/src/bucket/BucketManagerImpl.h +++ b/src/bucket/BucketManagerImpl.h @@ -30,7 +30,7 @@ class TmpDir; class AbstractLedgerTxn; class Application; class Bucket; -class BucketList; +class LiveBucketList; class BucketSnapshotManager; struct 
BucketEntryCounters; enum class LedgerEntryTypeAndDurability : uint32_t; @@ -42,7 +42,8 @@ class BucketManagerImpl : public BucketManager static std::string const kLockFilename; Application& mApp; - std::unique_ptr mBucketList; + std::unique_ptr mLiveBucketList; + std::unique_ptr mHotArchiveBucketList; std::unique_ptr mSnapshotManager; std::unique_ptr mTmpDirManager; std::unique_ptr mWorkDir; @@ -52,16 +53,19 @@ class BucketManagerImpl : public BucketManager // Lock for managing raw Bucket files or the bucket directory. This lock is // only required for file access, but is not required for logical changes to - // the BucketList (i.e. addBatch). + // a BucketList (i.e. addLiveBatch). mutable std::recursive_mutex mBucketMutex; std::unique_ptr mLockedBucketDir; - medida::Meter& mBucketObjectInsertBatch; - medida::Timer& mBucketAddBatch; + medida::Meter& mBucketLiveObjectInsertBatch; + medida::Meter& mBucketArchiveObjectInsertBatch; + medida::Timer& mBucketAddLiveBatch; + medida::Timer& mBucketAddArchiveBatch; medida::Timer& mBucketSnapMerge; medida::Counter& mSharedBucketsSize; medida::Meter& mBucketListDBBloomMisses; medida::Meter& mBucketListDBBloomLookups; - medida::Counter& mBucketListSizeCounter; + medida::Counter& mLiveBucketListSizeCounter; + medida::Counter& mArchiveBucketListSizeCounter; EvictionCounters mBucketListEvictionCounters; MergeCounters mMergeCounters; std::shared_ptr mEvictionStatistics{}; @@ -120,7 +124,8 @@ class BucketManagerImpl : public BucketManager std::string bucketIndexFilename(Hash const& hash) const override; std::string const& getTmpDir() override; std::string const& getBucketDir() const override; - BucketList& getBucketList() override; + LiveBucketList& getLiveBucketList() override; + HotArchiveBucketList& getHotArchiveBucketList() override; BucketSnapshotManager& getBucketSnapshotManager() const override; medida::Timer& getMergeTimer() override; MergeCounters readMergeCounters() override; @@ -145,10 +150,14 @@ class BucketManagerImpl : public BucketManager #endif void forgetUnreferencedBuckets() override; - void addBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) override; + void addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) override; + void addArchivalBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& deadEntries) override; void snapshotLedger(LedgerHeader& currentHeader) override; void maybeSetIndex(std::shared_ptr b, std::unique_ptr&& index) override; @@ -164,7 +173,7 @@ class BucketManagerImpl : public BucketManager #ifdef BUILD_TESTS // Install a fake/assumed ledger version and bucket list hash to use in next - // call to addBatch and snapshotLedger. This interface exists only for + // call to addLiveBatch and snapshotLedger. This interface exists only for // testing in a specific type of history replay. 
void setNextCloseVersionAndHashForTesting(uint32_t protocolVers, uint256 const& hash) override; diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h index 71b33862b0..9664f97ab8 100644 --- a/src/bucket/BucketSnapshotManager.h +++ b/src/bucket/BucketSnapshotManager.h @@ -23,7 +23,7 @@ namespace stellar { class Application; -class BucketList; +class LiveBucketList; class BucketListSnapshot; // This class serves as the boundary between non-threadsafe singleton classes @@ -63,6 +63,7 @@ class BucketSnapshotManager : NonMovableOrCopyable // is updated void updateCurrentSnapshot( std::unique_ptr&& newSnapshot); + // numHistoricalLedgers is the number of historical snapshots that the // snapshot manager will maintain. If numHistoricalLedgers is 5, snapshots // will be capable of querying state from ledger [lcl, lcl - 5]. diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp index 981708e196..1d4ece2c51 100644 --- a/src/bucket/FutureBucket.cpp +++ b/src/bucket/FutureBucket.cpp @@ -297,7 +297,7 @@ getAvailableTimeForMerge(Application& app, uint32_t level) auto closeTime = app.getConfig().getExpectedLedgerCloseTime(); if (level >= 1) { - return closeTime * BucketList::levelHalf(level - 1); + return closeTime * BucketListBase::levelHalf(level - 1); } return closeTime; } @@ -334,7 +334,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, // deserialized. In this case we want to attach to the existing merge, which // will have left a std::shared_future behind in a shared cache in the // bucket manager. - MergeKey mk{BucketList::keepDeadEntries(level), curr, snap, shadows}; + MergeKey mk{BucketListBase::keepDeadEntries(level), curr, snap, shadows}; auto f = bm.getMergeFuture(mk); if (f.valid()) { @@ -364,7 +364,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, auto res = Bucket::merge(bm, maxProtocolVersion, curr, snap, shadows, - BucketList::keepDeadEntries(level), + BucketListBase::keepDeadEntries(level), countMergeEvents, ctx, doFsync); if (res) diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp index 0a045e7762..12632a38a4 100644 --- a/src/bucket/test/BucketIndexTests.cpp +++ b/src/bucket/test/BucketIndexTests.cpp @@ -69,7 +69,7 @@ class BucketIndexTest {CONFIG_SETTING}, 10); f(entries); closeLedger(*mApp); - } while (!BucketList::levelShouldSpill(ledger, mLevelsToBuild - 1)); + } while (!BucketListBase::levelShouldSpill(ledger, mLevelsToBuild - 1)); } public: diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index edf229d439..63fa0b3323 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -67,7 +67,7 @@ highBoundInclusive(uint32_t level, uint32_t ledger) } void -checkBucketSizeAndBounds(BucketList& bl, uint32_t ledgerSeq, uint32_t level, +checkBucketSizeAndBounds(LiveBucketList& bl, uint32_t ledgerSeq, uint32_t level, bool isCurr) { std::shared_ptr bucket; @@ -76,14 +76,14 @@ checkBucketSizeAndBounds(BucketList& bl, uint32_t ledgerSeq, uint32_t level, if (isCurr) { bucket = bl.getLevel(level).getCurr(); - sizeOfBucket = BucketList::sizeOfCurr(ledgerSeq, level); - oldestLedger = BucketList::oldestLedgerInCurr(ledgerSeq, level); + sizeOfBucket = BucketListBase::sizeOfCurr(ledgerSeq, level); + oldestLedger = BucketListBase::oldestLedgerInCurr(ledgerSeq, level); } else { bucket = bl.getLevel(level).getSnap(); - sizeOfBucket = BucketList::sizeOfSnap(ledgerSeq, level); - oldestLedger = 
BucketList::oldestLedgerInSnap(ledgerSeq, level);
+        sizeOfBucket = BucketListBase::sizeOfSnap(ledgerSeq, level);
+        oldestLedger = BucketListBase::oldestLedgerInSnap(ledgerSeq, level);
     }
 
     std::set<uint32_t> ledgers;
@@ -137,7 +137,7 @@ TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]")
 {
     for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
         Application::pointer app = createTestApplication(clock, cfg);
-        BucketList bl;
+        LiveBucketList bl;
         CLOG_DEBUG(Bucket, "Adding batches to bucket list");
         for (uint32_t i = 1;
              !app->getClock().getIOContext().stopped() && i < 130; ++i)
@@ -156,13 +156,13 @@ TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]")
             if (i % 10 == 0)
                 CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i,
                            binToHex(bl.getHash()));
-            for (uint32_t j = 0; j < BucketList::kNumLevels; ++j)
+            for (uint32_t j = 0; j < BucketListBase::kNumLevels; ++j)
             {
                 auto const& lev = bl.getLevel(j);
                 auto currSz = countEntries(lev.getCurr());
                 auto snapSz = countEntries(lev.getSnap());
-                CHECK(currSz <= BucketList::levelHalf(j) * 100);
-                CHECK(snapSz <= BucketList::levelHalf(j) * 100);
+                CHECK(currSz <= BucketListBase::levelHalf(j) * 100);
+                CHECK(snapSz <= BucketListBase::levelHalf(j) * 100);
             }
         }
     });
@@ -179,16 +179,16 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
 {
     std::map<uint32_t, uint32_t> currCalculatedUpdatePeriods;
     std::map<uint32_t, uint32_t> snapCalculatedUpdatePeriods;
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
     {
         currCalculatedUpdatePeriods.emplace(
-            i, BucketList::bucketUpdatePeriod(i, /*isCurr=*/true));
+            i, BucketListBase::bucketUpdatePeriod(i, /*isCurr=*/true));
 
         // Last level has no snap
-        if (i != BucketList::kNumLevels - 1)
+        if (i != BucketListBase::kNumLevels - 1)
         {
             snapCalculatedUpdatePeriods.emplace(
-                i, BucketList::bucketUpdatePeriod(i, /*isCurr=*/false));
+                i, BucketListBase::bucketUpdatePeriod(i, /*isCurr=*/false));
         }
     }
@@ -197,7 +197,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
          !snapCalculatedUpdatePeriods.empty(); ++ledgerSeq)
     {
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level)
         {
             // Check if curr bucket is updated
             auto currIter = currCalculatedUpdatePeriods.find(level);
@@ -213,7 +213,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
                 {
                     // For all other levels, an update occurs when the level
                     // above spills
-                    if (BucketList::levelShouldSpill(ledgerSeq, level - 1))
+                    if (BucketListBase::levelShouldSpill(ledgerSeq, level - 1))
                     {
                         REQUIRE(currIter->second == ledgerSeq);
                         currCalculatedUpdatePeriods.erase(currIter);
@@ -225,7 +225,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
             auto snapIter = snapCalculatedUpdatePeriods.find(level);
             if (snapIter != snapCalculatedUpdatePeriods.end())
             {
-                if (BucketList::levelShouldSpill(ledgerSeq, level))
+                if (BucketListBase::levelShouldSpill(ledgerSeq, level))
                 {
                     // Check that snap bucket calculation is correct
                     REQUIRE(snapIter->second == ledgerSeq);
@@ -243,7 +243,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
     Config const& cfg = getTestConfig();
     for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
         Application::pointer app = createTestApplication(clock, cfg);
-        BucketList bl;
+        LiveBucketList bl;
 
         // Alice and Bob change in every iteration. 
auto alice = LedgerTestUtils::generateValidAccountEntry(5); @@ -304,7 +304,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", // Alice and Bob should never occur in level 2 .. N because they // were shadowed in level 0 continuously. - for (uint32_t j = 2; j < BucketList::kNumLevels; ++j) + for (uint32_t j = 2; j < BucketListBase::kNumLevels; ++j) { auto const& lev = bl.getLevel(j); auto curr = lev.getCurr(); @@ -345,11 +345,11 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; BucketManager& bm = app->getBucketManager(); auto& mergeTimer = bm.getMergeTimer(); CLOG_INFO(Bucket, "Establishing random bucketlist"); - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { auto& level = bl.getLevel(i); level.setCurr(Bucket::fresh( @@ -370,10 +370,10 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", /*doFsync=*/true)); } - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { - std::vector ledgers = {BucketList::levelHalf(i), - BucketList::levelSize(i)}; + std::vector ledgers = {BucketListBase::levelHalf(i), + BucketListBase::levelSize(i)}; for (auto j : ledgers) { auto n = mergeTimer.count(); @@ -388,7 +388,7 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); app->getClock().crank(false); - for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k) + for (uint32_t k = 0u; k < BucketListBase::kNumLevels; ++k) { auto& next = bl.getLevel(k).getNext(); if (next.isLive()) @@ -401,13 +401,13 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", "Added batch at ledger {}, merges provoked: {}", j, n); REQUIRE(n > 0); - REQUIRE(n < 2 * BucketList::kNumLevels); + REQUIRE(n < 2 * BucketListBase::kNumLevels); } } - EntryCounts e0(bl.getLevel(BucketList::kNumLevels - 3).getCurr()); - EntryCounts e1(bl.getLevel(BucketList::kNumLevels - 2).getCurr()); - EntryCounts e2(bl.getLevel(BucketList::kNumLevels - 1).getCurr()); + EntryCounts e0(bl.getLevel(BucketListBase::kNumLevels - 3).getCurr()); + EntryCounts e1(bl.getLevel(BucketListBase::kNumLevels - 2).getCurr()); + EntryCounts e2(bl.getLevel(BucketListBase::kNumLevels - 1).getCurr()); REQUIRE(e0.nDead != 0); REQUIRE(e1.nDead != 0); REQUIRE(e2.nDead == 0); @@ -422,7 +422,7 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; autocheck::generator flip; std::deque entriesToModify; for (uint32_t i = 1; i < 512; ++i) @@ -464,7 +464,7 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", addBatchAndUpdateSnapshot(bl, *app, lh, initEntries, liveEntries, deadEntries); app->getClock().crank(false); - for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k) + for (uint32_t k = 0u; k < BucketListBase::kNumLevels; ++k) { auto& next = bl.getLevel(k).getNext(); if (next.isLive()) @@ -473,7 +473,7 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", } } } - for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k) + for (uint32_t k = 0u; k < BucketListBase::kNumLevels; ++k) { auto const& lev = 
bl.getLevel(k); auto currSz = countEntries(lev.getCurr()); @@ -501,7 +501,7 @@ TEST_CASE_VERSIONS("single entry bubbling up", { for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; std::vector emptySet; std::vector emptySetEntry; @@ -529,7 +529,7 @@ TEST_CASE_VERSIONS("single entry bubbling up", CLOG_DEBUG(Bucket, "------- ledger {}", i); - for (uint32_t j = 0; j <= BucketList::kNumLevels - 1; ++j) + for (uint32_t j = 0; j <= BucketListBase::kNumLevels - 1; ++j) { uint32_t lb = lowBoundExclusive(j, i); uint32_t hb = highBoundInclusive(j, i); @@ -567,27 +567,27 @@ TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", stellar::uniform_int_distribution dist; for (uint32_t i = 0; i < 1000; ++i) { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { uint32_t ledger = dist(gRandomEngine); - if (BucketList::sizeOfSnap(ledger, level) > 0) + if (BucketListBase::sizeOfSnap(ledger, level) > 0) { uint32_t oldestInCurr = - BucketList::oldestLedgerInSnap(ledger, level) + - BucketList::sizeOfSnap(ledger, level); + BucketListBase::oldestLedgerInSnap(ledger, level) + + BucketListBase::sizeOfSnap(ledger, level); REQUIRE(oldestInCurr == - BucketList::oldestLedgerInCurr(ledger, level)); + BucketListBase::oldestLedgerInCurr(ledger, level)); } - if (BucketList::sizeOfCurr(ledger, level) > 0) + if (BucketListBase::sizeOfCurr(ledger, level) > 0) { uint32_t newestInCurr = - BucketList::oldestLedgerInCurr(ledger, level) + - BucketList::sizeOfCurr(ledger, level) - 1; - REQUIRE(newestInCurr == (level == 0 - ? ledger - : BucketList::oldestLedgerInSnap( - ledger, level - 1) - - 1)); + BucketListBase::oldestLedgerInCurr(ledger, level) + + BucketListBase::sizeOfCurr(ledger, level) - 1; + REQUIRE(newestInCurr == + (level == 0 ? ledger + : BucketListBase::oldestLedgerInSnap( + ledger, level - 1) - + 1)); } } } @@ -597,9 +597,9 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") { // Deliberately exclude deepest level since snap on the deepest level // is always empty. 
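    // (Per sizeOfSnap above, snap size is hard-wired to 0 when
    // level == kNumLevels - 1: the bottom level never spills, so it never
    // produces a snap bucket.)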
- for (uint32_t level = 0; level < BucketList::kNumLevels - 1; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels - 1; ++level) { - uint32_t const half = BucketList::levelHalf(level); + uint32_t const half = BucketListBase::levelHalf(level); // Use binary search (assuming that it does reach steady state) // to find the ledger where the snap at this level first reaches @@ -607,7 +607,7 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits::max() / 2, [level, half](uint32_t ledger) { - return (BucketList::sizeOfSnap(ledger, level) == half); + return (BucketListBase::sizeOfSnap(ledger, level) == half); }); // Generate random ledgers above and below the split to test that @@ -618,21 +618,21 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(BucketList::sizeOfSnap(low, level) < half); - REQUIRE(BucketList::sizeOfSnap(high, level) == half); + REQUIRE(BucketListBase::sizeOfSnap(low, level) < half); + REQUIRE(BucketListBase::sizeOfSnap(high, level) == half); } } } TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") { - uint32_t const deepest = BucketList::kNumLevels - 1; + uint32_t const deepest = BucketListBase::kNumLevels - 1; // Use binary search to find the first ledger where the deepest curr // first is non-empty. uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits::max() / 2, [deepest](uint32_t ledger) { - return (BucketList::sizeOfCurr(ledger, deepest) > 0); + return (BucketListBase::sizeOfCurr(ledger, deepest) > 0); }); stellar::uniform_int_distribution distLow(1, boundary - 1); stellar::uniform_int_distribution distHigh(boundary); @@ -640,29 +640,29 @@ TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(BucketList::sizeOfCurr(low, deepest) == 0); - REQUIRE(BucketList::oldestLedgerInCurr(low, deepest) == + REQUIRE(BucketListBase::sizeOfCurr(low, deepest) == 0); + REQUIRE(BucketListBase::oldestLedgerInCurr(low, deepest) == std::numeric_limits::max()); - REQUIRE(BucketList::sizeOfCurr(high, deepest) > 0); - REQUIRE(BucketList::oldestLedgerInCurr(high, deepest) == 1); + REQUIRE(BucketListBase::sizeOfCurr(high, deepest) > 0); + REQUIRE(BucketListBase::oldestLedgerInCurr(high, deepest) == 1); - REQUIRE(BucketList::sizeOfSnap(low, deepest) == 0); - REQUIRE(BucketList::oldestLedgerInSnap(low, deepest) == + REQUIRE(BucketListBase::sizeOfSnap(low, deepest) == 0); + REQUIRE(BucketListBase::oldestLedgerInSnap(low, deepest) == std::numeric_limits::max()); - REQUIRE(BucketList::sizeOfSnap(high, deepest) == 0); - REQUIRE(BucketList::oldestLedgerInSnap(high, deepest) == + REQUIRE(BucketListBase::sizeOfSnap(high, deepest) == 0); + REQUIRE(BucketListBase::oldestLedgerInSnap(high, deepest) == std::numeric_limits::max()); } } TEST_CASE("BucketList sizes at ledger 1", "[bucket][bucketlist][count]") { - REQUIRE(BucketList::sizeOfCurr(1, 0) == 1); - REQUIRE(BucketList::sizeOfSnap(1, 0) == 0); - for (uint32_t level = 1; level < BucketList::kNumLevels; ++level) + REQUIRE(BucketListBase::sizeOfCurr(1, 0) == 1); + REQUIRE(BucketListBase::sizeOfSnap(1, 0) == 0); + for (uint32_t level = 1; level < BucketListBase::kNumLevels; ++level) { - REQUIRE(BucketList::sizeOfCurr(1, level) == 0); - REQUIRE(BucketList::sizeOfSnap(1, 
level) == 0); + REQUIRE(BucketListBase::sizeOfCurr(1, level) == 0); + REQUIRE(BucketListBase::sizeOfSnap(1, level) == 0); } } @@ -671,7 +671,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]") VirtualClock clock; Config cfg(getTestConfig()); Application::pointer app = createTestApplication(clock, cfg); - BucketList& bl = app->getBucketManager().getBucketList(); + LiveBucketList& bl = app->getBucketManager().getLiveBucketList(); std::vector emptySet; auto ledgers = LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( @@ -688,7 +688,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]") addBatchAndUpdateSnapshot(bl, *app, lh, {}, {ledgers[ledgerSeq - 1]}, emptySet); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { checkBucketSizeAndBounds(bl, ledgerSeq, level, true); checkBucketSizeAndBounds(bl, ledgerSeq, level, false); @@ -766,7 +766,7 @@ TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]") { correctWindow.pop_front(); correctWindow.push_back( - app->getBucketManager().getBucketList().getSize()); + app->getBucketManager().getLiveBucketList().getSize()); } lm.setNextLedgerEntryBatchForBucketTesting( @@ -801,7 +801,7 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") for_versions_from(20, *app, [&] { LedgerManagerForBucketTests& lm = app->getLedgerManager(); auto& bm = app->getBucketManager(); - auto& bl = bm.getBucketList(); + auto& bl = bm.getLiveBucketList(); auto& networkCfg = [&]() -> SorobanNetworkConfig& { LedgerTxn ltx(app->getLedgerTxnRoot()); @@ -1196,7 +1196,8 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") // Advance until one ledger before bucket is updated auto ledgersUntilUpdate = - BucketList::bucketUpdatePeriod(levelToTest, isCurr) - + BucketListBase::bucketUpdatePeriod(levelToTest, + isCurr) - 1; // updateNetworkCfg closes a ledger that we need to // count for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i) @@ -1342,30 +1343,30 @@ formatLedgerList(std::vector const& ledgers) TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { CLOG_INFO(Bucket, "levelSize({}) = {} (formally)", level, - formatU32(BucketList::levelSize(level))); + formatU32(BucketListBase::levelSize(level))); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { CLOG_INFO(Bucket, "levelHalf({}) = {} (formally)", level, - formatU32(BucketList::levelHalf(level))); + formatU32(BucketListBase::levelHalf(level))); } for (uint32_t probe : {0x100, 0x10000, 0x1000000}) { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { - auto sz = formatU32(BucketList::sizeOfCurr(probe, level)); + auto sz = formatU32(BucketListBase::sizeOfCurr(probe, level)); CLOG_INFO(Bucket, "sizeOfCurr({:#x}, {}) = {} (precisely)", probe, level, sz); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { - auto sz = formatU32(BucketList::sizeOfSnap(probe, level)); + auto sz = formatU32(BucketListBase::sizeOfSnap(probe, level)); CLOG_INFO(Bucket, "sizeOfSnap({:#x}, {}) = {} (precisely)", probe, level, sz); } @@ -1374,17 
+1375,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") std::vector> spillEvents; std::vector> nonMergeCommitEvents; std::vector> mergeCommitEvents; - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { spillEvents.push_back({}); nonMergeCommitEvents.push_back({}); mergeCommitEvents.push_back({}); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { for (uint32_t ledger = 0; ledger < 0x1000000; ++ledger) { - if (BucketList::levelShouldSpill(ledger, level)) + if (BucketListBase::levelShouldSpill(ledger, level)) { spillEvents[level].push_back(ledger); if (spillEvents[level].size() > 5) @@ -1392,11 +1393,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") break; } } - if (level != 0 && BucketList::levelShouldSpill(ledger, level - 1)) + if (level != 0 && + BucketListBase::levelShouldSpill(ledger, level - 1)) { uint32_t nextChangeLedger = - ledger + BucketList::levelHalf(level - 1); - if (BucketList::levelShouldSpill(nextChangeLedger, level)) + ledger + BucketListBase::levelHalf(level - 1); + if (BucketListBase::levelShouldSpill(nextChangeLedger, level)) { nonMergeCommitEvents[level].push_back(ledger); } @@ -1407,17 +1409,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") } } } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { auto ls = formatLedgerList(spillEvents[level]); CLOG_INFO(Bucket, "levelShouldSpill({:#x}) = true @ {}", level, ls); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { auto ls = formatLedgerList(mergeCommitEvents[level]); CLOG_INFO(Bucket, "mergeCommit({:#x}) @ {}", level, ls); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { auto ls = formatLedgerList(nonMergeCommitEvents[level]); CLOG_INFO(Bucket, "nonMergeCommit({:#x}) @ {}", level, ls); @@ -1426,12 +1428,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") // Print out the full bucketlist at an arbitrarily-chosen probe ledger. 
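    // (For each level this derives the inclusive ledger ranges covered by
    // curr and snap from oldestLedgerInCurr/oldestLedgerInSnap together
    // with sizeOfCurr/sizeOfSnap.)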
uint32_t probe = 0x11f9ab; CLOG_INFO(Bucket, "BucketList state at {:#x}", probe); - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) { - uint32_t currOld = BucketList::oldestLedgerInCurr(probe, level); - uint32_t snapOld = BucketList::oldestLedgerInSnap(probe, level); - uint32_t currSz = BucketList::sizeOfCurr(probe, level); - uint32_t snapSz = BucketList::sizeOfSnap(probe, level); + uint32_t currOld = BucketListBase::oldestLedgerInCurr(probe, level); + uint32_t snapOld = BucketListBase::oldestLedgerInSnap(probe, level); + uint32_t currSz = BucketListBase::sizeOfCurr(probe, level); + uint32_t snapSz = BucketListBase::sizeOfSnap(probe, level); uint32_t currNew = currOld + currSz - 1; uint32_t snapNew = snapOld + snapSz - 1; CLOG_INFO( diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index fc256c4cc2..0ebd1ecb37 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -40,11 +40,11 @@ namespace BucketManagerTests { static void -clearFutures(Application::pointer app, BucketList& bl) +clearFutures(Application::pointer app, LiveBucketList& bl) { // First go through the BL and mop up all the FutureBuckets. - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { bl.getLevel(i).getNext().clear(); } @@ -265,7 +265,7 @@ TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]") dropBucket(b1); // Try adding a bucket to the BucketManager's bucketlist - auto& bl = app->getBucketManager().getBucketList(); + auto& bl = app->getBucketManager().getLiveBucketList(); bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); clearFutures(app, bl); b1 = bl.getLevel(0).getCurr(); @@ -310,7 +310,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]") VirtualClock clock; auto app = createTestApplication(clock, cfg); BucketManager& bm = app->getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); LedgerManagerForBucketTests& lm = app->getLedgerManager(); uint32_t ledger = 0; @@ -324,7 +324,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]") {CONFIG_SETTING}, 10), {}); closeLedger(*app); - } while (!BucketList::levelShouldSpill(ledger, level - 1)); + } while (!BucketListBase::levelShouldSpill(ledger, level - 1)); auto someBucket = bl.getLevel(1).getCurr(); someBucketFileName = someBucket->getFilename().string(); } @@ -353,7 +353,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", Application::pointer app = createTestApplication(clock, cfg); BucketManager& bm = app->getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); auto vers = getAppLedgerVersion(app); // Add some entries to get to a nontrivial merge-state. @@ -371,7 +371,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", {CONFIG_SETTING}, 10), {}); bm.forgetUnreferencedBuckets(); - } while (!BucketList::levelShouldSpill(ledger, level - 1)); + } while (!BucketListBase::levelShouldSpill(ledger, level - 1)); // Check that the merge on level isn't committed (we're in // ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING mode that does not resolve @@ -396,7 +396,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", // Reattach to _finished_ merge future on level. 
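        // (keepDeadEntries(level) must match the merge's original
        // configuration: per BucketListBase::keepDeadEntries, tombstones are
        // retained on every level except the bottom one, and MergeKey
        // includes this flag, so makeLive has to pass the same value to
        // reattach to the cached merge.)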
has2.currentBuckets[level].next.makeLive(
-            *app, vers, BucketList::keepDeadEntries(level));
+            *app, vers, BucketListBase::keepDeadEntries(level));
         REQUIRE(has2.currentBuckets[level].next.isMerging());
 
         // Resolve reattached future.
@@ -420,7 +420,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge",
 
         Application::pointer app = createTestApplication(clock, cfg);
         BucketManager& bm = app->getBucketManager();
-        BucketList& bl = bm.getBucketList();
+        LiveBucketList& bl = bm.getLiveBucketList();
         auto vers = getAppLedgerVersion(app);
 
         // This test is a race that will (if all goes well) eventually be won:
@@ -473,12 +473,13 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge",
                 // win quite shortly).
                 HistoryArchiveState has2;
                 has2.fromString(serialHas);
-                for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+                for (uint32_t level = 0; level < BucketListBase::kNumLevels;
+                     ++level)
                 {
                     if (has2.currentBuckets[level].next.hasHashes())
                     {
                         has2.currentBuckets[level].next.makeLive(
-                            *app, vers, BucketList::keepDeadEntries(level));
+                            *app, vers, BucketListBase::keepDeadEntries(level));
                     }
                 }
             }
@@ -509,7 +510,7 @@ TEST_CASE("bucketmanager do not leak empty-merge futures",
 
     auto app = createTestApplication(clock, cfg);
     BucketManager& bm = app->getBucketManager();
-    BucketList& bl = bm.getBucketList();
+    LiveBucketList& bl = bm.getLiveBucketList();
     LedgerManagerForBucketTests& lm = app->getLedgerManager();
 
     // We create 8 live ledger entries spread across 8 ledgers then add a ledger
@@ -578,7 +579,7 @@ TEST_CASE_VERSIONS(
         auto vers = getAppLedgerVersion(app);
         auto& hm = app->getHistoryManager();
         auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
         hm.setPublicationEnabled(false);
         app->getHistoryArchiveManager().initializeHistoryArchive(
             tcfg.getArchiveDirName());
@@ -681,9 +682,9 @@ TEST_CASE_VERSIONS(
 class StopAndRestartBucketMergesTest
 {
     static void
-    resolveAllMerges(BucketList& bl)
+    resolveAllMerges(LiveBucketList& bl)
     {
-        for (uint32 i = 0; i < BucketList::kNumLevels; ++i)
+        for (uint32 i = 0; i < BucketListBase::kNumLevels; ++i)
         {
             auto& level = bl.getLevel(i);
             auto& next = level.getNext();
@@ -932,7 +933,7 @@ class StopAndRestartBucketMergesTest
     {
         LedgerManager& lm = app.getLedgerManager();
         BucketManager& bm = app.getBucketManager();
-        BucketList& bl = bm.getBucketList();
+        LiveBucketList& bl = bm.getLiveBucketList();
 
         // Complete those merges we're about to inspect.
         resolveAllMerges(bl);
@@ -958,8 +959,8 @@ class StopAndRestartBucketMergesTest
     collectLedgerEntries(Application& app,
                          std::map<LedgerKey, LedgerEntry>& entries)
     {
-        auto bl = app.getBucketManager().getBucketList();
-        for (uint32_t i = BucketList::kNumLevels; i > 0; --i)
+        auto bl = app.getBucketManager().getLiveBucketList();
+        for (uint32_t i = BucketListBase::kNumLevels; i > 0; --i)
         {
             BucketLevel const& level = bl.getLevel(i - 1);
             for (auto bucket : {level.getSnap(), level.getCurr()})
@@ -1007,10 +1008,11 @@ class StopAndRestartBucketMergesTest
     void
     calculateDesignatedLedgers()
     {
-        uint32_t spillFreq = BucketList::levelHalf(mDesignatedLevel);
-        uint32_t prepFreq = (mDesignatedLevel == 0
-                                 ? 1
-                                 : BucketList::levelHalf(mDesignatedLevel - 1));
+        uint32_t spillFreq = BucketListBase::levelHalf(mDesignatedLevel);
+        uint32_t prepFreq =
+            (mDesignatedLevel == 0
+                 ? 1
+                 : BucketListBase::levelHalf(mDesignatedLevel - 1));
 
         uint32_t const SPILLCOUNT = 5;
         uint32_t const PREPCOUNT = 5;
@@ -1214,7 +1216,7 @@ class StopAndRestartBucketMergesTest
             lm.setNextLedgerEntryBatchForBucketTesting(
                 mInitEntryBatches[i - 2], mLiveEntryBatches[i - 2],
                 mDeadEntryBatches[i - 2]);
-            resolveAllMerges(app->getBucketManager().getBucketList());
+            resolveAllMerges(app->getBucketManager().getLiveBucketList());
 
             auto countersBeforeClose =
                 app->getBucketManager().readMergeCounters();
@@ -1242,12 +1244,13 @@ class StopAndRestartBucketMergesTest
             auto j = mControlSurveys.find(i);
             if (j != mControlSurveys.end())
             {
-                if (BucketList::levelShouldSpill(i, mDesignatedLevel - 1))
+                if (BucketListBase::levelShouldSpill(i, mDesignatedLevel - 1))
                 {
                     // Confirm that there's a merge-in-progress at this level
                     // (closing ledger i should have provoked a spill from
                     // mDesignatedLevel-1 to mDesignatedLevel)
-                    BucketList& bl = app->getBucketManager().getBucketList();
+                    LiveBucketList& bl =
+                        app->getBucketManager().getLiveBucketList();
                     BucketLevel& blv = bl.getLevel(mDesignatedLevel);
                     REQUIRE(blv.getNext().isMerging());
                 }
@@ -1276,10 +1279,11 @@ class StopAndRestartBucketMergesTest
             clock = std::make_unique<VirtualClock>();
             app = createTestApplication(*clock, cfg, false);
 
-            if (BucketList::levelShouldSpill(i, mDesignatedLevel - 1))
+            if (BucketListBase::levelShouldSpill(i, mDesignatedLevel - 1))
             {
                 // Confirm that the merge-in-progress was restarted.
-                BucketList& bl = app->getBucketManager().getBucketList();
+                LiveBucketList& bl =
+                    app->getBucketManager().getLiveBucketList();
                 BucketLevel& blv = bl.getLevel(mDesignatedLevel);
                 REQUIRE(blv.getNext().isMerging());
             }
@@ -1417,7 +1421,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart",
         VirtualClock clock;
         Application::pointer app = createTestApplication(clock, cfg0);
         sk = std::make_optional<SecretKey>(cfg0.NODE_SEED);
-        BucketList& bl = app->getBucketManager().getBucketList();
+        LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
 
         uint32_t i = 2;
         while (i < pause)
@@ -1452,7 +1456,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart",
     {
         VirtualClock clock;
         Application::pointer app = createTestApplication(clock, cfg1);
-        BucketList& bl = app->getBucketManager().getBucketList();
+        LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
 
         uint32_t i = 2;
         while (i < pause)
@@ -1479,7 +1483,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart",
         VirtualClock clock;
         Application::pointer app = Application::create(clock, cfg1, false);
         app->start();
-        BucketList& bl = app->getBucketManager().getBucketList();
+        LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
 
         // Confirm that we re-acquired the close-ledger state.
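        // (Editorial note, hedged: "re-acquired" means the restarted node
        // reloaded the HistoryArchiveState checkpoint from its database and
        // re-adopted the same buckets by hash, so the REQUIRE below can
        // compare against the state recorded before shutdown.)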
        REQUIRE(
diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp
index 0200e49442..c853076d74 100644
--- a/src/bucket/test/BucketTestUtils.cpp
+++ b/src/bucket/test/BucketTestUtils.cpp
@@ -30,7 +30,8 @@ getAppLedgerVersion(Application::pointer app)
 }
 
 void
-addBatchAndUpdateSnapshot(BucketList& bl, Application& app, LedgerHeader header,
+addBatchAndUpdateSnapshot(LiveBucketList& bl, Application& app,
+                          LedgerHeader header,
                           std::vector<LedgerEntry> const& initEntries,
                           std::vector<LedgerEntry> const& liveEntries,
                           std::vector<LedgerKey> const& deadEntries)
@@ -74,7 +75,7 @@ closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
     uint32_t ledgerNum = lcl.header.ledgerSeq + 1;
     CLOG_INFO(Bucket, "Artificially closing ledger {} with lcl={}, buckets={}",
               ledgerNum, hexAbbrev(lcl.hash),
-              hexAbbrev(app.getBucketManager().getBucketList().getHash()));
+              hexAbbrev(app.getBucketManager().getLiveBucketList().getHash()));
     app.getHerder().externalizeValue(TxSetXDRFrame::makeEmpty(lcl), ledgerNum,
                                      lcl.header.scpValue.closeTime, upgrades,
                                      skToSignValue);
@@ -201,8 +202,8 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
             }
 
             // Use the testing values.
-            mApp.getBucketManager().addBatch(mApp, lh, mTestInitEntries,
-                                             mTestLiveEntries, mTestDeadEntries);
+            mApp.getBucketManager().addLiveBatch(
+                mApp, lh, mTestInitEntries, mTestLiveEntries, mTestDeadEntries);
 
             mUseTestEntries = false;
         }
         else
diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h
index eddd4ae95b..0b57cc6080 100644
--- a/src/bucket/test/BucketTestUtils.h
+++ b/src/bucket/test/BucketTestUtils.h
@@ -11,7 +11,7 @@ namespace stellar
 namespace BucketTestUtils
 {
 
-void addBatchAndUpdateSnapshot(BucketList& bl, Application& app,
+void addBatchAndUpdateSnapshot(LiveBucketList& bl, Application& app,
                                LedgerHeader header,
                                std::vector<LedgerEntry> const& initEntries,
                                std::vector<LedgerEntry> const& liveEntries,
diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp
index 8a871b5f31..3f890d2ac2 100644
--- a/src/catchup/ApplyBucketsWork.cpp
+++ b/src/catchup/ApplyBucketsWork.cpp
@@ -54,8 +54,9 @@ class TempLedgerVersionSetter : NonMovableOrCopyable
 uint32_t
 ApplyBucketsWork::startingLevel()
 {
-    return mApp.getConfig().isUsingBucketListDB() ? 0
-                                                  : BucketList::kNumLevels - 1;
+    return mApp.getConfig().isUsingBucketListDB()
+               ? 0
+               : BucketListBase::kNumLevels - 1;
 }
 
 ApplyBucketsWork::ApplyBucketsWork(
@@ -150,12 +151,12 @@ ApplyBucketsWork::doReset()
             }
             mBucketsToApply.emplace_back(bucket);
         };
-        // If using bucketlist DB, we iterate through the BucketList in order
-        // (i.e. L0 curr, L0 snap, L1 curr, etc) as we are just applying offers
-        // (and can keep track of all seen keys). Otherwise, we iterate in
-        // reverse order (i.e. L N snap, L N curr, L N-1 snap, etc.) as we are
-        // applying all entry types and cannot keep track of all seen keys as it
-        // would be too large.
+        // If using bucketlist DB, we iterate through the live BucketList in
+        // order (i.e. L0 curr, L0 snap, L1 curr, etc) as we are just applying
+        // offers (and can keep track of all seen keys). Otherwise, we iterate
+        // in reverse order (i.e. L N snap, L N curr, L N-1 snap, etc.) as we
+        // are applying all entry types and cannot keep track of all seen keys
+        // as it would be too large.
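        // Editorial sketch of the two traversal orders described above
        // (visit() is a hypothetical placeholder, not a real API):
        //
        //   // In-order, offers only: L0 curr, L0 snap, L1 curr, ...
        //   for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
        //   {
        //       visit(bl.getLevel(i).getCurr());
        //       visit(bl.getLevel(i).getSnap());
        //   }
        //
        //   // Reverse, all entry types: L(N-1) snap, L(N-1) curr, ...
        //   for (uint32_t i = BucketListBase::kNumLevels; i != 0; --i)
        //   {
        //       visit(bl.getLevel(i - 1).getSnap());
        //       visit(bl.getLevel(i - 1).getCurr());
        //   }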
        if (mApp.getConfig().isUsingBucketListDB())
        {
            for (auto const& hsb : mApplyState.currentBuckets)
@@ -222,7 +223,7 @@ ApplyBucketsWork::prepareForNextBucket()
     }
 }
 
-// We iterate through the BucketList either in-order (level 0 curr, level 0
+// We iterate through the live BucketList either in-order (level 0 curr, level 0
 // snap, level 1 curr, etc) when only applying offers, or in reverse order
 // (level 9 curr, level 8 snap, level 8 curr, etc) when applying all entry
 // types. When only applying offers, we keep track of the keys we have already
diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h
index 276e4caa05..173026daec 100644
--- a/src/catchup/ApplyBucketsWork.h
+++ b/src/catchup/ApplyBucketsWork.h
@@ -13,7 +13,7 @@ namespace stellar
 
 class AssumeStateWork;
 class BucketLevel;
-class BucketList;
+class LiveBucketList;
 class Bucket;
 class IndexBucketsWork;
 struct HistoryArchiveState;
diff --git a/src/catchup/ApplyBufferedLedgersWork.cpp b/src/catchup/ApplyBufferedLedgersWork.cpp
index 6af378daf4..72d396c5a2 100644
--- a/src/catchup/ApplyBufferedLedgersWork.cpp
+++ b/src/catchup/ApplyBufferedLedgersWork.cpp
@@ -59,7 +59,7 @@ ApplyBufferedLedgersWork::onRun()
     auto applyLedger = std::make_shared<ApplyLedgerWork>(mApp, lcd);
 
     auto predicate = [](Application& app) {
-        auto& bl = app.getBucketManager().getBucketList();
+        auto& bl = app.getBucketManager().getLiveBucketList();
         auto& lm = app.getLedgerManager();
         bl.resolveAnyReadyFutures();
         return bl.futuresAllResolved(
diff --git a/src/catchup/ApplyCheckpointWork.cpp b/src/catchup/ApplyCheckpointWork.cpp
index 39180e9cdb..cd0ffd822d 100644
--- a/src/catchup/ApplyCheckpointWork.cpp
+++ b/src/catchup/ApplyCheckpointWork.cpp
@@ -312,7 +312,7 @@ ApplyCheckpointWork::onRun()
     auto applyLedger = std::make_shared<ApplyLedgerWork>(mApp, *lcd);
 
     auto predicate = [](Application& app) {
-        auto& bl = app.getBucketManager().getBucketList();
+        auto& bl = app.getBucketManager().getLiveBucketList();
         auto& lm = app.getLedgerManager();
         bl.resolveAnyReadyFutures();
         return bl.futuresAllResolved(
diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp
index e12ed8ac98..42f80f66d9 100644
--- a/src/catchup/AssumeStateWork.cpp
+++ b/src/catchup/AssumeStateWork.cpp
@@ -26,7 +26,7 @@ AssumeStateWork::AssumeStateWork(Application& app,
     // Maintain reference to all Buckets in HAS to avoid garbage collection,
     // including future buckets that have already finished merging
     auto& bm = mApp.getBucketManager();
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
     {
         auto curr =
             bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr));
diff --git a/src/catchup/DownloadApplyTxsWork.cpp b/src/catchup/DownloadApplyTxsWork.cpp
index 90538ce3fa..a9164b5216 100644
--- a/src/catchup/DownloadApplyTxsWork.cpp
+++ b/src/catchup/DownloadApplyTxsWork.cpp
@@ -84,7 +84,7 @@ DownloadApplyTxsWork::yieldMoreWork()
     auto maybeWaitForMerges = [](Application& app) {
         if (app.getConfig().CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING)
         {
-            auto& bl = app.getBucketManager().getBucketList();
+            auto& bl = app.getBucketManager().getLiveBucketList();
             bl.resolveAnyReadyFutures();
             return bl.futuresAllResolved();
         }
diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp
index c1f86fb7ce..db68391cd4 100644
--- a/src/herder/test/UpgradesTests.cpp
+++ b/src/herder/test/UpgradesTests.cpp
@@ -1984,7 +1984,7 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
                              app->getConfig().NODE_SEED);
        lm.closeLedger(LedgerCloseData(ledgerSeq, txSet, sv));
        auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
        while (!bl.futuresAllResolved())
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
@@ -1998,9 +1998,9 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
                  ledgerSeq, mc.mPreInitEntryProtocolMerges,
                  mc.mPostInitEntryProtocolMerges, mc.mNewInitEntries,
                  mc.mOldInitEntries);
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level)
        {
-            auto& lev = bm.getBucketList().getLevel(level);
+            auto& lev = bm.getLiveBucketList().getLevel(level);
            BucketTestUtils::EntryCounts currCounts(lev.getCurr());
            BucketTestUtils::EntryCounts snapCounts(lev.getSnap());
            CLOG_INFO(
@@ -2030,8 +2030,8 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
            // - From 8 on, the INITENTRYs propagate to lev[1].curr
            REQUIRE(mc.mPreInitEntryProtocolMerges == 5);
            REQUIRE(mc.mPostInitEntryProtocolMerges != 0);
-            auto& lev0 = bm.getBucketList().getLevel(0);
-            auto& lev1 = bm.getBucketList().getLevel(1);
+            auto& lev0 = bm.getLiveBucketList().getLevel(0);
+            auto& lev1 = bm.getLiveBucketList().getLevel(1);
            auto lev0Curr = lev0.getCurr();
            auto lev0Snap = lev0.getSnap();
            auto lev1Curr = lev1.getCurr();
@@ -2108,7 +2108,7 @@ TEST_CASE("upgrade to version 12", "[upgrades]")
                              app->getConfig().NODE_SEED);
        lm.closeLedger(LedgerCloseData(ledgerSeq, txSet, sv));
        auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
        while (!bl.futuresAllResolved())
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
@@ -2122,8 +2122,8 @@ TEST_CASE("upgrade to version 12", "[upgrades]")
        }
        else
        {
-            auto& lev0 = bm.getBucketList().getLevel(0);
-            auto& lev1 = bm.getBucketList().getLevel(1);
+            auto& lev0 = bm.getLiveBucketList().getLevel(0);
+            auto& lev1 = bm.getLiveBucketList().getLevel(1);
            auto lev0Curr = lev0.getCurr();
            auto lev0Snap = lev0.getSnap();
            auto lev1Curr = lev1.getCurr();
@@ -2233,7 +2233,7 @@ TEST_CASE("configuration initialized in version upgrade", "[upgrades]")
        REQUIRE(!ltx.load(getMaxContractSizeKey()));
    }
 
-    auto blSize = app->getBucketManager().getBucketList().getSize();
+    auto blSize = app->getBucketManager().getLiveBucketList().getSize();
 
    executeUpgrade(*app, makeProtocolVersionUpgrade(
                             static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION)));
diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp
index a627b98c81..86d91b3ade 100644
--- a/src/history/HistoryArchive.cpp
+++ b/src/history/HistoryArchive.cpp
@@ -246,7 +246,7 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const
        inhibit.insert(b.snap);
    }
    std::vector<std::string> ret;
-    for (size_t i = BucketList::kNumLevels; i != 0; --i)
+    for (size_t i = BucketListBase::kNumLevels; i != 0; --i)
    {
        auto s = currentBuckets[i - 1].snap;
        auto n = s;
@@ -322,7 +322,7 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
    };
 
    // Iterate bottom-up, from oldest to newest buckets
-    for (uint32_t j = BucketList::kNumLevels; j != 0; --j)
+    for (uint32_t j = BucketListBase::kNumLevels; j != 0; --j)
    {
        auto i = j - 1;
        auto const& level = currentBuckets[i];
@@ -384,7 +384,7 @@ HistoryArchiveState::prepareForPublish(Application& app)
    // Level 0 future buckets are always clear
    releaseAssert(currentBuckets[0].next.isClear());
 
-    for (uint32_t i = 1; i < BucketList::kNumLevels; i++)
+    for (uint32_t i = 1; i < BucketListBase::kNumLevels; i++)
    {
        auto& level = currentBuckets[i];
auto& prev = currentBuckets[i - 1]; @@ -423,20 +423,20 @@ HistoryArchiveState::HistoryArchiveState() : server(STELLAR_CORE_VERSION) HistoryStateBucket b; b.curr = s; b.snap = s; - while (currentBuckets.size() < BucketList::kNumLevels) + while (currentBuckets.size() < BucketListBase::kNumLevels) { currentBuckets.push_back(b); } } HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq, - BucketList const& buckets, + LiveBucketList const& buckets, std::string const& passphrase) : server(STELLAR_CORE_VERSION) , networkPassphrase(passphrase) , currentLedger(ledgerSeq) { - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { HistoryStateBucket b; auto& level = buckets.getLevel(i); diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h index 1a70622dac..02363e86c8 100644 --- a/src/history/HistoryArchive.h +++ b/src/history/HistoryArchive.h @@ -27,7 +27,7 @@ namespace stellar { class Application; -class BucketList; +class LiveBucketList; class Bucket; struct HistoryStateBucket @@ -70,7 +70,7 @@ struct HistoryArchiveState HistoryArchiveState(); - HistoryArchiveState(uint32_t ledgerSeq, BucketList const& buckets, + HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& buckets, std::string const& networkPassphrase); static std::string baseName(); diff --git a/src/history/HistoryManager.h b/src/history/HistoryManager.h index eff457ee64..0bfd8adf0f 100644 --- a/src/history/HistoryManager.h +++ b/src/history/HistoryManager.h @@ -181,7 +181,7 @@ namespace stellar { class Application; class Bucket; -class BucketList; +class LiveBucketList; class Config; class Database; class HistoryArchive; diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp index b7d799dc4c..9d1a99d54b 100644 --- a/src/history/HistoryManagerImpl.cpp +++ b/src/history/HistoryManagerImpl.cpp @@ -388,10 +388,10 @@ HistoryManagerImpl::queueCurrentHistory() ZoneScoped; auto ledger = mApp.getLedgerManager().getLastClosedLedgerNum(); - BucketList bl; + LiveBucketList bl; if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) { - bl = mApp.getBucketManager().getBucketList(); + bl = mApp.getBucketManager().getLiveBucketList(); } HistoryArchiveState has(ledger, bl, mApp.getConfig().NETWORK_PASSPHRASE); diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp index 3995aeea1b..bcae0678b8 100644 --- a/src/history/StateSnapshot.cpp +++ b/src/history/StateSnapshot.cpp @@ -44,7 +44,7 @@ StateSnapshot::StateSnapshot(Application& app, HistoryArchiveState const& state) mSnapDir, FileType::HISTORY_FILE_TYPE_SCP, mLocalState.currentLedger)) { - if (mLocalState.currentBuckets.size() != BucketList::kNumLevels) + if (mLocalState.currentBuckets.size() != BucketListBase::kNumLevels) { throw std::runtime_error("Invalid HAS: malformed bucketlist"); } diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 03aa1592f8..27550f6996 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -1312,7 +1312,7 @@ TEST_CASE_VERSIONS( Application::pointer app = createTestApplication(clock, cfg); auto& hm = app->getHistoryManager(); auto& lm = app->getLedgerManager(); - auto& bl = app->getBucketManager().getBucketList(); + auto& bl = app->getBucketManager().getLiveBucketList(); while (hm.getPublishQueueCount() != 1) { @@ -1337,7 +1337,7 @@ TEST_CASE_VERSIONS( // Second, ensure `next` is in the exact same state as when it was // queued - for (uint32_t i = 0; i < 
BucketList::kNumLevels; i++)
+        for (uint32_t i = 0; i < BucketListBase::kNumLevels; i++)
        {
            auto const& currentNext = bl.getLevel(i).getNext();
            auto const& queuedNext = queuedHAS.currentBuckets[i].next;
diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp
index 908548b433..76b3d5eea3 100644
--- a/src/history/test/HistoryTestsUtils.cpp
+++ b/src/history/test/HistoryTestsUtils.cpp
@@ -546,13 +546,13 @@ CatchupSimulation::generateRandomLedger(uint32_t version)
        mBucketListHashes.push_back(lclh.header.bucketListHash);
        mBucket0Hashes.push_back(getApp()
                                     .getBucketManager()
-                                     .getBucketList()
+                                     .getLiveBucketList()
                                     .getLevel(0)
                                     .getCurr()
                                     ->getHash());
        mBucket1Hashes.push_back(getApp()
                                     .getBucketManager()
-                                     .getBucketList()
+                                     .getLiveBucketList()
                                     .getLevel(2)
                                     .getCurr()
                                     ->getHash());
@@ -612,7 +612,7 @@ CatchupSimulation::ensureLedgerAvailable(uint32_t targetLedger,
        if (getApp().getHistoryManager().publishCheckpointOnLedgerClose(lcl))
        {
            mBucketListAtLastPublish =
-                getApp().getBucketManager().getBucketList();
+                getApp().getBucketManager().getLiveBucketList();
        }
    }
}
@@ -985,12 +985,12 @@ CatchupSimulation::validateCatchup(Application::pointer app)
    auto haveBucketListHash =
        lm.getLastClosedLedgerHeader().header.bucketListHash;
    auto haveBucket0Hash = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                               .getLevel(0)
                               .getCurr()
                               ->getHash();
    auto haveBucket1Hash = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                               .getLevel(2)
                               .getCurr()
                               ->getHash();
diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h
index ce62a96098..04d8929589 100644
--- a/src/history/test/HistoryTestsUtils.h
+++ b/src/history/test/HistoryTestsUtils.h
@@ -184,8 +184,7 @@ class CatchupSimulation
    Config mCfg;
    std::vector<Config> mCfgs;
    Application::pointer mAppPtr;
-    BucketList mBucketListAtLastPublish;
-
+    LiveBucketList mBucketListAtLastPublish;
 
    std::vector<LedgerCloseData> mLedgerCloseDatas;
    std::vector<uint32_t> mLedgerSeqs;
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp
index e12da7b724..97368e785f 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.cpp
+++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp
@@ -325,9 +325,9 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
    {
        LedgerTxn ltx(mApp.getLedgerTxnRoot());
 
-        auto& bl = mApp.getBucketManager().getBucketList();
+        auto& bl = mApp.getBucketManager().getLiveBucketList();
 
-        for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+        for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
        {
            auto const& level = bl.getLevel(i);
            for (auto const& bucket : {level.getCurr(), level.getSnap()})
diff --git a/src/invariant/InvariantManagerImpl.cpp b/src/invariant/InvariantManagerImpl.cpp
index df0ca6f61a..4a88a276e1 100644
--- a/src/invariant/InvariantManagerImpl.cpp
+++ b/src/invariant/InvariantManagerImpl.cpp
@@ -74,12 +74,13 @@ InvariantManagerImpl::checkOnBucketApply(
    std::shared_ptr<Bucket const> bucket, uint32_t ledger, uint32_t level,
    bool isCurr, std::function<bool(LedgerEntryType)> entryTypeFilter)
{
-    uint32_t oldestLedger = isCurr
-                                ? BucketList::oldestLedgerInCurr(ledger, level)
-                                : BucketList::oldestLedgerInSnap(ledger, level);
-    uint32_t newestLedger = oldestLedger - 1 +
-                            (isCurr ? BucketList::sizeOfCurr(ledger, level)
-                                    : BucketList::sizeOfSnap(ledger, level));
+    uint32_t oldestLedger =
+        isCurr ? BucketListBase::oldestLedgerInCurr(ledger, level)
+               : BucketListBase::oldestLedgerInSnap(ledger, level);
+    uint32_t newestLedger =
+        oldestLedger - 1 +
+        (isCurr ? BucketListBase::sizeOfCurr(ledger, level)
+                : BucketListBase::sizeOfSnap(ledger, level));
    for (auto invariant : mEnabled)
    {
        auto result = invariant->checkOnBucketApply(
diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
index 69edb0711b..c9f7a0c7db 100644
--- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
+++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
@@ -144,8 +144,8 @@ struct BucketListGenerator
        std::vector<LedgerKey> deadEntries;
        auto header = ltx.loadHeader().current();
        ltx.getAllEntries(initEntries, liveEntries, deadEntries);
-        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app->getBucketManager().getBucketList(), *app, header, initEntries,
+        BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+            *app, header, initEntries,
            liveEntries, deadEntries);
        ltx.commit();
    }
@@ -205,16 +205,16 @@ struct BucketListGenerator
    HistoryArchiveState
    getHistoryArchiveState(Application::pointer app)
    {
-        auto& blGenerate = mAppGenerate->getBucketManager().getBucketList();
+        auto& blGenerate = mAppGenerate->getBucketManager().getLiveBucketList();
        auto& bmApply = app->getBucketManager();
        MergeCounters mergeCounters;
        LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false);
        auto vers = ltx.loadHeader().current().ledgerVersion;
-        for (uint32_t i = 0; i <= BucketList::kNumLevels - 1; i++)
+        for (uint32_t i = 0; i <= BucketListBase::kNumLevels - 1; i++)
        {
            auto& level = blGenerate.getLevel(i);
            auto meta = testutil::testBucketMetadata(vers);
-            auto keepDead = BucketList::keepDeadEntries(i);
+            auto keepDead = BucketListBase::keepDeadEntries(i);
 
            auto writeBucketFile = [&](auto b) {
                BucketOutputIterator out(bmApply.getTmpDir(), keepDead, meta,
@@ -259,9 +259,9 @@ doesBucketContain(std::shared_ptr<Bucket const> bucket, const BucketEntry& be)
}
 
bool
-doesBucketListContain(BucketList& bl, const BucketEntry& be)
+doesBucketListContain(LiveBucketList& bl, const BucketEntry& be)
{
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
    {
        auto const& level = bl.getLevel(i);
        for (auto const& bucket : {level.getCurr(), level.getSnap()})
@@ -857,15 +857,15 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds",
        }
    };
 
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level)
    {
-        uint32_t oldestLedger = BucketList::oldestLedgerInSnap(101, level);
+        uint32_t oldestLedger = BucketListBase::oldestLedgerInSnap(101, level);
        if (oldestLedger == std::numeric_limits<uint32_t>::max())
        {
            break;
        }
-        uint32_t newestLedger = BucketList::oldestLedgerInCurr(101, level) +
-                                BucketList::sizeOfCurr(101, level) - 1;
+        uint32_t newestLedger = BucketListBase::oldestLedgerInCurr(101, level) +
+                                BucketListBase::sizeOfCurr(101, level) - 1;
        stellar::uniform_int_distribution<uint32_t> ledgerToModifyDist(
            std::max(2u, oldestLedger), newestLedger);
 
@@ -874,20 +874,21 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds",
            uint32_t ledgerToModify = ledgerToModifyDist(gRandomEngine);
            uint32_t maxLowTargetLedger = 0;
            uint32_t minHighTargetLedger = 0;
-            if (ledgerToModify >= BucketList::oldestLedgerInCurr(101, level))
+            if (ledgerToModify >=
+                BucketListBase::oldestLedgerInCurr(101, level))
            {
                maxLowTargetLedger =
-                    BucketList::oldestLedgerInCurr(101, level) - 1;
+                    BucketListBase::oldestLedgerInCurr(101, level) - 1;
                minHighTargetLedger =
-                    BucketList::oldestLedgerInCurr(101, level) +
-                    BucketList::sizeOfCurr(101, level);
+                    BucketListBase::oldestLedgerInCurr(101, level) +
+                    BucketListBase::sizeOfCurr(101, level);
            }
            else
            {
                maxLowTargetLedger =
-                    BucketList::oldestLedgerInSnap(101, level) - 1;
+                    BucketListBase::oldestLedgerInSnap(101, level) - 1;
                minHighTargetLedger =
-                    BucketList::oldestLedgerInCurr(101, level);
+                    BucketListBase::oldestLedgerInCurr(101, level);
            }
            stellar::uniform_int_distribution<uint32_t> lowTargetLedgerDist(
                1, maxLowTargetLedger);
@@ -955,7 +956,7 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY",
        {
            MergeBucketListGenerator blg(static_cast<LedgerEntryType>(t));
            auto& blGenerate =
-                blg.mAppGenerate->getBucketManager().getBucketList();
+                blg.mAppGenerate->getBucketManager().getLiveBucketList();
 
            blg.generateLedgers(100);
            if (!blg.mSelected)
@@ -995,7 +996,8 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY",
                Application::pointer appApply =
                    createTestApplication(clock, cfg);
                REQUIRE_NOTHROW(blg.applyBuckets(appApply));
-                auto& blApply = appApply->getBucketManager().getBucketList();
+                auto& blApply =
+                    appApply->getBucketManager().getLiveBucketList();
                REQUIRE(!doesBucketListContain(blApply, dead));
                REQUIRE(!(doesBucketListContain(blApply, live) ||
                          doesBucketListContain(blApply, init)));
diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp
index 8e7c5841b2..86431b6b70 100644
--- a/src/ledger/LedgerManagerImpl.cpp
+++ b/src/ledger/LedgerManagerImpl.cpp
@@ -1624,10 +1624,10 @@ LedgerManagerImpl::storeCurrentLedger(LedgerHeader const& header,
    mApp.getPersistentState().setState(PersistentState::kLastClosedLedger,
                                       binToHex(hash));
 
-    BucketList bl;
+    LiveBucketList bl;
    if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
    {
-        bl = mApp.getBucketManager().getBucketList();
+        bl = mApp.getBucketManager().getLiveBucketList();
    }
    // Store the current HAS in the database; this is really just to checkpoint
    // the bucketlist so we can survive a restart and re-attach to the buckets.
@@ -1697,8 +1697,8 @@ LedgerManagerImpl::transferLedgerEntriesToBucketList(
    ltx.getAllEntries(initEntries, liveEntries, deadEntries);
    if (blEnabled)
    {
-        mApp.getBucketManager().addBatch(mApp, lh, initEntries, liveEntries,
-                                         deadEntries);
+        mApp.getBucketManager().addLiveBatch(mApp, lh, initEntries, liveEntries,
+                                             deadEntries);
    }
}
diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp
index 1d978bf8ba..45c36ceefb 100644
--- a/src/ledger/NetworkConfig.cpp
+++ b/src/ledger/NetworkConfig.cpp
@@ -920,7 +920,7 @@ initialBucketListSizeWindow(Application& app)
    // copies of the current BL size. If the bucketlist is disabled for
    // testing, just fill with ones to avoid triggering asserts.
    auto blSize = app.getConfig().MODE_ENABLES_BUCKETLIST
-                      ? app.getBucketManager().getBucketList().getSize()
+                      ? app.getBucketManager().getLiveBucketList().getSize()
                      : 1;
    for (uint64_t i = 0;
         i < InitialSorobanNetworkConfig::BUCKET_LIST_SIZE_WINDOW_SAMPLE_SIZE;
@@ -1046,7 +1046,7 @@ SorobanNetworkConfig::isValidConfigSettingEntry(ConfigSettingEntry const& cfg,
              cfg.stateArchivalSettings().startingEvictionScanLevel >=
                  MinimumSorobanNetworkConfig::STARTING_EVICTION_LEVEL &&
              cfg.stateArchivalSettings().startingEvictionScanLevel <
-                  BucketList::kNumLevels &&
+                  BucketListBase::kNumLevels &&
              cfg.stateArchivalSettings().bucketListWindowSamplePeriod >=
                  MinimumSorobanNetworkConfig::BUCKETLIST_WINDOW_SAMPLE_PERIOD;
 
@@ -1704,7 +1704,7 @@ SorobanNetworkConfig::maybeSnapshotBucketListSize(uint32_t currLedger,
        // Update in memory snapshots
        mBucketListSizeSnapshots.pop_front();
        mBucketListSizeSnapshots.push_back(
-            app.getBucketManager().getBucketList().getSize());
+            app.getBucketManager().getLiveBucketList().getSize());
 
        writeBucketListSizeWindow(ltx);
        updateBucketListSizeAverage();
@@ -1872,7 +1872,7 @@ SorobanNetworkConfig::writeAllSettings(AbstractLedgerTxn& ltx,
        auto lcl = app.getLedgerManager().getLastClosedLedgerHeader();
        lcl.header.ledgerSeq += 1;
        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app.getBucketManager().getBucketList(), app, lcl.header, {},
+            app.getBucketManager().getLiveBucketList(), app, lcl.header, {},
            entries, {});
    }
}
diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp
index ba3d2a698c..e98ad0acce 100644
--- a/src/ledger/test/LedgerTxnTests.cpp
+++ b/src/ledger/test/LedgerTxnTests.cpp
@@ -2748,7 +2748,7 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]")
                               .header.ledgerVersion;
            lh.ledgerSeq = 2;
            BucketTestUtils::addBatchAndUpdateSnapshot(
-                app->getBucketManager().getBucketList(), *app, lh, {},
+                app->getBucketManager().getLiveBucketList(), *app, lh, {},
                ledgerVect, {});
        }
        ltx.commit();
@@ -2981,7 +2981,7 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]")
                           .header.ledgerVersion;
        lh.ledgerSeq = 2;
        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app->getBucketManager().getBucketList(), *app, lh, {}, ledgerVect,
+            app->getBucketManager().getLiveBucketList(), *app, lh, {}, ledgerVect,
            deadKeyVect);
 
        ltx.commit();
diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp
index 5f33fce2a5..c055f6c086 100644
--- a/src/main/ApplicationUtils.cpp
+++ b/src/main/ApplicationUtils.cpp
@@ -647,7 +647,7 @@ dumpStateArchivalStatistics(Config cfg)
    HistoryArchiveState has = lm.getLastClosedLedgerHAS();
 
    std::vector<Hash> hashes;
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i)
    {
        HistoryStateBucket const& hsb = has.currentBuckets.at(i);
        hashes.emplace_back(hexToBin256(hsb.curr));
@@ -720,7 +720,7 @@ dumpStateArchivalStatistics(Config cfg)
        }
    }
 
-    CLOG_INFO(Bucket, "BucketList total bytes: {}", blSize);
+    CLOG_INFO(Bucket, "Live BucketList total bytes: {}", blSize);
    CLOG_INFO(Bucket,
              "Live Temporary Entries: Newest bytes {} ({}%), Outdated bytes "
              "{} ({}%)",
diff --git a/src/main/Config.cpp b/src/main/Config.cpp
index 9923da9020..c20c617616 100644
--- a/src/main/Config.cpp
+++ b/src/main/Config.cpp
@@ -1573,8 +1573,8 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
             }},
            {"TESTING_STARTING_EVICTION_SCAN_LEVEL",
             [&]() {
-                 TESTING_STARTING_EVICTION_SCAN_LEVEL =
-                     readInt<uint32_t>(item, 1, BucketList::kNumLevels - 1);
+                 TESTING_STARTING_EVICTION_SCAN_LEVEL = readInt<uint32_t>(
+                     item, 1, LiveBucketList::kNumLevels - 1);
             }},
            {"TESTING_MAX_ENTRIES_TO_ARCHIVE",
             [&]() {
diff --git 
a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index bdbf6942ea..b416043a04 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -121,9 +121,10 @@ checkState(Application& app) }; auto& bm = app.getBucketManager(); - for (uint32_t i = 0; i < bm.getBucketList().kNumLevels && blcOk; ++i) + for (uint32_t i = 0; i < bm.getLiveBucketList().kNumLevels && blcOk; + ++i) { - auto& level = bm.getBucketList().getLevel(i); + auto& level = bm.getLiveBucketList().getLevel(i); checkBucket(level.getCurr()); checkBucket(level.getSnap()); auto& nextFuture = level.getNext(); @@ -450,7 +451,7 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]") catchupSimulation.catchupOffline(app, l1); chkConfig = app->getConfig(); victimBucketPath = app->getBucketManager() - .getBucketList() + .getLiveBucketList() .getLevel(0) .getCurr() ->getFilename(); diff --git a/src/simulation/CoreTests.cpp b/src/simulation/CoreTests.cpp index 30f1bffd55..b3304dcdfd 100644 --- a/src/simulation/CoreTests.cpp +++ b/src/simulation/CoreTests.cpp @@ -686,8 +686,8 @@ TEST_CASE("Bucket list entries vs write throughput", "[scalability][!hide]") LedgerHeader lh; lh.ledgerVersion = Config::CURRENT_LEDGER_PROTOCOL_VERSION; lh.ledgerSeq = i; - BucketTestUtils::addBatchAndUpdateSnapshot( - app->getBucketManager().getBucketList(), *app, lh, + BucketTestUtils::addLiveBatchAndUpdateSnapshot( + *app, lh, LedgerTestUtils::generateValidLedgerEntries(100), LedgerTestUtils::generateValidLedgerEntries(20), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index b351112bd8..cf274ceba0 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -112,14 +112,14 @@ computeMultiplier(LedgerEntry const& le) } BucketListDepthModifier::BucketListDepthModifier(uint32_t newDepth) - : mPrevDepth(BucketList::kNumLevels) + : mPrevDepth(BucketListBase::kNumLevels) { - BucketList::kNumLevels = newDepth; + BucketListBase::kNumLevels = newDepth; } BucketListDepthModifier::~BucketListDepthModifier() { - BucketList::kNumLevels = mPrevDepth; + BucketListBase::kNumLevels = mPrevDepth; } } From 0e24f0fb25712267bc3f04953daa23157b3afb1f Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Mon, 22 Jul 2024 16:22:18 -0700 Subject: [PATCH 2/5] Templated common BucketList code --- src/bucket/Bucket.cpp | 262 +++++++++++----- src/bucket/Bucket.h | 124 +++++--- src/bucket/BucketApplicator.cpp | 10 +- src/bucket/BucketApplicator.h | 4 +- src/bucket/BucketInputIterator.cpp | 69 ++++- src/bucket/BucketInputIterator.h | 24 +- src/bucket/BucketList.cpp | 223 +++++++++----- src/bucket/BucketList.h | 50 +-- src/bucket/BucketListSnapshot.cpp | 183 ++++++++--- src/bucket/BucketListSnapshot.h | 64 +++- src/bucket/BucketManager.h | 37 ++- src/bucket/BucketManagerImpl.cpp | 181 ++++++++--- src/bucket/BucketManagerImpl.h | 61 +++- src/bucket/BucketOutputIterator.cpp | 109 +++++-- src/bucket/BucketOutputIterator.h | 24 +- src/bucket/BucketSnapshot.cpp | 116 ++++--- src/bucket/BucketSnapshot.h | 50 ++- src/bucket/BucketSnapshotManager.cpp | 17 +- src/bucket/BucketSnapshotManager.h | 17 +- src/bucket/FutureBucket.cpp | 193 ++++++++---- src/bucket/FutureBucket.h | 37 ++- src/bucket/LedgerCmp.h | 80 ++++- src/bucket/MergeKey.cpp | 16 +- src/bucket/MergeKey.h | 5 +- src/bucket/test/BucketIndexTests.cpp | 6 +- src/bucket/test/BucketListTests.cpp | 182 +++++------ src/bucket/test/BucketManagerTests.cpp | 72 ++--- 
 src/bucket/test/BucketMergeMapTests.cpp       |  57 ++--
 src/bucket/test/BucketTestUtils.cpp           |  12 +-
 src/bucket/test/BucketTestUtils.h             |   4 +-
 src/bucket/test/BucketTests.cpp               | 289 +++++++++---------
 src/catchup/ApplyBucketsWork.cpp              |  19 +-
 src/catchup/ApplyBucketsWork.h                |  11 +-
 src/catchup/AssumeStateWork.cpp               |   8 +-
 src/catchup/AssumeStateWork.h                 |   3 +-
 src/catchup/CatchupManager.h                  |   2 +-
 src/catchup/CatchupManagerImpl.cpp            |   2 +-
 src/catchup/CatchupManagerImpl.h              |   8 +-
 src/catchup/CatchupWork.cpp                   |   2 +-
 src/catchup/CatchupWork.h                     |   6 +-
 src/catchup/IndexBucketsWork.cpp              |   6 +-
 src/catchup/IndexBucketsWork.h                |   9 +-
 src/herder/test/UpgradesTests.cpp             |  10 +-
 src/history/FileTransferInfo.h                |   2 +-
 src/history/HistoryArchive.cpp                |  24 +-
 src/history/HistoryArchive.h                  |   4 +-
 src/history/StateSnapshot.cpp                 |   4 +-
 src/history/test/HistoryTests.cpp             |  10 +-
 src/history/test/HistoryTestsUtils.cpp        |   6 +-
 src/history/test/HistoryTestsUtils.h          |   2 +-
 src/historywork/DownloadBucketsWork.cpp       |   5 +-
 src/historywork/DownloadBucketsWork.h         |  12 +-
 .../BucketListIsConsistentWithDatabase.cpp    |  11 +-
 .../BucketListIsConsistentWithDatabase.h      |   2 +-
 src/invariant/Invariant.h                     |   4 +-
 src/invariant/InvariantManager.h              |   5 +-
 src/invariant/InvariantManagerImpl.cpp        |  10 +-
 src/invariant/InvariantManagerImpl.h          |   4 +-
 ...ucketListIsConsistentWithDatabaseTests.cpp |  48 +--
 src/invariant/test/InvariantTests.cpp         |   6 +-
 src/ledger/LedgerManager.h                    |   2 +-
 src/ledger/LedgerManagerImpl.cpp              |   2 +-
 src/ledger/LedgerManagerImpl.h                |   8 +-
 src/ledger/LedgerTxn.cpp                      |   2 +-
 src/ledger/LedgerTxnImpl.h                    |   6 +-
 src/ledger/NetworkConfig.cpp                  |   2 +-
 src/main/ApplicationUtils.cpp                 |  16 +-
 src/main/Config.h                             |   6 +-
 src/main/test/ApplicationUtilsTests.cpp       |   2 +-
 src/test/TestUtils.cpp                        |   6 +-
 src/util/test/XDRStreamTests.cpp              |   4 +-
 src/util/types.h                              |  16 +
 72 files changed, 1881 insertions(+), 1014 deletions(-)

diff --git a/src/bucket/Bucket.cpp b/src/bucket/Bucket.cpp
index 61e96b3a28..bfc63cf7f9 100644
--- a/src/bucket/Bucket.cpp
+++ b/src/bucket/Bucket.cpp
@@ -29,6 +29,7 @@
 #include
 
 #include "medida/counter.h"
+#include "xdr/Stellar-ledger.h"
 
 namespace stellar
 {
@@ -97,10 +98,10 @@ Bucket::getSize() const
 }
 
 bool
-Bucket::containsBucketIdentity(BucketEntry const& id) const
+LiveBucket::containsBucketIdentity(BucketEntry const& id) const
 {
-    BucketEntryIdCmp cmp;
-    BucketInputIterator iter(shared_from_this());
+    BucketEntryIdCmp<LiveBucket> cmp;
+    LiveBucketInputIterator iter(shared_from_this());
     while (iter)
     {
         if (!(cmp(*iter, id) || cmp(id, *iter)))
@@ -132,7 +133,7 @@ Bucket::freeIndex()
 
 #ifdef BUILD_TESTS
 void
-Bucket::apply(Application& app) const
+LiveBucket::apply(Application& app) const
 {
     ZoneScoped;
 
@@ -160,13 +161,12 @@ Bucket::apply(Application& app) const
     }
     counters.logInfo("direct", 0, app.getClock().now());
 }
-#endif // BUILD_TESTS
 
 std::vector<BucketEntry>
-Bucket::convertToBucketEntry(bool useInit,
-                             std::vector<LedgerEntry> const& initEntries,
-                             std::vector<LedgerEntry> const& liveEntries,
-                             std::vector<LedgerKey> const& deadEntries)
+LiveBucket::convertToBucketEntry(bool useInit,
+                                 std::vector<LedgerEntry> const& initEntries,
+                                 std::vector<LedgerEntry> const& liveEntries,
+                                 std::vector<LedgerKey> const& deadEntries)
 {
     std::vector<BucketEntry> bucket;
     for (auto const& e : initEntries)
@@ -191,7 +191,7 @@ Bucket::convertToBucketEntry(bool useInit,
         bucket.push_back(ce);
     }
 
-    BucketEntryIdCmp cmp;
+    BucketEntryIdCmp<LiveBucket> cmp;
     std::sort(bucket.begin(), bucket.end(), cmp);
     releaseAssert(std::adjacent_find(
                       bucket.begin(), bucket.end(),
                       }) == bucket.end());
     return bucket;
 }
+#endif // BUILD_TESTS
 
 std::string
 Bucket::randomFileName(std::string const& tmpDir, std::string ext)
@@ -229,12 +230,25 @@ Bucket::randomBucketIndexName(std::string const& tmpDir)
     return randomFileName(tmpDir, ".index");
 }
 
-std::shared_ptr<Bucket>
-Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
-              std::vector<LedgerEntry> const& initEntries,
-              std::vector<LedgerEntry> const& liveEntries,
-              std::vector<LedgerKey> const& deadEntries, bool countMergeEvents,
-              asio::io_context& ctx, bool doFsync)
+std::shared_ptr<HotArchiveBucket>
+HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
+                        std::vector<LedgerEntry> const& initEntries,
+                        std::vector<LedgerEntry> const& liveEntries,
+                        std::vector<LedgerKey> const& deadEntries,
+                        bool countMergeEvents, asio::io_context& ctx,
+                        bool doFsync)
+{
+    // TODO:
+    releaseAssert(false);
+    return nullptr;
+}
+
+std::shared_ptr<LiveBucket>
+LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
+                  std::vector<LedgerEntry> const& initEntries,
+                  std::vector<LedgerEntry> const& liveEntries,
+                  std::vector<LedgerKey> const& deadEntries,
+                  bool countMergeEvents, asio::io_context& ctx, bool doFsync)
 {
     ZoneScoped;
     // When building fresh buckets after protocol version 10 (i.e. version
@@ -249,8 +263,8 @@ Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
         convertToBucketEntry(useInit, initEntries, liveEntries, deadEntries);
 
     MergeCounters mc;
-    BucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx,
-                             doFsync);
+    LiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx,
+                                 doFsync);
     for (auto const& e : entries)
     {
         out.put(e);
@@ -265,6 +279,13 @@ Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
         bucketManager.getConfig().isUsingBucketListDB());
 }
 
+static void
+countShadowedEntryType(MergeCounters& mc, HotArchiveBucketEntry const& e)
+{
+    // TODO:
+    releaseAssert(false);
+}
+
 static void
 countShadowedEntryType(MergeCounters& mc, BucketEntry const& e)
 {
@@ -286,8 +307,8 @@ countShadowedEntryType(MergeCounters& mc, BucketEntry const& e)
 }
 
 void
-Bucket::checkProtocolLegality(BucketEntry const& entry,
-                              uint32_t protocolVersion)
+LiveBucket::checkProtocolLegality(BucketEntry const& entry,
+                                  uint32_t protocolVersion)
 {
     if (protocolVersionIsBefore(
             protocolVersion,
@@ -300,11 +321,18 @@ Bucket::checkProtocolLegality(BucketEntry const& entry,
     }
 }
 
+template <typename BucketT, typename BucketEntryT>
 inline void
-maybePut(BucketOutputIterator& out, BucketEntry const& entry,
-         std::vector<BucketInputIterator>& shadowIterators,
+maybePut(BucketOutputIterator<BucketT>& out, BucketEntryT const& entry,
+         std::vector<BucketInputIterator<BucketT>>& shadowIterators,
          bool keepShadowedLifecycleEntries, MergeCounters& mc)
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
+    static_assert(std::is_same_v<BucketEntryT, BucketEntry> ||
+                  std::is_same_v<BucketEntryT, HotArchiveBucketEntry>);
+
     // In ledgers before protocol 11, keepShadowedLifecycleEntries will be
     // `false` and we will drop all shadowed entries here.
     //
@@ -340,8 +368,15 @@ maybePut(BucketOutputIterator& out, BucketEntry const& entry,
     // Note that this decision only controls whether to elide dead entries due
     // to _shadows_. There is a secondary elision of dead entries at the _oldest
     // level_ of the bucketlist that is accomplished through filtering at the
-    // BucketOutputIterator level, and happens independent of ledger protocol
-    // version.
+    // LiveBucketOutputIterator level, and happens independent of ledger
+    // protocol version.
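    // Editorial example of the shadow rule above (assuming live entries):
    // if any shadow bucket -- a younger level's curr or snap -- already
    // contains key K, then the candidate entry for K coming out of this
    // merge can be elided: any lookup walking the list young-to-old will
    // hit the younger version of K first, so the older one need never be
    // written to the output bucket.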
+
+    // TODO: Shadows
+    if constexpr (std::is_same_v<BucketEntryT, HotArchiveBucketEntry>)
+    {
+        releaseAssert(false);
+        return;
+    }
 
     if (keepShadowedLifecycleEntries &&
         (entry.type() == INITENTRY || entry.type() == DEADENTRY))
     {
@@ -351,7 +386,7 @@ maybePut(BucketOutputIterator& out, BucketEntry const& entry,
         return;
     }
 
-    BucketEntryIdCmp cmp;
+    BucketEntryIdCmp<BucketT> cmp;
     for (auto& si : shadowIterators)
     {
         // Advance the shadowIterator while it's less than the candidate
@@ -447,11 +482,13 @@ countNewEntryType(MergeCounters& mc, BucketEntry const& e)
 // and shadowing protocol simultaneously, the moment the first new-protocol
 // bucket enters the youngest level. At least one new bucket is in every merge's
 // shadows from then on in, so they all upgrade (and preserve lifecycle events).
+template <typename BucketT>
 static void
 calculateMergeProtocolVersion(
     MergeCounters& mc, uint32_t maxProtocolVersion,
-    BucketInputIterator const& oi, BucketInputIterator const& ni,
-    std::vector<BucketInputIterator> const& shadowIterators,
+    BucketInputIterator<BucketT> const& oi,
+    BucketInputIterator<BucketT> const& ni,
+    std::vector<BucketInputIterator<BucketT>> const& shadowIterators,
     uint32& protocolVersion, bool& keepShadowedLifecycleEntries)
 {
     protocolVersion = std::max(oi.getMetadata().ledgerVersion,
@@ -466,7 +503,7 @@ calculateMergeProtocolVersion(
     {
         auto version = si.getMetadata().ledgerVersion;
         if (protocolVersionIsBefore(version,
-                                    Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                                    LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             protocolVersion = std::max(version, protocolVersion);
         }
@@ -487,10 +524,11 @@ calculateMergeProtocolVersion(
     // we switch shadowing-behaviour to a more conservative mode, in order to
     // support annihilation of INITENTRY and DEADENTRY pairs. See commentary
     // above in `maybePut`.
+    // TODO: Clean up metrics for archive buckets
     keepShadowedLifecycleEntries = true;
     if (protocolVersionIsBefore(
             protocolVersion,
-            Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
     {
         ++mc.mPreInitEntryProtocolMerges;
         keepShadowedLifecycleEntries = false;
@@ -501,7 +539,7 @@ calculateMergeProtocolVersion(
     }
 
     if (protocolVersionIsBefore(protocolVersion,
-                                Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                                LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
     {
         ++mc.mPreShadowRemovalProtocolMerges;
     }
@@ -519,13 +557,22 @@ calculateMergeProtocolVersion(
 // side, or entries that compare non-equal. In all these cases we just
 // take the lesser (or existing) entry and advance only one iterator,
 // not scrutinizing the entry type further.
+template <typename BucketT>
 static bool
 mergeCasesWithDefaultAcceptance(
-    BucketEntryIdCmp const& cmp, MergeCounters& mc, BucketInputIterator& oi,
-    BucketInputIterator& ni, BucketOutputIterator& out,
-    std::vector<BucketInputIterator>& shadowIterators, uint32_t protocolVersion,
-    bool keepShadowedLifecycleEntries)
-{
+    BucketEntryIdCmp<BucketT> const& cmp, MergeCounters& mc,
+    BucketInputIterator<BucketT>& oi, BucketInputIterator<BucketT>& ni,
+    BucketOutputIterator<BucketT>& out,
+    std::vector<BucketInputIterator<BucketT>>& shadowIterators,
+    uint32_t protocolVersion, bool keepShadowedLifecycleEntries)
+{
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+    // using BucketEntryT = std::conditional_t<std::is_same_v<BucketT, LiveBucket>,
+    //                                         BucketEntry,
+    //                                         HotArchiveBucketEntry>;
+
     if (!ni || (oi && ni && cmp(*oi, *ni)))
     {
         // Either of:
         //
         // - Out of new entries.
         // - Old entry has smaller key.
         //
         // In both cases: take old entry.
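        // Editorial worked example: merging old keys {A, C} with new keys
        // {B} takes A first (old side has the smaller key), then B (new
        // side smaller), then C (new side exhausted) -- an ordinary merge
        // of two sorted streams, with the equal-key cases handled
        // separately below.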
        ++mc.mOldEntriesDefaultAccepted;
-        Bucket::checkProtocolLegality(*oi, protocolVersion);
-        countOldEntryType(mc, *oi);
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            LiveBucket::checkProtocolLegality(*oi, protocolVersion);
+            countOldEntryType(mc, *oi);
+        }
         maybePut(out, *oi, shadowIterators, keepShadowedLifecycleEntries, mc);
         ++oi;
         return true;
@@ -550,8 +600,11 @@ mergeCasesWithDefaultAcceptance(
         //
         // In both cases: take new entry.
         ++mc.mNewEntriesDefaultAccepted;
-        Bucket::checkProtocolLegality(*ni, protocolVersion);
-        countNewEntryType(mc, *ni);
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            LiveBucket::checkProtocolLegality(*ni, protocolVersion);
+            countNewEntryType(mc, *ni);
+        }
         maybePut(out, *ni, shadowIterators, keepShadowedLifecycleEntries, mc);
         ++ni;
         return true;
@@ -562,9 +615,21 @@ mergeCasesWithDefaultAcceptance(
 // The remaining cases happen when keys are equal and we have to reason
 // through the relationships of their bucket lifecycle states. Trickier.
 static void
-mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
-                        BucketInputIterator& ni, BucketOutputIterator& out,
-                        std::vector<BucketInputIterator>& shadowIterators,
+mergeCasesWithEqualKeys(
    MergeCounters& mc, HotArchiveBucketInputIterator& oi,
+    HotArchiveBucketInputIterator& ni, HotArchiveBucketOutputIterator& out,
+    std::vector<HotArchiveBucketInputIterator>& shadowIterators,
+    uint32_t protocolVersion, bool keepShadowedLifecycleEntries)
+{
+    // TODO:
+    releaseAssert(false);
+}
+
+static void
+mergeCasesWithEqualKeys(MergeCounters& mc, LiveBucketInputIterator& oi,
+                        LiveBucketInputIterator& ni,
+                        LiveBucketOutputIterator& out,
+                        std::vector<LiveBucketInputIterator>& shadowIterators,
                         uint32_t protocolVersion,
                         bool keepShadowedLifecycleEntries)
 {
@@ -633,8 +698,8 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
     BucketEntry const& oldEntry = *oi;
     BucketEntry const& newEntry = *ni;
-    Bucket::checkProtocolLegality(oldEntry, protocolVersion);
-    Bucket::checkProtocolLegality(newEntry, protocolVersion);
+    LiveBucket::checkProtocolLegality(oldEntry, protocolVersion);
+    LiveBucket::checkProtocolLegality(newEntry, protocolVersion);
     countOldEntryType(mc, oldEntry);
     countNewEntryType(mc, newEntry);
@@ -685,20 +750,18 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
 }
 
 bool
-Bucket::scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter,
-                              uint32_t& bytesToScan,
-                              uint32_t& remainingEntriesToEvict,
-                              uint32_t ledgerSeq,
-                              medida::Counter& entriesEvictedCounter,
-                              medida::Counter& bytesScannedForEvictionCounter,
-                              std::shared_ptr<EvictionStatistics> stats) const
+LiveBucket::scanForEvictionLegacy(
+    AbstractLedgerTxn& ltx, EvictionIterator& iter, uint32_t& bytesToScan,
+    uint32_t& remainingEntriesToEvict, uint32_t ledgerSeq,
+    medida::Counter& entriesEvictedCounter,
+    medida::Counter& bytesScannedForEvictionCounter,
+    std::shared_ptr<EvictionStatistics> stats) const
 {
     ZoneScoped;
     releaseAssert(stats);
 
     if (isEmpty() ||
-        protocolVersionIsBefore(getBucketVersion(shared_from_this()),
-                                SOROBAN_PROTOCOL_VERSION))
+        protocolVersionIsBefore(getBucketVersion(), SOROBAN_PROTOCOL_VERSION))
     {
         // EOF, skip to next bucket
         return false;
@@ -777,14 +840,18 @@ Bucket::scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter,
     return false;
 }
 
-std::shared_ptr<Bucket>
+template <typename BucketT>
+std::shared_ptr<BucketT>
 Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
-              std::shared_ptr<Bucket> const& oldBucket,
-              std::shared_ptr<Bucket> const& newBucket,
-              std::vector<std::shared_ptr<Bucket>> const& shadows,
+              std::shared_ptr<BucketT> const& oldBucket,
+              std::shared_ptr<BucketT> const& newBucket,
+              std::vector<std::shared_ptr<BucketT>> const& shadows,
               bool keepDeadEntries, bool countMergeEvents,
               asio::io_context& ctx, bool doFsync)
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     ZoneScoped;
     // This is the key operation in the scheme: merging two (read-only)
     // buckets together into a new 3rd bucket, while calculating its hash,
@@ -794,24 +861,24 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
     releaseAssert(newBucket);
 
     MergeCounters mc;
-    BucketInputIterator oi(oldBucket);
-    BucketInputIterator ni(newBucket);
-    std::vector<BucketInputIterator> shadowIterators(shadows.begin(),
-                                                     shadows.end());
+    BucketInputIterator<BucketT> oi(oldBucket);
+    BucketInputIterator<BucketT> ni(newBucket);
+    std::vector<BucketInputIterator<BucketT>> shadowIterators(shadows.begin(),
                                                               shadows.end());
 
     uint32_t protocolVersion;
     bool keepShadowedLifecycleEntries;
-    calculateMergeProtocolVersion(mc, maxProtocolVersion, oi, ni,
-                                  shadowIterators, protocolVersion,
-                                  keepShadowedLifecycleEntries);
+    calculateMergeProtocolVersion<BucketT>(mc, maxProtocolVersion, oi, ni,
                                            shadowIterators, protocolVersion,
                                            keepShadowedLifecycleEntries);
 
     auto timer = bucketManager.getMergeTimer().TimeScope();
     BucketMetadata meta;
     meta.ledgerVersion = protocolVersion;
-    BucketOutputIterator out(bucketManager.getTmpDir(), keepDeadEntries, meta,
-                             mc, ctx, doFsync);
+    BucketOutputIterator<BucketT> out(bucketManager.getTmpDir(),
+                                      keepDeadEntries, meta, mc, ctx, doFsync);
 
-    BucketEntryIdCmp cmp;
+    BucketEntryIdCmp<BucketT> cmp;
     size_t iter = 0;
 
     while (oi || ni)
@@ -843,34 +910,79 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
     {
         bucketManager.incrMergeCounters(mc);
     }
-    MergeKey mk{keepDeadEntries, oldBucket, newBucket, shadows};
+
+    std::vector<Hash> shadowHashes;
+    shadowHashes.reserve(shadows.size());
+    for (auto const& s : shadows)
+    {
+        shadowHashes.push_back(s->getHash());
+    }
+
+    MergeKey mk{keepDeadEntries, oldBucket->getHash(), newBucket->getHash(),
+                shadowHashes};
     return out.getBucket(bucketManager,
                          bucketManager.getConfig().isUsingBucketListDB(), &mk);
 }
 
+LiveBucket::LiveBucket(std::string const& filename, Hash const& hash,
+                       std::unique_ptr<BucketIndex const>&& index)
+    : Bucket(filename, hash, std::move(index))
+{
+}
+
+LiveBucket::LiveBucket() : Bucket()
+{
+}
+
 uint32_t
-Bucket::getBucketVersion(std::shared_ptr<Bucket> const& bucket)
+LiveBucket::getBucketVersion() const
 {
-    releaseAssert(bucket);
-    BucketInputIterator it(bucket);
+    LiveBucketInputIterator it(shared_from_this());
     return it.getMetadata().ledgerVersion;
 }
 
 uint32_t
-Bucket::getBucketVersion(std::shared_ptr<Bucket const> const& bucket)
+HotArchiveBucket::getBucketVersion() const
 {
-    releaseAssert(bucket);
-    BucketInputIterator it(bucket);
+    HotArchiveBucketInputIterator it(shared_from_this());
     return it.getMetadata().ledgerVersion;
 }
 
 BucketEntryCounters const&
-Bucket::getBucketEntryCounters() const
+LiveBucket::getBucketEntryCounters() const
 {
     releaseAssert(mIndex);
     return mIndex->getBucketEntryCounters();
 }
 
+HotArchiveBucket::HotArchiveBucket(std::string const& filename,
+                                   Hash const& hash,
+                                   std::unique_ptr<BucketIndex const>&& index)
+    : Bucket(filename, hash, std::move(index))
+{
+}
+
+HotArchiveBucket::HotArchiveBucket() : Bucket()
+{
+}
+
+template std::shared_ptr<LiveBucket> Bucket::merge(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr<LiveBucket> const& oldBucket,
+    std::shared_ptr<LiveBucket> const& newBucket,
+    std::vector<std::shared_ptr<LiveBucket>> const& shadows,
+    bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
+
+template std::shared_ptr<HotArchiveBucket> Bucket::merge(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr<HotArchiveBucket> const& oldBucket,
+    std::shared_ptr<HotArchiveBucket> const& newBucket,
+    std::vector<std::shared_ptr<HotArchiveBucket>> const& shadows,
+    bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
}
 
 BucketEntryCounters&
 BucketEntryCounters::operator+=(BucketEntryCounters const& other)
 {
diff --git a/src/bucket/Bucket.h b/src/bucket/Bucket.h
index c4b6773949..43d82ba8f6 100644
--- a/src/bucket/Bucket.h
+++ b/src/bucket/Bucket.h
@@ -5,6 +5,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "bucket/BucketIndex.h"
+#include "bucket/BucketSnapshot.h"
 #include "util/NonCopyable.h"
 #include "util/ProtocolVersion.h"
 #include "xdr/Stellar-ledger.h"
@@ -41,14 +42,13 @@ namespace stellar
 class AbstractLedgerTxn;
 class Application;
 class BucketManager;
-class SearchableBucketListSnapshot;
 struct EvictionResultEntry;
 class EvictionStatistics;
 struct BucketEntryCounters;
 
-class Bucket : public std::enable_shared_from_this<Bucket>,
-               public NonMovableOrCopyable
+class Bucket : public NonMovableOrCopyable
 {
+  protected:
     std::filesystem::path const mFilename;
     Hash const mHash;
     size_t mSize{0};
@@ -76,10 +76,6 @@ class Bucket : public std::enable_shared_from_this<Bucket>,
     std::filesystem::path const& getFilename() const;
     size_t getSize() const;
 
-    // Returns true if a BucketEntry that is key-wise identical to the given
-    // BucketEntry exists in the bucket. For testing.
-    bool containsBucketIdentity(BucketEntry const& id) const;
-
     bool isEmpty() const;
 
     // Delete index and close file stream
@@ -96,6 +92,56 @@ class Bucket : public std::enable_shared_from_this<Bucket>,
     // Sets index, throws if index is already set
     void setIndex(std::unique_ptr<BucketIndex const>&& index);
 
+    // Merge two buckets together, producing a fresh one. Entries in `oldBucket`
+    // are overridden in the fresh bucket by keywise-equal entries in
+    // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal
+    // entries in any of the buckets in the provided `shadows` vector.
+    //
+    // Each bucket is self-describing in terms of the ledger protocol version it
+    // was constructed under, and the merge algorithm adjusts to the maximum of
+    // the versions attached to each input or shadow bucket. The provided
+    // `maxProtocolVersion` bounds this (for error checking) and should usually
+    // be the protocol of the ledger header at which the merge is starting. An
+    // exception will be thrown if any provided bucket versions exceed it.
+    template <typename BucketT>
+    static std::shared_ptr<BucketT>
+    merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
+          std::shared_ptr<BucketT> const& oldBucket,
+          std::shared_ptr<BucketT> const& newBucket,
+          std::vector<std::shared_ptr<BucketT>> const& shadows,
+          bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx,
+          bool doFsync);
+
+    static std::string randomBucketName(std::string const& tmpDir);
+    static std::string randomBucketIndexName(std::string const& tmpDir);
+
+#ifdef BUILD_TESTS
+    BucketIndex const&
+    getIndexForTesting() const
+    {
+        return getIndex();
+    }
+
+#endif // BUILD_TESTS
+
+    virtual uint32_t getBucketVersion() const = 0;
+
+    template <typename BucketT> friend class BucketSnapshotBase;
+};
+
+template <typename BucketT> class SearchableBucketListSnapshot;
+class LiveBucket : public Bucket,
+                   public std::enable_shared_from_this<LiveBucket>
+{
+  public:
+    LiveBucket();
+    LiveBucket(std::string const& filename, Hash const& hash,
+               std::unique_ptr<BucketIndex const>&& index);
+
+    // Returns true if a BucketEntry that is key-wise identical to the given
+    // BucketEntry exists in the bucket. For testing.
+    bool containsBucketIdentity(BucketEntry const& id) const;
+
     // At version 11, we added support for INITENTRY and METAENTRY. Before this
     // we were only supporting LIVEENTRY and DEADENTRY.
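    // (Editorial aside, hedged: the lifecycle types matter to merges. Under
    // protocol 11 and later, for example, an INITENTRY for key K in the old
    // bucket that meets a DEADENTRY for K in the new bucket annihilates --
    // the merge emits neither entry rather than keeping a tombstone. This is
    // the case analysis mergeCasesWithEqualKeys() implements in Bucket.cpp.)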
static constexpr ProtocolVersion @@ -107,29 +153,20 @@ class Bucket : public std::enable_shared_from_this, static void checkProtocolLegality(BucketEntry const& entry, uint32_t protocolVersion); +#ifdef BUILD_TESTS + static std::vector convertToBucketEntry(bool useInit, std::vector const& initEntries, std::vector const& liveEntries, std::vector const& deadEntries); - static std::string randomBucketName(std::string const& tmpDir); - static std::string randomBucketIndexName(std::string const& tmpDir); - -#ifdef BUILD_TESTS // "Applies" the bucket to the database. For each entry in the bucket, // if the entry is init or live, creates or updates the corresponding // entry in the database (respectively; if the entry is dead (a // tombstone), deletes the corresponding entry in the database. void apply(Application& app) const; - - BucketIndex const& - getIndexForTesting() const - { - return getIndex(); - } - -#endif // BUILD_TESTS +#endif // Returns false if eof reached, true otherwise. Modifies iter as the bucket // is scanned. Also modifies bytesToScan and maxEntriesToEvict such that @@ -147,42 +184,43 @@ class Bucket : public std::enable_shared_from_this, bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const; + SearchableBucketListSnapshot& bl) const; // Create a fresh bucket from given vectors of init (created) and live // (updated) LedgerEntries, and dead LedgerEntryKeys. The bucket will // be sorted, hashed, and adopted in the provided BucketManager. - static std::shared_ptr + static std::shared_ptr fresh(BucketManager& bucketManager, uint32_t protocolVersion, std::vector const& initEntries, std::vector const& liveEntries, std::vector const& deadEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync); - // Merge two buckets together, producing a fresh one. Entries in `oldBucket` - // are overridden in the fresh bucket by keywise-equal entries in - // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal - // entries in any of the buckets in the provided `shadows` vector. - // - // Each bucket is self-describing in terms of the ledger protocol version it - // was constructed under, and the merge algorithm adjusts to the maximum of - // the versions attached to each input or shadow bucket. The provided - // `maxProtocolVersion` bounds this (for error checking) and should usually - // be the protocol of the ledger header at which the merge is starting. An - // exception will be thrown if any provided bucket versions exceed it. 
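// Illustrative usage only, not part of the patch (local names `bm`, `oldB`,
// `newB`, `ctx`, and `maxProtocolVersion` are hypothetical): merging two
// live buckets with no shadows through the new templated entry point:
//
//     std::vector<std::shared_ptr<LiveBucket>> noShadows;
//     std::shared_ptr<LiveBucket> merged = Bucket::merge<LiveBucket>(
//         bm, maxProtocolVersion, oldB, newB, noShadows,
//         /*keepDeadEntries=*/true, /*countMergeEvents=*/true, ctx,
//         /*doFsync=*/true);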
- static std::shared_ptr - merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); + uint32_t getBucketVersion() const override; - static uint32_t getBucketVersion(std::shared_ptr const& bucket); - static uint32_t - getBucketVersion(std::shared_ptr const& bucket); BucketEntryCounters const& getBucketEntryCounters() const; - friend class BucketSnapshot; + + friend class LiveBucketSnapshot; +}; + +class HotArchiveBucket : public Bucket, + public std::enable_shared_from_this +{ + public: + HotArchiveBucket(); + HotArchiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index); + uint32_t getBucketVersion() const override; + + // TODO: Change params for HotArchiveBucket + static std::shared_ptr + fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync); + + friend class HotArchiveBucketSnapshot; }; enum class LedgerEntryTypeAndDurability : uint32_t; diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp index 8d20003ec7..8fab1ed483 100644 --- a/src/bucket/BucketApplicator.cpp +++ b/src/bucket/BucketApplicator.cpp @@ -20,7 +20,7 @@ BucketApplicator::BucketApplicator(Application& app, uint32_t maxProtocolVersion, uint32_t minProtocolVersionSeen, uint32_t level, - std::shared_ptr bucket, + std::shared_ptr bucket, std::function filter, std::unordered_set& seenKeys) : mApp(app) @@ -135,7 +135,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) } BucketEntry const& e = *mBucketIter; - Bucket::checkProtocolLegality(e, mMaxProtocolVersion); + LiveBucket::checkProtocolLegality(e, mMaxProtocolVersion); if (shouldApplyEntry(mEntryTypeFilter, e)) { @@ -167,7 +167,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) // The last level can have live entries, but at that point we // know that they are actually init entries because the earliest // state of all entries is init, so we mark them as such here - if (mLevel == BucketListBase::kNumLevels - 1 && + if (mLevel == LiveBucketList::kNumLevels - 1 && e.type() == LIVEENTRY) { ltx->createWithoutLoading(e.liveEntry()); @@ -175,7 +175,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) else if ( protocolVersionIsBefore( mMinProtocolVersionSeen, - Bucket:: + LiveBucket:: FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { // Prior to protocol 11, INITENTRY didn't exist, so we need @@ -207,7 +207,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) releaseAssertOrThrow(!isUsingBucketListDB); if (protocolVersionIsBefore( mMinProtocolVersionSeen, - Bucket:: + LiveBucket:: FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { // Prior to protocol 11, DEAD entries could exist diff --git a/src/bucket/BucketApplicator.h b/src/bucket/BucketApplicator.h index 88bc58ff6a..5218ac162f 100644 --- a/src/bucket/BucketApplicator.h +++ b/src/bucket/BucketApplicator.h @@ -24,7 +24,7 @@ class BucketApplicator uint32_t mMaxProtocolVersion; uint32_t mMinProtocolVersionSeen; uint32_t mLevel; - BucketInputIterator mBucketIter; + LiveBucketInputIterator mBucketIter; size_t mCount{0}; std::function mEntryTypeFilter; std::unordered_set& mSeenKeys; @@ -72,7 +72,7 @@ class BucketApplicator // When this flag is
set, each offer key read is added to seenKeys BucketApplicator(Application& app, uint32_t maxProtocolVersion, uint32_t minProtocolVersionSeen, uint32_t level, - std::shared_ptr bucket, + std::shared_ptr bucket, std::function filter, std::unordered_set& seenKeys); operator bool() const; diff --git a/src/bucket/BucketInputIterator.cpp b/src/bucket/BucketInputIterator.cpp index 7a3673b7f4..c1d290c12b 100644 --- a/src/bucket/BucketInputIterator.cpp +++ b/src/bucket/BucketInputIterator.cpp @@ -4,6 +4,7 @@ #include "bucket/BucketInputIterator.h" #include "bucket/Bucket.h" +#include "xdr/Stellar-ledger.h" #include namespace stellar @@ -12,14 +13,25 @@ namespace stellar * Helper class that reads from the file underlying a bucket, keeping the bucket * alive for the duration of its existence. */ +template void -BucketInputIterator::loadEntry() +BucketInputIterator::loadEntry() { ZoneScoped; if (mIn.readOne(mEntry)) { mEntryPtr = &mEntry; - if (mEntry.type() == METAENTRY) + bool isMeta; + if constexpr (std::is_same::value) + { + isMeta = mEntry.type() == METAENTRY; + } + else + { + isMeta = mEntry.type() == HA_METAENTRY; + } + + if (isMeta) { // There should only be one METAENTRY in the input stream // and it should be the first record. @@ -34,6 +46,18 @@ BucketInputIterator::loadEntry() "Malformed bucket: META after other entries."); } mMetadata = mEntry.metaEntry(); + + if constexpr (std::is_same::value) + { + if (mMetadata.ext.v() != 1 || + mMetadata.ext.bucketListType() != HOT_ARCHIVE) + { + throw std::runtime_error( + "Malformed bucket: META entry with incorrect bucket " + "list type."); + } + } + mSeenMetadata = true; loadEntry(); } @@ -42,7 +66,11 @@ BucketInputIterator::loadEntry() mSeenOtherEntries = true; if (mSeenMetadata) { - Bucket::checkProtocolLegality(mEntry, mMetadata.ledgerVersion); + if constexpr (std::is_same_v) + { + LiveBucket::checkProtocolLegality(mEntry, + mMetadata.ledgerVersion); + } } } } @@ -52,42 +80,48 @@ BucketInputIterator::loadEntry() } } +template std::streamoff -BucketInputIterator::pos() +BucketInputIterator::pos() { return mIn.pos(); } +template size_t -BucketInputIterator::size() const +BucketInputIterator::size() const { return mIn.size(); } -BucketInputIterator::operator bool() const +template BucketInputIterator::operator bool() const { return mEntryPtr != nullptr; } -BucketEntry const& -BucketInputIterator::operator*() +template +typename BucketInputIterator::BucketEntryT const& +BucketInputIterator::operator*() { return *mEntryPtr; } +template bool -BucketInputIterator::seenMetadata() const +BucketInputIterator::seenMetadata() const { return mSeenMetadata; } +template BucketMetadata const& -BucketInputIterator::getMetadata() const +BucketInputIterator::getMetadata() const { return mMetadata; } -BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) +template +BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) : mBucket(bucket), mEntryPtr(nullptr), mSeenMetadata(false) { // In absence of metadata, we treat every bucket as though it is from ledger @@ -106,13 +140,14 @@ BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) } } -BucketInputIterator::~BucketInputIterator() +template BucketInputIterator::~BucketInputIterator() { mIn.close(); } -BucketInputIterator& -BucketInputIterator::operator++() +template +BucketInputIterator& +BucketInputIterator::operator++() { if (mIn) { @@ -125,10 +160,14 @@ BucketInputIterator::operator++() return *this; } +template void -BucketInputIterator::seek(std::streamoff offset) 
+BucketInputIterator::seek(std::streamoff offset) { mIn.seek(offset); loadEntry(); } + +template class BucketInputIterator; +template class BucketInputIterator; } diff --git a/src/bucket/BucketInputIterator.h b/src/bucket/BucketInputIterator.h index 02bdb2f3ea..ffccf33cd0 100644 --- a/src/bucket/BucketInputIterator.h +++ b/src/bucket/BucketInputIterator.h @@ -8,23 +8,32 @@ #include "xdr/Stellar-ledger.h" #include +#include namespace stellar { class Bucket; +class LiveBucket; +class HotArchiveBucket; // Helper class that reads through the entries in a bucket. -class BucketInputIterator +template class BucketInputIterator { - std::shared_ptr mBucket; + static_assert(std::is_same_v || + std::is_same_v); + + using BucketEntryT = std::conditional_t, + BucketEntry, HotArchiveBucketEntry>; + + std::shared_ptr mBucket; // Validity and current-value of the iterator is funneled into a // pointer. If // non-null, it points to mEntry. - BucketEntry const* mEntryPtr{nullptr}; + BucketEntryT const* mEntryPtr{nullptr}; XDRInputFileStream mIn; - BucketEntry mEntry; + BucketEntryT mEntry; bool mSeenMetadata{false}; bool mSeenOtherEntries{false}; BucketMetadata mMetadata; @@ -43,9 +52,9 @@ class BucketInputIterator bool seenMetadata() const; BucketMetadata const& getMetadata() const; - BucketEntry const& operator*(); + BucketEntryT const& operator*(); - BucketInputIterator(std::shared_ptr bucket); + BucketInputIterator(std::shared_ptr bucket); ~BucketInputIterator(); @@ -55,4 +64,7 @@ class BucketInputIterator size_t size() const; void seek(std::streamoff offset); }; + +typedef BucketInputIterator LiveBucketInputIterator; +typedef BucketInputIterator HotArchiveBucketInputIterator; } diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketList.cpp index 2844ec0522..129f1681a8 100644 --- a/src/bucket/BucketList.cpp +++ b/src/bucket/BucketList.cpp @@ -25,15 +25,17 @@ namespace stellar { -BucketLevel::BucketLevel(uint32_t i) +template +BucketLevel::BucketLevel(uint32_t i) : mLevel(i) - , mCurr(std::make_shared()) - , mSnap(std::make_shared()) + , mCurr(std::make_shared()) + , mSnap(std::make_shared()) { } +template uint256 -BucketLevel::getHash() const +BucketLevel::getHash() const { SHA256 hsh; hsh.add(mCurr->getHash()); @@ -41,51 +43,59 @@ BucketLevel::getHash() const return hsh.finish(); } -FutureBucket const& -BucketLevel::getNext() const +template +FutureBucket const& +BucketLevel::getNext() const { return mNextCurr; } -FutureBucket& -BucketLevel::getNext() +template +FutureBucket& +BucketLevel::getNext() { return mNextCurr; } +template void -BucketLevel::setNext(FutureBucket const& fb) +BucketLevel::setNext(FutureBucket const& fb) { releaseAssert(threadIsMain()); mNextCurr = fb; } -std::shared_ptr -BucketLevel::getCurr() const +template +std::shared_ptr +BucketLevel::getCurr() const { return mCurr; } -std::shared_ptr -BucketLevel::getSnap() const +template +std::shared_ptr +BucketLevel::getSnap() const { return mSnap; } +template void -BucketLevel::setCurr(std::shared_ptr b) +BucketLevel::setCurr(std::shared_ptr b) { releaseAssert(threadIsMain()); mNextCurr.clear(); mCurr = b; } -BucketListBase::~BucketListBase() +template BucketListBase::~BucketListBase() { } +template bool -BucketListBase::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) +BucketListBase::shouldMergeWithEmptyCurr(uint32_t ledger, + uint32_t level) { if (level != 0) @@ -93,7 +103,7 @@ BucketListBase::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) // Round down the current ledger to when the merge was started, and // 
re-start the merge via prepare, mimicking the logic in `addBatch` auto mergeStartLedger = - roundDown(ledger, BucketListBase::levelHalf(level - 1)); + roundDown(ledger, BucketListBase::levelHalf(level - 1)); // Subtle: We're "preparing the next state" of this level's mCurr, which // is *either* mCurr merged with snap, or else just snap (if mCurr is @@ -111,15 +121,17 @@ BucketListBase::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) return false; } +template void -BucketLevel::setSnap(std::shared_ptr b) +BucketLevel::setSnap(std::shared_ptr b) { releaseAssert(threadIsMain()); mSnap = b; } +template void -BucketLevel::commit() +BucketLevel::commit() { if (mNextCurr.isLive()) { @@ -162,35 +174,54 @@ BucketLevel::commit() // ---------------------------------------------------------------------------------------- // ... // clang-format on +template void -BucketLevel::prepare(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, std::shared_ptr snap, - std::vector> const& shadows, - bool countMergeEvents) +BucketLevel::prepare( + Application& app, uint32_t currLedger, uint32_t currLedgerProtocol, + std::shared_ptr snap, + std::vector> const& shadows, bool countMergeEvents) { ZoneScoped; // If more than one absorb is pending at the same time, we have a logic // error in our caller (and all hell will break loose). releaseAssert(!mNextCurr.isMerging()); - auto curr = BucketListBase::shouldMergeWithEmptyCurr(currLedger, mLevel) - ? std::make_shared() - : mCurr; - - auto shadowsBasedOnProtocol = - protocolVersionStartsFrom(Bucket::getBucketVersion(snap), - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - ? std::vector>() - : shadows; - mNextCurr = FutureBucket(app, curr, snap, shadowsBasedOnProtocol, - currLedgerProtocol, countMergeEvents, mLevel); + auto curr = + BucketListBase::shouldMergeWithEmptyCurr(currLedger, mLevel) + ? std::make_shared() + : mCurr; + + if constexpr (std::is_same_v) + { + auto shadowsBasedOnProtocol = + protocolVersionStartsFrom( + snap->getBucketVersion(), + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) + ? 
std::vector>() + : shadows; + + mNextCurr = + FutureBucket(app, curr, snap, shadowsBasedOnProtocol, + currLedgerProtocol, countMergeEvents, mLevel); + } + else + { + // TODO: Constructor with no shadows + // mNextCurr = + // FutureBucket(app, curr, snap, shadowsBasedOnProtocol, + // currLedgerProtocol, countMergeEvents, + // mLevel); + releaseAssert(false); + } + releaseAssert(mNextCurr.isMerging()); } -std::shared_ptr -BucketLevel::snap() +template +std::shared_ptr +BucketLevel::snap() { mSnap = mCurr; - mCurr = std::make_shared(); + mCurr = std::make_shared(); return mSnap; } @@ -225,8 +256,9 @@ BucketListDepth::operator uint32_t() const // levelSize(8) = 262144=0x040000 // levelSize(9) = 1048576=0x100000 // levelSize(10) = 4194304=0x400000 +template uint32_t -BucketListBase::levelSize(uint32_t level) +BucketListBase::levelSize(uint32_t level) { releaseAssert(level < kNumLevels); return 1UL << (2 * (level + 1)); @@ -247,14 +279,16 @@ BucketListBase::levelSize(uint32_t level) // levelHalf(8) = 131072=0x020000 // levelHalf(9) = 524288=0x080000 // levelHalf(10) = 2097152=0x200000 +template uint32_t -BucketListBase::levelHalf(uint32_t level) +BucketListBase::levelHalf(uint32_t level) { return levelSize(level) >> 1; } +template uint32_t -BucketListBase::sizeOfCurr(uint32_t ledger, uint32_t level) +BucketListBase::sizeOfCurr(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -265,7 +299,8 @@ BucketListBase::sizeOfCurr(uint32_t ledger, uint32_t level) auto const size = levelSize(level); auto const half = levelHalf(level); - if (level != BucketListBase::kNumLevels - 1 && roundDown(ledger, half) != 0) + if (level != BucketListBase::kNumLevels - 1 && + roundDown(ledger, half) != 0) { uint32_t const sizeDelta = 1UL << (2 * level - 1); if (roundDown(ledger, half) == ledger || @@ -301,12 +336,13 @@ BucketListBase::sizeOfCurr(uint32_t ledger, uint32_t level) } } +template uint32_t -BucketListBase::sizeOfSnap(uint32_t ledger, uint32_t level) +BucketListBase::sizeOfSnap(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); - if (level == BucketListBase::kNumLevels - 1) + if (level == BucketListBase::kNumLevels - 1) { return 0; } @@ -327,8 +363,9 @@ BucketListBase::sizeOfSnap(uint32_t ledger, uint32_t level) } } +template uint32_t -BucketListBase::oldestLedgerInCurr(uint32_t ledger, uint32_t level) +BucketListBase::oldestLedgerInCurr(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -347,8 +384,9 @@ BucketListBase::oldestLedgerInCurr(uint32_t ledger, uint32_t level) return count + 1; } +template uint32_t -BucketListBase::oldestLedgerInSnap(uint32_t ledger, uint32_t level) +BucketListBase::oldestLedgerInSnap(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -366,8 +404,9 @@ BucketListBase::oldestLedgerInSnap(uint32_t ledger, uint32_t level) return count + 1; } +template uint256 -BucketListBase::getHash() const +BucketListBase::getHash() const { ZoneScoped; SHA256 hsh; @@ -397,8 +436,9 @@ BucketListBase::getHash() const // // clang-format on +template bool -BucketListBase::levelShouldSpill(uint32_t ledger, uint32_t level) +BucketListBase::levelShouldSpill(uint32_t ledger, uint32_t level) { if (level == kNumLevels - 1) { @@ -415,8 +455,9 @@ BucketListBase::levelShouldSpill(uint32_t ledger, uint32_t level) // spill frequency of the level below. 
// incoming_spill_frequency(i) = 2^(2i - 1) for i > 0 // incoming_spill_frequency(0) = 1 +template uint32_t -BucketListBase::bucketUpdatePeriod(uint32_t level, bool isCurr) +BucketListBase::bucketUpdatePeriod(uint32_t level, bool isCurr) { if (!isCurr) { @@ -433,27 +474,31 @@ BucketListBase::bucketUpdatePeriod(uint32_t level, bool isCurr) return 1u << (2 * level - 1); } +template bool -BucketListBase::keepDeadEntries(uint32_t level) +BucketListBase::keepDeadEntries(uint32_t level) { - return level < BucketListBase::kNumLevels - 1; + return level < BucketListBase::kNumLevels - 1; } -BucketLevel const& -BucketListBase::getLevel(uint32_t i) const +template +BucketLevel const& +BucketListBase::getLevel(uint32_t i) const { return mLevels.at(i); } -BucketLevel& -BucketListBase::getLevel(uint32_t i) +template +BucketLevel& +BucketListBase::getLevel(uint32_t i) { return mLevels.at(i); } #ifdef BUILD_TESTS +template void -BucketList::resolveAllFutures() +BucketListBase::resolveAllFutures() { ZoneScoped; for (auto& level : mLevels) @@ -466,8 +511,9 @@ BucketList::resolveAllFutures() } #endif +template void -BucketListBase::resolveAnyReadyFutures() +BucketListBase::resolveAnyReadyFutures() { ZoneScoped; for (auto& level : mLevels) @@ -479,8 +525,9 @@ BucketListBase::resolveAnyReadyFutures() } } +template bool -BucketListBase::futuresAllResolved(uint32_t maxLevel) const +BucketListBase::futuresAllResolved(uint32_t maxLevel) const { ZoneScoped; releaseAssert(maxLevel < mLevels.size()); @@ -495,8 +542,9 @@ BucketListBase::futuresAllResolved(uint32_t maxLevel) const return true; } +template uint32_t -BucketListBase::getMaxMergeLevel(uint32_t currLedger) const +BucketListBase::getMaxMergeLevel(uint32_t currLedger) const { uint32_t i = 0; for (; i < static_cast(mLevels.size()) - 1; ++i) @@ -509,14 +557,15 @@ BucketListBase::getMaxMergeLevel(uint32_t currLedger) const return i; } +template uint64_t -BucketListBase::getSize() const +BucketListBase::getSize() const { uint64_t sum = 0; for (auto const& lev : mLevels) { - std::array, 2> buckets = {lev.getCurr(), - lev.getSnap()}; + std::array, 2> buckets = {lev.getCurr(), + lev.getSnap()}; for (auto const& b : buckets) { if (b) @@ -529,17 +578,18 @@ BucketListBase::getSize() const return sum; } +template void -BucketListBase::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +BucketListBase::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { ZoneScoped; releaseAssert(currLedger > 0); - std::vector> shadows; + std::vector> shadows; for (auto& level : mLevels) { shadows.push_back(level.getCurr()); @@ -629,12 +679,12 @@ BucketListBase::addBatch(Application& app, uint32_t currLedger, !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; releaseAssert(shadows.size() == 0); - mLevels[0].prepare(app, currLedger, currLedgerProtocol, - Bucket::fresh(app.getBucketManager(), currLedgerProtocol, - initEntries, liveEntries, deadEntries, - countMergeEvents, - app.getClock().getIOContext(), doFsync), - shadows, countMergeEvents); + mLevels[0].prepare( + app, currLedger, currLedgerProtocol, + BucketT::fresh(app.getBucketManager(), currLedgerProtocol, initEntries, + liveEntries, deadEntries, countMergeEvents, + app.getClock().getIOContext(), doFsync), + 
shadows, countMergeEvents); mLevels[0].commit(); // We almost always want to try to resolve completed merges to single @@ -759,7 +809,7 @@ LiveBucketList::updateEvictionIterAndRecordStats( void LiveBucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, uint32_t scanSize, - std::shared_ptr b, + std::shared_ptr b, EvictionCounters& counters) { // Check to see if we can finish scanning the new bucket before it @@ -826,9 +876,11 @@ LiveBucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, networkConfig.updateEvictionIterator(ltx, evictionIter); } +template void -BucketListBase::restartMerges(Application& app, uint32_t maxProtocolVersion, - uint32_t ledger) +BucketListBase::restartMerges(Application& app, + uint32_t maxProtocolVersion, + uint32_t ledger) { ZoneScoped; for (uint32_t i = 0; i < static_cast(mLevels.size()); i++) @@ -876,9 +928,9 @@ BucketListBase::restartMerges(Application& app, uint32_t maxProtocolVersion, return; } - auto version = Bucket::getBucketVersion(snap); - if (protocolVersionIsBefore(version, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + auto version = snap->getBucketVersion(); + if (protocolVersionIsBefore( + version, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { auto msg = fmt::format( FMT_STRING("Invalid state: bucketlist level {:d} has clear " @@ -898,13 +950,20 @@ BucketListBase::restartMerges(Application& app, uint32_t maxProtocolVersion, } } -BucketListDepth BucketListBase::kNumLevels = 11; +// TODO: Different depths for different types? +template +BucketListDepth BucketListBase::kNumLevels = 11; -BucketListBase::BucketListBase() +template BucketListBase::BucketListBase() { for (uint32_t i = 0; i < kNumLevels; ++i) { - mLevels.push_back(BucketLevel(i)); + mLevels.push_back(BucketLevel(i)); } } + +template class BucketListBase; +template class BucketListBase; +template class BucketLevel; +template class BucketLevel; } diff --git a/src/bucket/BucketList.h b/src/bucket/BucketList.h index 284eb86b38..269a032d5b 100644 --- a/src/bucket/BucketList.h +++ b/src/bucket/BucketList.h @@ -355,33 +355,36 @@ namespace testutil class BucketListDepthModifier; } -class BucketLevel +template class BucketLevel { + static_assert(std::is_same_v || + std::is_same_v); + uint32_t mLevel; - FutureBucket mNextCurr; - std::shared_ptr mCurr; - std::shared_ptr mSnap; + FutureBucket mNextCurr; + std::shared_ptr mCurr; + std::shared_ptr mSnap; public: BucketLevel(uint32_t i); uint256 getHash() const; - FutureBucket const& getNext() const; - FutureBucket& getNext(); - std::shared_ptr getCurr() const; - std::shared_ptr getSnap() const; - void setNext(FutureBucket const& fb); - void setCurr(std::shared_ptr); - void setSnap(std::shared_ptr); + FutureBucket const& getNext() const; + FutureBucket& getNext(); + std::shared_ptr getCurr() const; + std::shared_ptr getSnap() const; + void setNext(FutureBucket const& fb); + void setCurr(std::shared_ptr); + void setSnap(std::shared_ptr); void commit(); void prepare(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, std::shared_ptr snap, - std::vector> const& shadows, + uint32_t currLedgerProtocol, std::shared_ptr snap, + std::vector> const& shadows, bool countMergeEvents); - std::shared_ptr snap(); + std::shared_ptr snap(); }; // NOTE: The access specifications for this class have been carefully chosen to -// make it so BucketListBase::kNumLevels can only be modified from +// make it so LiveBucketList::kNumLevels can only be modified from // BucketListDepthModifier -- not even BucketList can 
modify it. Please // use care when modifying this class. class BucketListDepth @@ -398,10 +401,13 @@ class BucketListDepth friend class testutil::BucketListDepthModifier; }; -class BucketListBase +template class BucketListBase { + static_assert(std::is_same_v || + std::is_same_v); + protected: - std::vector mLevels; + std::vector> mLevels; public: // Trivial pure virtual destructor to make this an abstract class @@ -451,10 +457,10 @@ class BucketListBase BucketListBase(); // Return level `i` of the BucketList. - BucketLevel const& getLevel(uint32_t i) const; + BucketLevel const& getLevel(uint32_t i) const; // Return level `i` of the BucketList. - BucketLevel& getLevel(uint32_t i); + BucketLevel& getLevel(uint32_t i); // Return a cumulative hash of the entire bucketlist; this is the hash of // the concatenation of each level's hash, each of which in turn is the hash @@ -514,7 +520,7 @@ class BucketListBase BucketEntryCounters sumBucketEntryCounters() const; }; -class LiveBucketList : public BucketListBase +class LiveBucketList : public BucketListBase { public: // Reset Eviction Iterator position if an incoming spill or upgrade has @@ -533,7 +539,7 @@ class LiveBucketList : public BucketListBase static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, uint32_t scanSize, - std::shared_ptr b, + std::shared_ptr b, EvictionCounters& counters); void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, @@ -541,7 +547,7 @@ class LiveBucketList : public BucketListBase std::shared_ptr stats); }; -class HotArchiveBucketList : public BucketListBase +class HotArchiveBucketList : public BucketListBase { private: // For now, this class is identical to LiveBucketList. Later PRs will add diff --git a/src/bucket/BucketListSnapshot.cpp b/src/bucket/BucketListSnapshot.cpp index a5e2b15ff0..1e0e3a4c78 100644 --- a/src/bucket/BucketListSnapshot.cpp +++ b/src/bucket/BucketListSnapshot.cpp @@ -3,8 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketListSnapshot.h" +#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketList.h" +#include "bucket/BucketSnapshot.h" #include "crypto/SecretKey.h" // IWYU pragma: keep #include "ledger/LedgerTxn.h" @@ -13,50 +15,51 @@ namespace stellar { - -BucketListSnapshot::BucketListSnapshot(LiveBucketList const& bl, +template +BucketListSnapshot::BucketListSnapshot( + BucketListBase const& bl, LedgerHeader header) : mHeader(std::move(header)) { releaseAssert(threadIsMain()); - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) { auto const& level = bl.getLevel(i); - mLevels.emplace_back(BucketLevelSnapshot(level)); + mLevels.emplace_back(BucketLevelSnapshot(level)); } } -BucketListSnapshot::BucketListSnapshot(BucketListSnapshot const& snapshot) +template +BucketListSnapshot::BucketListSnapshot( + BucketListSnapshot const& snapshot) : mLevels(snapshot.mLevels), mHeader(snapshot.mHeader) { } -std::vector const& -BucketListSnapshot::getLevels() const +template +std::vector> const& +BucketListSnapshot::getLevels() const { return mLevels; } +template uint32_t -BucketListSnapshot::getLedgerSeq() const +BucketListSnapshot::getLedgerSeq() const { return mHeader.ledgerSeq; } -// Loops through all buckets in the given snapshot, starting with curr at level -// 0, then snap at level 0, etc. Calls f on each bucket. 
Exits early if function -returns true -namespace -{ +template void -loopAllBuckets(std::function f, - BucketListSnapshot const& snapshot) +SearchableBucketListSnapshotBase::loopAllBuckets( + std::function f, BucketListSnapshot const& snapshot) const { for (auto const& lev : snapshot.getLevels()) { // Return true if we should exit loop early - auto processBucket = [f](BucketSnapshot const& b) { + auto processBucket = [f](BucketSnapshotT const& b) { if (b.isEmpty()) { return false; @@ -137,7 +140,7 @@ SearchableBucketListSnapshot::getLedgerHeader() } EvictionResult -SearchableBucketListSnapshot::scanForEviction( +SearchableLiveBucketListSnapshot::scanForEviction( uint32_t ledgerSeq, EvictionCounters& counters, EvictionIterator evictionIter, std::shared_ptr stats, StateArchivalSettings const& sas) @@ -147,7 +150,7 @@ SearchableBucketListSnapshot::scanForEviction( auto getBucketFromIter = [&levels = mSnapshot->getLevels()]( - EvictionIterator const& iter) -> BucketSnapshot const& { + EvictionIterator const& iter) -> LiveBucketSnapshot const& { auto& level = levels.at(iter.bucketListLevel); return iter.isCurrBucket ? level.curr : level.snap; }; @@ -186,12 +189,20 @@ SearchableBucketListSnapshot::scanForEviction( return result; } +template std::shared_ptr -SearchableBucketListSnapshot::load(LedgerKey const& k) +SearchableBucketListSnapshotBase::getLedgerEntry(LedgerKey const& k) { ZoneScoped; - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - releaseAssert(mSnapshot); + if constexpr (std::is_same_v) + { + mSnapshotManager.maybeUpdateSnapshot(mSnapshot); + } + else + { + // TODO: + releaseAssert(false); + } if (threadIsMain()) { @@ -207,33 +218,82 @@ SearchableBucketListSnapshot::load(LedgerKey const& k) } } -std::pair, bool> -SearchableBucketListSnapshot::loadKeysFromLedger( - std::set const& inKeys, uint32_t ledgerSeq) +template +std::pair, bool> +SearchableBucketListSnapshotBase::getLedgerEntryInternal( + LedgerKey const& k, uint32_t ledgerSeq) { ZoneScoped; mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); releaseAssert(mSnapshot); - if (ledgerSeq == mSnapshot->getLedgerSeq()) - { - auto result = loadKeysInternal(inKeys, *mSnapshot, /*lkMeter=*/nullptr); - return {result, true}; - } + // if (ledgerSeq == mSnapshot->getLedgerSeq()) + // { + // auto result = loadKeysInternal(inKeys, *mSnapshot, /*lkMeter=*/nullptr); + // return {result, true}; + // } + + // auto iter = mHistoricalSnapshots.find(ledgerSeq); + // if (iter == mHistoricalSnapshots.end()) + // { + // return {{}, false}; + // } + + // releaseAssert(iter->second); + // auto result = loadKeysInternal(inKeys, *iter->second, /*lkMeter=*/nullptr); + // return {result, true}; + std::shared_ptr<LedgerEntry> result{}; + bool sawBloomMiss = false; + auto f = [&](BucketSnapshotT const& b) { + auto [be, bloomMiss] = b.getBucketEntry(k); + sawBloomMiss = sawBloomMiss || bloomMiss; - auto iter = mHistoricalSnapshots.find(ledgerSeq); - if (iter == mHistoricalSnapshots.end()) - { - return {{}, false}; - } + if (be.has_value()) + { + if constexpr (std::is_same_v) + { + result = + be.value().type() == DEADENTRY + ?
nullptr + : std::make_shared(be.value().liveEntry()); + } + else + { + releaseAssert(false); + // TODO + } + + return true; + } + else + { + return false; + } + }; - releaseAssert(iter->second); - auto result = loadKeysInternal(inKeys, *iter->second, /*lkMeter=*/nullptr); - return {result, true}; + loopAllBuckets(f, *mSnapshot); + return {result, sawBloomMiss}; } +template std::vector -SearchableBucketListSnapshot::loadKeysWithLimits( +SearchableBucketListSnapshotBase::loadKeysInternal( + std::set const& inKeys, + LedgerKeyMeter* lkMeter) +{ + std::vector entries; + + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + auto f = [&](BucketSnapshotT const& b) { + b.loadKeysWithLimits(keys, entries, lkMeter); + return keys.empty(); + }; + + loopAllBuckets(f, *mSnapshot); + return entries; +} + +std::vector +SearchableLiveBucketListSnapshot::loadKeysWithLimits( std::set const& inKeys, LedgerKeyMeter* lkMeter) { @@ -260,7 +320,7 @@ SearchableBucketListSnapshot::loadKeysWithLimits( // 2. Perform a bulk lookup for all possible trustline keys, that is, all // trustlines with the given accountID and poolID from step 1 std::vector -SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( +SearchableLiveBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( AccountID const& accountID, Asset const& asset) { ZoneScoped; @@ -272,7 +332,8 @@ SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( LedgerKeySet trustlinesToLoad; - auto trustLineLoop = [&](BucketSnapshot const& b) { + auto trustLineLoop = [&](auto const& rawB) { + auto const& b = static_cast(rawB); for (auto const& poolID : b.getPoolIDsByAsset(asset)) { LedgerKey trustlineKey(TRUSTLINE); @@ -295,8 +356,8 @@ SearchableBucketListSnapshot::loadKeysWithLimits( } std::vector -SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners, - int64_t minBalance) +SearchableLiveBucketListSnapshot::loadInflationWinners(size_t maxWinners, + int64_t minBalance) { ZoneScoped; mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); @@ -311,8 +372,8 @@ SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners, UnorderedMap voteCount; UnorderedSet seen; - auto countVotesInBucket = [&](BucketSnapshot const& b) { - for (BucketInputIterator in(b.getRawBucket()); in; ++in) + auto countVotesInBucket = [&](LiveBucketSnapshot const& b) { + for (LiveBucketInputIterator in(b.getRawBucket()); in; ++in) { BucketEntry const& be = *in; if (be.type() == DEADENTRY) @@ -387,17 +448,45 @@ SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners, return winners; } -BucketLevelSnapshot::BucketLevelSnapshot(BucketLevel const& level) +template +BucketLevelSnapshot::BucketLevelSnapshot( + BucketLevel const& level) : curr(level.getCurr()), snap(level.getSnap()) { } -SearchableBucketListSnapshot::SearchableBucketListSnapshot( +template +SearchableBucketListSnapshotBase::SearchableBucketListSnapshotBase( BucketSnapshotManager
const& snapshotManager) + : SearchableBucketListSnapshotBase(snapshotManager) +{ } +template struct BucketLevelSnapshot; +template struct BucketLevelSnapshot; +template class BucketListSnapshot; +template class BucketListSnapshot; +template class SearchableBucketListSnapshotBase; +template class SearchableBucketListSnapshotBase; } \ No newline at end of file diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshot.h index 0397883411..ed80a6a241 100644 --- a/src/bucket/BucketListSnapshot.h +++ b/src/bucket/BucketListSnapshot.h @@ -17,30 +17,43 @@ class Timer; namespace stellar { -struct BucketLevelSnapshot +template struct BucketLevelSnapshot { - BucketSnapshot curr; - BucketSnapshot snap; + static_assert(std::is_same_v || + std::is_same_v); - BucketLevelSnapshot(BucketLevel const& level); + using BucketSnapshotT = + std::conditional_t, + LiveBucketSnapshot, HotArchiveBucketSnapshot>; + + BucketSnapshotT curr; + BucketSnapshotT snap; + + BucketLevelSnapshot(BucketLevel const& level); }; -class BucketListSnapshot : public NonMovable +template class BucketListSnapshot : public NonMovable { + static_assert(std::is_same_v || + std::is_same_v); + using BucketSnapshotT = + std::conditional_t, + LiveBucketSnapshot, HotArchiveBucketSnapshot>; + private: - std::vector mLevels; + std::vector> mLevels; // LedgerHeader associated with this ledger state snapshot LedgerHeader const mHeader; public: - BucketListSnapshot(LiveBucketList const& bl, LedgerHeader hhe); + BucketListSnapshot(BucketListBase const& bl, LedgerHeader hhe); // Only allow copies via constructor BucketListSnapshot(BucketListSnapshot const& snapshot); BucketListSnapshot& operator=(BucketListSnapshot const&) = delete; - std::vector const& getLevels() const; + std::vector> const& getLevels() const; uint32_t getLedgerSeq() const; LedgerHeader const& getLedgerHeader() const @@ -58,19 +71,41 @@ class BucketListSnapshot : public NonMovable // instance will check that the current snapshot is up to date via the // BucketListSnapshotManager and will be refreshed accordingly. Callers can // assume SearchableBucketListSnapshot is always up to date. -class SearchableBucketListSnapshot : public NonMovableOrCopyable +template +class SearchableBucketListSnapshotBase : public NonMovableOrCopyable { + static_assert(std::is_same_v || + std::is_same_v); + + using BucketSnapshotT = + std::conditional_t, + LiveBucketSnapshot, HotArchiveBucketSnapshot>; + + protected: + virtual ~SearchableBucketListSnapshotBase() = 0; + BucketSnapshotManager const& mSnapshotManager; // Snapshot managed by SnapshotManager - std::unique_ptr mSnapshot{}; - std::map> + std::unique_ptr const> mSnapshot{}; + std::map const>> mHistoricalSnapshots; - SearchableBucketListSnapshot(BucketSnapshotManager const& snapshotManager); + // Loops through all buckets, starting with curr at level 0, then snap at + // level 0, etc. Calls f on each bucket. 
Exits early if function + // returns true + void loopAllBuckets(std::function f, + BucketListSnapshot const& snapshot) const; - friend std::shared_ptr - BucketSnapshotManager::copySearchableBucketListSnapshot() const; + SearchableBucketListSnapshotBase( + BucketSnapshotManager const& snapshotManager); +}; + +class SearchableLiveBucketListSnapshot + : public SearchableBucketListSnapshotBase +{ + SearchableLiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager); public: std::vector @@ -101,6 +136,7 @@ class SearchableBucketListSnapshot : public NonMovableOrCopyable EvictionIterator evictionIter, std::shared_ptr stats, StateArchivalSettings const& sas); + uint32_t getLedgerSeq() const; LedgerHeader const& getLedgerHeader(); }; diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 6d352ccc34..d78d4322f6 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -30,7 +30,6 @@ class LiveBucketList; class HotArchiveBucketList; class BucketSnapshotManager; class Config; -class SearchableBucketListSnapshot; class TmpDirManager; struct HistoryArchiveState; struct InflationWinner; @@ -217,12 +216,16 @@ class BucketManager : NonMovableOrCopyable // This method is mostly-threadsafe -- assuming you don't destruct the // BucketManager mid-call -- and is intended to be called from both main and // worker threads. Very carefully. - virtual std::shared_ptr - adoptFileAsBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index) = 0; - - // Companion method to `adoptFileAsBucket` also called from the + virtual std::shared_ptr + adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, + std::unique_ptr index) = 0; + virtual std::shared_ptr + adoptFileAsHotArchiveBucket(std::string const& filename, + uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) = 0; + + // Companion method to `adoptFileAsLiveBucket` also called from the // `BucketOutputIterator::getBucket` merge-completion path. This method // however should be called when the output bucket is _empty_ and thereby // doesn't correspond to a file on disk; the method forgets about the @@ -235,15 +238,20 @@ class BucketManager : NonMovableOrCopyable virtual std::shared_ptr getBucketIfExists(uint256 const& hash) = 0; // Return a bucket by hash if we have it, else return nullptr. - virtual std::shared_ptr getBucketByHash(uint256 const& hash) = 0; + virtual std::shared_ptr + getLiveBucketByHash(uint256 const& hash) = 0; + virtual std::shared_ptr + getHotArchiveBucketByHash(uint256 const& hash) = 0; // Get a reference to a merge-future that's either running (or finished // somewhat recently) from either a map of the std::shared_futures doing the // merges and/or a set of records mapping merge inputs to outputs and the // set of outputs held in the BucketManager. Returns an invalid future if no // such future can be found or synthesized. - virtual std::shared_future> - getMergeFuture(MergeKey const& key) = 0; + virtual std::shared_future> + getLiveMergeFuture(MergeKey const& key) = 0; + virtual std::shared_future> + getHotArchiveMergeFuture(MergeKey const& key) = 0; // Add a reference to a merge _in progress_ (not yet adopted as a file) to // the BucketManager's internal map of std::shared_futures doing merges. @@ -251,8 +259,11 @@ class BucketManager : NonMovableOrCopyable // be removed from the map when the merge completes and the output file is // adopted. 
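// Illustrative round trip, not part of the patch (names `bm`, `key`, and
// the promise are hypothetical): how the put/get pair below is intended to
// be used for a live merge running on a worker thread:
//
//     std::promise<std::shared_ptr<LiveBucket>> promise;
//     bm.putLiveMergeFuture(key, promise.get_future().share());
//     // ... later, another caller re-attaches to the running merge:
//     auto fut = bm.getLiveMergeFuture(key);
//     if (fut.valid())
//     {
//         std::shared_ptr<LiveBucket> out = fut.get(); // waits for the merge
//     }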
virtual void - putMergeFuture(MergeKey const& key, - std::shared_future>) = 0; + putLiveMergeFuture(MergeKey const& key, + std::shared_future>) = 0; + virtual void putHotArchiveMergeFuture( + MergeKey const& key, + std::shared_future>) = 0; #ifdef BUILD_TESTS // Drop all references to merge futures in progress. @@ -356,7 +367,7 @@ class BucketManager : NonMovableOrCopyable // Merge the bucket list of the provided HAS into a single "super bucket" // consisting of only live entries, and return it. - virtual std::shared_ptr + virtual std::shared_ptr mergeBuckets(HistoryArchiveState const& has) = 0; // Visits all the active ledger entries or subset thereof. diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManagerImpl.cpp index 8007ee357a..9d5964739d 100644 --- a/src/bucket/BucketManagerImpl.cpp +++ b/src/bucket/BucketManagerImpl.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -134,7 +135,7 @@ BucketManagerImpl::initialize() // TODO: Archival BucketList snapshot mSnapshotManager = std::make_unique( mApp, - std::make_unique(*mLiveBucketList, + std::make_unique>(*mLiveBucketList, LedgerHeader()), mConfig.QUERY_SNAPSHOT_LEDGERS); } @@ -505,7 +506,26 @@ BucketManagerImpl::renameBucketDirFile(std::filesystem::path const& src, } } -std::shared_ptr +std::shared_ptr +BucketManagerImpl::adoptFileAsLiveBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) +{ + return adoptFileAsBucket(filename, hash, mergeKey, + std::move(index)); +} + +std::shared_ptr +BucketManagerImpl::adoptFileAsHotArchiveBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) +{ + return adoptFileAsBucket(filename, hash, mergeKey, + std::move(index)); +} + +template +std::shared_ptr BucketManagerImpl::adoptFileAsBucket(std::string const& filename, uint256 const& hash, MergeKey* mergeKey, std::unique_ptr index) @@ -523,15 +543,16 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, // weak record of the input/output mapping, so we can reconstruct the // future if anyone wants to restart the same merge before the bucket // expires. 
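// Aside, illustrative only: adoptFileAsLiveBucket/adoptFileAsHotArchiveBucket
// above are thin typed entry points over this private template, because the
// abstract BucketManager interface cannot expose a virtual member template.
// The pattern in miniature (all names hypothetical):
//
//     class Iface
//     {
//       public:
//         virtual std::shared_ptr<LiveBucket> adoptLive(std::string const& f) = 0;
//     };
//     class Impl : public Iface
//     {
//         template <typename BucketT>
//         std::shared_ptr<BucketT> adoptT(std::string const& f); // private worker
//       public:
//         std::shared_ptr<LiveBucket>
//         adoptLive(std::string const& f) override
//         {
//             return adoptT<LiveBucket>(f);
//         }
//     };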
- CLOG_TRACE(Bucket, - "BucketManager::adoptFileAsBucket switching merge {} from " - "live to finished for output={}", - *mergeKey, hexAbbrev(hash)); + CLOG_TRACE( + Bucket, + "BucketManager::adoptFileAsLiveBucket switching merge {} from " + "live to finished for output={}", + *mergeKey, hexAbbrev(hash)); mLiveFutures.erase(*mergeKey); } // Check to see if we have an existing bucket (either in-memory or on-disk) - std::shared_ptr b = getBucketByHash(hash); + std::shared_ptr b = getBucketByHash(hash); if (b) { CLOG_DEBUG( @@ -566,7 +587,7 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, } } - b = std::make_shared(canonicalName, hash, std::move(index)); + b = std::make_shared(canonicalName, hash, std::move(index)); { mSharedBuckets.emplace(hash, b); mSharedBucketsSize.set_count(mSharedBuckets.size()); @@ -618,21 +639,41 @@ BucketManagerImpl::getBucketIfExists(uint256 const& hash) return nullptr; } -std::shared_ptr +std::shared_ptr +BucketManagerImpl::getLiveBucketByHash(uint256 const& hash) +{ + return getBucketByHash(hash); +} + +std::shared_ptr +BucketManagerImpl::getHotArchiveBucketByHash(uint256 const& hash) +{ + return getBucketByHash(hash); +} + +template +std::shared_ptr BucketManagerImpl::getBucketByHash(uint256 const& hash) { ZoneScoped; std::lock_guard lock(mBucketMutex); if (isZero(hash)) { - return std::make_shared(); + return std::make_shared(); } auto i = mSharedBuckets.find(hash); if (i != mSharedBuckets.end()) { CLOG_TRACE(Bucket, "BucketManager::getBucketByHash({}) found bucket {}", binToHex(hash), i->second->getFilename()); - return i->second; + + // Because BucketManager has an impl class, no public templated functions + // can be declared. This means we have to manually enforce types via + // `getLiveBucketByHash` and `getHotArchiveBucketByHash`, leading to this + // ugly cast.
+ auto ret = std::dynamic_pointer_cast(i->second); + releaseAssertOrThrow(ret); + return ret; } std::string canonicalName = bucketFilename(hash); if (fs::exists(canonicalName)) { @@ -643,15 +684,28 @@ BucketManagerImpl::getBucketByHash(uint256 const& hash) binToHex(hash)); auto p = - std::make_shared(canonicalName, hash, /*index=*/nullptr); + std::make_shared(canonicalName, hash, /*index=*/nullptr); mSharedBuckets.emplace(hash, p); mSharedBucketsSize.set_count(mSharedBuckets.size()); return p; } - return std::shared_ptr(); + return std::shared_ptr(); } -std::shared_future> +std::shared_future> +BucketManagerImpl::getLiveMergeFuture(MergeKey const& key) +{ + return getMergeFuture(key); +} + +std::shared_future> +BucketManagerImpl::getHotArchiveMergeFuture(MergeKey const& key) +{ + return getMergeFuture(key); +} + +template +std::shared_future> BucketManagerImpl::getMergeFuture(MergeKey const& key) { ZoneScoped; @@ -665,14 +719,14 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key) Hash bucketHash; if (mFinishedMerges.findMergeFor(key, bucketHash)) { - auto bucket = getBucketByHash(bucketHash); + auto bucket = getBucketByHash(bucketHash); if (bucket) { CLOG_TRACE(Bucket, "BucketManager::getMergeFuture returning new future " "for finished merge {} with output={}", key, hexAbbrev(bucketHash)); - std::promise> promise; + std::promise> promise; auto future = promise.get_future().share(); promise.set_value(bucket); mc.mFinishedMergeReattachments++; @@ -684,7 +738,7 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key) Bucket, "BucketManager::getMergeFuture returning empty future for merge {}", key); - return std::shared_future>(); + return std::shared_future>(); } CLOG_TRACE( Bucket, "BucketManager::getMergeFuture returning running future for merge {}", key); mc.mRunningMergeReattachments++; incrMergeCounters(mc); - return i->second; + + // Because BucketManager has an impl class, no public templated functions + // can be declared. This means we have to manually enforce types via + // `getLiveMergeFuture` and `getHotArchiveMergeFuture`, leading to this ugly + // variant get that throws if the type is not correct.
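// In miniature, the variant mechanics this relies on (illustrative only,
// using the future aliases declared in BucketManagerImpl.h):
//
//     using LiveF = std::shared_future<std::shared_ptr<LiveBucket>>;
//     using HotF = std::shared_future<std::shared_ptr<HotArchiveBucket>>;
//     std::variant<LiveF, HotF> v = LiveF{};
//     auto f = std::get<LiveF>(v); // fine: the variant holds a LiveF
//     // std::get<HotF>(v) would throw std::bad_variant_access instead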
+ return std::get>>(i->second); +} + +void +BucketManagerImpl::putLiveMergeFuture( + MergeKey const& key, std::shared_future> wp) +{ + putMergeFuture(key, wp); } +void +BucketManagerImpl::putHotArchiveMergeFuture( + MergeKey const& key, + std::shared_future> wp) +{ + putMergeFuture(key, wp); +} + +template void BucketManagerImpl::putMergeFuture( - MergeKey const& key, std::shared_future> wp) + MergeKey const& key, std::shared_future> wp) { ZoneScoped; releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); @@ -728,9 +802,9 @@ BucketManagerImpl::getBucketListReferencedBuckets() const return referenced; } - auto processBucketList = [&](auto const& bl) { + auto processBucketList = [&](auto const& bl, uint32_t levels) { // retain current bucket list - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < levels; ++i) { auto const& level = bl->getLevel(i); auto rit = referenced.emplace(level.getCurr()->getHash()); @@ -756,8 +830,9 @@ BucketManagerImpl::getBucketListReferencedBuckets() const } }; - processBucketList(mLiveBucketList); - processBucketList(mHotArchiveBucketList); + processBucketList(mLiveBucketList, LiveBucketList::kNumLevels); + processBucketList(mHotArchiveBucketList, + BucketListBase::kNumLevels); return referenced; } @@ -912,7 +987,7 @@ BucketManagerImpl::forgetUnreferencedBuckets() // There should be no futures alive with this output: we // switched to storing only weak input/output mappings // when any merge producing the bucket completed (in - // adoptFileAsBucket), and we believe there's only one + // adoptFileAsLiveBucket), and we believe there's only one // reference to the bucket anyways -- our own in // mSharedBuckets. But there might be a race we missed, // so double check & mop up here. Worst case we prevent @@ -1028,12 +1103,17 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) if (mConfig.MODE_ENABLES_BUCKETLIST) { if (protocolVersionStartsFrom(currentHeader.ledgerVersion, - ProtocolVersion::V_21)) + ProtocolVersion::V_22)) { - SHA256 hasher; - hasher.add(mLiveBucketList->getHash()); - hasher.add(mHotArchiveBucketList->getHash()); - hash = hasher.finish(); + // TODO: Hash Archive Bucket + // Holding off on this until buckets are written to history + + // SHA256 hasher; + // hasher.add(mLiveBucketList->getHash()); + // hasher.add(mHotArchiveBucketList->getHash()); + // hash = hasher.finish(); + + hash = mLiveBucketList->getHash(); } else { @@ -1240,10 +1320,13 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, ZoneScoped; releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + // TODO: Assume archival bucket state + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - auto curr = getBucketByHash(hexToBin256(has.currentBuckets.at(i).curr)); - auto snap = getBucketByHash(hexToBin256(has.currentBuckets.at(i).snap)); + auto curr = + getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).curr)); + auto snap = + getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).snap)); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while assuming " @@ -1251,11 +1334,11 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, } auto const& nextFuture = has.currentBuckets.at(i).next; - std::shared_ptr nextBucket = nullptr; + std::shared_ptr nextBucket = nullptr; if (nextFuture.hasOutputHash()) { nextBucket = - getBucketByHash(hexToBin256(nextFuture.getOutputHash())); + 
getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash())); if (!nextBucket) { throw std::runtime_error( @@ -1305,14 +1388,14 @@ BucketManagerImpl::isShutdown() const // inserting live or init entries. Should be called in a loop over a BL, from // old to new. static void -loadEntriesFromBucket(std::shared_ptr b, std::string const& name, +loadEntriesFromBucket(std::shared_ptr b, std::string const& name, std::map& map) { ZoneScoped; using namespace std::chrono; medida::Timer timer; - BucketInputIterator in(b); + LiveBucketInputIterator in(b); timer.Time([&]() { while (in) { @@ -1358,7 +1441,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) std::map ledgerMap; std::vector> hashes; - for (uint32_t i = BucketListBase::kNumLevels; i > 0; --i) + for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i - 1); hashes.emplace_back(hexToBin256(hsb.snap), @@ -1372,7 +1455,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) { continue; } - auto b = getBucketByHash(pair.first); + auto b = getLiveBucketByHash(pair.first); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1383,7 +1466,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) return ledgerMap; } -std::shared_ptr +std::shared_ptr BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) { ZoneScoped; @@ -1393,8 +1476,8 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) MergeCounters mc; auto& ctx = mApp.getClock().getIOContext(); meta.ledgerVersion = mConfig.LEDGER_PROTOCOL_VERSION; - BucketOutputIterator out(getTmpDir(), /*keepDeadEntries=*/false, meta, mc, - ctx, /*doFsync=*/true); + LiveBucketOutputIterator out(getTmpDir(), /*keepDeadEntries=*/false, meta, + mc, ctx, /*doFsync=*/true); for (auto const& pair : ledgerMap) { BucketEntry be; @@ -1407,7 +1490,7 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) static bool visitLiveEntriesInBucket( - std::shared_ptr b, std::string const& name, + std::shared_ptr b, std::string const& name, std::optional minLedger, std::function const& filterEntry, std::function const& acceptEntry, @@ -1420,7 +1503,7 @@ visitLiveEntriesInBucket( bool stopIteration = false; timer.Time([&]() { - for (BucketInputIterator in(b); in; ++in) + for (LiveBucketInputIterator in(b); in; ++in) { BucketEntry const& e = *in; if (e.type() == LIVEENTRY || e.type() == INITENTRY) @@ -1471,7 +1554,7 @@ visitLiveEntriesInBucket( static bool visitAllEntriesInBucket( - std::shared_ptr b, std::string const& name, + std::shared_ptr b, std::string const& name, std::optional minLedger, std::function const& filterEntry, std::function const& acceptEntry) @@ -1483,7 +1566,7 @@ visitAllEntriesInBucket( bool stopIteration = false; timer.Time([&]() { - for (BucketInputIterator in(b); in; ++in) + for (LiveBucketInputIterator in(b); in; ++in) { BucketEntry const& e = *in; if (e.type() == LIVEENTRY || e.type() == INITENTRY) @@ -1535,7 +1618,7 @@ BucketManagerImpl::visitLedgerEntries( UnorderedSet deletedEntries; std::vector> hashes; - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr), @@ -1551,7 +1634,7 @@ BucketManagerImpl::visitLedgerEntries( { continue; } - auto b = getBucketByHash(pair.first); + auto b = getLiveBucketByHash(pair.first); if (!b) { throw 
std::runtime_error(std::string("missing bucket: ") + @@ -1587,7 +1670,9 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork() { continue; } - auto b = getBucketByHash(h); + + // TODO: Update verify to work for ArchiveBucket + auto b = getBucketByHash(h); if (!b) { throw std::runtime_error(fmt::format( diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h index 927de0e418..51cf16a468 100644 --- a/src/bucket/BucketManagerImpl.h +++ b/src/bucket/BucketManagerImpl.h @@ -85,8 +85,14 @@ class BucketManagerImpl : public BucketManager // FutureBucket being resolved). Entries in this map will be cleared when // the FutureBucket is _cleared_ (typically when the owning BucketList level // is committed). - UnorderedMap>> - mLiveFutures; + + using LiveBucketFutureT = std::shared_future>; + using HotArchiveBucketFutureT = + std::shared_future>; + using BucketFutureT = + std::variant; + + UnorderedMap mLiveFutures; // Records bucket-merges that are _finished_, i.e. have been adopted as // (possibly redundant) bucket files. This is a "weak" (bi-multi-)map of @@ -105,6 +111,23 @@ class BucketManagerImpl : public BucketManager size_t numEntries) const; medida::Timer& getPointLoadTimer(LedgerEntryType t) const; + template + std::shared_ptr + adoptFileAsBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, + std::unique_ptr index); + + template + std::shared_ptr getBucketByHash(uint256 const& hash); + + template + std::shared_future> + getMergeFuture(MergeKey const& key); + + template + void putMergeFuture(MergeKey const& key, + std::shared_future>); + #ifdef BUILD_TESTS bool mUseFakeTestValuesForNextClose{false}; uint32_t mFakeTestProtocolVersion; @@ -133,18 +156,30 @@ class BucketManagerImpl : public BucketManager TmpDirManager& getTmpDirManager() override; bool renameBucketDirFile(std::filesystem::path const& src, std::filesystem::path const& dst) override; - std::shared_ptr - adoptFileAsBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index) override; + std::shared_ptr + adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, + std::unique_ptr index) override; + std::shared_ptr adoptFileAsHotArchiveBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) override; void noteEmptyMergeOutput(MergeKey const& mergeKey) override; std::shared_ptr getBucketIfExists(uint256 const& hash) override; - std::shared_ptr getBucketByHash(uint256 const& hash) override; - - std::shared_future> - getMergeFuture(MergeKey const& key) override; - void putMergeFuture(MergeKey const& key, - std::shared_future>) override; + std::shared_ptr + getLiveBucketByHash(uint256 const& hash) override; + std::shared_ptr + getHotArchiveBucketByHash(uint256 const& hash) override; + + std::shared_future> + getLiveMergeFuture(MergeKey const& key) override; + std::shared_future> + getHotArchiveMergeFuture(MergeKey const& key) override; + void putLiveMergeFuture( + MergeKey const& key, + std::shared_future>) override; + void putHotArchiveMergeFuture( + MergeKey const& key, + std::shared_future>) override; #ifdef BUILD_TESTS void clearMergeFuturesForTesting() override; #endif @@ -196,7 +231,7 @@ class BucketManagerImpl : public BucketManager std::map loadCompleteLedgerState(HistoryArchiveState const& has) override; - std::shared_ptr + std::shared_ptr mergeBuckets(HistoryArchiveState const& has) override; void visitLedgerEntries( diff --git
a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp index aeb5dac49b..b560ab3cb7 100644 --- a/src/bucket/BucketOutputIterator.cpp +++ b/src/bucket/BucketOutputIterator.cpp @@ -7,6 +7,8 @@ #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" #include "util/GlobalChecks.h" +#include "util/ProtocolVersion.h" +#include "xdr/Stellar-ledger.h" #include #include @@ -17,11 +19,10 @@ namespace stellar * Helper class that points to an output tempfile. Absorbs BucketEntries and * hashes them while writing to either destination. Produces a Bucket when done. */ -BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, - bool keepDeadEntries, - BucketMetadata const& meta, - MergeCounters& mc, - asio::io_context& ctx, bool doFsync) +template +BucketOutputIterator::BucketOutputIterator( + std::string const& tmpDir, bool keepDeadEntries, BucketMetadata const& meta, + MergeCounters& mc, asio::io_context& ctx, bool doFsync) : mFilename(Bucket::randomBucketName(tmpDir)) , mOut(ctx, doFsync) , mCtx(ctx) @@ -38,34 +39,73 @@ BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, if (protocolVersionStartsFrom( meta.ledgerVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { - BucketEntry bme; - bme.type(METAENTRY); - bme.metaEntry() = mMeta; - put(bme); + + if constexpr (std::is_same_v) + { + BucketEntry bme; + bme.type(METAENTRY); + bme.metaEntry() = mMeta; + put(bme); + } + else + { + releaseAssertOrThrow(protocolVersionStartsFrom( + meta.ledgerVersion, ProtocolVersion::V_22)); + + HotArchiveBucketEntry bme; + bme.type(HA_METAENTRY); + bme.metaEntry() = mMeta; + put(bme); + } + mPutMeta = true; } } +template void -BucketOutputIterator::put(BucketEntry const& e) +BucketOutputIterator::put(BucketEntryT const& e) { ZoneScoped; - Bucket::checkProtocolLegality(e, mMeta.ledgerVersion); - if (e.type() == METAENTRY) + + if constexpr (std::is_same_v) { - if (mPutMeta) + LiveBucket::checkProtocolLegality(e, mMeta.ledgerVersion); + if (e.type() == METAENTRY) { - throw std::runtime_error( - "putting META entry in bucket after initial entry"); + if (mPutMeta) + { + throw std::runtime_error( + "putting META entry in bucket after initial entry"); + } } - } - if (!mKeepDeadEntries && e.type() == DEADENTRY) + if (!mKeepDeadEntries && e.type() == DEADENTRY) + { + ++mMergeCounters.mOutputIteratorTombstoneElisions; + return; + } + } + else { - ++mMergeCounters.mOutputIteratorTombstoneElisions; - return; + if (e.type() == HA_METAENTRY) + { + if (mPutMeta) + { + throw std::runtime_error( + "putting META entry in bucket after initial entry"); + } + } + + // RESTORED entries are dropped in the last bucket level (similar to + // DEADENTRY) on live BucketLists + if (!mKeepDeadEntries && e.type() == HA_RESTORED) + { + ++mMergeCounters.mOutputIteratorTombstoneElisions; + return; + } } // Check to see if there's an existing buffered entry. @@ -86,7 +126,7 @@ BucketOutputIterator::put(BucketEntry const& e) } else { - mBuf = std::make_unique(); + mBuf = std::make_unique(); } // In any case, replace *mBuf with e. 
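To illustrate the compile-time dispatch used by the templated put() above, here is a minimal, self-contained C++17 sketch; every name in it (FakeLiveBucket, FakeHotArchiveBucket, the entry structs) is a hypothetical stand-in rather than a stellar-core type:

#include <iostream>
#include <type_traits>

struct FakeLiveBucket {};
struct FakeHotArchiveBucket {};

// Stand-ins for BucketEntry / HotArchiveBucketEntry.
struct FakeLiveEntry { bool dead = false; };
struct FakeHotArchiveEntry { bool restored = false; };

template <typename BucketT> struct OutputIteratorSketch
{
    static_assert(std::is_same_v<BucketT, FakeLiveBucket> ||
                  std::is_same_v<BucketT, FakeHotArchiveBucket>);

    // Entry type selected at compile time, as BucketEntryT is above.
    using EntryT = std::conditional_t<std::is_same_v<BucketT, FakeLiveBucket>,
                                      FakeLiveEntry, FakeHotArchiveEntry>;

    bool mKeepDeadEntries{false};

    void put(EntryT const& e)
    {
        if constexpr (std::is_same_v<BucketT, FakeLiveBucket>)
        {
            // Live path: DEADENTRY tombstones are elided at the last level.
            if (!mKeepDeadEntries && e.dead)
            {
                std::cout << "elided live tombstone\n";
                return;
            }
        }
        else
        {
            // Hot-archive path: RESTORED entries play the tombstone role.
            if (!mKeepDeadEntries && e.restored)
            {
                std::cout << "elided restored entry\n";
                return;
            }
        }
        std::cout << "wrote entry\n";
    }
};

int main()
{
    OutputIteratorSketch<FakeLiveBucket> live;
    live.put(FakeLiveEntry{true}); // elided

    OutputIteratorSketch<FakeHotArchiveBucket> hot;
    hot.put(FakeHotArchiveEntry{true}); // elided
    return 0;
}

The same pattern recurs in BucketEntryIdCmp, BucketSnapshotBase, and FutureBucket below: one template parameter selects the entry type and the per-flavor branches once, at compile time.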
@@ -94,10 +134,11 @@ BucketOutputIterator::put(BucketEntry const& e) *mBuf = e; } -std::shared_ptr -BucketOutputIterator::getBucket(BucketManager& bucketManager, - bool shouldSynchronouslyIndex, - MergeKey* mergeKey) +template +std::shared_ptr +BucketOutputIterator::getBucket(BucketManager& bucketManager, + bool shouldSynchronouslyIndex, + MergeKey* mergeKey) { ZoneScoped; if (mBuf) @@ -118,7 +159,7 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, { bucketManager.noteEmptyMergeOutput(*mergeKey); } - return std::make_shared(); + return std::make_shared(); } auto hash = mHasher.finish(); @@ -138,7 +179,19 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, } } - return bucketManager.adoptFileAsBucket(mFilename.string(), hash, mergeKey, - std::move(index)); + if constexpr (std::is_same_v) + { + return bucketManager.adoptFileAsLiveBucket(mFilename.string(), hash, + mergeKey, std::move(index)); + } + else + { + // TODO: + releaseAssert(false); + return std::shared_ptr(); + } } + +template class BucketOutputIterator; +template class BucketOutputIterator; } diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h index 20aed133c5..0a72659890 100644 --- a/src/bucket/BucketOutputIterator.h +++ b/src/bucket/BucketOutputIterator.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/BucketManager.h" #include "bucket/LedgerCmp.h" #include "util/XDRStream.h" @@ -20,14 +21,20 @@ class BucketManager; // Helper class that writes new elements to a file and returns a bucket // when finished. -class BucketOutputIterator +template class BucketOutputIterator { + static_assert(std::is_same_v || + std::is_same_v); + + using BucketEntryT = std::conditional_t, + BucketEntry, HotArchiveBucketEntry>; + protected: std::filesystem::path mFilename; XDROutputFileStream mOut; - BucketEntryIdCmp mCmp; + BucketEntryIdCmp mCmp; asio::io_context& mCtx; - std::unique_ptr mBuf; + std::unique_ptr mBuf; SHA256 mHasher; size_t mBytesPut{0}; size_t mObjectsPut{0}; @@ -48,10 +55,13 @@ class BucketOutputIterator BucketMetadata const& meta, MergeCounters& mc, asio::io_context& ctx, bool doFsync); - void put(BucketEntry const& e); + void put(BucketEntryT const& e); - std::shared_ptr getBucket(BucketManager& bucketManager, - bool shouldSynchronouslyIndex, - MergeKey* mergeKey = nullptr); + std::shared_ptr getBucket(BucketManager& bucketManager, + bool shouldSynchronouslyIndex, + MergeKey* mergeKey = nullptr); }; + +typedef BucketOutputIterator LiveBucketOutputIterator; +typedef BucketOutputIterator HotArchiveBucketOutputIterator; } diff --git a/src/bucket/BucketSnapshot.cpp b/src/bucket/BucketSnapshot.cpp index 921076af82..3786c618ae 100644 --- a/src/bucket/BucketSnapshot.cpp +++ b/src/bucket/BucketSnapshot.cpp @@ -11,28 +11,36 @@ namespace stellar { -BucketSnapshot::BucketSnapshot(std::shared_ptr const b) +template +BucketSnapshotBase::BucketSnapshotBase( + std::shared_ptr const b) : mBucket(b) { releaseAssert(mBucket); } -BucketSnapshot::BucketSnapshot(BucketSnapshot const& b) +template +BucketSnapshotBase::BucketSnapshotBase( + BucketSnapshotBase const& b) : mBucket(b.mBucket), mStream(nullptr) { releaseAssert(mBucket); } +template bool -BucketSnapshot::isEmpty() const +BucketSnapshotBase::isEmpty() const { releaseAssert(mBucket); return mBucket->isEmpty(); } -std::pair, bool> -BucketSnapshot::getEntryAtOffset(LedgerKey 
const& k, std::streamoff pos, - size_t pageSize) const +template +std::pair::BucketEntryT>, + bool> +BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, + std::streamoff pos, + size_t pageSize) const { ZoneScoped; if (isEmpty()) @@ -43,7 +51,7 @@ BucketSnapshot::getEntryAtOffset(LedgerKey const& k, std::streamoff pos, auto& stream = getStream(); stream.seek(pos); - BucketEntry be; + BucketEntryT be; if (pageSize == 0) { if (stream.readOne(be)) @@ -61,8 +69,10 @@ BucketSnapshot::getEntryAtOffset(LedgerKey const& k, std::streamoff pos, return {std::nullopt, true}; } -std::pair, bool> -BucketSnapshot::getBucketEntry(LedgerKey const& k) const +template +std::pair::BucketEntryT>, + bool> +BucketSnapshotBase::getBucketEntry(LedgerKey const& k) const { ZoneScoped; if (isEmpty()) @@ -85,10 +95,11 @@ BucketSnapshot::getBucketEntry(LedgerKey const& k) const // If we find the entry, we remove the found key from keys so that later buckets // do not load shadowed entries. If we don't find the entry, we do not remove it // from keys so that it will be searched for again at a lower level. +template void -BucketSnapshot::loadKeysWithLimits(std::set& keys, - std::vector& result, - LedgerKeyMeter* lkMeter) const +BucketSnapshotBase::loadKeysWithLimits( + std::set& keys, + std::vector& result, LedgerKeyMeter* lkMeter) const { ZoneScoped; if (isEmpty()) @@ -109,22 +120,29 @@ BucketSnapshot::loadKeysWithLimits(std::set& keys, *currKeyIt, *offOp, mBucket->getIndex().getPageSize()); if (entryOp) { - if (entryOp->type() != DEADENTRY) + // Only live bucket loads can be metered: + // TODO: Refactor metering to only LiveBucket + if constexpr (std::is_same_v) { - bool addEntry = true; - if (lkMeter) - { - // Here, we are metering after the entry has been - // loaded. This is because we need to know the size of - // the entry to meter it. Future work will add metering - // at the xdr level. - auto entrySize = xdr::xdr_size(entryOp->liveEntry()); - addEntry = lkMeter->canLoad(*currKeyIt, entrySize); - lkMeter->updateReadQuotasForKey(*currKeyIt, entrySize); - } - if (addEntry) + if (entryOp->type() != DEADENTRY) { - result.push_back(entryOp->liveEntry()); + bool addEntry = true; + if (lkMeter) + { + // Here, we are metering after the entry has been + // loaded. This is because we need to know the size + // of the entry to meter it. Future work will add + // metering at the xdr level. 
+ auto entrySize = + xdr::xdr_size(entryOp->liveEntry()); + addEntry = lkMeter->canLoad(*currKeyIt, entrySize); + lkMeter->updateReadQuotasForKey(*currKeyIt, + entrySize); + } + if (addEntry) + { + result.push_back(entryOp->liveEntry()); + } } } currKeyIt = keys.erase(currKeyIt); @@ -137,7 +155,7 @@ BucketSnapshot::loadKeysWithLimits(std::set& keys, } std::vector const& -BucketSnapshot::getPoolIDsByAsset(Asset const& asset) const +LiveBucketSnapshot::getPoolIDsByAsset(Asset const& asset) const { static std::vector const emptyVec = {}; if (isEmpty()) @@ -149,13 +167,13 @@ BucketSnapshot::getPoolIDsByAsset(Asset const& asset) const } bool -BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, - uint32_t ledgerSeq, - std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const +LiveBucketSnapshot::scanForEviction( + EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, + std::list& evictableKeys, + SearchableLiveBucketListSnapshot& bl) const { ZoneScoped; - if (isEmpty() || protocolVersionIsBefore(Bucket::getBucketVersion(mBucket), + if (isEmpty() || protocolVersionIsBefore(mBucket->getBucketVersion(), SOROBAN_PROTOCOL_VERSION)) { // EOF, skip to next bucket @@ -240,8 +258,9 @@ BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, return false; } +template XDRInputFileStream& -BucketSnapshot::getStream() const +BucketSnapshotBase::getStream() const { releaseAssertOrThrow(!isEmpty()); if (!mStream) @@ -252,9 +271,36 @@ BucketSnapshot::getStream() const return *mStream; } -std::shared_ptr -BucketSnapshot::getRawBucket() const +template +std::shared_ptr +BucketSnapshotBase::getRawBucket() const { return mBucket; } + +HotArchiveBucketSnapshot::HotArchiveBucketSnapshot( + std::shared_ptr const b) + : BucketSnapshotBase(b) +{ +} + +LiveBucketSnapshot::LiveBucketSnapshot( + std::shared_ptr const b) + : BucketSnapshotBase(b) +{ +} + +HotArchiveBucketSnapshot::HotArchiveBucketSnapshot( + HotArchiveBucketSnapshot const& b) + : BucketSnapshotBase(b) +{ +} + +LiveBucketSnapshot::LiveBucketSnapshot(LiveBucketSnapshot const& b) + : BucketSnapshotBase(b) +{ +} + +template class BucketSnapshotBase; +template class BucketSnapshotBase; } \ No newline at end of file diff --git a/src/bucket/BucketSnapshot.h b/src/bucket/BucketSnapshot.h index 18faa51c34..0bf6a74b7d 100644 --- a/src/bucket/BucketSnapshot.h +++ b/src/bucket/BucketSnapshot.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/LedgerCmp.h" #include "util/NonCopyable.h" #include @@ -14,16 +15,22 @@ namespace stellar { -class Bucket; class XDRInputFileStream; -class SearchableBucketListSnapshot; struct EvictionResultEntry; class LedgerKeyMeter; +class SearchableLiveBucketListSnapshot; // A lightweight wrapper around Bucket for thread safe BucketListDB lookups -class BucketSnapshot : public NonMovable +template class BucketSnapshotBase : public NonMovable { - std::shared_ptr const mBucket; + static_assert(std::is_same_v || + std::is_same_v); + + protected: + using BucketEntryT = std::conditional_t, + BucketEntry, HotArchiveBucketEntry>; + + std::shared_ptr const mBucket; // Lazily-constructed and retained for read path. mutable std::unique_ptr mStream{}; @@ -37,25 +44,26 @@ class BucketSnapshot : public NonMovable // reads until key is found or the end of the page. 
Returns , where bloomMiss is true if a bloomMiss occurred during the // load. - std::pair, bool> + std::pair, bool> getEntryAtOffset(LedgerKey const& k, std::streamoff pos, size_t pageSize) const; - BucketSnapshot(std::shared_ptr const b); + BucketSnapshotBase(std::shared_ptr const b); // Only allow copy constructor, is threadsafe - BucketSnapshot(BucketSnapshot const& b); - BucketSnapshot& operator=(BucketSnapshot const&) = delete; + BucketSnapshotBase(BucketSnapshotBase const& b); + BucketSnapshotBase& operator=(BucketSnapshotBase const&) = delete; public: bool isEmpty() const; - std::shared_ptr getRawBucket() const; + std::shared_ptr getRawBucket() const; // Loads bucket entry for LedgerKey k. Returns , // where bloomMiss is true if a bloomMiss occurred during the load. - std::pair, bool> + std::pair, bool> getBucketEntry(LedgerKey const& k) const; + // TODO: Restrict limits to LiveBucket only // Loads LedgerEntry's for given keys. When a key is found, the // entry is added to result and the key is removed from keys. // If a pointer to a LedgerKeyMeter is provided, a key will only be loaded @@ -64,6 +72,17 @@ class BucketSnapshot : public NonMovable std::vector& result, LedgerKeyMeter* lkMeter) const; + // friend struct BucketLevelSnapshot; +}; + +class LiveBucketSnapshot : public BucketSnapshotBase +{ + public: + LiveBucketSnapshot(std::shared_ptr const b); + + // Only allow copy constructors, is threadsafe + LiveBucketSnapshot(LiveBucketSnapshot const& b); + // Return all PoolIDs that contain the given asset on either side of the // pool std::vector const& getPoolIDsByAsset(Asset const& asset) const; @@ -71,8 +90,15 @@ class BucketSnapshot : public NonMovable bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const; + SearchableLiveBucketListSnapshot& bl) const; +}; + +class HotArchiveBucketSnapshot : public BucketSnapshotBase +{ + public: + HotArchiveBucketSnapshot(std::shared_ptr const b); - friend struct BucketLevelSnapshot; + // Only allow copy constructors, is threadsafe + HotArchiveBucketSnapshot(HotArchiveBucketSnapshot const& b); }; } \ No newline at end of file diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index 52f907307b..ac68a20a73 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -15,7 +15,7 @@ namespace stellar { BucketSnapshotManager::BucketSnapshotManager( - Application& app, std::unique_ptr&& snapshot, + Application& app, std::unique_ptr const>&& snapshot, uint32_t numHistoricalSnapshots) : mApp(app) , mCurrentSnapshot(std::move(snapshot)) @@ -31,12 +31,12 @@ BucketSnapshotManager::BucketSnapshotManager( releaseAssert(threadIsMain()); } -std::shared_ptr +std::shared_ptr BucketSnapshotManager::copySearchableBucketListSnapshot() const { // Can't use std::make_shared due to private constructor - return std::shared_ptr( - new SearchableBucketListSnapshot(*this)); + return std::shared_ptr( + new SearchableLiveBucketListSnapshot(*this)); } medida::Timer& @@ -65,8 +65,8 @@ BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label, void BucketSnapshotManager::maybeUpdateSnapshot( - std::unique_ptr& snapshot, - std::map>& + std::unique_ptr const>& snapshot, + std::map const>>& historicalSnapshots) const { // The canonical snapshot held by the BucketSnapshotManager is not being @@ -81,7 +81,8 @@ BucketSnapshotManager::maybeUpdateSnapshot( // Should only update with a newer snapshot 
releaseAssert(!snapshot || snapshot->getLedgerSeq() < mCurrentSnapshot->getLedgerSeq()); - snapshot = std::make_unique(*mCurrentSnapshot); + snapshot = + std::make_unique>(*mCurrentSnapshot); } // Then update historical snapshots (if any exist) @@ -108,7 +109,7 @@ BucketSnapshotManager::maybeUpdateSnapshot( void BucketSnapshotManager::updateCurrentSnapshot( - std::unique_ptr&& newSnapshot) + std::unique_ptr const>&& newSnapshot) { releaseAssert(newSnapshot); releaseAssert(threadIsMain()); diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h index 9664f97ab8..97ccf1ae30 100644 --- a/src/bucket/BucketSnapshotManager.h +++ b/src/bucket/BucketSnapshotManager.h @@ -24,7 +24,8 @@ namespace stellar class Application; class LiveBucketList; -class BucketListSnapshot; +template class BucketListSnapshot; +class SearchableLiveBucketListSnapshot; // This class serves as the boundary between non-threadsafe singleton classes // (BucketManager, BucketList, Metrics, etc) and threadsafe, parallel BucketList @@ -37,10 +38,10 @@ class BucketSnapshotManager : NonMovableOrCopyable // Snapshot that is maintained and periodically updated by BucketManager on // the main thread. When background threads need to generate or refresh a // snapshot, they will copy this snapshot. - std::unique_ptr mCurrentSnapshot{}; + std::unique_ptr const> mCurrentSnapshot{}; // ledgerSeq that the snapshot is based on -> snapshot - std::map> + std::map const>> mHistoricalSnapshots; uint32_t const mNumHistoricalSnapshots; @@ -62,23 +63,23 @@ class BucketSnapshotManager : NonMovableOrCopyable // Called by main thread to update mCurrentSnapshot whenever the BucketList // is updated void updateCurrentSnapshot( - std::unique_ptr&& newSnapshot); + std::unique_ptr const>&& newSnapshot); // numHistoricalLedgers is the number of historical snapshots that the // snapshot manager will maintain. If numHistoricalLedgers is 5, snapshots // will be capable of querying state from ledger [lcl, lcl - 5]. 
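    // Illustrative example (not from this patch): with numHistoricalLedgers
    // == 5 and lcl == 1000, the manager retains snapshots for ledgers
    // 995..1000, so a background thread can serve queries against any of
    // those six ledger states.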
BucketSnapshotManager(Application& app, - std::unique_ptr&& snapshot, + std::unique_ptr const>&& snapshot, uint32_t numHistoricalLedgers); - std::shared_ptr + std::shared_ptr copySearchableBucketListSnapshot() const; // Checks if snapshot is out of date with mCurrentSnapshot and updates // it accordingly void maybeUpdateSnapshot( - std::unique_ptr& snapshot, - std::map>& + std::unique_ptr const>& snapshot, + std::map const>>& historicalSnapshots) const; // All metric recording functions must only be called by the main thread diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp index 1d4ece2c51..3fa3a24e02 100644 --- a/src/bucket/FutureBucket.cpp +++ b/src/bucket/FutureBucket.cpp @@ -18,6 +18,7 @@ #include "util/GlobalChecks.h" #include "util/LogSlowExecution.h" #include "util/Logging.h" +#include "util/ProtocolVersion.h" #include "util/Thread.h" #include #include @@ -25,16 +26,17 @@ #include "medida/metrics_registry.h" #include +#include +#include namespace stellar { - -FutureBucket::FutureBucket(Application& app, - std::shared_ptr const& curr, - std::shared_ptr const& snap, - std::vector> const& shadows, - uint32_t maxProtocolVersion, bool countMergeEvents, - uint32_t level) +template +FutureBucket::FutureBucket( + Application& app, std::shared_ptr const& curr, + std::shared_ptr const& snap, + std::vector> const& shadows, + uint32_t maxProtocolVersion, bool countMergeEvents, uint32_t level) : mState(FB_LIVE_INPUTS) , mInputCurrBucket(curr) , mInputSnapBucket(snap) @@ -48,8 +50,8 @@ FutureBucket::FutureBucket(Application& app, releaseAssert(snap); mInputCurrBucketHash = binToHex(curr->getHash()); mInputSnapBucketHash = binToHex(snap->getHash()); - if (protocolVersionStartsFrom(Bucket::getBucketVersion(snap), - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + if (protocolVersionStartsFrom(snap->getBucketVersion(), + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { if (!mInputShadowBuckets.empty()) { @@ -57,6 +59,18 @@ FutureBucket::FutureBucket(Application& app, "Invalid FutureBucket: ledger version doesn't support shadows"); } } + + if constexpr (!std::is_same_v) + { + if (protocolVersionIsBefore(snap->getBucketVersion(), + ProtocolVersion::V_22)) + { + throw std::runtime_error( + "Invalid ArchivalFutureBucket: ledger version doesn't support " + "Archival BucketList"); + } + } + for (auto const& b : mInputShadowBuckets) { mInputShadowBucketHashes.push_back(binToHex(b->getHash())); @@ -64,8 +78,9 @@ FutureBucket::FutureBucket(Application& app, startMerge(app, maxProtocolVersion, countMergeEvents, level); } +template void -FutureBucket::setLiveOutput(std::shared_ptr output) +FutureBucket::setLiveOutput(std::shared_ptr output) { ZoneScoped; mState = FB_LIVE_OUTPUT; @@ -74,14 +89,16 @@ FutureBucket::setLiveOutput(std::shared_ptr output) checkState(); } +template static void -checkHashEq(std::shared_ptr const& b, std::string const& h) +checkHashEq(std::shared_ptr const& b, std::string const& h) { releaseAssert(b->getHash() == hexToBin256(h)); } +template void -FutureBucket::checkHashesMatch() const +FutureBucket::checkHashesMatch() const { ZoneScoped; if (!mInputShadowBuckets.empty()) @@ -114,8 +131,9 @@ FutureBucket::checkHashesMatch() const * the different hash-only states are mutually exclusive with each other and * with live values. 
*/ +template void -FutureBucket::checkState() const +FutureBucket::checkState() const { switch (mState) { @@ -174,8 +192,9 @@ FutureBucket::checkState() const } } +template void -FutureBucket::clearInputs() +FutureBucket::clearInputs() { mInputShadowBuckets.clear(); mInputSnapBucket.reset(); @@ -186,50 +205,57 @@ FutureBucket::clearInputs() mInputCurrBucketHash.clear(); } +template void -FutureBucket::clearOutput() +FutureBucket::clearOutput() { // NB: MSVC future<> implementation doesn't purge the task lambda (and // its captures) on invalidation (due to get()); must explicitly reset. - mOutputBucketFuture = std::shared_future>(); + mOutputBucketFuture = std::shared_future>(); mOutputBucketHash.clear(); mOutputBucket.reset(); } +template void -FutureBucket::clear() +FutureBucket::clear() { mState = FB_CLEAR; clearInputs(); clearOutput(); } +template bool -FutureBucket::isLive() const +FutureBucket::isLive() const { return (mState == FB_LIVE_INPUTS || mState == FB_LIVE_OUTPUT); } +template bool -FutureBucket::isMerging() const +FutureBucket::isMerging() const { return mState == FB_LIVE_INPUTS; } +template bool -FutureBucket::hasHashes() const +FutureBucket::hasHashes() const { return (mState == FB_HASH_INPUTS || mState == FB_HASH_OUTPUT); } +template bool -FutureBucket::isClear() const +FutureBucket::isClear() const { return mState == FB_CLEAR; } +template bool -FutureBucket::mergeComplete() const +FutureBucket::mergeComplete() const { ZoneScoped; releaseAssert(isLive()); @@ -241,8 +267,9 @@ FutureBucket::mergeComplete() const return futureIsReady(mOutputBucketFuture); } -std::shared_ptr -FutureBucket::resolve() +template +std::shared_ptr +FutureBucket::resolve() { ZoneScoped; checkState(); @@ -264,7 +291,7 @@ FutureBucket::resolve() // Explicitly reset shared_future to ensure destruction of shared state. // Some compilers store packaged_task lambdas in the shared state, // keeping its captures alive as long as the future is alive. 
- mOutputBucketFuture = std::shared_future>(); + mOutputBucketFuture = std::shared_future>(); } mState = FB_LIVE_OUTPUT; @@ -272,8 +299,9 @@ FutureBucket::resolve() return mOutputBucket; } +template bool -FutureBucket::hasOutputHash() const +FutureBucket::hasOutputHash() const { if (mState == FB_LIVE_OUTPUT || mState == FB_HASH_OUTPUT) { @@ -283,28 +311,31 @@ FutureBucket::hasOutputHash() const return false; } +template std::string const& -FutureBucket::getOutputHash() const +FutureBucket::getOutputHash() const { releaseAssert(mState == FB_LIVE_OUTPUT || mState == FB_HASH_OUTPUT); releaseAssert(!mOutputBucketHash.empty()); return mOutputBucketHash; } +template static std::chrono::seconds getAvailableTimeForMerge(Application& app, uint32_t level) { auto closeTime = app.getConfig().getExpectedLedgerCloseTime(); if (level >= 1) { - return closeTime * BucketListBase::levelHalf(level - 1); + return closeTime * BucketListBase::levelHalf(level - 1); } return closeTime; } +template void -FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, - bool countMergeEvents, uint32_t level) +FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, + bool countMergeEvents, uint32_t level) { ZoneScoped; // NB: startMerge starts with FutureBucket in a half-valid state; the inputs @@ -313,9 +344,9 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, releaseAssert(mState == FB_LIVE_INPUTS); - std::shared_ptr curr = mInputCurrBucket; - std::shared_ptr snap = mInputSnapBucket; - std::vector> shadows = mInputShadowBuckets; + std::shared_ptr curr = mInputCurrBucket; + std::shared_ptr snap = mInputSnapBucket; + std::vector> shadows = mInputShadowBuckets; releaseAssert(curr); releaseAssert(snap); @@ -329,13 +360,31 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, auto& timer = app.getMetrics().NewTimer( {"bucket", "merge-time", "level-" + std::to_string(level)}); + std::vector shadowHashes; + shadowHashes.reserve(shadows.size()); + for (auto const& b : shadows) + { + shadowHashes.emplace_back(b->getHash()); + } + // It's possible we're running a merge that's already running, for example // due to having been serialized to the publish queue and then immediately // deserialized. In this case we want to attach to the existing merge, which // will have left a std::shared_future behind in a shared cache in the // bucket manager. 
- MergeKey mk{BucketListBase::keepDeadEntries(level), curr, snap, shadows}; - auto f = bm.getMergeFuture(mk); + MergeKey mk{BucketListBase::keepDeadEntries(level), + curr->getHash(), snap->getHash(), shadowHashes}; + + std::shared_future> f; + if constexpr (std::is_same_v) + { + f = bm.getLiveMergeFuture(mk); + } + else + { + f = bm.getHotArchiveMergeFuture(mk); + } + if (f.valid()) { CLOG_TRACE(Bucket, @@ -347,9 +396,10 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, } asio::io_context& ctx = app.getWorkerIOContext(); bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; - std::chrono::seconds availableTime = getAvailableTimeForMerge(app, level); + std::chrono::seconds availableTime = + getAvailableTimeForMerge(app, level); - using task_t = std::packaged_task()>; + using task_t = std::packaged_task()>; std::shared_ptr task = std::make_shared( [curr, snap, &bm, shadows, maxProtocolVersion, countMergeEvents, level, &timer, &ctx, doFsync, availableTime]() mutable { @@ -362,10 +412,10 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, ZoneNamedN(mergeZone, "Merge task", true); ZoneValueV(mergeZone, static_cast(level)); - auto res = - Bucket::merge(bm, maxProtocolVersion, curr, snap, shadows, - BucketListBase::keepDeadEntries(level), - countMergeEvents, ctx, doFsync); + auto res = Bucket::merge( + bm, maxProtocolVersion, curr, snap, shadows, + BucketListBase::keepDeadEntries(level), + countMergeEvents, ctx, doFsync); if (res) { @@ -395,15 +445,24 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, }); mOutputBucketFuture = task->get_future().share(); - bm.putMergeFuture(mk, mOutputBucketFuture); + if constexpr (std::is_same_v) + { + bm.putLiveMergeFuture(mk, mOutputBucketFuture); + } + else + { + bm.putHotArchiveMergeFuture(mk, mOutputBucketFuture); + } + app.postOnBackgroundThread(bind(&task_t::operator(), task), "FutureBucket: merge"); checkState(); } +template void -FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, - uint32_t level) +FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, + uint32_t level) { ZoneScoped; checkState(); @@ -412,20 +471,48 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, auto& bm = app.getBucketManager(); if (hasOutputHash()) { - auto b = bm.getBucketByHash(hexToBin256(getOutputHash())); + std::shared_ptr b; + if constexpr (std::is_same_v) + { + b = bm.getLiveBucketByHash(hexToBin256(getOutputHash())); + } + else + { + b = bm.getHotArchiveBucketByHash(hexToBin256(getOutputHash())); + } + setLiveOutput(b); } else { releaseAssert(mState == FB_HASH_INPUTS); - mInputCurrBucket = - bm.getBucketByHash(hexToBin256(mInputCurrBucketHash)); - mInputSnapBucket = - bm.getBucketByHash(hexToBin256(mInputSnapBucketHash)); + if constexpr (std::is_same_v) + { + mInputCurrBucket = + bm.getLiveBucketByHash(hexToBin256(mInputCurrBucketHash)); + mInputSnapBucket = + bm.getLiveBucketByHash(hexToBin256(mInputSnapBucketHash)); + } + else + { + mInputCurrBucket = + bm.getHotArchiveBucketByHash(hexToBin256(mInputCurrBucketHash)); + mInputSnapBucket = + bm.getHotArchiveBucketByHash(hexToBin256(mInputSnapBucketHash)); + } releaseAssert(mInputShadowBuckets.empty()); for (auto const& h : mInputShadowBucketHashes) { - auto b = bm.getBucketByHash(hexToBin256(h)); + std::shared_ptr b; + if constexpr (std::is_same_v) + { + b = bm.getLiveBucketByHash(hexToBin256(h)); + } + else + { + b = bm.getHotArchiveBucketByHash(hexToBin256(h)); + } + releaseAssert(b); 
CLOG_DEBUG(Bucket, "Reconstituting shadow {}", h); mInputShadowBuckets.push_back(b); @@ -436,8 +523,9 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, } } +template std::vector -FutureBucket::getHashes() const +FutureBucket::getHashes() const { ZoneScoped; std::vector hashes; @@ -459,4 +547,7 @@ FutureBucket::getHashes() const } return hashes; } + +template class FutureBucket; +template class FutureBucket; } diff --git a/src/bucket/FutureBucket.h b/src/bucket/FutureBucket.h index 4866d90235..cda7e6b61c 100644 --- a/src/bucket/FutureBucket.h +++ b/src/bucket/FutureBucket.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "util/GlobalChecks.h" #include #include @@ -16,13 +17,16 @@ namespace stellar class Bucket; class Application; +class LiveBucket; +class HotArchiveBucket; /** * FutureBucket is a minor wrapper around - * std::shared_future>, used in merging multiple buckets - * together in the BucketList. The reason this is a separate class is that we - * need to support a level of persistence: serializing merges-in-progress in a - * symbolic fashion, including restarting the merges after we deserialize. + * std::shared_future>, used in merging multiple + * buckets together in the BucketList. The reason this is a separate class is + * that we need to support a level of persistence: serializing + * merges-in-progress in a symbolic fashion, including restarting the merges + * after we deserialize. * * This class is therefore used not _only_ in the BucketList but also in places * that serialize and deserialize snapshots of it in the form of @@ -30,8 +34,11 @@ class Application; * the bottom of closeLedger; and the HistoryManager, when storing and * retrieving HistoryArchiveStates. */ -class FutureBucket +template class FutureBucket { + static_assert(std::is_same_v || + std::is_same_v); + // There are two lifecycles of a FutureBucket: // // In one, it's created live, snapshotted at some point in the process @@ -56,11 +63,11 @@ class FutureBucket // FutureBucket is constructed, when it is reset, or when it is freshly // deserialized and not yet activated. When they are nonempty, they should // have values equal to the subsequent mFooHash values below. - std::shared_ptr mInputCurrBucket; - std::shared_ptr mInputSnapBucket; - std::vector> mInputShadowBuckets; - std::shared_ptr mOutputBucket; - std::shared_future> mOutputBucketFuture; + std::shared_ptr mInputCurrBucket; + std::shared_ptr mInputSnapBucket; + std::vector> mInputShadowBuckets; + std::shared_ptr mOutputBucket; + std::shared_future> mOutputBucketFuture; // These strings hold the serializable (or deserialized) bucket hashes of // the inputs and outputs of a merge; depending on the state of the @@ -79,12 +86,12 @@ class FutureBucket void clearInputs(); void clearOutput(); - void setLiveOutput(std::shared_ptr b); + void setLiveOutput(std::shared_ptr b); public: - FutureBucket(Application& app, std::shared_ptr const& curr, - std::shared_ptr const& snap, - std::vector> const& shadows, + FutureBucket(Application& app, std::shared_ptr const& curr, + std::shared_ptr const& snap, + std::vector> const& shadows, uint32_t maxProtocolVersion, bool countMergeEvents, uint32_t level); @@ -118,7 +125,7 @@ class FutureBucket bool mergeComplete() const; // Precondition: isLive(); waits-for and resolves to merged bucket. 
-    std::shared_ptr<Bucket> resolve();
+    std::shared_ptr<BucketT> resolve();

     // Precondition: !isLive(); transitions from FB_HASH_FOO to FB_LIVE_FOO
     void makeLive(Application& app, uint32_t maxProtocolVersion,
diff --git a/src/bucket/LedgerCmp.h b/src/bucket/LedgerCmp.h
index 6551448f97..2a84cad211 100644
--- a/src/bucket/LedgerCmp.h
+++ b/src/bucket/LedgerCmp.h
@@ -13,6 +13,9 @@ namespace stellar
 {

+class LiveBucket;
+class HotArchiveBucket;
+
 template <typename T>
 bool
 lexCompare(T&& lhs1, T&& rhs1)
@@ -126,10 +129,70 @@ struct LedgerEntryIdCmp
  * LedgerEntries (ignoring their hashes, as the LedgerEntryIdCmp ignores their
  * bodies).
  */
-struct BucketEntryIdCmp
+template <typename BucketT> struct BucketEntryIdCmp
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
+    using BucketEntryT =
+        std::conditional_t<std::is_same_v<BucketT, LiveBucket>, BucketEntry,
+                           HotArchiveBucketEntry>;
+
     bool
-    operator()(BucketEntry const& a, BucketEntry const& b) const
+    compareHotArchive(HotArchiveBucketEntry const& a,
+                      HotArchiveBucketEntry const& b) const
+    {
+        HotArchiveBucketEntryType aty = a.type();
+        HotArchiveBucketEntryType bty = b.type();
+
+        // METAENTRY sorts below all other entries, comes first in buckets.
+        if (aty == HA_METAENTRY || bty == HA_METAENTRY)
+        {
+            return aty < bty;
+        }
+
+        if (aty == HA_ARCHIVED)
+        {
+            if (bty == HA_ARCHIVED)
+            {
+                return LedgerEntryIdCmp{}(a.archivedEntry().data,
+                                          b.archivedEntry().data);
+            }
+            else
+            {
+                if (bty != HA_DELETED && bty != HA_RESTORED)
+                {
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/RESTORED key.");
+                }
+                return LedgerEntryIdCmp{}(a.archivedEntry().data, b.key());
+            }
+        }
+        else
+        {
+            if (aty != HA_DELETED && aty != HA_RESTORED)
+            {
+                throw std::runtime_error(
+                    "Malformed bucket: expected DELETED/RESTORED key.");
+            }
+
+            if (bty == HA_ARCHIVED)
+            {
+                return LedgerEntryIdCmp{}(a.key(), b.archivedEntry().data);
+            }
+            else
+            {
+                if (bty != HA_DELETED && bty != HA_RESTORED)
+                {
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/RESTORED key.");
+                }
+                return LedgerEntryIdCmp{}(a.key(), b.key());
+            }
+        }
+    }
+
+    bool
+    compareLive(BucketEntry const& a, BucketEntry const& b) const
     {
         BucketEntryType aty = a.type();
         BucketEntryType bty = b.type();
@@ -179,5 +242,18 @@ struct BucketEntryIdCmp
             }
         }
     }
+
+    bool
+    operator()(BucketEntryT const& a, BucketEntryT const& b) const
+    {
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            return compareLive(a, b);
+        }
+        else
+        {
+            return compareHotArchive(a, b);
+        }
+    }
 };
 }
diff --git a/src/bucket/MergeKey.cpp b/src/bucket/MergeKey.cpp
index 74fc5993fb..c52eeca08c 100644
--- a/src/bucket/MergeKey.cpp
+++ b/src/bucket/MergeKey.cpp
@@ -10,19 +10,13 @@
 namespace stellar
 {

-MergeKey::MergeKey(bool keepDeadEntries,
-                   std::shared_ptr<Bucket> const& inputCurr,
-                   std::shared_ptr<Bucket> const& inputSnap,
-                   std::vector<std::shared_ptr<Bucket>> const& inputShadows)
+MergeKey::MergeKey(bool keepDeadEntries, Hash const& currHash,
+                   Hash const& snapHash, std::vector<Hash> const& shadowHashes)
     : mKeepDeadEntries(keepDeadEntries)
-    , mInputCurrBucket(inputCurr->getHash())
-    , mInputSnapBucket(inputSnap->getHash())
+    , mInputCurrBucket(currHash)
+    , mInputSnapBucket(snapHash)
+    , mInputShadowBuckets(shadowHashes)
 {
-    mInputShadowBuckets.reserve(inputShadows.size());
-    for (auto const& s : inputShadows)
-    {
-        mInputShadowBuckets.emplace_back(s->getHash());
-    }
 }

 bool
diff --git a/src/bucket/MergeKey.h b/src/bucket/MergeKey.h
index e9098f26ac..205d4c5d17 100644
--- a/src/bucket/MergeKey.h
+++ b/src/bucket/MergeKey.h
@@ -17,9 +17,8 @@ namespace stellar
 // pre-resolved std::shared_future containing that output.
struct MergeKey { - MergeKey(bool keepDeadEntries, std::shared_ptr const& inputCurr, - std::shared_ptr const& inputSnap, - std::vector> const& inputShadows); + MergeKey(bool keepDeadEntries, Hash const& currHash, Hash const& snapHash, + std::vector const& shadowHashes); bool mKeepDeadEntries; Hash mInputCurrBucket; diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp index 12632a38a4..7ad466bbfe 100644 --- a/src/bucket/test/BucketIndexTests.cpp +++ b/src/bucket/test/BucketIndexTests.cpp @@ -69,7 +69,7 @@ class BucketIndexTest {CONFIG_SETTING}, 10); f(entries); closeLedger(*mApp); - } while (!BucketListBase::levelShouldSpill(ledger, mLevelsToBuild - 1)); + } while (!LiveBucketList::levelShouldSpill(ledger, mLevelsToBuild - 1)); } public: @@ -631,7 +631,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") auto indexFilename = test.getBM().bucketIndexFilename(bucketHash); REQUIRE(fs::exists(indexFilename)); - auto b = test.getBM().getBucketByHash(bucketHash); + auto b = test.getBM().getLiveBucketByHash(bucketHash); REQUIRE(b->isIndexed()); auto onDiskIndex = @@ -657,7 +657,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") } // Check if in-memory index has correct params - auto b = test.getBM().getBucketByHash(bucketHash); + auto b = test.getBM().getLiveBucketByHash(bucketHash); REQUIRE(!b->isEmpty()); REQUIRE(b->isIndexed()); diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 63fa0b3323..bd1be99898 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -70,26 +70,26 @@ void checkBucketSizeAndBounds(LiveBucketList& bl, uint32_t ledgerSeq, uint32_t level, bool isCurr) { - std::shared_ptr bucket; + std::shared_ptr bucket; uint32_t sizeOfBucket = 0; uint32_t oldestLedger = 0; if (isCurr) { bucket = bl.getLevel(level).getCurr(); - sizeOfBucket = BucketListBase::sizeOfCurr(ledgerSeq, level); - oldestLedger = BucketListBase::oldestLedgerInCurr(ledgerSeq, level); + sizeOfBucket = LiveBucketList::sizeOfCurr(ledgerSeq, level); + oldestLedger = LiveBucketList::oldestLedgerInCurr(ledgerSeq, level); } else { bucket = bl.getLevel(level).getSnap(); - sizeOfBucket = BucketListBase::sizeOfSnap(ledgerSeq, level); - oldestLedger = BucketListBase::oldestLedgerInSnap(ledgerSeq, level); + sizeOfBucket = LiveBucketList::sizeOfSnap(ledgerSeq, level); + oldestLedger = LiveBucketList::oldestLedgerInSnap(ledgerSeq, level); } std::set ledgers; uint32_t lbound = std::numeric_limits::max(); uint32_t ubound = 0; - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { auto lastModified = (*iter).liveEntry().lastModifiedLedgerSeq; ledgers.insert(lastModified); @@ -156,13 +156,13 @@ TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]") if (i % 10 == 0) CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i, binToHex(bl.getHash())); - for (uint32_t j = 0; j < BucketListBase::kNumLevels; ++j) + for (uint32_t j = 0; j < LiveBucketList::kNumLevels; ++j) { auto const& lev = bl.getLevel(j); auto currSz = countEntries(lev.getCurr()); auto snapSz = countEntries(lev.getSnap()); - CHECK(currSz <= BucketListBase::levelHalf(j) * 100); - CHECK(snapSz <= BucketListBase::levelHalf(j) * 100); + CHECK(currSz <= LiveBucketList::levelHalf(j) * 100); + CHECK(snapSz <= LiveBucketList::levelHalf(j) * 100); } } }); @@ -179,16 +179,16 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") { std::map currCalculatedUpdatePeriods; 
std::map snapCalculatedUpdatePeriods; - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { currCalculatedUpdatePeriods.emplace( - i, BucketListBase::bucketUpdatePeriod(i, /*isCurr=*/true)); + i, LiveBucketList::bucketUpdatePeriod(i, /*isCurr=*/true)); // Last level has no snap - if (i != BucketListBase::kNumLevels - 1) + if (i != LiveBucketList::kNumLevels - 1) { snapCalculatedUpdatePeriods.emplace( - i, BucketListBase::bucketUpdatePeriod(i, /*isSnap=*/false)); + i, LiveBucketList::bucketUpdatePeriod(i, /*isSnap=*/false)); } } @@ -197,7 +197,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") !snapCalculatedUpdatePeriods.empty(); ++ledgerSeq) { - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { // Check if curr bucket is updated auto currIter = currCalculatedUpdatePeriods.find(level); @@ -213,7 +213,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") { // For all other levels, an update occurs when the level // above spills - if (BucketListBase::levelShouldSpill(ledgerSeq, level - 1)) + if (LiveBucketList::levelShouldSpill(ledgerSeq, level - 1)) { REQUIRE(currIter->second == ledgerSeq); currCalculatedUpdatePeriods.erase(currIter); @@ -225,7 +225,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") auto snapIter = snapCalculatedUpdatePeriods.find(level); if (snapIter != snapCalculatedUpdatePeriods.end()) { - if (BucketListBase::levelShouldSpill(ledgerSeq, level)) + if (LiveBucketList::levelShouldSpill(ledgerSeq, level)) { // Check that snap bucket calculation is correct REQUIRE(snapIter->second == ledgerSeq); @@ -304,7 +304,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", // Alice and Bob should never occur in level 2 .. N because they // were shadowed in level 0 continuously. 
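    // Concretely: with shadows active (pre-FIRST_PROTOCOL_SHADOWS_REMOVED),
    // the constantly re-written entries never survive past level 1, so the
    // checks below expect absence at every level >= 2; once shadows are
    // removed, the entries may legitimately surface at levels 2..5, which is
    // why the assertion below carves out j > 5.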
- for (uint32_t j = 2; j < BucketListBase::kNumLevels; ++j) + for (uint32_t j = 2; j < LiveBucketList::kNumLevels; ++j) { auto const& lev = bl.getLevel(j); auto curr = lev.getCurr(); @@ -317,7 +317,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", snap->containsBucketIdentity(BucketEntryBob)); if (protocolVersionIsBefore( app->getConfig().LEDGER_PROTOCOL_VERSION, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) || + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) || j > 5) { CHECK(!hasAlice); @@ -349,10 +349,10 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", BucketManager& bm = app->getBucketManager(); auto& mergeTimer = bm.getMergeTimer(); CLOG_INFO(Bucket, "Establishing random bucketlist"); - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto& level = bl.getLevel(i); - level.setCurr(Bucket::fresh( + level.setCurr(LiveBucket::fresh( bm, getAppLedgerVersion(app), {}, LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 8), @@ -360,7 +360,7 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", {CONFIG_SETTING}, 5), /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true)); - level.setSnap(Bucket::fresh( + level.setSnap(LiveBucket::fresh( bm, getAppLedgerVersion(app), {}, LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 8), @@ -370,10 +370,10 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", /*doFsync=*/true)); } - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - std::vector ledgers = {BucketListBase::levelHalf(i), - BucketListBase::levelSize(i)}; + std::vector ledgers = {LiveBucketList::levelHalf(i), + LiveBucketList::levelSize(i)}; for (auto j : ledgers) { auto n = mergeTimer.count(); @@ -388,7 +388,7 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); app->getClock().crank(false); - for (uint32_t k = 0u; k < BucketListBase::kNumLevels; ++k) + for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { auto& next = bl.getLevel(k).getNext(); if (next.isLive()) @@ -401,13 +401,13 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", "Added batch at ledger {}, merges provoked: {}", j, n); REQUIRE(n > 0); - REQUIRE(n < 2 * BucketListBase::kNumLevels); + REQUIRE(n < 2 * LiveBucketList::kNumLevels); } } - EntryCounts e0(bl.getLevel(BucketListBase::kNumLevels - 3).getCurr()); - EntryCounts e1(bl.getLevel(BucketListBase::kNumLevels - 2).getCurr()); - EntryCounts e2(bl.getLevel(BucketListBase::kNumLevels - 1).getCurr()); + EntryCounts e0(bl.getLevel(LiveBucketList::kNumLevels - 3).getCurr()); + EntryCounts e1(bl.getLevel(LiveBucketList::kNumLevels - 2).getCurr()); + EntryCounts e2(bl.getLevel(LiveBucketList::kNumLevels - 1).getCurr()); REQUIRE(e0.nDead != 0); REQUIRE(e1.nDead != 0); REQUIRE(e2.nDead == 0); @@ -464,7 +464,7 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", addBatchAndUpdateSnapshot(bl, *app, lh, initEntries, liveEntries, deadEntries); app->getClock().crank(false); - for (uint32_t k = 0u; k < BucketListBase::kNumLevels; ++k) + for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { auto& next = bl.getLevel(k).getNext(); if (next.isLive()) @@ -473,14 +473,15 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", } } } - for (uint32_t k = 0u; k < 
BucketListBase::kNumLevels; ++k) + for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { auto const& lev = bl.getLevel(k); auto currSz = countEntries(lev.getCurr()); auto snapSz = countEntries(lev.getSnap()); if (protocolVersionStartsFrom( cfg.LEDGER_PROTOCOL_VERSION, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { // init/dead pairs should mutually-annihilate pretty readily as // they go, empirically this test peaks at buckets around 400 @@ -529,7 +530,7 @@ TEST_CASE_VERSIONS("single entry bubbling up", CLOG_DEBUG(Bucket, "------- ledger {}", i); - for (uint32_t j = 0; j <= BucketListBase::kNumLevels - 1; ++j) + for (uint32_t j = 0; j <= LiveBucketList::kNumLevels - 1; ++j) { uint32_t lb = lowBoundExclusive(j, i); uint32_t hb = highBoundInclusive(j, i); @@ -567,25 +568,25 @@ TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", stellar::uniform_int_distribution dist; for (uint32_t i = 0; i < 1000; ++i) { - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { uint32_t ledger = dist(gRandomEngine); - if (BucketListBase::sizeOfSnap(ledger, level) > 0) + if (LiveBucketList::sizeOfSnap(ledger, level) > 0) { uint32_t oldestInCurr = - BucketListBase::oldestLedgerInSnap(ledger, level) + - BucketListBase::sizeOfSnap(ledger, level); + LiveBucketList::oldestLedgerInSnap(ledger, level) + + LiveBucketList::sizeOfSnap(ledger, level); REQUIRE(oldestInCurr == - BucketListBase::oldestLedgerInCurr(ledger, level)); + LiveBucketList::oldestLedgerInCurr(ledger, level)); } - if (BucketListBase::sizeOfCurr(ledger, level) > 0) + if (LiveBucketList::sizeOfCurr(ledger, level) > 0) { uint32_t newestInCurr = - BucketListBase::oldestLedgerInCurr(ledger, level) + - BucketListBase::sizeOfCurr(ledger, level) - 1; + LiveBucketList::oldestLedgerInCurr(ledger, level) + + LiveBucketList::sizeOfCurr(ledger, level) - 1; REQUIRE(newestInCurr == (level == 0 ? ledger - : BucketListBase::oldestLedgerInSnap( + : LiveBucketList::oldestLedgerInSnap( ledger, level - 1) - 1)); } @@ -597,9 +598,9 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") { // Deliberately exclude deepest level since snap on the deepest level // is always empty. 
- for (uint32_t level = 0; level < BucketListBase::kNumLevels - 1; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels - 1; ++level) { - uint32_t const half = BucketListBase::levelHalf(level); + uint32_t const half = LiveBucketList::levelHalf(level); // Use binary search (assuming that it does reach steady state) // to find the ledger where the snap at this level first reaches @@ -607,7 +608,7 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits::max() / 2, [level, half](uint32_t ledger) { - return (BucketListBase::sizeOfSnap(ledger, level) == half); + return (LiveBucketList::sizeOfSnap(ledger, level) == half); }); // Generate random ledgers above and below the split to test that @@ -618,21 +619,21 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(BucketListBase::sizeOfSnap(low, level) < half); - REQUIRE(BucketListBase::sizeOfSnap(high, level) == half); + REQUIRE(LiveBucketList::sizeOfSnap(low, level) < half); + REQUIRE(LiveBucketList::sizeOfSnap(high, level) == half); } } } TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") { - uint32_t const deepest = BucketListBase::kNumLevels - 1; + uint32_t const deepest = LiveBucketList::kNumLevels - 1; // Use binary search to find the first ledger where the deepest curr // first is non-empty. uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits::max() / 2, [deepest](uint32_t ledger) { - return (BucketListBase::sizeOfCurr(ledger, deepest) > 0); + return (LiveBucketList::sizeOfCurr(ledger, deepest) > 0); }); stellar::uniform_int_distribution distLow(1, boundary - 1); stellar::uniform_int_distribution distHigh(boundary); @@ -640,29 +641,29 @@ TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(BucketListBase::sizeOfCurr(low, deepest) == 0); - REQUIRE(BucketListBase::oldestLedgerInCurr(low, deepest) == + REQUIRE(LiveBucketList::sizeOfCurr(low, deepest) == 0); + REQUIRE(LiveBucketList::oldestLedgerInCurr(low, deepest) == std::numeric_limits::max()); - REQUIRE(BucketListBase::sizeOfCurr(high, deepest) > 0); - REQUIRE(BucketListBase::oldestLedgerInCurr(high, deepest) == 1); + REQUIRE(LiveBucketList::sizeOfCurr(high, deepest) > 0); + REQUIRE(LiveBucketList::oldestLedgerInCurr(high, deepest) == 1); - REQUIRE(BucketListBase::sizeOfSnap(low, deepest) == 0); - REQUIRE(BucketListBase::oldestLedgerInSnap(low, deepest) == + REQUIRE(LiveBucketList::sizeOfSnap(low, deepest) == 0); + REQUIRE(LiveBucketList::oldestLedgerInSnap(low, deepest) == std::numeric_limits::max()); - REQUIRE(BucketListBase::sizeOfSnap(high, deepest) == 0); - REQUIRE(BucketListBase::oldestLedgerInSnap(high, deepest) == + REQUIRE(LiveBucketList::sizeOfSnap(high, deepest) == 0); + REQUIRE(LiveBucketList::oldestLedgerInSnap(high, deepest) == std::numeric_limits::max()); } } TEST_CASE("BucketList sizes at ledger 1", "[bucket][bucketlist][count]") { - REQUIRE(BucketListBase::sizeOfCurr(1, 0) == 1); - REQUIRE(BucketListBase::sizeOfSnap(1, 0) == 0); - for (uint32_t level = 1; level < BucketListBase::kNumLevels; ++level) + REQUIRE(LiveBucketList::sizeOfCurr(1, 0) == 1); + REQUIRE(LiveBucketList::sizeOfSnap(1, 0) == 0); + for (uint32_t level = 1; level < LiveBucketList::kNumLevels; ++level) { - 
REQUIRE(BucketListBase::sizeOfCurr(1, level) == 0); - REQUIRE(BucketListBase::sizeOfSnap(1, level) == 0); + REQUIRE(LiveBucketList::sizeOfCurr(1, level) == 0); + REQUIRE(LiveBucketList::sizeOfSnap(1, level) == 0); } } @@ -688,7 +689,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]") addBatchAndUpdateSnapshot(bl, *app, lh, {}, {ledgers[ledgerSeq - 1]}, emptySet); } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { checkBucketSizeAndBounds(bl, ledgerSeq, level, true); checkBucketSizeAndBounds(bl, ledgerSeq, level, false); @@ -1023,7 +1024,7 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") LedgerKey entryToEvict; std::optional expectedEndIterPosition{}; - for (BucketInputIterator in( + for (LiveBucketInputIterator in( bl.getLevel(levelToScan).getCurr()); in; ++in) { @@ -1078,7 +1079,7 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") auto constexpr xdrOverheadBytes = 4; - BucketInputIterator metaIn(bl.getLevel(0).getCurr()); + LiveBucketInputIterator metaIn(bl.getLevel(0).getCurr()); BucketEntry be(METAENTRY); be.metaEntry() = metaIn.getMetadata(); auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes; @@ -1101,7 +1102,8 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") size_t prevOff = evictionIter.bucketFileOffset; // Check that each scan only reads one entry - for (BucketInputIterator in(bl.getLevel(levelToScan).getCurr()); + for (LiveBucketInputIterator in( + bl.getLevel(levelToScan).getCurr()); in; ++in) { auto startingOffset = evictionIter.bucketFileOffset; @@ -1196,7 +1198,7 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") // Advance until one ledger before bucket is updated auto ledgersUntilUpdate = - BucketListBase::bucketUpdatePeriod(levelToTest, + LiveBucketList::bucketUpdatePeriod(levelToTest, isCurr) - 1; // updateNetworkCfg closes a ledger that we need to // count @@ -1227,7 +1229,7 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") closeLedger(*app); ++ledgerSeq; - BucketInputIterator in(bucket()); + LiveBucketInputIterator in(bucket()); // Check that iterator has reset to beginning of bucket and // read meta entry + one additional entry @@ -1343,30 +1345,30 @@ formatLedgerList(std::vector const& ledgers) TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") { - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { CLOG_INFO(Bucket, "levelSize({}) = {} (formally)", level, - formatU32(BucketListBase::levelSize(level))); + formatU32(LiveBucketList::levelSize(level))); } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { CLOG_INFO(Bucket, "levelHalf({}) = {} (formally)", level, - formatU32(BucketListBase::levelHalf(level))); + formatU32(LiveBucketList::levelHalf(level))); } for (uint32_t probe : {0x100, 0x10000, 0x1000000}) { - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - auto sz = formatU32(BucketListBase::sizeOfCurr(probe, level)); + auto sz = formatU32(LiveBucketList::sizeOfCurr(probe, level)); CLOG_INFO(Bucket, "sizeOfCurr({:#x}, {}) = {} (precisely)", probe, level, sz); } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - 
auto sz = formatU32(BucketListBase::sizeOfSnap(probe, level)); + auto sz = formatU32(LiveBucketList::sizeOfSnap(probe, level)); CLOG_INFO(Bucket, "sizeOfSnap({:#x}, {}) = {} (precisely)", probe, level, sz); } @@ -1375,17 +1377,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") std::vector> spillEvents; std::vector> nonMergeCommitEvents; std::vector> mergeCommitEvents; - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { spillEvents.push_back({}); nonMergeCommitEvents.push_back({}); mergeCommitEvents.push_back({}); } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { for (uint32_t ledger = 0; ledger < 0x1000000; ++ledger) { - if (BucketListBase::levelShouldSpill(ledger, level)) + if (LiveBucketList::levelShouldSpill(ledger, level)) { spillEvents[level].push_back(ledger); if (spillEvents[level].size() > 5) @@ -1394,11 +1396,11 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") } } if (level != 0 && - BucketListBase::levelShouldSpill(ledger, level - 1)) + LiveBucketList::levelShouldSpill(ledger, level - 1)) { uint32_t nextChangeLedger = - ledger + BucketListBase::levelHalf(level - 1); - if (BucketListBase::levelShouldSpill(nextChangeLedger, level)) + ledger + LiveBucketList::levelHalf(level - 1); + if (LiveBucketList::levelShouldSpill(nextChangeLedger, level)) { nonMergeCommitEvents[level].push_back(ledger); } @@ -1409,17 +1411,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") } } } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto ls = formatLedgerList(spillEvents[level]); CLOG_INFO(Bucket, "levelShouldSpill({:#x}) = true @ {}", level, ls); } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto ls = formatLedgerList(mergeCommitEvents[level]); CLOG_INFO(Bucket, "mergeCommit({:#x}) @ {}", level, ls); } - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto ls = formatLedgerList(nonMergeCommitEvents[level]); CLOG_INFO(Bucket, "nonMergeCommit({:#x}) @ {}", level, ls); @@ -1428,12 +1430,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") // Print out the full bucketlist at an arbitrarily-chosen probe ledger. 
uint32_t probe = 0x11f9ab; CLOG_INFO(Bucket, "BucketList state at {:#x}", probe); - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - uint32_t currOld = BucketListBase::oldestLedgerInCurr(probe, level); - uint32_t snapOld = BucketListBase::oldestLedgerInSnap(probe, level); - uint32_t currSz = BucketListBase::sizeOfCurr(probe, level); - uint32_t snapSz = BucketListBase::sizeOfSnap(probe, level); + uint32_t currOld = LiveBucketList::oldestLedgerInCurr(probe, level); + uint32_t snapOld = LiveBucketList::oldestLedgerInSnap(probe, level); + uint32_t currSz = LiveBucketList::sizeOfCurr(probe, level); + uint32_t snapSz = LiveBucketList::sizeOfSnap(probe, level); uint32_t currNew = currOld + currSz - 1; uint32_t snapNew = snapOld + snapSz - 1; CLOG_INFO( diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 0ebd1ecb37..701e3ca7ff 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -44,7 +44,7 @@ clearFutures(Application::pointer app, LiveBucketList& bl) { // First go through the BL and mop up all the FutureBuckets. - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { bl.getLevel(i).getNext().clear(); } @@ -217,10 +217,10 @@ TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]") {CONFIG_SETTING}, 10)); std::vector dead{}; - std::shared_ptr b1; + std::shared_ptr b1; { - std::shared_ptr b2 = Bucket::fresh( + std::shared_ptr b2 = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); @@ -229,11 +229,11 @@ TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]") // Bucket is referenced by b1, b2 and the BucketManager. 
         CHECK(b1.use_count() == 3);
-        std::shared_ptr<Bucket> b3 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b3 = LiveBucket::fresh(
             app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead,
             /*countMergeEvents=*/true, clock.getIOContext(),
             /*doFsync=*/true);
-        std::shared_ptr<Bucket> b4 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b4 = LiveBucket::fresh(
             app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead,
             /*countMergeEvents=*/true, clock.getIOContext(),
             /*doFsync=*/true);
@@ -242,7 +242,7 @@
     }
     // Take pointer by reference to not mess up use_count()
-    auto dropBucket = [&](std::shared_ptr<Bucket>& b) {
+    auto dropBucket = [&](std::shared_ptr<LiveBucket>& b) {
         std::string filename = b->getFilename().string();
         std::string indexFilename =
             app->getBucketManager().bucketIndexFilename(b->getHash());
@@ -324,7 +324,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]")
                     {CONFIG_SETTING}, 10),
                 {});
             closeLedger(*app);
-        } while (!BucketListBase::levelShouldSpill(ledger, level - 1));
+        } while (!LiveBucketList::levelShouldSpill(ledger, level - 1));
         auto someBucket = bl.getLevel(1).getCurr();
         someBucketFileName = someBucket->getFilename().string();
     }
@@ -371,7 +371,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge",
                 {CONFIG_SETTING}, 10),
             {});
         bm.forgetUnreferencedBuckets();
-    } while (!BucketListBase::levelShouldSpill(ledger, level - 1));
+    } while (!LiveBucketList::levelShouldSpill(ledger, level - 1));
     // Check that the merge on level isn't committed (we're in
     // ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING mode that does not resolve
@@ -396,7 +396,7 @@
         // Reattach to _finished_ merge future on level.
         has2.currentBuckets[level].next.makeLive(
-            *app, vers, BucketListBase::keepDeadEntries(level));
+            *app, vers, LiveBucketList::keepDeadEntries(level));
         REQUIRE(has2.currentBuckets[level].next.isMerging());
         // Resolve reattached future.
@@ -473,13 +473,13 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge",
        // win quite shortly).
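// The use_count() == 3 check above counts three strong references: b1, b2,
// and the BucketManager's own copy; dropBucket takes its pointer by
// reference for the same reason. A dependency-free illustration of the
// std::shared_ptr accounting those assertions rely on (toy int payload,
// not a bucket):
#include <cassert>
#include <memory>

int
main()
{
    auto owner1 = std::make_shared<int>(42); // one strong reference
    auto owner2 = owner1;                    // two
    {
        auto owner3 = owner1;                // three, as in the test
        assert(owner1.use_count() == 3);
    } // owner3 leaves scope
    assert(owner1.use_count() == 2);

    // Taking the pointer by reference adds no new owner; taking it by
    // value would have bumped the count back to three.
    auto& alias = owner1;
    assert(alias.use_count() == 2);
    return 0;
}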
HistoryArchiveState has2; has2.fromString(serialHas); - for (uint32_t level = 0; level < BucketListBase::kNumLevels; + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { if (has2.currentBuckets[level].next.hasHashes()) { has2.currentBuckets[level].next.makeLive( - *app, vers, BucketListBase::keepDeadEntries(level)); + *app, vers, LiveBucketList::keepDeadEntries(level)); } } } @@ -504,7 +504,7 @@ TEST_CASE("bucketmanager do not leak empty-merge futures", cfg.ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING = true; cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1; auto app = createTestApplication(clock, cfg); @@ -619,7 +619,7 @@ TEST_CASE_VERSIONS( auto ra = bm.readMergeCounters().mFinishedMergeReattachments; if (protocolVersionIsBefore(vers, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { // Versions prior to FIRST_PROTOCOL_SHADOWS_REMOVED re-attach to // finished merges @@ -684,7 +684,7 @@ class StopAndRestartBucketMergesTest static void resolveAllMerges(LiveBucketList& bl) { - for (uint32 i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32 i = 0; i < LiveBucketList::kNumLevels; ++i) { auto& level = bl.getLevel(i); auto& next = level.getNext(); @@ -770,8 +770,8 @@ class StopAndRestartBucketMergesTest checkSensiblePostInitEntryMergeCounters(uint32_t protocol) const { CHECK(mMergeCounters.mPostInitEntryProtocolMerges != 0); - if (protocolVersionIsBefore(protocol, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + if (protocolVersionIsBefore( + protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == 0); } @@ -797,8 +797,8 @@ class StopAndRestartBucketMergesTest CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead != 0); CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); - if (protocolVersionIsBefore(protocol, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + if (protocolVersionIsBefore( + protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { CHECK(mMergeCounters.mShadowScanSteps != 0); CHECK(mMergeCounters.mLiveEntryShadowElisions != 0); @@ -940,7 +940,7 @@ class StopAndRestartBucketMergesTest mMergeCounters = bm.readMergeCounters(); mLedgerHeaderHash = lm.getLastClosedLedgerHeader().hash; mBucketListHash = bl.getHash(); - BucketLevel& blv = bl.getLevel(level); + BucketLevel& blv = bl.getLevel(level); mCurrBucketHash = blv.getCurr()->getHash(); mSnapBucketHash = blv.getSnap()->getHash(); } @@ -960,12 +960,12 @@ class StopAndRestartBucketMergesTest std::map& entries) { auto bl = app.getBucketManager().getLiveBucketList(); - for (uint32_t i = BucketListBase::kNumLevels; i > 0; --i) + for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) { - BucketLevel const& level = bl.getLevel(i - 1); + BucketLevel const& level = bl.getLevel(i - 1); for (auto bucket : {level.getSnap(), level.getCurr()}) { - for (BucketInputIterator bi(bucket); bi; ++bi) + for (LiveBucketInputIterator bi(bucket); bi; ++bi) { BucketEntry const& e = *bi; if (e.type() == LIVEENTRY || e.type() == INITENTRY) @@ -1008,11 +1008,11 @@ class StopAndRestartBucketMergesTest void calculateDesignatedLedgers() { - uint32_t spillFreq = BucketListBase::levelHalf(mDesignatedLevel); + uint32_t spillFreq = LiveBucketList::levelHalf(mDesignatedLevel); uint32_t prepFreq = (mDesignatedLevel == 0 ? 
1 - : BucketListBase::levelHalf(mDesignatedLevel - 1)); + : LiveBucketList::levelHalf(mDesignatedLevel - 1)); uint32_t const SPILLCOUNT = 5; uint32_t const PREPCOUNT = 5; @@ -1244,14 +1244,15 @@ class StopAndRestartBucketMergesTest auto j = mControlSurveys.find(i); if (j != mControlSurveys.end()) { - if (BucketListBase::levelShouldSpill(i, mDesignatedLevel - 1)) + if (LiveBucketList::levelShouldSpill(i, mDesignatedLevel - 1)) { // Confirm that there's a merge-in-progress at this level // (closing ledger i should have provoked a spill from // mDesignatedLevel-1 to mDesignatedLevel) LiveBucketList& bl = app->getBucketManager().getLiveBucketList(); - BucketLevel& blv = bl.getLevel(mDesignatedLevel); + BucketLevel& blv = + bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); } @@ -1279,12 +1280,13 @@ class StopAndRestartBucketMergesTest clock = std::make_unique(); app = createTestApplication(*clock, cfg, false); - if (BucketListBase::levelShouldSpill(i, mDesignatedLevel - 1)) + if (LiveBucketList::levelShouldSpill(i, mDesignatedLevel - 1)) { // Confirm that the merge-in-progress was restarted. LiveBucketList& bl = app->getBucketManager().getLiveBucketList(); - BucketLevel& blv = bl.getLevel(mDesignatedLevel); + BucketLevel& blv = + bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); } @@ -1318,7 +1320,7 @@ class StopAndRestartBucketMergesTest assert(!mControlSurveys.empty()); if (protocolVersionStartsFrom( mProtocol, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { mControlSurveys.rbegin()->second.dumpMergeCounters( "control, Post-INITENTRY", mDesignatedLevel); @@ -1342,11 +1344,11 @@ TEST_CASE("bucket persistence over app restart with initentry", { for (uint32_t protocol : {static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1, static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) { for (uint32_t level : {2, 3}) { @@ -1362,11 +1364,11 @@ TEST_CASE("bucket persistence over app restart with initentry - extended", { for (uint32_t protocol : {static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1, static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) { for (uint32_t level : {2, 3, 4, 5}) { diff --git a/src/bucket/test/BucketMergeMapTests.cpp b/src/bucket/test/BucketMergeMapTests.cpp index b5f9ea81a8..c5883e1c82 100644 --- a/src/bucket/test/BucketMergeMapTests.cpp +++ b/src/bucket/test/BucketMergeMapTests.cpp @@ -22,7 +22,7 @@ TEST_CASE("bucket merge map", "[bucket][bucketmergemap]") std::vector live = LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, numEntries); - std::shared_ptr b1 = Bucket::fresh( + std::shared_ptr b1 = LiveBucket::fresh( app->getBucketManager(), BucketTestUtils::getAppLedgerVersion(app), {}, live, {}, /*countMergeEvents=*/true, clock.getIOContext(), @@ -30,41 +30,44 @@ TEST_CASE("bucket merge map", "[bucket][bucketmergemap]") return b1; }; - std::shared_ptr 
in1a = getValidBucket();
-    std::shared_ptr<Bucket> in1b = getValidBucket();
-    std::shared_ptr<Bucket> in1c = getValidBucket();
+    std::shared_ptr<LiveBucket> in1a = getValidBucket();
+    std::shared_ptr<LiveBucket> in1b = getValidBucket();
+    std::shared_ptr<LiveBucket> in1c = getValidBucket();
-    std::shared_ptr<Bucket> in2a = getValidBucket();
-    std::shared_ptr<Bucket> in2b = getValidBucket();
-    std::shared_ptr<Bucket> in2c = getValidBucket();
+    std::shared_ptr<LiveBucket> in2a = getValidBucket();
+    std::shared_ptr<LiveBucket> in2b = getValidBucket();
+    std::shared_ptr<LiveBucket> in2c = getValidBucket();
-    std::shared_ptr<Bucket> in3a = getValidBucket();
-    std::shared_ptr<Bucket> in3b = getValidBucket();
-    std::shared_ptr<Bucket> in3c = getValidBucket();
-    std::shared_ptr<Bucket> in3d = getValidBucket();
+    std::shared_ptr<LiveBucket> in3a = getValidBucket();
+    std::shared_ptr<LiveBucket> in3b = getValidBucket();
+    std::shared_ptr<LiveBucket> in3c = getValidBucket();
+    std::shared_ptr<LiveBucket> in3d = getValidBucket();
-    std::shared_ptr<Bucket> in4a = getValidBucket();
-    std::shared_ptr<Bucket> in4b = getValidBucket();
+    std::shared_ptr<LiveBucket> in4a = getValidBucket();
+    std::shared_ptr<LiveBucket> in4b = getValidBucket();
-    std::shared_ptr<Bucket> in5a = getValidBucket();
-    std::shared_ptr<Bucket> in5b = getValidBucket();
+    std::shared_ptr<LiveBucket> in5a = getValidBucket();
+    std::shared_ptr<LiveBucket> in5b = getValidBucket();
-    std::shared_ptr<Bucket> in6a = getValidBucket();
-    std::shared_ptr<Bucket> in6b = getValidBucket();
+    std::shared_ptr<LiveBucket> in6a = getValidBucket();
+    std::shared_ptr<LiveBucket> in6b = getValidBucket();
-    std::shared_ptr<Bucket> out1 = getValidBucket();
-    std::shared_ptr<Bucket> out2 = getValidBucket();
-    std::shared_ptr<Bucket> out4 = getValidBucket();
-    std::shared_ptr<Bucket> out6 = getValidBucket();
+    std::shared_ptr<LiveBucket> out1 = getValidBucket();
+    std::shared_ptr<LiveBucket> out2 = getValidBucket();
+    std::shared_ptr<LiveBucket> out4 = getValidBucket();
+    std::shared_ptr<LiveBucket> out6 = getValidBucket();
     BucketMergeMap bmm;
-    MergeKey m1{true, in1a, in1b, {in1c}};
-    MergeKey m2{true, in2a, in2b, {in2c}};
-    MergeKey m3{true, in3a, in3b, {in3c, in3d}};
-    MergeKey m4{true, in4a, in4b, {}};
-    MergeKey m5{true, in5a, in5b, {}};
-    MergeKey m6{true, in6a, in6b, {in1a}};
+    MergeKey m1{true, in1a->getHash(), in1b->getHash(), {in1c->getHash()}};
+    MergeKey m2{true, in2a->getHash(), in2b->getHash(), {in2c->getHash()}};
+    MergeKey m3{true,
+                in3a->getHash(),
+                in3b->getHash(),
+                {in3c->getHash(), in3d->getHash()}};
+    MergeKey m4{true, in4a->getHash(), in4b->getHash(), {}};
+    MergeKey m5{true, in5a->getHash(), in5b->getHash(), {}};
+    MergeKey m6{true, in6a->getHash(), in6b->getHash(), {in1a->getHash()}};
     bmm.recordMerge(m1, out1->getHash());
     bmm.recordMerge(m2, out2->getHash());
diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp
index c853076d74..795d9a7a06 100644
--- a/src/bucket/test/BucketTestUtils.cpp
+++ b/src/bucket/test/BucketTestUtils.cpp
@@ -51,16 +51,16 @@ for_versions_with_differing_bucket_logic(
 {
     for_versions(
         {static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
             1,
         static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
-         static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)},
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
+         static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)},
        cfg, f);
 }
 size_t
-countEntries(std::shared_ptr<Bucket> bucket)
+countEntries(std::shared_ptr<LiveBucket> bucket)
 {
     EntryCounts e(bucket);
     return e.sum();
@@ -88,9 +88,9 @@ closeLedger(Application& app)
 {
     return closeLedger(app, std::nullopt);
 }
-EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
+EntryCounts::EntryCounts(std::shared_ptr<LiveBucket> bucket)
 {
-    BucketInputIterator iter(bucket);
+
LiveBucketInputIterator iter(bucket); if (iter.seenMetadata()) { ++nMeta; diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h index 0b57cc6080..8fff139f37 100644 --- a/src/bucket/test/BucketTestUtils.h +++ b/src/bucket/test/BucketTestUtils.h @@ -41,10 +41,10 @@ struct EntryCounts return nLive + nInit + nDead + nMeta; } - EntryCounts(std::shared_ptr bucket); + EntryCounts(std::shared_ptr bucket); }; -size_t countEntries(std::shared_ptr bucket); +size_t countEntries(std::shared_ptr bucket); Hash closeLedger(Application& app, std::optional skToSignValue, xdr::xvector upgrades = emptyUpgradeSteps); diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp index 4da46e26a7..6f08b4c8bf 100644 --- a/src/bucket/test/BucketTests.cpp +++ b/src/bucket/test/BucketTests.cpp @@ -48,10 +48,10 @@ for_versions_with_differing_initentry_logic( { for_versions( {static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1, static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)}, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)}, cfg, f); } @@ -67,7 +67,7 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]") auto dead = LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 1000); CLOG_DEBUG(Bucket, "Hashing entries"); - std::shared_ptr b1 = Bucket::fresh( + std::shared_ptr b1 = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); @@ -83,11 +83,11 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]") b1 = Bucket::merge( app->getBucketManager(), app->getConfig().LEDGER_PROTOCOL_VERSION, b1, - Bucket::fresh(app->getBucketManager(), - getAppLedgerVersion(app), {}, live, dead, - /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true), + LiveBucket::fresh(app->getBucketManager(), + getAppLedgerVersion(app), {}, live, dead, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true), /*shadows=*/{}, /*keepDeadEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), @@ -161,14 +161,14 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") abort(); } auto deadEntry = LedgerEntryKey(liveEntry); - auto bLive = Bucket::fresh(bm, vers, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true); - auto bDead = Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true); + auto bLive = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{}, /*keepDeadEntries=*/true, /*countMergeEvents=*/true, @@ -200,14 +200,14 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") dead.push_back(LedgerEntryKey(e)); } } - auto bLive = - Bucket::fresh(bm, vers, {}, live, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bDead = - Bucket::fresh(bm, vers, {}, {}, dead, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto bLive = LiveBucket::fresh(bm, vers, {}, live, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto bDead = 
LiveBucket::fresh(bm, vers, {}, {}, dead, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{}, /*keepDeadEntries=*/true, @@ -226,7 +226,7 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100); std::vector dead; - std::shared_ptr b1 = Bucket::fresh( + std::shared_ptr b1 = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); @@ -258,11 +258,11 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") ++liveCount; } } - std::shared_ptr b2 = Bucket::fresh( + std::shared_ptr b2 = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - std::shared_ptr b3 = + std::shared_ptr b3 = Bucket::merge(app->getBucketManager(), app->getConfig().LEDGER_PROTOCOL_VERSION, b1, b2, /*shadows=*/{}, /*keepDeadEntries=*/true, @@ -330,7 +330,8 @@ TEST_CASE("merges proceed old-style despite newer shadows", Config const& cfg = getTestConfig(); Application::pointer app = createTestApplication(clock, cfg); auto& bm = app->getBucketManager(); - auto v12 = static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED); + auto v12 = + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED); auto v11 = v12 - 1; auto v10 = v11 - 1; @@ -338,31 +339,31 @@ TEST_CASE("merges proceed old-style despite newer shadows", LedgerEntry otherLiveA = generateDifferentAccount({liveEntry}); auto b10first = - Bucket::fresh(bm, v10, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + LiveBucket::fresh(bm, v10, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); auto b10second = - Bucket::fresh(bm, v10, {}, {otherLiveA}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + LiveBucket::fresh(bm, v10, {}, {otherLiveA}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); auto b11first = - Bucket::fresh(bm, v11, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + LiveBucket::fresh(bm, v11, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); auto b11second = - Bucket::fresh(bm, v11, {}, {otherLiveA}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + LiveBucket::fresh(bm, v11, {}, {otherLiveA}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); auto b12first = - Bucket::fresh(bm, v12, {}, {liveEntry}, {}, /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true); + LiveBucket::fresh(bm, v12, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); auto b12second = - Bucket::fresh(bm, v12, {}, {otherLiveA}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + LiveBucket::fresh(bm, v12, {}, {otherLiveA}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); SECTION("shadow version 12") { @@ -373,7 +374,7 @@ TEST_CASE("merges proceed old-style despite newer shadows", /*keepDeadEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - REQUIRE(Bucket::getBucketVersion(bucket) == v11); + REQUIRE(bucket->getBucketVersion() == v11); } SECTION("shadow versions mixed, pick lower") { @@ -385,7 +386,7 @@ 
TEST_CASE("merges proceed old-style despite newer shadows", /*keepDeadEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - REQUIRE(Bucket::getBucketVersion(bucket) == v11); + REQUIRE(bucket->getBucketVersion() == v11); } SECTION("refuse to merge new version with shadow") { @@ -409,18 +410,22 @@ TEST_CASE("merges refuse to exceed max protocol version", auto vers = getAppLedgerVersion(app); LedgerEntry liveEntry = generateAccount(); LedgerEntry otherLiveA = generateDifferentAccount({liveEntry}); - auto bold1 = Bucket::fresh(bm, vers - 1, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bold2 = Bucket::fresh(bm, vers - 1, {}, {otherLiveA}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bnew1 = Bucket::fresh(bm, vers, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bnew2 = Bucket::fresh(bm, vers, {}, {otherLiveA}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto bold1 = + LiveBucket::fresh(bm, vers - 1, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + auto bold2 = + LiveBucket::fresh(bm, vers - 1, {}, {otherLiveA}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + auto bnew1 = + LiveBucket::fresh(bm, vers, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + auto bnew2 = + LiveBucket::fresh(bm, vers, {}, {otherLiveA}, {}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); REQUIRE_THROWS_AS(Bucket::merge(bm, vers - 1, bnew1, bnew2, /*shadows=*/{}, /*keepDeadEntries=*/true, @@ -436,7 +441,7 @@ TEST_CASE("bucket output iterator rejects wrong-version entries", VirtualClock clock; Config const& cfg = getTestConfig(); auto vers_new = static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); BucketMetadata meta; meta.ledgerVersion = vers_new - 1; Application::pointer app = createTestApplication(clock, cfg); @@ -447,8 +452,8 @@ TEST_CASE("bucket output iterator rejects wrong-version entries", metaEntry.type(METAENTRY); metaEntry.metaEntry() = meta; MergeCounters mc; - BucketOutputIterator out(bm.getTmpDir(), true, meta, mc, - clock.getIOContext(), /*doFsync=*/true); + LiveBucketOutputIterator out(bm.getTmpDir(), true, meta, mc, + clock.getIOContext(), /*doFsync=*/true); REQUIRE_THROWS_AS(out.put(initEntry), std::runtime_error); REQUIRE_THROWS_AS(out.put(metaEntry), std::runtime_error); } @@ -466,7 +471,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", // Whether we're in the era of supporting or not-supporting INITENTRY. 
bool initEra = protocolVersionStartsFrom( - vers, Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); + vers, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); CLOG_INFO(Bucket, "=== finished buckets for initial account == "); @@ -488,14 +494,14 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", SECTION("dead and init account entries merge correctly") { - auto bInit = - Bucket::fresh(bm, vers, {initEntry}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bDead = - Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto bInit = LiveBucket::fresh(bm, vers, {initEntry}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); auto b1 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bDead, /*shadows=*/{}, /*keepDeadEntries=*/true, @@ -524,18 +530,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", SECTION("dead and init entries merge with intervening live entries " "correctly") { - auto bInit = - Bucket::fresh(bm, vers, {initEntry}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bLive = - Bucket::fresh(bm, vers, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bDead = - Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto bInit = LiveBucket::fresh(bm, vers, {initEntry}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto bLive = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); auto bmerge1 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bLive, /*shadows=*/{}, /*keepDeadEntries=*/true, @@ -566,18 +572,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", SECTION("dead and init entries annihilate multiple live entries via " "separate buckets") { - auto bold = - Bucket::fresh(bm, vers, {initEntry}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto bmed = Bucket::fresh( + auto bold = LiveBucket::fresh(bm, vers, {initEntry}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto bmed = LiveBucket::fresh( bm, vers, {}, {otherLiveA, otherLiveB, liveEntry, otherLiveC}, {}, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto bnew = - Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto bnew = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); EntryCounts eold(bold), emed(bmed), enew(bnew); if (initEra) { @@ -655,7 +661,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // Whether we're in the era of supporting or not-supporting INITENTRY. 
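// The SECTIONs above assert the INITENTRY-era lifecycle rules: a newer DEAD
// entry meeting an older INIT for the same key annihilates (nothing is
// emitted), while a newer LIVE written over an older INIT is still emitted
// as an INIT. A toy model of just that rule, with an invented enum -- a
// sketch of the behavior under test, not the engine's actual maybePut logic:
#include <cassert>
#include <optional>

enum class Kind
{
    Init,
    Live,
    Dead
};

// Newest entry first, older second; std::nullopt means "emit nothing".
static std::optional<Kind>
mergeOneKey(Kind newer, Kind older)
{
    if (older == Kind::Init && newer == Kind::Dead)
    {
        return std::nullopt; // INIT + DEAD annihilate
    }
    if (older == Kind::Init && newer == Kind::Live)
    {
        return Kind::Init; // an update to an INIT stays an INIT
    }
    return newer; // otherwise the newer entry wins
}

int
main()
{
    assert(!mergeOneKey(Kind::Dead, Kind::Init).has_value());
    assert(mergeOneKey(Kind::Live, Kind::Init) == Kind::Init);
    assert(mergeOneKey(Kind::Dead, Kind::Live) == Kind::Dead);
    return 0;
}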
bool initEra = protocolVersionStartsFrom( - vers, Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); + vers, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); CLOG_INFO(Bucket, "=== finished buckets for initial account == "); @@ -680,18 +687,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // In pre-11 versions, shadows _do_ eliminate lifecycle entries // (INIT/DEAD). In 11-and-after versions, shadows _don't_ eliminate // lifecycle entries. - auto shadow = - Bucket::fresh(bm, vers, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto b1 = - Bucket::fresh(bm, vers, {initEntry}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto b2 = - Bucket::fresh(bm, vers, {otherInitA}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto shadow = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto b1 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto b2 = LiveBucket::fresh(bm, vers, {otherInitA}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); auto merged = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, b1, b2, /*shadows=*/{shadow}, @@ -722,26 +729,26 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // INIT. See comment in `maybePut` in Bucket.cpp. // // (level1 is newest here, level5 is oldest) - auto level1 = - Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto level2 = - Bucket::fresh(bm, vers, {initEntry2}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto level3 = - Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto level4 = - Bucket::fresh(bm, vers, {}, {}, {}, /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true); - auto level5 = - Bucket::fresh(bm, vers, {initEntry}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto level1 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto level2 = LiveBucket::fresh(bm, vers, {initEntry2}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto level3 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto level4 = LiveBucket::fresh(bm, vers, {}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto level5 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); // Do a merge between levels 4 and 3, with shadows from 2 and 1, // risking shadowing-out level 3. Level 4 is a placeholder here, @@ -839,18 +846,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // `maybePut` in Bucket.cpp. 
// // (level1 is newest here, level3 is oldest) - auto level1 = - Bucket::fresh(bm, vers, {}, {}, {deadEntry}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto level2 = - Bucket::fresh(bm, vers, {}, {liveEntry}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto level3 = - Bucket::fresh(bm, vers, {initEntry}, {}, {}, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto level1 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto level2 = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); + auto level3 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {}, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); // Do a merge between levels 3 and 2, with shadow from 1, risking // shadowing-out the init on level 3. Level 2 is a placeholder here, @@ -930,12 +937,12 @@ TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]") dead.emplace_back(LedgerEntryKey(e)); } - std::shared_ptr birth = Bucket::fresh( + std::shared_ptr birth = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - std::shared_ptr death = Bucket::fresh( + std::shared_ptr death = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, noLive, dead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); @@ -973,7 +980,7 @@ TEST_CASE("bucket apply bench", "[bucketbench][!hide]") a = LedgerTestUtils::generateValidAccountEntry(5); } - std::shared_ptr birth = Bucket::fresh( + std::shared_ptr birth = LiveBucket::fresh( app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index 3f890d2ac2..a71b672f0b 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -56,12 +56,12 @@ ApplyBucketsWork::startingLevel() { return mApp.getConfig().isUsingBucketListDB() ? 0 - : BucketListBase::kNumLevels - 1; + : LiveBucketList::kNumLevels - 1; } ApplyBucketsWork::ApplyBucketsWork( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, std::function onlyApply) : Work(app, "apply-buckets", BasicWork::RETRY_NEVER) @@ -77,20 +77,21 @@ ApplyBucketsWork::ApplyBucketsWork( ApplyBucketsWork::ApplyBucketsWork( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion) : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, [](LedgerEntryType) { return true; }) { } -std::shared_ptr +std::shared_ptr ApplyBucketsWork::getBucket(std::string const& hash) { auto i = mBuckets.find(hash); - auto b = (i != mBuckets.end()) - ? i->second - : mApp.getBucketManager().getBucketByHash(hexToBin256(hash)); + auto b = + (i != mBuckets.end()) + ? 
i->second + : mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash)); releaseAssert(b); return b; } @@ -143,7 +144,7 @@ ApplyBucketsWork::doReset() } } - auto addBucket = [this](std::shared_ptr const& bucket) { + auto addBucket = [this](std::shared_ptr const& bucket) { if (bucket->getSize() > 0) { mTotalBuckets++; @@ -199,7 +200,7 @@ ApplyBucketsWork::startBucket() ZoneScoped; auto bucket = mBucketsToApply.at(mBucketToApplyIndex); mMinProtocolVersionSeen = - std::min(mMinProtocolVersionSeen, Bucket::getBucketVersion(bucket)); + std::min(mMinProtocolVersionSeen, bucket->getBucketVersion()); // Create a new applicator for the bucket. mBucketApplicator = std::make_unique( mApp, mMaxProtocolVersion, mMinProtocolVersionSeen, mLevel, bucket, diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h index 173026daec..77674e816e 100644 --- a/src/catchup/ApplyBucketsWork.h +++ b/src/catchup/ApplyBucketsWork.h @@ -12,7 +12,6 @@ namespace stellar { class AssumeStateWork; -class BucketLevel; class LiveBucketList; class Bucket; class IndexBucketsWork; @@ -21,7 +20,7 @@ struct LedgerHeaderHistoryEntry; class ApplyBucketsWork : public Work { - std::map> const& mBuckets; + std::map> const& mBuckets; HistoryArchiveState const& mApplyState; std::function mEntryTypeFilter; @@ -40,14 +39,14 @@ class ApplyBucketsWork : public Work uint32_t mMaxProtocolVersion{0}; uint32_t mMinProtocolVersionSeen{UINT32_MAX}; std::unordered_set mSeenKeys; - std::vector> mBucketsToApply; + std::vector> mBucketsToApply; std::unique_ptr mBucketApplicator; bool mDelayChecked{false}; BucketApplicator::Counters mCounters; void advance(std::string const& name, BucketApplicator& applicator); - std::shared_ptr getBucket(std::string const& bucketHash); + std::shared_ptr getBucket(std::string const& bucketHash); uint32_t startingLevel(); bool appliedAllBuckets() const; @@ -57,11 +56,11 @@ class ApplyBucketsWork : public Work public: ApplyBucketsWork( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion); ApplyBucketsWork( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, std::function onlyApply); ~ApplyBucketsWork() = default; diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp index 42f80f66d9..0325180c23 100644 --- a/src/catchup/AssumeStateWork.cpp +++ b/src/catchup/AssumeStateWork.cpp @@ -26,12 +26,12 @@ AssumeStateWork::AssumeStateWork(Application& app, // Maintain reference to all Buckets in HAS to avoid garbage collection, // including future buckets that have already finished merging auto& bm = mApp.getBucketManager(); - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto curr = - bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr)); + bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr)); auto snap = - bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap)); + bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap)); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while " @@ -44,7 +44,7 @@ AssumeStateWork::AssumeStateWork(Application& app, if (nextFuture.hasOutputHash()) { auto nextBucket = - bm.getBucketByHash(hexToBin256(nextFuture.getOutputHash())); + bm.getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash())); if (!nextBucket) { throw 
std::runtime_error("Missing future bucket files while " diff --git a/src/catchup/AssumeStateWork.h b/src/catchup/AssumeStateWork.h index 689cc1f1f6..92dc4b903c 100644 --- a/src/catchup/AssumeStateWork.h +++ b/src/catchup/AssumeStateWork.h @@ -11,6 +11,7 @@ namespace stellar class Bucket; struct HistoryArchiveState; +class LiveBucket; class AssumeStateWork : public Work { @@ -21,7 +22,7 @@ class AssumeStateWork : public Work // Keep strong reference to buckets in HAS so they are not garbage // collected during indexing - std::vector> mBuckets{}; + std::vector> mBuckets{}; public: AssumeStateWork(Application& app, HistoryArchiveState const& has, diff --git a/src/catchup/CatchupManager.h b/src/catchup/CatchupManager.h index dc5655f05d..a6f5344880 100644 --- a/src/catchup/CatchupManager.h +++ b/src/catchup/CatchupManager.h @@ -64,7 +64,7 @@ class CatchupManager virtual void startCatchup(CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + std::set> bucketsToRetain) = 0; // Return status of catchup for or empty string, if no catchup in progress virtual std::string getStatus() const = 0; diff --git a/src/catchup/CatchupManagerImpl.cpp b/src/catchup/CatchupManagerImpl.cpp index 2070989de8..9c101066cf 100644 --- a/src/catchup/CatchupManagerImpl.cpp +++ b/src/catchup/CatchupManagerImpl.cpp @@ -238,7 +238,7 @@ CatchupManagerImpl::processLedger(LedgerCloseData const& ledgerData) void CatchupManagerImpl::startCatchup( CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) + std::set> bucketsToRetain) { ZoneScoped; auto lastClosedLedger = mApp.getLedgerManager().getLastClosedLedgerNum(); diff --git a/src/catchup/CatchupManagerImpl.h b/src/catchup/CatchupManagerImpl.h index 917f5a3a4a..b02876c7c7 100644 --- a/src/catchup/CatchupManagerImpl.h +++ b/src/catchup/CatchupManagerImpl.h @@ -62,10 +62,10 @@ class CatchupManagerImpl : public CatchupManager ~CatchupManagerImpl() override; void processLedger(LedgerCloseData const& ledgerData) override; - void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup( + CatchupConfiguration configuration, + std::shared_ptr archive, + std::set> bucketsToRetain) override; std::string getStatus() const override; diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp index 2d06425863..bbc177f8c0 100644 --- a/src/catchup/CatchupWork.cpp +++ b/src/catchup/CatchupWork.cpp @@ -77,7 +77,7 @@ setHerderStateTo(FileTransferInfo const& ft, uint32_t ledger, Application& app) CatchupWork::CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, + std::set> bucketsToRetain, std::shared_ptr archive) : Work(app, "catchup", BasicWork::RETRY_NEVER) , mLocalState{app.getLedgerManager().getLastClosedLedgerHAS()} diff --git a/src/catchup/CatchupWork.h b/src/catchup/CatchupWork.h index ed36c75f5c..d650bbc910 100644 --- a/src/catchup/CatchupWork.h +++ b/src/catchup/CatchupWork.h @@ -47,7 +47,7 @@ class CatchupWork : public Work protected: HistoryArchiveState mLocalState; std::unique_ptr mDownloadDir; - std::map> mBuckets; + std::map> mBuckets; void doReset() override; BasicWork::State doWork() override; @@ -65,7 +65,7 @@ class CatchupWork : public Work static uint32_t const PUBLISH_QUEUE_MAX_SIZE; CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, + std::set> bucketsToRetain, std::shared_ptr archive = nullptr); virtual 
~CatchupWork(); std::string getStatus() const override; @@ -128,6 +128,6 @@ class CatchupWork : public Work std::optional mHAS; std::optional mBucketHAS; - std::set> mRetainedBuckets; + std::set> mRetainedBuckets; }; } diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 40ff5d21f9..8a5ad90936 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -15,7 +15,7 @@ namespace stellar { IndexBucketsWork::IndexWork::IndexWork(Application& app, - std::shared_ptr b) + std::shared_ptr b) : BasicWork(app, "index-work", BasicWork::RETRY_NEVER), mBucket(b) { } @@ -118,7 +118,7 @@ IndexBucketsWork::IndexWork::postWork() } IndexBucketsWork::IndexBucketsWork( - Application& app, std::vector> const& buckets) + Application& app, std::vector> const& buckets) : Work(app, "index-bucketList", BasicWork::RETRY_NEVER), mBuckets(buckets) { } @@ -144,7 +144,7 @@ void IndexBucketsWork::spawnWork() { UnorderedSet indexedBuckets; - auto spawnIndexWork = [&](std::shared_ptr const& b) { + auto spawnIndexWork = [&](std::shared_ptr const& b) { // Don't index empty bucket or buckets that are already being // indexed. Sometimes one level's snap bucket may be another // level's future bucket. The indexing job may have started but diff --git a/src/catchup/IndexBucketsWork.h b/src/catchup/IndexBucketsWork.h index 5473c1c7d7..08415387ee 100644 --- a/src/catchup/IndexBucketsWork.h +++ b/src/catchup/IndexBucketsWork.h @@ -14,19 +14,20 @@ namespace stellar class Bucket; class BucketIndex; class BucketManager; +class LiveBucket; class IndexBucketsWork : public Work { class IndexWork : public BasicWork { - std::shared_ptr mBucket; + std::shared_ptr mBucket; std::unique_ptr mIndex; BasicWork::State mState{BasicWork::State::WORK_WAITING}; void postWork(); public: - IndexWork(Application& app, std::shared_ptr b); + IndexWork(Application& app, std::shared_ptr b); protected: State onRun() override; @@ -34,14 +35,14 @@ class IndexBucketsWork : public Work void onReset() override; }; - std::vector> const& mBuckets; + std::vector> const& mBuckets; bool mWorkSpawned{false}; void spawnWork(); public: IndexBucketsWork(Application& app, - std::vector> const& buckets); + std::vector> const& buckets); protected: State doWork() override; diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp index db68391cd4..002ab4fd00 100644 --- a/src/herder/test/UpgradesTests.cpp +++ b/src/herder/test/UpgradesTests.cpp @@ -1998,7 +1998,7 @@ TEST_CASE("upgrade to version 11", "[upgrades]") ledgerSeq, mc.mPreInitEntryProtocolMerges, mc.mPostInitEntryProtocolMerges, mc.mNewInitEntries, mc.mOldInitEntries); - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto& lev = bm.getLiveBucketList().getLevel(level); BucketTestUtils::EntryCounts currCounts(lev.getCurr()); @@ -2039,8 +2039,8 @@ TEST_CASE("upgrade to version 11", "[upgrades]") BucketTestUtils::EntryCounts lev0CurrCounts(lev0Curr); BucketTestUtils::EntryCounts lev0SnapCounts(lev0Snap); BucketTestUtils::EntryCounts lev1CurrCounts(lev1Curr); - auto getVers = [](std::shared_ptr b) -> uint32_t { - return BucketInputIterator(b).getMetadata().ledgerVersion; + auto getVers = [](std::shared_ptr b) -> uint32_t { + return LiveBucketInputIterator(b).getMetadata().ledgerVersion; }; switch (ledgerSeq) { @@ -2128,8 +2128,8 @@ TEST_CASE("upgrade to version 12", "[upgrades]") auto lev0Snap = lev0.getSnap(); auto lev1Curr = lev1.getCurr(); 
auto lev1Snap = lev1.getSnap(); - auto getVers = [](std::shared_ptr b) -> uint32_t { - return BucketInputIterator(b).getMetadata().ledgerVersion; + auto getVers = [](std::shared_ptr b) -> uint32_t { + return LiveBucketInputIterator(b).getMetadata().ledgerVersion; }; switch (ledgerSeq) { diff --git a/src/history/FileTransferInfo.h b/src/history/FileTransferInfo.h index bfe830762e..d62e43cd48 100644 --- a/src/history/FileTransferInfo.h +++ b/src/history/FileTransferInfo.h @@ -38,7 +38,7 @@ class FileTransferInfo std::string getLocalDir(TmpDir const& localRoot) const; public: - FileTransferInfo(Bucket const& bucket) + FileTransferInfo(LiveBucket const& bucket) : mType(FileType::HISTORY_FILE_TYPE_BUCKET) , mHexDigits(binToHex(bucket.getHash())) , mLocalPath(bucket.getFilename().string()) diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp index 86d91b3ade..a2f8992547 100644 --- a/src/history/HistoryArchive.cpp +++ b/src/history/HistoryArchive.cpp @@ -246,7 +246,7 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const inhibit.insert(b.snap); } std::vector ret; - for (size_t i = BucketListBase::kNumLevels; i != 0; --i) + for (size_t i = LiveBucketList::kNumLevels; i != 0; --i) { auto s = currentBuckets[i - 1].snap; auto n = s; @@ -307,12 +307,12 @@ HistoryArchiveState::containsValidBuckets(Application& app) const // Process bucket, return version auto processBucket = [&](std::string const& bucketHash) { auto bucket = - app.getBucketManager().getBucketByHash(hexToBin256(bucketHash)); + app.getBucketManager().getLiveBucketByHash(hexToBin256(bucketHash)); releaseAssert(bucket); int32_t version = 0; if (!bucket->isEmpty()) { - version = Bucket::getBucketVersion(bucket); + version = bucket->getBucketVersion(); if (!nonEmptySeen) { nonEmptySeen = true; @@ -322,7 +322,7 @@ HistoryArchiveState::containsValidBuckets(Application& app) const }; // Iterate bottom-up, from oldest to newest buckets - for (uint32_t j = BucketListBase::kNumLevels; j != 0; --j) + for (uint32_t j = LiveBucketList::kNumLevels; j != 0; --j) { auto i = j - 1; auto const& level = currentBuckets[i]; @@ -358,7 +358,8 @@ HistoryArchiveState::containsValidBuckets(Application& app) const continue; } else if (protocolVersionStartsFrom( - prevSnapVersion, Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + prevSnapVersion, + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { if (!level.next.isClear()) { @@ -384,16 +385,17 @@ HistoryArchiveState::prepareForPublish(Application& app) // Level 0 future buckets are always clear releaseAssert(currentBuckets[0].next.isClear()); - for (uint32_t i = 1; i < BucketListBase::kNumLevels; i++) + for (uint32_t i = 1; i < LiveBucketList::kNumLevels; i++) { auto& level = currentBuckets[i]; auto& prev = currentBuckets[i - 1]; auto snap = - app.getBucketManager().getBucketByHash(hexToBin256(prev.snap)); + app.getBucketManager().getLiveBucketByHash(hexToBin256(prev.snap)); if (!level.next.isClear() && - protocolVersionStartsFrom(Bucket::getBucketVersion(snap), - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + protocolVersionStartsFrom( + snap->getBucketVersion(), + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { level.next.clear(); } @@ -423,7 +425,7 @@ HistoryArchiveState::HistoryArchiveState() : server(STELLAR_CORE_VERSION) HistoryStateBucket b; b.curr = s; b.snap = s; - while (currentBuckets.size() < BucketListBase::kNumLevels) + while (currentBuckets.size() < LiveBucketList::kNumLevels) { currentBuckets.push_back(b); } @@ -436,7 +438,7 @@ 
HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq, , networkPassphrase(passphrase) , currentLedger(ledgerSeq) { - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { HistoryStateBucket b; auto& level = buckets.getLevel(i); diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h index 02363e86c8..378716118d 100644 --- a/src/history/HistoryArchive.h +++ b/src/history/HistoryArchive.h @@ -33,7 +33,9 @@ class Bucket; struct HistoryStateBucket { std::string curr; - FutureBucket next; + + // TODO: Add archival buckets to history + FutureBucket next; std::string snap; template diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp index bcae0678b8..8e28261745 100644 --- a/src/history/StateSnapshot.cpp +++ b/src/history/StateSnapshot.cpp @@ -44,7 +44,7 @@ StateSnapshot::StateSnapshot(Application& app, HistoryArchiveState const& state) mSnapDir, FileType::HISTORY_FILE_TYPE_SCP, mLocalState.currentLedger)) { - if (mLocalState.currentBuckets.size() != BucketListBase::kNumLevels) + if (mLocalState.currentBuckets.size() != LiveBucketList::kNumLevels) { throw std::runtime_error("Invalid HAS: malformed bucketlist"); } @@ -121,7 +121,7 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) for (auto const& hash : mLocalState.differingBuckets(other)) { - auto b = mApp.getBucketManager().getBucketByHash(hexToBin256(hash)); + auto b = mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash)); releaseAssert(b); addIfExists(std::make_shared(*b)); } diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 27550f6996..875f9f90f6 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -147,7 +147,7 @@ TEST_CASE("History bucket verification", "[history][catchup]") cg->getArchiveDirName())}; std::vector hashes; auto& wm = app->getWorkScheduler(); - std::map> mBuckets; + std::map> mBuckets; auto tmpDir = std::make_unique(app->getTmpDirManager().tmpDir("bucket-test")); @@ -642,7 +642,7 @@ TEST_CASE("Publish works correctly post shadow removal", "[history]") // Perform publish: 2 checkpoints (or 127 ledgers) correspond to 3 // levels being initialized and partially filled in the bucketlist sim.setUpgradeLedger(upgradeLedger, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED); + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED); auto checkpointLedger = sim.getLastCheckpointLedger(2); auto maxLevelTouched = 3; sim.ensureOfflineCatchupPossible(checkpointLedger); @@ -661,7 +661,7 @@ TEST_CASE("Publish works correctly post shadow removal", "[history]") configurator}; uint32_t oldProto = - static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - 1; + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - 1; catchupSimulation.generateRandomLedger(oldProto); // The next sections reflect how future buckets in HAS change, depending on @@ -1138,7 +1138,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works", "[history][bucket][acceptance]") { uint32_t newProto = static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); uint32_t oldProto = newProto - 1; auto configurator = std::make_shared(); @@ -1337,7 +1337,7 @@ TEST_CASE_VERSIONS( // Second, ensure `next` is in the exact same state as when it was // queued - for (uint32_t i = 0; i < BucketListBase::kNumLevels; i++) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; i++) { auto const& 
currentNext = bl.getLevel(i).getNext(); auto const& queuedNext = queuedHAS.currentBuckets[i].next; diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp index 76b3d5eea3..6f7a880638 100644 --- a/src/history/test/HistoryTestsUtils.cpp +++ b/src/history/test/HistoryTestsUtils.cpp @@ -139,7 +139,7 @@ BucketOutputIteratorForTesting::writeTmpTestBucket() auto ledgerEntries = LedgerTestUtils::generateValidUniqueLedgerEntries(NUM_ITEMS_PER_BUCKET); auto bucketEntries = - Bucket::convertToBucketEntry(false, {}, ledgerEntries, {}); + LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {}); for (auto const& bucketEntry : bucketEntries) { put(bucketEntry); @@ -1021,8 +1021,8 @@ CatchupSimulation::validateCatchup(Application::pointer app) CHECK(wantBucketListHash == haveBucketListHash); CHECK(wantHash == haveHash); - CHECK(app->getBucketManager().getBucketByHash(wantBucket0Hash)); - CHECK(app->getBucketManager().getBucketByHash(wantBucket1Hash)); + CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket0Hash)); + CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket1Hash)); CHECK(wantBucket0Hash == haveBucket0Hash); CHECK(wantBucket1Hash == haveBucket1Hash); diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h index 04d8929589..7ab09e1443 100644 --- a/src/history/test/HistoryTestsUtils.h +++ b/src/history/test/HistoryTestsUtils.h @@ -98,7 +98,7 @@ class RealGenesisTmpDirHistoryConfigurator : public TmpDirHistoryConfigurator Config& configure(Config& cfg, bool writable) const override; }; -class BucketOutputIteratorForTesting : public BucketOutputIterator +class BucketOutputIteratorForTesting : public LiveBucketOutputIterator { const size_t NUM_ITEMS_PER_BUCKET = 5; diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp index fffc94a0ea..0ef542af58 100644 --- a/src/historywork/DownloadBucketsWork.cpp +++ b/src/historywork/DownloadBucketsWork.cpp @@ -17,7 +17,8 @@ namespace stellar { DownloadBucketsWork::DownloadBucketsWork( - Application& app, std::map>& buckets, + Application& app, + std::map>& buckets, std::vector hashes, TmpDir const& downloadDir, std::shared_ptr archive) : BatchWork{app, "download-verify-buckets"} @@ -94,7 +95,7 @@ DownloadBucketsWork::yieldMoreWork() if (self) { auto bucketPath = ft.localPath_nogz(); - auto b = app.getBucketManager().adoptFileAsBucket( + auto b = app.getBucketManager().adoptFileAsLiveBucket( bucketPath, hexToBin256(hash), /*mergeKey=*/nullptr, /*index=*/nullptr); diff --git a/src/historywork/DownloadBucketsWork.h b/src/historywork/DownloadBucketsWork.h index b55942eeb3..52db6cd968 100644 --- a/src/historywork/DownloadBucketsWork.h +++ b/src/historywork/DownloadBucketsWork.h @@ -17,18 +17,18 @@ class HistoryArchive; class DownloadBucketsWork : public BatchWork { - std::map>& mBuckets; + std::map>& mBuckets; std::vector mHashes; std::vector::const_iterator mNextBucketIter; TmpDir const& mDownloadDir; std::shared_ptr mArchive; public: - DownloadBucketsWork(Application& app, - std::map>& buckets, - std::vector hashes, - TmpDir const& downloadDir, - std::shared_ptr archive = nullptr); + DownloadBucketsWork( + Application& app, + std::map>& buckets, + std::vector hashes, TmpDir const& downloadDir, + std::shared_ptr archive = nullptr); ~DownloadBucketsWork() = default; std::string getStatus() const override; diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp index 
97368e785f..11aa8ba0dc 100644 --- a/src/invariant/BucketListIsConsistentWithDatabase.cpp +++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp @@ -273,7 +273,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) LedgerKeySet seenKeys; auto perBucketCheck = [&](auto bucket, auto& ltx) { - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { auto const& e = *iter; @@ -327,7 +327,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) LedgerTxn ltx(mApp.getLedgerTxnRoot()); auto& bl = mApp.getBucketManager().getLiveBucketList(); - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto const& level = bl.getLevel(i); for (auto const& bucket : {level.getCurr(), level.getSnap()}) @@ -351,7 +351,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) std::string BucketListIsConsistentWithDatabase::checkOnBucketApply( - std::shared_ptr bucket, uint32_t oldestLedger, + std::shared_ptr bucket, uint32_t oldestLedger, uint32_t newestLedger, std::function entryTypeFilter) { EntryCounts counts; @@ -360,10 +360,11 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply( bool hasPreviousEntry = false; BucketEntry previousEntry; - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { auto const& e = *iter; - if (hasPreviousEntry && !BucketEntryIdCmp{}(previousEntry, e)) + if (hasPreviousEntry && + !BucketEntryIdCmp{}(previousEntry, e)) { std::string s = "Bucket has out of order entries: "; s += xdrToCerealString(previousEntry, "previous"); diff --git a/src/invariant/BucketListIsConsistentWithDatabase.h b/src/invariant/BucketListIsConsistentWithDatabase.h index b98253dbc9..36b5a71559 100644 --- a/src/invariant/BucketListIsConsistentWithDatabase.h +++ b/src/invariant/BucketListIsConsistentWithDatabase.h @@ -34,7 +34,7 @@ class BucketListIsConsistentWithDatabase : public Invariant virtual std::string getName() const override; virtual std::string checkOnBucketApply( - std::shared_ptr bucket, uint32_t oldestLedger, + std::shared_ptr bucket, uint32_t oldestLedger, uint32_t newestLedger, std::function entryTypeFilter) override; diff --git a/src/invariant/Invariant.h b/src/invariant/Invariant.h index ddb235795d..8a2a12ec04 100644 --- a/src/invariant/Invariant.h +++ b/src/invariant/Invariant.h @@ -12,7 +12,7 @@ namespace stellar { -class Bucket; +class LiveBucket; enum LedgerEntryType : std::int32_t; struct LedgerTxnDelta; struct Operation; @@ -43,7 +43,7 @@ class Invariant } virtual std::string - checkOnBucketApply(std::shared_ptr bucket, + checkOnBucketApply(std::shared_ptr bucket, uint32_t oldestLedger, uint32_t newestLedger, std::function entryTypeFilter) { diff --git a/src/invariant/InvariantManager.h b/src/invariant/InvariantManager.h index 361afc150a..220209f1c7 100644 --- a/src/invariant/InvariantManager.h +++ b/src/invariant/InvariantManager.h @@ -37,8 +37,9 @@ class InvariantManager virtual std::vector getEnabledInvariants() const = 0; virtual void checkOnBucketApply( - std::shared_ptr bucket, uint32_t ledger, uint32_t level, - bool isCurr, std::function entryTypeFilter) = 0; + std::shared_ptr bucket, uint32_t ledger, + uint32_t level, bool isCurr, + std::function entryTypeFilter) = 0; virtual void checkAfterAssumeState(uint32_t newestLedger) = 0; diff --git a/src/invariant/InvariantManagerImpl.cpp 
b/src/invariant/InvariantManagerImpl.cpp index 4a88a276e1..b5157bfd0e 100644 --- a/src/invariant/InvariantManagerImpl.cpp +++ b/src/invariant/InvariantManagerImpl.cpp @@ -71,16 +71,16 @@ InvariantManagerImpl::getEnabledInvariants() const void InvariantManagerImpl::checkOnBucketApply( - std::shared_ptr bucket, uint32_t ledger, uint32_t level, + std::shared_ptr bucket, uint32_t ledger, uint32_t level, bool isCurr, std::function entryTypeFilter) { uint32_t oldestLedger = - isCurr ? BucketListBase::oldestLedgerInCurr(ledger, level) - : BucketListBase::oldestLedgerInSnap(ledger, level); + isCurr ? LiveBucketList::oldestLedgerInCurr(ledger, level) + : LiveBucketList::oldestLedgerInSnap(ledger, level); uint32_t newestLedger = oldestLedger - 1 + - (isCurr ? BucketListBase::sizeOfCurr(ledger, level) - : BucketListBase::sizeOfSnap(ledger, level)); + (isCurr ? LiveBucketList::sizeOfCurr(ledger, level) + : LiveBucketList::sizeOfSnap(ledger, level)); for (auto invariant : mEnabled) { auto result = invariant->checkOnBucketApply( diff --git a/src/invariant/InvariantManagerImpl.h b/src/invariant/InvariantManagerImpl.h index 5e495bcf3c..689ab6b750 100644 --- a/src/invariant/InvariantManagerImpl.h +++ b/src/invariant/InvariantManagerImpl.h @@ -42,8 +42,8 @@ class InvariantManagerImpl : public InvariantManager LedgerTxnDelta const& ltxDelta) override; virtual void checkOnBucketApply( - std::shared_ptr bucket, uint32_t ledger, uint32_t level, - bool isCurr, + std::shared_ptr bucket, uint32_t ledger, + uint32_t level, bool isCurr, std::function entryTypeFilter) override; virtual void checkAfterAssumeState(uint32_t newestLedger) override; diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp index c9f7a0c7db..f95b1660c6 100644 --- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp +++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp @@ -88,7 +88,7 @@ struct BucketListGenerator void applyBuckets(Application::pointer app, Args&&... 
args) { - std::map> buckets; + std::map> buckets; auto has = getHistoryArchiveState(app); auto& wm = app->getWorkScheduler(); wm.executeWork(buckets, has, @@ -210,17 +210,18 @@ struct BucketListGenerator MergeCounters mergeCounters; LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false); auto vers = ltx.loadHeader().current().ledgerVersion; - for (uint32_t i = 0; i <= BucketListBase::kNumLevels - 1; i++) + for (uint32_t i = 0; i <= LiveBucketList::kNumLevels - 1; i++) { auto& level = blGenerate.getLevel(i); auto meta = testutil::testBucketMetadata(vers); - auto keepDead = BucketListBase::keepDeadEntries(i); + auto keepDead = LiveBucketList::keepDeadEntries(i); auto writeBucketFile = [&](auto b) { - BucketOutputIterator out(bmApply.getTmpDir(), keepDead, meta, - mergeCounters, mClock.getIOContext(), - /*doFsync=*/true); - for (BucketInputIterator in(b); in; ++in) + LiveBucketOutputIterator out(bmApply.getTmpDir(), keepDead, + meta, mergeCounters, + mClock.getIOContext(), + /*doFsync=*/true); + for (LiveBucketInputIterator in(b); in; ++in) { out.put(*in); } @@ -246,9 +247,10 @@ struct BucketListGenerator }; bool -doesBucketContain(std::shared_ptr bucket, const BucketEntry& be) +doesBucketContain(std::shared_ptr bucket, + const BucketEntry& be) { - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { if (*iter == be) { @@ -261,7 +263,7 @@ doesBucketContain(std::shared_ptr bucket, const BucketEntry& be) bool doesBucketListContain(LiveBucketList& bl, const BucketEntry& be) { - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto const& level = bl.getLevel(i); for (auto const& bucket : {level.getCurr(), level.getSnap()}) @@ -337,7 +339,7 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork public: ApplyBucketsWorkAddEntry( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, std::function filter, LedgerEntry const& entry) : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, filter) @@ -391,7 +393,7 @@ class ApplyBucketsWorkDeleteEntry : public ApplyBucketsWork public: ApplyBucketsWorkDeleteEntry( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, LedgerEntry const& target) : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion) @@ -548,7 +550,7 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork public: ApplyBucketsWorkModifyEntry( Application& app, - std::map> const& buckets, + std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, LedgerEntry const& target) : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion) @@ -857,15 +859,15 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds", } }; - for (uint32_t level = 0; level < BucketListBase::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - uint32_t oldestLedger = BucketListBase::oldestLedgerInSnap(101, level); + uint32_t oldestLedger = LiveBucketList::oldestLedgerInSnap(101, level); if (oldestLedger == std::numeric_limits::max()) { break; } - uint32_t newestLedger = BucketListBase::oldestLedgerInCurr(101, level) + - BucketListBase::sizeOfCurr(101, level) - 1; + uint32_t newestLedger = LiveBucketList::oldestLedgerInCurr(101, level) + + LiveBucketList::sizeOfCurr(101, level) - 1; 
stellar::uniform_int_distribution ledgerToModifyDist( std::max(2u, oldestLedger), newestLedger); @@ -875,20 +877,20 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds", uint32_t maxLowTargetLedger = 0; uint32_t minHighTargetLedger = 0; if (ledgerToModify >= - BucketListBase::oldestLedgerInCurr(101, level)) + LiveBucketList::oldestLedgerInCurr(101, level)) { maxLowTargetLedger = - BucketListBase::oldestLedgerInCurr(101, level) - 1; + LiveBucketList::oldestLedgerInCurr(101, level) - 1; minHighTargetLedger = - BucketListBase::oldestLedgerInCurr(101, level) + - BucketListBase::sizeOfCurr(101, level); + LiveBucketList::oldestLedgerInCurr(101, level) + + LiveBucketList::sizeOfCurr(101, level); } else { maxLowTargetLedger = - BucketListBase::oldestLedgerInSnap(101, level) - 1; + LiveBucketList::oldestLedgerInSnap(101, level) - 1; minHighTargetLedger = - BucketListBase::oldestLedgerInCurr(101, level); + LiveBucketList::oldestLedgerInCurr(101, level); } stellar::uniform_int_distribution lowTargetLedgerDist( 1, maxLowTargetLedger); diff --git a/src/invariant/test/InvariantTests.cpp b/src/invariant/test/InvariantTests.cpp index 020e94037d..db5b8fb477 100644 --- a/src/invariant/test/InvariantTests.cpp +++ b/src/invariant/test/InvariantTests.cpp @@ -54,7 +54,7 @@ class TestInvariant : public Invariant } virtual std::string - checkOnBucketApply(std::shared_ptr bucket, + checkOnBucketApply(std::shared_ptr bucket, uint32_t oldestLedger, uint32_t newestLedger, std::function filter) override { @@ -164,7 +164,7 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]") app->getInvariantManager().enableInvariant( TestInvariant::toString(0, true)); - auto bucket = std::make_shared(); + auto bucket = std::make_shared(); uint32_t ledger = 1; uint32_t level = 0; bool isCurr = true; @@ -184,7 +184,7 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]") app->getInvariantManager().enableInvariant( TestInvariant::toString(0, false)); - auto bucket = std::make_shared(); + auto bucket = std::make_shared(); uint32_t ledger = 1; uint32_t level = 0; bool isCurr = true; diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h index 710ca7271d..acf6e1ee62 100644 --- a/src/ledger/LedgerManager.h +++ b/src/ledger/LedgerManager.h @@ -172,7 +172,7 @@ class LedgerManager virtual void startCatchup(CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + std::set> bucketsToRetain) = 0; // Forcibly close the current ledger, applying `ledgerData` as the consensus // changes. 
This is normally done automatically as part of diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index 86431b6b70..80d551eee3 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -736,7 +736,7 @@ LedgerManagerImpl::closeLedgerIf(LedgerCloseData const& ledgerData) void LedgerManagerImpl::startCatchup( CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) + std::set> bucketsToRetain) { ZoneScoped; setState(LM_CATCHING_UP_STATE); diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h index 1d16f93f4e..675dfe90c5 100644 --- a/src/ledger/LedgerManagerImpl.h +++ b/src/ledger/LedgerManagerImpl.h @@ -183,10 +183,10 @@ class LedgerManagerImpl : public LedgerManager Database& getDatabase() override; - void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup( + CatchupConfiguration configuration, + std::shared_ptr archive, + std::set> bucketsToRetain) override; void closeLedger(LedgerCloseData const& ledgerData) override; void deleteOldEntries(Database& db, uint32_t ledgerSeq, diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index 2085d7c92c..8cf1e52261 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -3486,7 +3486,7 @@ LedgerTxnRoot::Impl::areEntriesMissingInCacheForOffer(OfferEntry const& oe) return false; } -SearchableBucketListSnapshot& +SearchableLiveBucketListSnapshot& LedgerTxnRoot::Impl::getSearchableBucketListSnapshot() const { releaseAssert(mApp.getConfig().isUsingBucketListDB()); diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index 4d71595f70..42b4571514 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -20,7 +20,7 @@ namespace stellar { -class SearchableBucketListSnapshot; +class SearchableLiveBucketListSnapshot; class EntryIterator::AbstractImpl { @@ -737,7 +737,7 @@ class LedgerTxnRoot::Impl mutable BestOffers mBestOffers; mutable uint64_t mPrefetchHits{0}; mutable uint64_t mPrefetchMisses{0}; - mutable std::shared_ptr + mutable std::shared_ptr mSearchableBucketListSnapshot{}; size_t mBulkLoadBatchSize; @@ -871,7 +871,7 @@ class LedgerTxnRoot::Impl bool areEntriesMissingInCacheForOffer(OfferEntry const& oe); - SearchableBucketListSnapshot& getSearchableBucketListSnapshot() const; + SearchableLiveBucketListSnapshot& getSearchableBucketListSnapshot() const; uint32_t prefetchInternal(UnorderedSet const& keys, LedgerKeyMeter* lkMeter = nullptr); diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp index 45c36ceefb..89db21694b 100644 --- a/src/ledger/NetworkConfig.cpp +++ b/src/ledger/NetworkConfig.cpp @@ -1046,7 +1046,7 @@ SorobanNetworkConfig::isValidConfigSettingEntry(ConfigSettingEntry const& cfg, cfg.stateArchivalSettings().startingEvictionScanLevel >= MinimumSorobanNetworkConfig::STARTING_EVICTION_LEVEL && cfg.stateArchivalSettings().startingEvictionScanLevel < - BucketListBase::kNumLevels && + LiveBucketList::kNumLevels && cfg.stateArchivalSettings().bucketListWindowSamplePeriod >= MinimumSorobanNetworkConfig::BUCKETLIST_WINDOW_SAMPLE_PERIOD; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index c055f6c086..6e1c2e4a49 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -237,11 +237,11 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, // Collect bucket references to pass to catchup _before_ starting 
// the app, which may trigger garbage collection - std::set> retained; + std::set> retained; for (auto const& b : has.allBuckets()) { auto bPtr = - app->getBucketManager().getBucketByHash(hexToBin256(b)); + app->getBucketManager().getLiveBucketByHash(hexToBin256(b)); releaseAssert(bPtr); retained.insert(bPtr); } @@ -329,7 +329,7 @@ applyBucketsForLCL(Application& app, maxProtocolVersion = currentLedger->ledgerVersion; } - std::map> buckets; + std::map> buckets; auto work = app.getWorkScheduler().scheduleWork( buckets, has, maxProtocolVersion, onlyApply); @@ -574,11 +574,11 @@ struct StateArchivalMetric static void processArchivalMetrics( - std::shared_ptr const b, + std::shared_ptr const b, UnorderedMap& ledgerEntries, UnorderedMap>& ttls) { - for (BucketInputIterator in(b); in; ++in) + for (LiveBucketInputIterator in(b); in; ++in) { auto const& be = *in; bool isDead = be.type() == DEADENTRY; @@ -647,7 +647,7 @@ dumpStateArchivalStatistics(Config cfg) HistoryArchiveState has = lm.getLastClosedLedgerHAS(); std::vector hashes; - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr)); @@ -665,7 +665,7 @@ dumpStateArchivalStatistics(Config cfg) { continue; } - auto b = bm.getBucketByHash(hash); + auto b = bm.getLiveBucketByHash(hash); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -929,7 +929,7 @@ loadXdr(Config cfg, std::string const& bucketFile) Application::pointer app = Application::create(clock, cfg, false); uint256 zero; - Bucket bucket(bucketFile, zero, nullptr); + LiveBucket bucket(bucketFile, zero, nullptr); bucket.apply(*app); } diff --git a/src/main/Config.h b/src/main/Config.h index 3d8b7df194..736bea2864 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -235,9 +235,9 @@ class Config : public std::enable_shared_from_this bool ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING; // A config parameter that avoids counting level 0 merge events and those - // within Bucket::fresh; this option exists only for calculating adjustments - // to the expected count of merges when stopping and resuming merges, - // and should be false in all normal cases. + // within LiveBucket::fresh; this option exists only for calculating + // adjustments to the expected count of merges when stopping and resuming + // merges, and should be false in all normal cases. 
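A minimal usage sketch for the flag declared just below (getTestConfig and createTestApplication are existing test helpers; this wiring is illustrative, not part of the patch):

    #include "main/Config.h"
    #include "test/test.h"

    void
    configureReducedMergeCounting()
    {
        Config cfg = getTestConfig();
        // Stop counting level-0 and LiveBucket::fresh merge events so the
        // expected merge count stays stable across a stop/resume of merges.
        cfg.ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING = true;
        // ... hand cfg to createTestApplication / Application::create ...
    }
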
bool ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; // A config parameter that skips adjustment of target outbound connections diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index b416043a04..a2897175c4 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -131,7 +131,7 @@ checkState(Application& app) if (nextFuture.hasOutputHash()) { auto hash = hexToBin256(nextFuture.getOutputHash()); - checkBucket(bm.getBucketByHash(hash)); + checkBucket(bm.getLiveBucketByHash(hash)); } } } diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index cf274ceba0..89eecff723 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -112,14 +112,14 @@ computeMultiplier(LedgerEntry const& le) } BucketListDepthModifier::BucketListDepthModifier(uint32_t newDepth) - : mPrevDepth(BucketListBase::kNumLevels) + : mPrevDepth(LiveBucketList::kNumLevels) { - BucketListBase::kNumLevels = newDepth; + LiveBucketList::kNumLevels = newDepth; } BucketListDepthModifier::~BucketListDepthModifier() { - BucketListBase::kNumLevels = mPrevDepth; + LiveBucketList::kNumLevels = mPrevDepth; } } diff --git a/src/util/test/XDRStreamTests.cpp b/src/util/test/XDRStreamTests.cpp index 7710562c91..16754b5a1b 100644 --- a/src/util/test/XDRStreamTests.cpp +++ b/src/util/test/XDRStreamTests.cpp @@ -33,7 +33,7 @@ TEST_CASE("XDROutputFileStream fail modes", "[xdrstream]") size_t bytes = 0; auto ledgerEntries = LedgerTestUtils::generateValidLedgerEntries(1); auto bucketEntries = - Bucket::convertToBucketEntry(false, {}, ledgerEntries, {}); + LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {}); REQUIRE_THROWS_AS(out.writeOne(bucketEntries[0], &hasher, &bytes), std::runtime_error); @@ -53,7 +53,7 @@ TEST_CASE("XDROutputFileStream fsync bench", "[!hide][xdrstream][bench]") SHA256 hasher; auto ledgerEntries = LedgerTestUtils::generateValidLedgerEntries(10000000); auto bucketEntries = - Bucket::convertToBucketEntry(false, {}, ledgerEntries, {}); + LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {}); fs::mkpath(cfg.BUCKET_DIR_PATH); diff --git a/src/util/types.h b/src/util/types.h index 0a893c5a1e..6ead1c844c 100644 --- a/src/util/types.h +++ b/src/util/types.h @@ -128,6 +128,22 @@ assetToString(const Asset& asset) return r; }; +inline LedgerKey +getBucketLedgerKey(HotArchiveBucketEntry const& be) +{ + switch (be.type()) + { + case HA_RESTORED: + case HA_DELETED: + return be.key(); + case HA_ARCHIVED: + return LedgerEntryKey(be.archivedEntry()); + case HA_METAENTRY: + default: + throw std::invalid_argument("Tried to get key for METAENTRY"); + } +} + inline LedgerKey getBucketLedgerKey(BucketEntry const& be) { From 896b81e085744f7c3752424122ba124e56112446 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Thu, 25 Jul 2024 11:16:07 -0700 Subject: [PATCH 3/5] Added Hot Archive merge and search logic --- src/bucket/Bucket.cpp | 214 +++++++--- src/bucket/Bucket.h | 56 ++- src/bucket/BucketIndex.h | 1 + src/bucket/BucketIndexImpl.cpp | 98 +++-- src/bucket/BucketIndexImpl.h | 6 +- src/bucket/BucketInputIterator.cpp | 5 +- src/bucket/BucketList.cpp | 121 ++++-- src/bucket/BucketList.h | 60 ++- src/bucket/BucketListSnapshot.cpp | 337 ++++++++-------- src/bucket/BucketListSnapshot.h | 52 ++- src/bucket/BucketManager.h | 13 +- src/bucket/BucketManagerImpl.cpp | 68 ++-- src/bucket/BucketManagerImpl.h | 16 +- src/bucket/BucketOutputIterator.cpp | 57 ++- src/bucket/BucketOutputIterator.h | 4 +- src/bucket/BucketSnapshot.cpp | 55 ++- 
src/bucket/BucketSnapshot.h | 23 +- src/bucket/BucketSnapshotManager.cpp | 144 +++++-- src/bucket/BucketSnapshotManager.h | 49 ++- src/bucket/FutureBucket.cpp | 10 +- src/bucket/LedgerCmp.h | 22 +- src/bucket/MergeKey.cpp | 10 +- src/bucket/MergeKey.h | 6 +- src/bucket/test/BucketIndexTests.cpp | 225 ++++++++++- src/bucket/test/BucketListTests.cpp | 374 +++++++++++++----- src/bucket/test/BucketManagerTests.cpp | 18 +- src/bucket/test/BucketTestUtils.cpp | 102 ++++- src/bucket/test/BucketTestUtils.h | 31 +- src/bucket/test/BucketTests.cpp | 175 ++++++-- src/catchup/IndexBucketsWork.cpp | 7 +- src/herder/test/UpgradesTests.cpp | 9 +- src/history/test/HistoryTests.cpp | 4 +- ...ucketListIsConsistentWithDatabaseTests.cpp | 8 +- src/ledger/LedgerManagerImpl.cpp | 12 +- src/ledger/LedgerStateSnapshot.cpp | 2 +- src/ledger/LedgerStateSnapshot.h | 2 +- src/ledger/LedgerTxn.cpp | 17 +- src/ledger/LedgerTxnImpl.h | 3 +- src/ledger/NetworkConfig.cpp | 5 +- src/ledger/test/LedgerTestUtils.cpp | 24 ++ src/ledger/test/LedgerTestUtils.h | 6 + src/ledger/test/LedgerTxnTests.cpp | 10 +- src/main/QueryServer.cpp | 11 +- src/main/QueryServer.h | 4 +- src/overlay/test/FloodTests.cpp | 5 +- src/simulation/CoreTests.cpp | 3 +- src/test/TestUtils.cpp | 16 +- src/test/TestUtils.h | 13 +- src/test/test.cpp | 7 + src/test/test.h | 3 + ...ger-close-meta-v1-protocol-23-soroban.json | 372 ++++++++--------- .../ledger-close-meta-v1-protocol-23.json | 14 +- src/util/ProtocolVersion.h | 3 +- src/util/types.h | 8 +- 54 files changed, 1994 insertions(+), 926 deletions(-) diff --git a/src/bucket/Bucket.cpp b/src/bucket/Bucket.cpp index bfc63cf7f9..d533f688b6 100644 --- a/src/bucket/Bucket.cpp +++ b/src/bucket/Bucket.cpp @@ -8,6 +8,7 @@ #include "util/asio.h" // IWYU pragma: keep #include "bucket/Bucket.h" #include "bucket/BucketApplicator.h" +#include "bucket/BucketInputIterator.h" #include "bucket/BucketList.h" #include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" @@ -24,6 +25,7 @@ #include "util/Fs.h" #include "util/GlobalChecks.h" #include "util/Logging.h" +#include "util/ProtocolVersion.h" #include "util/XDRStream.h" #include "util/types.h" #include @@ -161,6 +163,7 @@ LiveBucket::apply(Application& app) const } counters.logInfo("direct", 0, app.getClock().now()); } +#endif // BUILD_TESTS std::vector LiveBucket::convertToBucketEntry(bool useInit, @@ -200,7 +203,6 @@ LiveBucket::convertToBucketEntry(bool useInit, }) == bucket.end()); return bucket; } -#endif // BUILD_TESTS std::string Bucket::randomFileName(std::string const& tmpDir, std::string ext) @@ -230,17 +232,76 @@ Bucket::randomBucketIndexName(std::string const& tmpDir) return randomFileName(tmpDir, ".index"); } +std::vector +HotArchiveBucket::convertToBucketEntry( + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + std::vector bucket; + for (auto const& e : archivedEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_ARCHIVED); + be.archivedEntry() = e; + bucket.push_back(be); + } + for (auto const& k : restoredEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_LIVE); + be.key() = k; + bucket.push_back(be); + } + for (auto const& k : deletedEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_DELETED); + be.key() = k; + bucket.push_back(be); + } + + BucketEntryIdCmp cmp; + std::sort(bucket.begin(), bucket.end(), cmp); + releaseAssert(std::adjacent_find(bucket.begin(), bucket.end(), + [&cmp](HotArchiveBucketEntry const& lhs, + HotArchiveBucketEntry 
const& rhs) { + return !cmp(lhs, rhs); + }) == bucket.end()); + return bucket; +} + std::shared_ptr HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync) { - // TODO: - releaseAssert(false); - return nullptr; + ZoneScoped; + BucketMetadata meta; + meta.ledgerVersion = protocolVersion; + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::HOT_ARCHIVE; + auto entries = + convertToBucketEntry(archivedEntries, restoredEntries, deletedEntries); + + MergeCounters mc; + HotArchiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, + mc, ctx, doFsync); + for (auto const& e : entries) + { + out.put(e); + } + + if (countMergeEvents) + { + bucketManager.incrMergeCounters(mc); + } + + return out.getBucket(bucketManager, + bucketManager.getConfig().isUsingBucketListDB()); } std::shared_ptr @@ -259,6 +320,15 @@ LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, BucketMetadata meta; meta.ledgerVersion = protocolVersion; + + if (protocolVersionStartsFrom( + protocolVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::LIVE; + } + auto entries = convertToBucketEntry(useInit, initEntries, liveEntries, deadEntries); @@ -279,13 +349,6 @@ LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, bucketManager.getConfig().isUsingBucketListDB()); } -static void -countShadowedEntryType(MergeCounters& mc, HotArchiveBucketEntry const& e) -{ - // TODO: - releaseAssert(false); -} - static void countShadowedEntryType(MergeCounters& mc, BucketEntry const& e) { @@ -321,18 +384,22 @@ LiveBucket::checkProtocolLegality(BucketEntry const& entry, } } -template inline void -maybePut(BucketOutputIterator& out, BucketEntryT const& entry, - std::vector>& shadowIterators, +maybePut(HotArchiveBucketOutputIterator& out, + HotArchiveBucketEntry const& entry, + std::vector& shadowIterators, bool keepShadowedLifecycleEntries, MergeCounters& mc) { - static_assert(std::is_same_v || - std::is_same_v); - - static_assert(std::is_same_v || - std::is_same_v); + // Archived BucketList is only present after protocol 21, so shadows are + // never supported + out.put(entry); +} +inline void +maybePut(LiveBucketOutputIterator& out, BucketEntry const& entry, + std::vector& shadowIterators, + bool keepShadowedLifecycleEntries, MergeCounters& mc) +{ // In ledgers before protocol 11, keepShadowedLifecycleEntries will be // `false` and we will drop all shadowed entries here. // @@ -371,13 +438,6 @@ maybePut(BucketOutputIterator& out, BucketEntryT const& entry, // LiveBucketOutputIterator level, and happens independent of ledger // protocol version. 
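To make the behaviour described above concrete, here is a minimal, self-contained sketch of the shadowing decision in isolation (a simplified key type and std::set stand in for the real LiveBucketInputIterator walk; this is not the code the patch ships):

    #include <set>
    #include <vector>

    using Key = int; // simplified stand-in for LedgerKey

    // Pre-protocol-11 rule in isolation: an entry is elided from the merge
    // output if any newer ("shadow") bucket also contains its key. From
    // protocol 11 on, INITENTRY and DEADENTRY are exempted from this test so
    // that init/dead pairs can annihilate in later merges.
    bool
    isShadowed(Key const& candidate, std::vector<std::set<Key>> const& shadows)
    {
        for (auto const& s : shadows)
        {
            if (s.count(candidate) != 0)
            {
                return true; // shadowed: drop the candidate entirely
            }
        }
        return false; // no shadow covers it: emit it
    }
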
- // TODO: Shadows - if constexpr (std::is_same_v) - { - releaseAssert(false); - return; - } - if (keepShadowedLifecycleEntries && (entry.type() == INITENTRY || entry.type() == DEADENTRY)) { @@ -386,7 +446,7 @@ maybePut(BucketOutputIterator& out, BucketEntryT const& entry, return; } - BucketEntryIdCmp cmp; + BucketEntryIdCmp cmp; for (auto& si : shadowIterators) { // Advance the shadowIterator while it's less than the candidate @@ -524,8 +584,14 @@ calculateMergeProtocolVersion( // we switch shadowing-behaviour to a more conservative mode, in order to // support annihilation of INITENTRY and DEADENTRY pairs. See commentary // above in `maybePut`. - // TODO: Clean up metrics for archive buckets keepShadowedLifecycleEntries = true; + + // Don't count shadow metrics for Hot Archive BucketList + if constexpr (std::is_same_v) + { + return; + } + if (protocolVersionIsBefore( protocolVersion, LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) @@ -568,10 +634,6 @@ mergeCasesWithDefaultAcceptance( { static_assert(std::is_same_v || std::is_same_v); - // using BucketEntryT = std::conditional_t, - // BucketEntry, - // HotArchiveBucketEntry>; if (!ni || (oi && ni && cmp(*oi, *ni))) { @@ -621,8 +683,20 @@ mergeCasesWithEqualKeys( std::vector& shadowIterators, uint32_t protocolVersion, bool keepShadowedLifecycleEntries) { - // TODO: - releaseAssert(false); + // If two identical keys have the same type, throw an error. Otherwise, + // take the newer key. + HotArchiveBucketEntry const& oldEntry = *oi; + HotArchiveBucketEntry const& newEntry = *ni; + if (oldEntry.type() == newEntry.type()) + { + throw std::runtime_error( + "Malformed Hot Archive bucket: two identical keys with " + "the same type."); + } + + out.put(newEntry); + ++ni; + ++oi; } static void @@ -846,7 +920,7 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, std::shared_ptr const& oldBucket, std::shared_ptr const& newBucket, std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, + bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync) { static_assert(std::is_same_v || @@ -875,8 +949,27 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, auto timer = bucketManager.getMergeTimer().TimeScope(); BucketMetadata meta; meta.ledgerVersion = protocolVersion; + + // If any inputs use the new extension of BucketMeta, the output should as + // well + if (ni.getMetadata().ext.v() == 1) + { + releaseAssertOrThrow(protocolVersionStartsFrom( + maxProtocolVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + meta.ext = ni.getMetadata().ext; + } + else if (oi.getMetadata().ext.v() == 1) + { + releaseAssertOrThrow(protocolVersionStartsFrom( + maxProtocolVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + meta.ext = oi.getMetadata().ext; + } + BucketOutputIterator out(bucketManager.getTmpDir(), - keepDeadEntries, meta, mc, ctx, doFsync); + keepTombstoneEntries, meta, mc, ctx, + doFsync); BucketEntryIdCmp cmp; size_t iter = 0; @@ -918,8 +1011,8 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, shadowHashes.push_back(s->getHash()); } - MergeKey mk{keepDeadEntries, oldBucket->getHash(), newBucket->getHash(), - shadowHashes}; + MergeKey mk{keepTombstoneEntries, oldBucket->getHash(), + newBucket->getHash(), shadowHashes}; return out.getBucket(bucketManager, bucketManager.getConfig().isUsingBucketListDB(), &mk); } @@ -966,21 +1059,16 @@ HotArchiveBucket::HotArchiveBucket() : Bucket() { } -template 
std::shared_ptr Bucket::merge( - BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); +bool +LiveBucket::isTombstoneEntry(BucketEntry const& e) +{ + return e.type() == DEADENTRY; +} -template std::shared_ptr Bucket::merge( - BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); +bool +HotArchiveBucket::isTombstoneEntry(HotArchiveBucketEntry const& e) +{ + return e.type() == HOT_ARCHIVE_LIVE; +} BucketEntryCounters& @@ -1009,4 +1097,20 @@ BucketEntryCounters::operator!=(BucketEntryCounters const& other) const { return !(*this == other); } + +template std::shared_ptr Bucket::merge( + BucketManager& bucketManager, uint32_t maxProtocolVersion, + std::shared_ptr const& oldBucket, + std::shared_ptr const& newBucket, + std::vector> const& shadows, + bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx, + bool doFsync); + +template std::shared_ptr Bucket::merge( + BucketManager& bucketManager, uint32_t maxProtocolVersion, + std::shared_ptr const& oldBucket, + std::shared_ptr const& newBucket, + std::vector> const& shadows, + bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx, + bool doFsync); } \ No newline at end of file diff --git a/src/bucket/Bucket.h b/src/bucket/Bucket.h index 43d82ba8f6..0f65acffa6 100644 --- a/src/bucket/Bucket.h +++ b/src/bucket/Bucket.h @@ -37,6 +37,14 @@ namespace stellar * Two buckets can be merged together efficiently (in a single pass): elements * from the newer bucket overwrite elements from the older bucket, the rest are * merged in sorted order, and all elements are hashed while being added. + * + * Different types of BucketList vary on the type of entries they contain and by + * extension the merge logic of those entries. Additionally, some types of + * BucketList may have special operations only relevant to that specific type. + * This pure virtual base class provides the core functionality of a BucketList + * container and must be extended for each specific BucketList type. In + * particular, the fresh and merge functions must be defined for the specific + * type, while other functionality can be shared. */ class AbstractLedgerTxn; @@ -45,6 +53,8 @@ class BucketManager; struct EvictionResultEntry; class EvictionStatistics; struct BucketEntryCounters; +template class SearchableBucketListSnapshot; +enum class LedgerEntryTypeAndDurability : uint32_t; class Bucket : public NonMovableOrCopyable { @@ -62,6 +72,9 @@ class Bucket : public NonMovableOrCopyable std::string ext); public: + static constexpr ProtocolVersion + FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION = ProtocolVersion::V_23; + // Create an empty bucket. The empty bucket has hash '000000...' and its // filename is the empty string.
Bucket(); @@ -109,8 +122,8 @@ class Bucket : public NonMovableOrCopyable std::shared_ptr const& oldBucket, std::shared_ptr const& newBucket, std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); + bool keepTombstoneEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync); static std::string randomBucketName(std::string const& tmpDir); static std::string randomBucketIndexName(std::string const& tmpDir); @@ -129,12 +142,18 @@ class Bucket : public NonMovableOrCopyable template friend class BucketSnapshotBase; }; -template class SearchableBucketListSnapshot; +/* + * Live Buckets are used by the LiveBucketList to store the current canonical + * state of the ledger. They contain entries of type BucketEntry. + */ class LiveBucket : public Bucket, public std::enable_shared_from_this { public: LiveBucket(); + virtual ~LiveBucket() + { + } LiveBucket(std::string const& filename, Hash const& hash, std::unique_ptr&& index); @@ -153,14 +172,13 @@ class LiveBucket : public Bucket, static void checkProtocolLegality(BucketEntry const& entry, uint32_t protocolVersion); -#ifdef BUILD_TESTS - static std::vector convertToBucketEntry(bool useInit, std::vector const& initEntries, std::vector const& liveEntries, std::vector const& deadEntries); +#ifdef BUILD_TESTS // "Applies" the bucket to the database. For each entry in the bucket, // if the entry is init or live, creates or updates the corresponding // entry in the database (respectively; if the entry is dead (a @@ -196,6 +214,10 @@ class LiveBucket : public Bucket, std::vector const& deadEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync); + // Returns true if the given BucketEntry should be dropped in the bottom + // level bucket (i.e. DEADENTRY) + static bool isTombstoneEntry(BucketEntry const& e); + uint32_t getBucketVersion() const override; BucketEntryCounters const& getBucketEntryCounters() const; @@ -203,27 +225,41 @@ class LiveBucket : public Bucket, friend class LiveBucketSnapshot; }; +/* + * Hot Archive Buckets are used by the HotArchiveBucketList to store recently + * evicted entries. They contain entries of type HotArchiveBucketEntry. + */ class HotArchiveBucket : public Bucket, public std::enable_shared_from_this { + static std::vector + convertToBucketEntry(std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); + public: HotArchiveBucket(); + virtual ~HotArchiveBucket() + { + } HotArchiveBucket(std::string const& filename, Hash const& hash, std::unique_ptr&& index); uint32_t getBucketVersion() const override; - // TOOD: Change params for HotArchiveBucket static std::shared_ptr fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries, bool countMergeEvents, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync); + // Returns true if the given BucketEntry should be dropped in the bottom + // level bucket (i.e.
HOT_ARCHIVE_LIVE) + static bool isTombstoneEntry(HotArchiveBucketEntry const& e); + friend class HotArchiveBucketSnapshot; }; -enum class LedgerEntryTypeAndDurability : uint32_t; struct BucketEntryCounters { std::map entryTypeCounts; diff --git a/src/bucket/BucketIndex.h b/src/bucket/BucketIndex.h index 39e15fd0fb..f4b343c839 100644 --- a/src/bucket/BucketIndex.h +++ b/src/bucket/BucketIndex.h @@ -89,6 +89,7 @@ class BucketIndex : public NonMovableOrCopyable // the largest buckets) and should only be called once. If pageSize == 0 or // if file size is less than the cutoff, individual key index is used. // Otherwise range index is used, with the range defined by pageSize. + template static std::unique_ptr createIndex(BucketManager& bm, std::filesystem::path const& filename, Hash const& hash, asio::io_context& ctx); diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp index f3aa69f301..1677670e70 100644 --- a/src/bucket/BucketIndexImpl.cpp +++ b/src/bucket/BucketIndexImpl.cpp @@ -26,6 +26,7 @@ #include #include +#include #include namespace stellar @@ -67,14 +68,19 @@ BucketIndex::typeNotSupported(LedgerEntryType t) } template +template BucketIndexImpl::BucketIndexImpl(BucketManager& bm, std::filesystem::path const& filename, std::streamoff pageSize, Hash const& hash, - asio::io_context& ctx) + asio::io_context& ctx, + BucketEntryT const& typeTag) : mBloomMissMeter(bm.getBloomMissMeter()) , mBloomLookupMeter(bm.getBloomLookupMeter()) { + static_assert(std::is_same_v || + std::is_same_v); + ZoneScoped; releaseAssert(!filename.empty()); @@ -96,7 +102,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, in.open(filename.string()); std::streamoff pos = 0; std::streamoff pageUpperBound = 0; - BucketEntry be; + BucketEntryT be; size_t iter = 0; [[maybe_unused]] size_t count = 0; @@ -128,35 +134,51 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, } } - if (be.type() != METAENTRY) + auto isMeta = [](auto const& be) { + if constexpr (std::is_same::value) + { + return be.type() == METAENTRY; + } + else + { + return be.type() == HOT_ARCHIVE_METAENTRY; + } + }; + + if (!isMeta(be)) { ++count; LedgerKey key = getBucketLedgerKey(be); - // We need an asset to poolID mapping for - // loadPoolshareTrustlineByAccountAndAsset queries. For this - // query, we only need to index INIT entries because: - // 1. PoolID is the hash of the Assets it refers to, so this - // index cannot be invalidated by newer LIVEENTRY updates - // 2. We do a join over all bucket indexes so we avoid storing - // multiple redundant index entries (i.e. LIVEENTRY updates) - // 3. We only use this index to collect the possible set of - // Trustline keys, then we load those keys. This means that - // we don't need to keep track of DEADENTRY. Even if a given - // INITENTRY has been deleted by a newer DEADENTRY, the - // trustline load will not return deleted trustlines, so the - // load result is still correct even if the index has a few - // deleted mappings. - if (be.type() == INITENTRY && key.type() == LIQUIDITY_POOL) + if constexpr (std::is_same_v) { - auto const& poolParams = be.liveEntry() - .data.liquidityPool() - .body.constantProduct() - .params; - mData.assetToPoolID[poolParams.assetA].emplace_back( - key.liquidityPool().liquidityPoolID); - mData.assetToPoolID[poolParams.assetB].emplace_back( - key.liquidityPool().liquidityPoolID); + // We need an asset to poolID mapping for + // loadPoolshareTrustlineByAccountAndAsset queries. 
For this + query, we only need to index INIT entries because: + // 1. PoolID is the hash of the Assets it refers to, so this + // index cannot be invalidated by newer LIVEENTRY updates + // 2. We do a join over all bucket indexes so we avoid storing + // multiple redundant index entries (i.e. LIVEENTRY updates) + // 3. We only use this index to collect the possible set of + // Trustline keys, then we load those keys. This means + // that we don't need to keep track of DEADENTRY. Even if + // a given INITENTRY has been deleted by a newer + // DEADENTRY, the trustline load will not return deleted + // trustlines, so the load result is still correct even + // if the index has a few deleted mappings. + if (be.type() == INITENTRY && key.type() == LIQUIDITY_POOL) + { + auto const& poolParams = be.liveEntry() + .data.liquidityPool() + .body.constantProduct() + .params; + mData.assetToPoolID[poolParams.assetA].emplace_back( + key.liquidityPool().liquidityPoolID); + mData.assetToPoolID[poolParams.assetB].emplace_back( + key.liquidityPool().liquidityPoolID); + } } if constexpr (std::is_same::value) @@ -184,7 +206,11 @@ { mData.keysToOffset.emplace_back(key, pos); } - countEntry(be); + + if constexpr (std::is_same::value) + { + countEntry(be); + } } pos = in.pos(); @@ -329,11 +355,15 @@ upper_bound_pred(LedgerKey const& key, IndexEntryT const& indexEntry) } } +template std::unique_ptr BucketIndex::createIndex(BucketManager& bm, std::filesystem::path const& filename, Hash const& hash, asio::io_context& ctx) { + static_assert(std::is_same_v || + std::is_same_v); + ZoneScoped; auto const& cfg = bm.getConfig(); releaseAssertOrThrow(cfg.isUsingBucketListDB()); @@ -349,8 +379,8 @@ "bucket {}", filename); return std::unique_ptr const>( - new BucketIndexImpl(bm, filename, 0, hash, - ctx)); + new BucketIndexImpl(bm, filename, 0, hash, ctx, + BucketEntryT{})); } else { @@ -361,7 +391,7 @@ pageSize, filename); return std::unique_ptr const>( new BucketIndexImpl(bm, filename, pageSize, hash, - ctx)); + ctx, BucketEntryT{})); } } // BucketIndexImpl throws if BucketManager shuts down before index finishes, @@ -608,4 +638,12 @@ BucketIndexImpl::getBucketEntryCounters() const { return mData.counters; } + +template std::unique_ptr +BucketIndex::createIndex(BucketManager& bm, + std::filesystem::path const& filename, + Hash const& hash, asio::io_context& ctx); +template std::unique_ptr +BucketIndex::createIndex( + BucketManager& bm, std::filesystem::path const& filename, Hash const& hash, + asio::io_context& ctx); } diff --git a/src/bucket/BucketIndexImpl.h b/src/bucket/BucketIndexImpl.h index 74890ec3a9..eec9fb3837 100644 --- a/src/bucket/BucketIndexImpl.h +++ b/src/bucket/BucketIndexImpl.h @@ -60,9 +60,13 @@ template class BucketIndexImpl : public BucketIndex medida::Meter& mBloomMissMeter; medida::Meter& mBloomLookupMeter; + // Templated constructors are valid C++, but their template arguments + // cannot be specified explicitly; the compiler has to deduce BucketEntryT + // from a function argument, hence the otherwise-unused typeTag parameter + template BucketIndexImpl(BucketManager& bm, std::filesystem::path const& filename, std::streamoff pageSize, Hash const& hash, - asio::io_context& ctx); + asio::io_context& ctx, BucketEntryT const& typeTag); template BucketIndexImpl(BucketManager const& bm, Archive& ar, diff --git a/src/bucket/BucketInputIterator.cpp b/src/bucket/BucketInputIterator.cpp index c1d290c12b..da3b4a97eb 100644 ---
a/src/bucket/BucketInputIterator.cpp +++ b/src/bucket/BucketInputIterator.cpp @@ -6,6 +6,7 @@ #include "bucket/Bucket.h" #include "xdr/Stellar-ledger.h" #include +#include namespace stellar { @@ -22,13 +23,13 @@ BucketInputIterator::loadEntry() { mEntryPtr = &mEntry; bool isMeta; - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { isMeta = mEntry.type() == METAENTRY; } else { - isMeta = mEntry.type() == HA_METAENTRY; + isMeta = mEntry.type() == HOT_ARCHIVE_METAENTRY; } if (isMeta) diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketList.cpp index 129f1681a8..59358d1bb6 100644 --- a/src/bucket/BucketList.cpp +++ b/src/bucket/BucketList.cpp @@ -25,6 +25,12 @@ namespace stellar { +template <> BucketListDepth BucketListBase::kNumLevels = 11; + +// TODO: This is an arbitrary number. Do some analysis and pick a better value +// or make this a configurable network config. +template <> BucketListDepth BucketListBase::kNumLevels = 9; + template BucketLevel::BucketLevel(uint32_t i) : mLevel(i) @@ -198,19 +204,16 @@ BucketLevel::prepare( LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) ? std::vector>() : shadows; - mNextCurr = FutureBucket(app, curr, snap, shadowsBasedOnProtocol, currLedgerProtocol, countMergeEvents, mLevel); } else { - // TODO: Constructor with no shadows - // mNextCurr = - // FutureBucket(app, curr, snap, shadowsBasedOnProtocol, - // currLedgerProtocol, countMergeEvents, - // mLevel); - releaseAssert(false); + // HotArchive only exists for protocol > 21, should never have shadows + mNextCurr = + FutureBucket(app, curr, snap, /*shadows=*/{}, + currLedgerProtocol, countMergeEvents, mLevel); } releaseAssert(mNextCurr.isMerging()); @@ -476,7 +479,7 @@ BucketListBase::bucketUpdatePeriod(uint32_t level, bool isCurr) template bool -BucketListBase::keepDeadEntries(uint32_t level) +BucketListBase::keepTombstoneEntries(uint32_t level) { return level < BucketListBase::kNumLevels - 1; } @@ -578,18 +581,93 @@ BucketListBase::getSize() const return sum; } -template void -BucketListBase::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) { ZoneScoped; releaseAssert(currLedger > 0); - std::vector> shadows; + for (uint32_t i = static_cast(mLevels.size()) - 1; i != 0; --i) + { + if (levelShouldSpill(currLedger, i - 1)) + { + /** + * At every ledger, level[0] prepares the new batch and commits + * it. + * + * At ledger multiples of 2, level[0] snaps, level[1] commits + * existing (promotes next to curr) and "prepares" by starting a + * merge of that new level[1] curr with the new level[0] snap. This + * is "level 0 spilling". + * + * At ledger multiples of 8, level[1] snaps, level[2] commits + * existing (promotes next to curr) and "prepares" by starting a + * merge of that new level[2] curr with the new level[1] snap. This + * is "level 1 spilling". + * + * At ledger multiples of 32, level[2] snaps, level[3] commits + * existing (promotes next to curr) and "prepares" by starting a + * merge of that new level[3] curr with the new level[2] snap. This + * is "level 2 spilling". 
+ * + * All these have to be done in _reverse_ order (counting down + * levels) because we want a 'curr' to be pulled out of the way into + * a 'snap' the moment it's half-a-level full, not have anything + * else spilled/added to it. + */ + + auto snap = mLevels[i - 1].snap(); + mLevels[i].commit(); + mLevels[i].prepare(app, currLedger, currLedgerProtocol, snap, + /*shadows=*/{}, + /*countMergeEvents=*/true); + } + } + + // In some testing scenarios, we want to inhibit counting level 0 merges + // because they are not repeated when restarting merges on app startup, + // and we are checking for an expected number of merge events on restart. + bool countMergeEvents = + !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; + bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; + mLevels[0].prepare( + app, currLedger, currLedgerProtocol, + HotArchiveBucket::fresh(app.getBucketManager(), currLedgerProtocol, + archiveEntries, restoredEntries, deletedEntries, + countMergeEvents, app.getClock().getIOContext(), + doFsync), + /*shadows=*/{}, countMergeEvents); + mLevels[0].commit(); + + // We almost always want to try to resolve completed merges to single + // buckets, as it makes restarts less fragile: fewer saved/restored shadows, + // fewer buckets for the user to accidentally delete from their buckets + // dir. Also makes publication less likely to redo a merge that was already + // complete (but not resolved) when the snapshot gets taken. + // + // But we support the option of not-doing so, only for the sake of + // testing. Note: this is nonblocking in any case. + if (!app.getConfig().ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING) + { + resolveAnyReadyFutures(); + } +} + +void +LiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) +{ + ZoneScoped; + releaseAssert(currLedger > 0); + + std::vector> shadows; for (auto& level : mLevels) { shadows.push_back(level.getCurr()); @@ -681,9 +759,10 @@ BucketListBase::addBatch(Application& app, uint32_t currLedger, releaseAssert(shadows.size() == 0); mLevels[0].prepare( app, currLedger, currLedgerProtocol, - BucketT::fresh(app.getBucketManager(), currLedgerProtocol, initEntries, - liveEntries, deadEntries, countMergeEvents, - app.getClock().getIOContext(), doFsync), + LiveBucket::fresh(app.getBucketManager(), currLedgerProtocol, + initEntries, liveEntries, deadEntries, + countMergeEvents, app.getClock().getIOContext(), + doFsync), shadows, countMergeEvents); mLevels[0].commit(); @@ -702,7 +781,7 @@ BucketListBase::addBatch(Application& app, uint32_t currLedger, } BucketEntryCounters -BucketList::sumBucketEntryCounters() const +LiveBucketList::sumBucketEntryCounters() const { BucketEntryCounters counters; for (auto const& lev : mLevels) @@ -950,10 +1029,6 @@ BucketListBase::restartMerges(Application& app, } } -// TODO: Different depths for different types? 
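The cadence described in the addBatch comment above (spills at multiples of 2, 8, and 32) is exactly levelHalf. A quick standalone check, assuming the levelSize(i) = 1 << (2 * (i + 1)) formula used by BucketListBase (illustrative, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int
    main()
    {
        for (uint32_t i = 0; i < 4; ++i)
        {
            uint32_t size = 1u << (2 * (i + 1)); // levelSize(i): 4, 16, 64, 256
            uint32_t half = size >> 1;           // levelHalf(i): 2, 8, 32, 128
            std::printf("level %u spills at multiples of %u ledgers\n", i,
                        half);
        }
        return 0;
    }
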
-template -BucketListDepth BucketListBase::kNumLevels = 11; - template BucketListBase::BucketListBase() { for (uint32_t i = 0; i < kNumLevels; ++i) diff --git a/src/bucket/BucketList.h b/src/bucket/BucketList.h index 269a032d5b..5ffcf5d9a8 100644 --- a/src/bucket/BucketList.h +++ b/src/bucket/BucketList.h @@ -352,7 +352,7 @@ struct InflationWinner; namespace testutil { -class BucketListDepthModifier; +template class BucketListDepthModifier; } template class BucketLevel @@ -398,9 +398,15 @@ class BucketListDepth operator uint32_t() const; - friend class testutil::BucketListDepthModifier; + template friend class testutil::BucketListDepthModifier; }; +// While every BucketList shares the same high level structure with respect to +// spill schedules, merges at the bucket level, etc., each BucketList type holds +// different types of entries and has different merge logic at the individual +// entry level. This pure virtual base class defines the shared structure of all +// BucketLists. It must be extended for each specific BucketList type, where the +// template parameter BucketT refers to the underlying Bucket type. template class BucketListBase { static_assert(std::is_same_v || @@ -446,8 +452,11 @@ template class BucketListBase // should spill curr->snap and start merging snap into its next level. static bool levelShouldSpill(uint32_t ledger, uint32_t level); - // Returns true if at given `level` dead entries should be kept. - static bool keepDeadEntries(uint32_t level); + // Returns true if at given `level` tombstone entries should be kept. A + // "tombstone" entry is the entry type that represents null in the given + // BucketList. For LiveBucketList, this is DEADENTRY. For + // HotArchiveBucketList, HOT_ARCHIVE_LIVE. + static bool keepTombstoneEntries(uint32_t level); // Number of ledgers it takes a bucket to spill/receive an incoming spill static uint32_t bucketUpdatePeriod(uint32_t level, bool isCurr); @@ -504,22 +513,13 @@ template class BucketListBase // Returns the total size of the BucketList, in bytes, excluding all // FutureBuckets uint64_t getSize() const; - - // Add a batch of initial (created), live (updated) and dead entries to the - // bucketlist, representing the entries effected by closing - // `currLedger`. The bucketlist will incorporate these into the smallest - // (0th) level, as well as commit or prepare merges for any levels that - // should have spilled due to passing through `currLedger`. The `currLedger` - // and `currProtocolVersion` values should be taken from the ledger at which - // this batch is being added. - void addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries); - BucketEntryCounters sumBucketEntryCounters() const; }; +// The LiveBucketList stores the current canonical state of the ledger. It is +// made up of LiveBucket buckets, which in turn store individual entries of type +// BucketEntry. When an entry is "evicted" from the ledger, it is removed from +// the LiveBucketList. Depending on the evicted entry type, it may then be added +// to the HotArchiveBucketList.
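To see how the two lists interact at ledger close, a hedged sketch (the getHotArchiveBucketList accessor and the pre-built entry vectors are assumed here by analogy with getLiveBucketList; this helper is hypothetical and presumes the surrounding stellar-core headers):

    // Hypothetical wiring, for illustration only.
    void
    addLedgerToBothLists(Application& app, uint32_t ledgerSeq, uint32_t vers,
                         std::vector<LedgerEntry> const& initEntries,
                         std::vector<LedgerEntry> const& liveEntries,
                         std::vector<LedgerKey> const& deadEntries,
                         std::vector<LedgerEntry> const& archivedEntries,
                         std::vector<LedgerKey> const& restoredKeys,
                         std::vector<LedgerKey> const& deletedKeys)
    {
        auto& bm = app.getBucketManager();
        // Canonical live state: entries created, updated, or deleted by this
        // ledger go into the live list.
        bm.getLiveBucketList().addBatch(app, ledgerSeq, vers, initEntries,
                                        liveEntries, deadEntries);
        // Recently evicted state: entries that just left the live list go
        // into the hot archive (getHotArchiveBucketList is assumed).
        bm.getHotArchiveBucketList().addBatch(app, ledgerSeq, vers,
                                              archivedEntries, restoredKeys,
                                              deletedKeys);
    }
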
class LiveBucketList : public BucketListBase { public: @@ -545,8 +545,26 @@ void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, uint32_t ledgerSeq, EvictionCounters& counters, std::shared_ptr stats); + + // Add a batch of initial (created), live (updated) and dead entries to the + // bucketlist, representing the entries effected by closing + // `currLedger`. The bucketlist will incorporate these into the smallest + // (0th) level, as well as commit or prepare merges for any levels that + // should have spilled due to passing through `currLedger`. The `currLedger` + // and `currProtocolVersion` values should be taken from the ledger at which + // this batch is being added. + void addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries); + + BucketEntryCounters sumBucketEntryCounters() const; }; +// The HotArchiveBucketList stores recently evicted entries. It contains Buckets +// of type HotArchiveBucket, which store individual entries of type +// HotArchiveBucketEntry. class HotArchiveBucketList : public BucketListBase { private: @@ -556,5 +574,11 @@ // Merge result future // This should be the result of merging this entire list into a single file. // The MerkleBucketList is then initialized with this result + public: + void addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); }; } diff --git a/src/bucket/BucketListSnapshot.cpp b/src/bucket/BucketListSnapshot.cpp index 1e0e3a4c78..b071567b09 100644 --- a/src/bucket/BucketListSnapshot.cpp +++ b/src/bucket/BucketListSnapshot.cpp @@ -12,13 +12,14 @@ #include "medida/timer.h" #include "util/GlobalChecks.h" +#include +#include namespace stellar { template BucketListSnapshot::BucketListSnapshot( - BucketListBase const& bl, - LedgerHeader header) + BucketListBase const& bl, LedgerHeader header) : mHeader(std::move(header)) { releaseAssert(threadIsMain()); @@ -32,7 +33,7 @@ BucketListSnapshot::BucketListSnapshot( template BucketListSnapshot::BucketListSnapshot( - BucketListSnapshot const& snapshot) + BucketListSnapshot const& snapshot) : mLevels(snapshot.mLevels), mHeader(snapshot.mHeader) { } @@ -51,10 +52,20 @@ BucketListSnapshot::getLedgerSeq() const { return mHeader.ledgerSeq; } +template +LedgerHeader const& +SearchableBucketListSnapshotBase::getLedgerHeader() +{ + releaseAssert(mSnapshot); + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + return mSnapshot->getLedgerHeader(); +} + template void SearchableBucketListSnapshotBase::loopAllBuckets( - std::function f, BucketListSnapshot const& snapshot) const + std::function f, + BucketListSnapshot const& snapshot) const { for (auto const& lev : snapshot.getLevels()) { @@ -75,70 +86,6 @@ SearchableBucketListSnapshotBase::loopAllBuckets( } } -// Loads bucket entry for LedgerKey k. Returns <LedgerEntry, bloomMiss>,
-std::pair, bool> -getLedgerEntryInternal(LedgerKey const& k, BucketListSnapshot const& snapshot) -{ - std::shared_ptr result{}; - auto sawBloomMiss = false; - - auto f = [&](BucketSnapshot const& b) { - auto [be, bloomMiss] = b.getBucketEntry(k); - sawBloomMiss = sawBloomMiss || bloomMiss; - - if (be.has_value()) - { - result = - be.value().type() == DEADENTRY - ? nullptr - : std::make_shared(be.value().liveEntry()); - return true; - } - else - { - return false; - } - }; - - loopAllBuckets(f, snapshot); - return {result, sawBloomMiss}; -} - -std::vector -loadKeysInternal(std::set const& inKeys, - BucketListSnapshot const& snapshot, LedgerKeyMeter* lkMeter) -{ - std::vector entries; - - // Make a copy of the key set, this loop is destructive - auto keys = inKeys; - auto f = [&](BucketSnapshot const& b) { - b.loadKeysWithLimits(keys, entries, lkMeter); - return keys.empty(); - }; - - loopAllBuckets(f, snapshot); - return entries; -} - -} - -uint32_t -SearchableBucketListSnapshot::getLedgerSeq() const -{ - releaseAssert(mSnapshot); - return mSnapshot->getLedgerSeq(); -} - -LedgerHeader const& -SearchableBucketListSnapshot::getLedgerHeader() -{ - releaseAssert(mSnapshot); - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - return mSnapshot->getLedgerHeader(); -} - EvictionResult SearchableLiveBucketListSnapshot::scanForEviction( uint32_t ledgerSeq, EvictionCounters& counters, @@ -189,77 +136,91 @@ SearchableLiveBucketListSnapshot::scanForEviction( return result; } -template -std::shared_ptr -SearchableBucketListSnapshotBase::getLedgerEntry(LedgerKey const& k) +std::vector +SearchableLiveBucketListSnapshot::loadKeysWithLimits( + std::set const& inKeys, + LedgerKeyMeter* lkMeter) { ZoneScoped; - if constexpr (std::is_same_v) + + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + std::vector entries; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(keys, entries, lkMeter); + return keys.empty(); + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + if (threadIsMain()) { - mSnapshotManager.maybeUpdateSnapshot(mSnapshot); + auto timer = + mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size()) + .TimeScope(); + loopAllBuckets(loadKeysLoop, *mSnapshot); } else { - // TODO:: - releaseAssert(false); + // TODO: Background metrics + loopAllBuckets(loadKeysLoop, *mSnapshot); } - if (threadIsMain()) + return entries; +} + +std::optional> +SearchableLiveBucketListSnapshot::loadKeysFromLedger( + std::set const& inKeys, uint32_t ledgerSeq) +{ + ZoneScoped; + + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + std::vector entries; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(keys, entries, /*lkMeter=*/nullptr); + return keys.empty(); + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + + if (ledgerSeq == mSnapshot->getLedgerSeq()) { - mSnapshotManager.startPointLoadTimer(); - auto [result, bloomMiss] = getLedgerEntryInternal(k, *mSnapshot); - mSnapshotManager.endPointLoadTimer(k.type(), bloomMiss); - return result; + loopAllBuckets(loadKeysLoop, *mSnapshot); } else { - auto [result, bloomMiss] = getLedgerEntryInternal(k, *mSnapshot); - return result; + auto iter = mHistoricalSnapshots.find(ledgerSeq); + if (iter == mHistoricalSnapshots.end()) + { + return std::nullopt; + } + + releaseAssert(iter->second); + loopAllBuckets(loadKeysLoop, *iter->second); } + + return entries; } -template -std::pair, bool> 
-SearchableBucketListSnapshotBase::getLedgerEntryInternal( - LedgerKey const& k, uint32_t ledgerSeq) +std::shared_ptr +SearchableLiveBucketListSnapshot::load(LedgerKey const& k) { ZoneScoped; - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - releaseAssert(mSnapshot); - // if (ledgerSeq == mSnapshot->getLedgerSeq()) - // { - // auto result = loadKeysInternal(inKeys, *mSnapshot, /*lkMeter=*/nullptr); - // return {result, true}; - // } - - // auto iter = mHistoricalSnapshots.find(ledgerSeq); - // if (iter == mHistoricalSnapshots.end()) - // { - // return {{}, false}; - // } - - // releaseAssert(iter->second); - // auto result = loadKeysInternal(inKeys, *iter->second, /*lkMeter=*/nullptr); - // return {result, true}; - auto f = [&](BucketSnapshotT const& b) { + std::shared_ptr result{}; + auto sawBloomMiss = false; + + // Search function called on each Bucket in BucketList until we find the key + auto loadKeyBucketLoop = [&](auto const& b) { auto [be, bloomMiss] = b.getBucketEntry(k); sawBloomMiss = sawBloomMiss || bloomMiss; - if (be.has_value()) + if (be) { - if constexpr (std::is_same_v) - { - result = - be.value().type() == DEADENTRY - ? nullptr - : std::make_shared(be.value().liveEntry()); - } - else - { - releaseAssert(false); - // TODO::W - } + result = LiveBucket::isTombstoneEntry(*be) + ? nullptr + : std::make_shared(be->liveEntry()); return true; } @@ -269,48 +230,19 @@ SearchableBucketListSnapshotBase::getLedgerEntryInternal( } }; - loopAllBuckets(f); - return {result, sawBloomMiss}; -} - -template -std::vector -SearchableBucketListSnapshotBase::loadKeysInternal( - std::set const& inKeys, - LedgerKeyMeter* lkMeter) -{ - std::vector entries; - - // Make a copy of the key set, this loop is destructive - auto keys = inKeys; - auto f = [&](BucketSnapshotT const& b) { - b.loadKeysWithLimits(keys, entries, lkMeter); - return keys.empty(); - }; - - loopAllBuckets(f); - return entries; -} - -std::vector -SearchableLiveBucketListSnapshot::loadKeysWithLimits( - std::set const& inKeys, - LedgerKeyMeter* lkMeter) -{ - ZoneScoped; mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - releaseAssert(mSnapshot); - if (threadIsMain()) { - auto timer = - mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size()) - .TimeScope(); - return loadKeysInternal(inKeys, *mSnapshot, lkMeter); + mSnapshotManager.startPointLoadTimer(); + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + mSnapshotManager.endPointLoadTimer(k.type(), sawBloomMiss); + return result; } else { - return loadKeysInternal(inKeys, *mSnapshot, lkMeter); + // TODO: Background metrics + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + return result; } } @@ -352,7 +284,15 @@ SearchableLiveBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( .recordBulkLoadMetrics("poolshareTrustlines", trustlinesToLoad.size()) .TimeScope(); - return loadKeysInternal(trustlinesToLoad, *mSnapshot, nullptr); + + std::vector result; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(trustlinesToLoad, result, /*lkMeter=*/nullptr); + return trustlinesToLoad.empty(); + }; + + loopAllBuckets(loadKeysLoop, *mSnapshot); + return result; } std::vector @@ -460,16 +400,8 @@ SearchableBucketListSnapshotBase::SearchableBucketListSnapshotBase( BucketSnapshotManager const& snapshotManager) : mSnapshotManager(snapshotManager), mHistoricalSnapshots() { - // Initialize snapshot from SnapshotManager - if constexpr (std::is_same_v) - { - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - } - else 
- { - // TODO: - releaseAssert(false); - } + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); } template @@ -483,6 +415,83 @@ SearchableLiveBucketListSnapshot::SearchableLiveBucketListSnapshot( { } +SearchableHotArchiveBucketListSnapshot::SearchableHotArchiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager) + : SearchableBucketListSnapshotBase(snapshotManager) +{ +} + +std::shared_ptr +SearchableHotArchiveBucketListSnapshot::load(LedgerKey const& k) +{ + ZoneScoped; + + // Search function called on each Bucket in BucketList until we find the key + std::shared_ptr result{}; + auto loadKeyBucketLoop = [&](auto const& b) { + auto [be, _] = b.getBucketEntry(k); + + if (be) + { + result = HotArchiveBucket::isTombstoneEntry(*be) ? nullptr : be; + return true; + } + else + { + return false; + } + }; + + // TODO: Metrics + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + return result; +} + +std::vector +SearchableHotArchiveBucketListSnapshot::loadKeys( + std::set const& inKeys) +{ + auto op = loadKeysFromLedger(inKeys, getLedgerSeq()); + releaseAssertOrThrow(op); + return std::move(*op); +} + +std::optional> +SearchableHotArchiveBucketListSnapshot::loadKeysFromLedger( + std::set const& inKeys, uint32_t ledgerSeq) +{ + ZoneScoped; + std::vector entries; + + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(keys, entries, /*lkMeter=*/nullptr); + return keys.empty(); + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + + if (ledgerSeq == mSnapshot->getLedgerSeq()) + { + loopAllBuckets(loadKeysLoop, *mSnapshot); + } + else + { + auto iter = mHistoricalSnapshots.find(ledgerSeq); + if (iter == mHistoricalSnapshots.end()) + { + return std::nullopt; + } + + releaseAssert(iter->second); + loopAllBuckets(loadKeysLoop, *iter->second); + } + + return entries; +} + template struct BucketLevelSnapshot; template struct BucketLevelSnapshot; template class BucketListSnapshot; diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshot.h index ed80a6a241..c4cd98450f 100644 --- a/src/bucket/BucketListSnapshot.h +++ b/src/bucket/BucketListSnapshot.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/BucketList.h" #include "bucket/BucketManagerImpl.h" #include "bucket/BucketSnapshot.h" @@ -47,7 +48,7 @@ template class BucketListSnapshot : public NonMovable LedgerHeader const mHeader; public: - BucketListSnapshot(BucketListBase const& bl, LedgerHeader hhe); + BucketListSnapshot(BucketListBase const& bl, LedgerHeader hhe); // Only allow copies via constructor BucketListSnapshot(BucketListSnapshot const& snapshot); @@ -99,6 +100,15 @@ class SearchableBucketListSnapshotBase : public NonMovableOrCopyable SearchableBucketListSnapshotBase( BucketSnapshotManager const& snapshotManager); + + public: + uint32_t + getLedgerSeq() const + { + return mSnapshot->getLedgerSeq(); + } + + LedgerHeader const& getLedgerHeader(); }; class SearchableLiveBucketListSnapshot @@ -108,6 +118,8 @@ class SearchableLiveBucketListSnapshot BucketSnapshotManager const& snapshotManager); public: + std::shared_ptr load(LedgerKey const& k); + std::vector loadKeysWithLimits(std::set const& inKeys, LedgerKeyMeter* lkMeter = nullptr); @@ -119,15 +131,13 @@ class SearchableLiveBucketListSnapshot std::vector loadInflationWinners(size_t maxWinners, int64_t minBalance); - std::shared_ptr load(LedgerKey const& k); - // Loads inKeys from the specified historical snapshot. Returns - // if the snapshot for the given ledger is - // available, otherwise. Note that ledgerSeq is defined + // load_result_vec if the snapshot for the given ledger is + // available, std::nullopt otherwise. Note that ledgerSeq is defined // as the state of the BucketList at the beginning of the ledger. This means // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry // in the BucketList is N - 1. - std::pair, bool> + std::optional> loadKeysFromLedger(std::set const& inKeys, uint32_t ledgerSeq); @@ -137,7 +147,33 @@ class SearchableLiveBucketListSnapshot std::shared_ptr stats, StateArchivalSettings const& sas); - uint32_t getLedgerSeq() const; - LedgerHeader const& getLedgerHeader(); + friend std::shared_ptr + BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const; +}; + +class SearchableHotArchiveBucketListSnapshot + : public SearchableBucketListSnapshotBase +{ + SearchableHotArchiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager); + + public: + std::shared_ptr load(LedgerKey const& k); + + std::vector + loadKeys(std::set const& inKeys); + + // Loads inKeys from the specified historical snapshot. Returns + // load_result_vec if the snapshot for the given ledger is + // available, std::nullopt otherwise. Note that ledgerSeq is defined + // as the state of the BucketList at the beginning of the ledger. This means + // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry + // in the BucketList is N - 1. 
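+    // Editor's sketch (not part of this change): an assumed caller of the
+    // historical-read path. With QUERY_SNAPSHOT_LEDGERS == 5 and the current
+    // snapshot at ledger 100, any ledgerSeq in [95, 100] resolves, and older
+    // ledgers return std::nullopt:
+    //
+    //   auto snap =
+    //       snapshotManager.copySearchableHotArchiveBucketListSnapshot();
+    //   if (auto res = snap->loadKeysFromLedger(keys, snap->getLedgerSeq()))
+    //   {
+    //       // Entries in *res were last modified at ledgerSeq - 1 or
+    //       // earlier; tombstoned (HOT_ARCHIVE_LIVE) keys are filtered out.
+    //   }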
+ std::optional> + loadKeysFromLedger(std::set const& inKeys, + uint32_t ledgerSeq); + + friend std::shared_ptr + BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const; }; } \ No newline at end of file diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index d78d4322f6..88e6280a46 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -285,10 +285,10 @@ class BucketManager : NonMovableOrCopyable std::vector const& liveEntries, std::vector const& deadEntries) = 0; virtual void - addArchivalBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& deadEntries) = 0; + addHotArchiveBatch(Application& app, LedgerHeader header, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) = 0; // Update the given LedgerHeader's bucketListHash to reflect the current // state of the bucket list. @@ -401,8 +401,9 @@ class BucketManager : NonMovableOrCopyable virtual Config const& getConfig() const = 0; // Get bucketlist snapshot - virtual std::shared_ptr - getSearchableBucketListSnapshot() = 0; + virtual std::shared_ptr + getSearchableLiveBucketListSnapshot() = 0; + virtual void reportBucketEntryCountMetrics() = 0; }; } diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManagerImpl.cpp index 9d5964739d..5c27cd4615 100644 --- a/src/bucket/BucketManagerImpl.cpp +++ b/src/bucket/BucketManagerImpl.cpp @@ -132,11 +132,12 @@ BucketManagerImpl::initialize() if (mConfig.isUsingBucketListDB()) { - // TODO: Archival BucketList snapshot mSnapshotManager = std::make_unique( mApp, - std::make_unique>(*mLiveBucketList, - LedgerHeader()), + std::make_unique>( + *mLiveBucketList, LedgerHeader()), + std::make_unique>( + *mHotArchiveBucketList, LedgerHeader()), mConfig.QUERY_SNAPSHOT_LEDGERS); } } @@ -1027,10 +1028,9 @@ BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header, #endif auto timer = mBucketAddLiveBatch.TimeScope(); mBucketLiveObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() + - deadEntries.size()); + deadEntries.size()); mLiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, - initEntries, liveEntries, deadEntries); - + initEntries, liveEntries, deadEntries); mLiveBucketListSizeCounter.set_count(mLiveBucketList->getSize()); if (app.getConfig().isUsingBucketListDB()) @@ -1039,29 +1039,34 @@ BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header, } } -// TODO: Fix interface to match addLiveBatch void -BucketManagerImpl::addArchivalBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& deadEntries) +BucketManagerImpl::addHotArchiveBatch( + Application& app, LedgerHeader header, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) { ZoneScoped; releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); + releaseAssertOrThrow(protocolVersionStartsFrom( + header.ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); #ifdef BUILD_TESTS if (mUseFakeTestValuesForNextClose) { - currLedgerProtocol = mFakeTestProtocolVersion; + header.ledgerVersion = mFakeTestProtocolVersion; } #endif auto timer = mBucketAddArchiveBatch.TimeScope(); - mBucketArchiveObjectInsertBatch.Mark(initEntries.size() + - deadEntries.size()); + mBucketArchiveObjectInsertBatch.Mark(archivedEntries.size() + + restoredEntries.size() + + 
deletedEntries.size()); // Hot archive should never modify an existing entry, so there are never // live entries - mHotArchiveBucketList->addBatch(app, currLedger, currLedgerProtocol, - initEntries, {}, deadEntries); + mHotArchiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, + archivedEntries, restoredEntries, + deletedEntries); mArchiveBucketListSizeCounter.set_count(mHotArchiveBucketList->getSize()); } @@ -1102,16 +1107,12 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) Hash hash; if (mConfig.MODE_ENABLES_BUCKETLIST) { - if (protocolVersionStartsFrom(currentHeader.ledgerVersion, - ProtocolVersion::V_22)) + if (protocolVersionStartsFrom( + currentHeader.ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { // TODO: Hash Archive Bucket - // Holding off on this until buckets are written to history - - // SHA256 hasher; - // hasher.add(mLiveBucketList->getHash()); - // hasher.add(mHotArchiveBucketList->getHash()); - // hash = hasher.finish(); + // Dependency: HAS supports Hot Archive BucketList hash = mLiveBucketList->getHash(); } @@ -1164,7 +1165,8 @@ BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq) releaseAssert(!mEvictionFuture.valid()); releaseAssert(mEvictionStatistics); - auto searchableBL = mSnapshotManager->copySearchableBucketListSnapshot(); + auto searchableBL = + mSnapshotManager->copySearchableLiveBucketListSnapshot(); auto const& cfg = mApp.getLedgerManager().getSorobanNetworkConfig(); auto const& sas = cfg.stateArchivalSettings(); @@ -1181,7 +1183,7 @@ BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq) mEvictionFuture = task->get_future(); mApp.postOnEvictionBackgroundThread( bind(&task_t::operator(), task), - "SearchableBucketListSnapshot: eviction scan"); + "SearchableLiveBucketListSnapshot: eviction scan"); } void @@ -1321,6 +1323,7 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); // TODO: Assume archival bucket state + // Dependency: HAS supports Hot Archive BucketList for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto curr = @@ -1476,8 +1479,8 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) MergeCounters mc; auto& ctx = mApp.getClock().getIOContext(); meta.ledgerVersion = mConfig.LEDGER_PROTOCOL_VERSION; - LiveBucketOutputIterator out(getTmpDir(), /*keepDeadEntries=*/false, meta, - mc, ctx, /*doFsync=*/true); + LiveBucketOutputIterator out(getTmpDir(), /*keepTombstoneEntries=*/false, + meta, mc, ctx, /*doFsync=*/true); for (auto const& pair : ledgerMap) { BucketEntry be; @@ -1672,6 +1675,7 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork() } // TODO: Update verify to for ArchiveBucket + // Dependency: HAS supports Hot Archive BucketList auto b = getBucketByHash(h); if (!b) { @@ -1691,8 +1695,8 @@ BucketManagerImpl::getConfig() const return mConfig; } -std::shared_ptr -BucketManagerImpl::getSearchableBucketListSnapshot() +std::shared_ptr +BucketManagerImpl::getSearchableLiveBucketListSnapshot() { releaseAssert(mConfig.isUsingBucketListDB()); // Any other threads must maintain their own snapshot @@ -1700,7 +1704,7 @@ BucketManagerImpl::getSearchableBucketListSnapshot() if (!mSearchableBucketListSnapshot) { mSearchableBucketListSnapshot = - mSnapshotManager->copySearchableBucketListSnapshot(); + mSnapshotManager->copySearchableLiveBucketListSnapshot(); } return mSearchableBucketListSnapshot; @@ -1713,7 +1717,7 @@ BucketManagerImpl::reportBucketEntryCountMetrics() { return; 
} - auto bucketEntryCounters = mBucketList->sumBucketEntryCounters(); + auto bucketEntryCounters = mLiveBucketList->sumBucketEntryCounters(); for (auto [type, count] : bucketEntryCounters.entryTypeCounts) { auto countCounter = mBucketListEntryCountCounters.find(type); diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h index 51cf16a468..9fb4415b2e 100644 --- a/src/bucket/BucketManagerImpl.h +++ b/src/bucket/BucketManagerImpl.h @@ -48,7 +48,7 @@ class BucketManagerImpl : public BucketManager std::unique_ptr mTmpDirManager; std::unique_ptr mWorkDir; std::map> mSharedBuckets; - std::shared_ptr + std::shared_ptr mSearchableBucketListSnapshot{}; // Lock for managing raw Bucket files or the bucket directory. This lock is @@ -189,10 +189,11 @@ class BucketManagerImpl : public BucketManager std::vector const& initEntries, std::vector const& liveEntries, std::vector const& deadEntries) override; - void addArchivalBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& deadEntries) override; + void + addHotArchiveBatch(Application& app, LedgerHeader header, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) override; void snapshotLedger(LedgerHeader& currentHeader) override; void maybeSetIndex(std::shared_ptr b, std::unique_ptr&& index) override; @@ -244,8 +245,9 @@ class BucketManagerImpl : public BucketManager Config const& getConfig() const override; - std::shared_ptr - getSearchableBucketListSnapshot() override; + std::shared_ptr + getSearchableLiveBucketListSnapshot() override; + void reportBucketEntryCountMetrics() override; }; diff --git a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp index b560ab3cb7..a5ae8a9efb 100644 --- a/src/bucket/BucketOutputIterator.cpp +++ b/src/bucket/BucketOutputIterator.cpp @@ -6,6 +6,7 @@ #include "bucket/Bucket.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" +#include "ledger/LedgerTypeUtils.h" #include "util/GlobalChecks.h" #include "util/ProtocolVersion.h" #include "xdr/Stellar-ledger.h" @@ -20,14 +21,17 @@ namespace stellar * hashes them while writing to either destination. Produces a Bucket when done. 
*/ template -BucketOutputIterator::BucketOutputIterator( - std::string const& tmpDir, bool keepDeadEntries, BucketMetadata const& meta, - MergeCounters& mc, asio::io_context& ctx, bool doFsync) +BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, + bool keepTombstoneEntries, + BucketMetadata const& meta, + MergeCounters& mc, + asio::io_context& ctx, + bool doFsync) : mFilename(Bucket::randomBucketName(tmpDir)) , mOut(ctx, doFsync) , mCtx(ctx) , mBuf(nullptr) - , mKeepDeadEntries(keepDeadEntries) + , mKeepTombstoneEntries(keepTombstoneEntries) , mMeta(meta) , mMergeCounters(mc) { @@ -52,10 +56,11 @@ BucketOutputIterator::BucketOutputIterator( else { releaseAssertOrThrow(protocolVersionStartsFrom( - meta.ledgerVersion, ProtocolVersion::V_22)); + meta.ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); HotArchiveBucketEntry bme; - bme.type(HA_METAENTRY); + bme.type(HOT_ARCHIVE_METAENTRY); bme.metaEntry() = mMeta; put(bme); } @@ -82,7 +87,7 @@ BucketOutputIterator::put(BucketEntryT const& e) } } - if (!mKeepDeadEntries && e.type() == DEADENTRY) + if (!mKeepTombstoneEntries && BucketT::isTombstoneEntry(e)) { ++mMergeCounters.mOutputIteratorTombstoneElisions; return; @@ -90,7 +95,7 @@ BucketOutputIterator::put(BucketEntryT const& e) } else { - if (e.type() == HA_METAENTRY) + if (e.type() == HOT_ARCHIVE_METAENTRY) { if (mPutMeta) { @@ -98,10 +103,29 @@ BucketOutputIterator::put(BucketEntryT const& e) "putting META entry in bucket after initial entry"); } } + else + { + if (e.type() == HOT_ARCHIVE_ARCHIVED) + { + if (!isSorobanEntry(e.archivedEntry().data)) + { + throw std::runtime_error( + "putting non-soroban entry in hot archive bucket"); + } + } + else + { + if (!isSorobanEntry(e.key())) + { + throw std::runtime_error( + "putting non-soroban entry in hot archive bucket"); + } + } + } - // RESTORED entries are dropped in the last bucket level (similar to - // DEADENTRY) on live BucketLists - if (!mKeepDeadEntries && e.type() == HA_RESTORED) + // HOT_ARCHIVE_LIVE entries are dropped in the last bucket level + // (similar to DEADENTRY) on live BucketLists + if (!mKeepTombstoneEntries && BucketT::isTombstoneEntry(e)) { ++mMergeCounters.mOutputIteratorTombstoneElisions; return; @@ -173,9 +197,8 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, if (auto b = bucketManager.getBucketIfExists(hash); !b || !b->isIndexed()) { - index = - BucketIndex::createIndex(bucketManager, mFilename, hash, mCtx); - releaseAssertOrThrow(index); + index = BucketIndex::createIndex( + bucketManager, mFilename, hash, mCtx); } } @@ -186,9 +209,9 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, } else { - // TODO: - releaseAssert(false); - return std::shared_ptr(); + + return bucketManager.adoptFileAsHotArchiveBucket( + mFilename.string(), hash, mergeKey, std::move(index)); } } diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h index 0a72659890..911b638914 100644 --- a/src/bucket/BucketOutputIterator.h +++ b/src/bucket/BucketOutputIterator.h @@ -38,7 +38,7 @@ template class BucketOutputIterator SHA256 mHasher; size_t mBytesPut{0}; size_t mObjectsPut{0}; - bool mKeepDeadEntries{true}; + bool mKeepTombstoneEntries{true}; BucketMetadata mMeta; bool mPutMeta{false}; MergeCounters& mMergeCounters; @@ -51,7 +51,7 @@ template class BucketOutputIterator // version new enough that it should _write_ the metadata to the stream in // the form of a METAENTRY; but that's not a thing the caller gets to decide // (or forget to do), it's 
handled automatically. - BucketOutputIterator(std::string const& tmpDir, bool keepDeadEntries, + BucketOutputIterator(std::string const& tmpDir, bool keepTombstoneEntries, BucketMetadata const& meta, MergeCounters& mc, asio::io_context& ctx, bool doFsync); diff --git a/src/bucket/BucketSnapshot.cpp b/src/bucket/BucketSnapshot.cpp index 3786c618ae..50dbe30b58 100644 --- a/src/bucket/BucketSnapshot.cpp +++ b/src/bucket/BucketSnapshot.cpp @@ -8,6 +8,7 @@ #include "ledger/LedgerTxn.h" #include "ledger/LedgerTypeUtils.h" #include "util/XDRStream.h" +#include namespace stellar { @@ -36,7 +37,7 @@ BucketSnapshotBase::isEmpty() const } template -std::pair::BucketEntryT>, +std::pair::BucketEntryT>, bool> BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, std::streamoff pos, @@ -45,7 +46,7 @@ BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, ZoneScoped; if (isEmpty()) { - return {std::nullopt, false}; + return {nullptr, false}; } auto& stream = getStream(); @@ -56,28 +57,28 @@ BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, { if (stream.readOne(be)) { - return {std::make_optional(be), false}; + return {std::make_shared(be), false}; } } else if (stream.readPage(be, k, pageSize)) { - return {std::make_optional(be), false}; + return {std::make_shared(be), false}; } // Mark entry miss for metrics mBucket->getIndex().markBloomMiss(); - return {std::nullopt, true}; + return {nullptr, true}; } template -std::pair::BucketEntryT>, +std::pair::BucketEntryT>, bool> BucketSnapshotBase::getBucketEntry(LedgerKey const& k) const { ZoneScoped; if (isEmpty()) { - return {std::nullopt, false}; + return {nullptr, false}; } auto pos = mBucket->getIndex().lookup(k); @@ -87,7 +88,7 @@ BucketSnapshotBase::getBucketEntry(LedgerKey const& k) const mBucket->getIndex().getPageSize()); } - return {std::nullopt, false}; + return {nullptr, false}; } // When searching for an entry, BucketList calls this function on every bucket. @@ -97,9 +98,9 @@ BucketSnapshotBase::getBucketEntry(LedgerKey const& k) const // from keys so that it will be searched for again at a lower level. template void -BucketSnapshotBase::loadKeysWithLimits( +BucketSnapshotBase::loadKeys( std::set& keys, - std::vector& result, LedgerKeyMeter* lkMeter) const + std::vector& result, LedgerKeyMeter* lkMeter) const { ZoneScoped; if (isEmpty()) @@ -112,19 +113,40 @@ BucketSnapshotBase::loadKeysWithLimits( auto indexIter = index.begin(); while (currKeyIt != keys.end() && indexIter != index.end()) { + // lkMeter only supported for LiveBucketList + if (std::is_same_v && lkMeter) + { + auto keySize = xdr::xdr_size(*currKeyIt); + if (!lkMeter->canLoad(*currKeyIt, keySize)) + { + // If the transactions containing this key have a remaining + // quota less than the size of the key, we cannot load the + // entry, as xdr_size(key) <= xdr_size(entry). Here we consume + // keySize bytes from the quotas of transactions containing the + // key so that they will have zero remaining quota and + // additional entries belonging to only those same transactions + // will not be loaded even if they would fit in the remaining + // quota before this update. 
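+                // Worked example (editor's note, hypothetical numbers): if a
+                // transaction has 10 bytes of read quota left and
+                // xdr_size(key) == 16, no entry for this key can fit, since
+                // xdr_size(key) <= xdr_size(entry). Charging the full 16
+                // bytes below clamps that transaction's quota to zero, so
+                // its remaining keys are skipped outright instead of being
+                // partially loaded.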
+ lkMeter->updateReadQuotasForKey(*currKeyIt, keySize); + currKeyIt = keys.erase(currKeyIt); + continue; + } + } auto [offOp, newIndexIter] = index.scan(indexIter, *currKeyIt); indexIter = newIndexIter; if (offOp) { auto [entryOp, bloomMiss] = getEntryAtOffset( *currKeyIt, *offOp, mBucket->getIndex().getPageSize()); + if (entryOp) { - // Only live bucket loads can be metered: - // TODO: Refactor metering to only LiveBucket - if constexpr (std::is_same_v) + // Don't return tombstone entries, as these do not exist wrt + // ledger state + if (!BucketT::isTombstoneEntry(*entryOp)) { - if (entryOp->type() != DEADENTRY) + // Only live bucket loads can be metered + if constexpr (std::is_same_v) { bool addEntry = true; if (lkMeter) @@ -144,7 +166,12 @@ BucketSnapshotBase::loadKeysWithLimits( result.push_back(entryOp->liveEntry()); } } + else + { + result.push_back(*entryOp); + } } + currKeyIt = keys.erase(currKeyIt); continue; } diff --git a/src/bucket/BucketSnapshot.h b/src/bucket/BucketSnapshot.h index 0bf6a74b7d..6da683ea6e 100644 --- a/src/bucket/BucketSnapshot.h +++ b/src/bucket/BucketSnapshot.h @@ -7,11 +7,10 @@ #include "bucket/Bucket.h" #include "bucket/LedgerCmp.h" #include "util/NonCopyable.h" +#include "xdr/Stellar-ledger-entries.h" #include #include -#include - namespace stellar { @@ -30,6 +29,12 @@ template class BucketSnapshotBase : public NonMovable using BucketEntryT = std::conditional_t, BucketEntry, HotArchiveBucketEntry>; + // LiveBucket returns LedgerEntry vector on call to loadKeys, + // HotArchiveBucket returns HotArchiveBucketEntry + using BulkLoadReturnT = + std::conditional_t, LedgerEntry, + HotArchiveBucketEntry>; + std::shared_ptr const mBucket; // Lazily-constructed and retained for read path. @@ -44,7 +49,7 @@ template class BucketSnapshotBase : public NonMovable // reads until key is found or the end of the page. Returns , where bloomMiss is true if a bloomMiss occurred during the // load. - std::pair, bool> + std::pair, bool> getEntryAtOffset(LedgerKey const& k, std::streamoff pos, size_t pageSize) const; @@ -60,19 +65,17 @@ template class BucketSnapshotBase : public NonMovable // Loads bucket entry for LedgerKey k. Returns , // where bloomMiss is true if a bloomMiss occurred during the load. - std::pair, bool> + std::pair, bool> getBucketEntry(LedgerKey const& k) const; - // TODO: Restrict limits to LiveBucket only // Loads LedgerEntry's for given keys. When a key is found, the // entry is added to result and the key is removed from keys. // If a pointer to a LedgerKeyMeter is provided, a key will only be loaded // if the meter has a transaction with sufficient read quota for the key. - void loadKeysWithLimits(std::set& keys, - std::vector& result, - LedgerKeyMeter* lkMeter) const; - - // friend struct BucketLevelSnapshot; + // If Bucket is not of type LiveBucket, lkMeter is ignored. 
+ void loadKeys(std::set& keys, + std::vector& result, + LedgerKeyMeter* lkMeter) const; }; class LiveBucketSnapshot : public BucketSnapshotBase diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index ac68a20a73..703da5c21f 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -3,8 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketSnapshotManager.h" +#include "bucket/Bucket.h" #include "bucket/BucketListSnapshot.h" #include "main/Application.h" +#include "util/GlobalChecks.h" #include "util/XDRStream.h" // IWYU pragma: keep #include "medida/meter.h" @@ -15,12 +17,17 @@ namespace stellar { BucketSnapshotManager::BucketSnapshotManager( - Application& app, std::unique_ptr const>&& snapshot, - uint32_t numHistoricalSnapshots) + Application& app, + std::unique_ptr const>&& snapshot, + std::unique_ptr const>&& + hotArchiveSnapshot, + uint32_t numLiveHistoricalSnapshots) : mApp(app) - , mCurrentSnapshot(std::move(snapshot)) - , mHistoricalSnapshots() - , mNumHistoricalSnapshots(numHistoricalSnapshots) + , mCurrLiveSnapshot(std::move(snapshot)) + , mCurrHotArchiveSnapshot(std::move(hotArchiveSnapshot)) + , mLiveHistoricalSnapshots() + , mHotArchiveHistoricalSnapshots() + , mNumHistoricalSnapshots(numLiveHistoricalSnapshots) , mBulkLoadMeter(app.getMetrics().NewMeter( {"bucketlistDB", "query", "loads"}, "query")) , mBloomMisses(app.getMetrics().NewMeter( @@ -29,16 +36,27 @@ BucketSnapshotManager::BucketSnapshotManager( {"bucketlistDB", "bloom", "lookups"}, "bloom")) { releaseAssert(threadIsMain()); + releaseAssert(mCurrLiveSnapshot); + releaseAssert(mCurrHotArchiveSnapshot); } std::shared_ptr -BucketSnapshotManager::copySearchableBucketListSnapshot() const +BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const { // Can't use std::make_shared due to private constructor return std::shared_ptr( new SearchableLiveBucketListSnapshot(*this)); } +std::shared_ptr +BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const +{ + releaseAssert(mCurrHotArchiveSnapshot); + // Can't use std::make_shared due to private constructor + return std::shared_ptr( + new SearchableHotArchiveBucketListSnapshot(*this)); +} + medida::Timer& BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label, size_t numEntries) const @@ -63,12 +81,43 @@ BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label, return iter->second; } +template void BucketSnapshotManager::maybeUpdateSnapshot( - std::unique_ptr const>& snapshot, - std::map const>>& - historicalSnapshots) const + std::unique_ptr& snapshot, + std::map>& historicalSnapshots) + const { + static_assert( + std::is_same_v> || + std::is_same_v>); + + auto const& managerSnapshot = [&]() -> auto const& + { + if constexpr (std::is_same_v>) + { + return mCurrLiveSnapshot; + } + else + { + return mCurrHotArchiveSnapshot; + } + } + (); + + auto const& managerHistoricalSnapshots = [&]() -> auto const& + { + if constexpr (std::is_same_v>) + { + return mLiveHistoricalSnapshots; + } + else + { + return mHotArchiveHistoricalSnapshots; + } + } + (); + // The canonical snapshot held by the BucketSnapshotManager is not being // modified. Rather, a thread is checking it's copy against the canonical // snapshot, so use a shared lock. 
@@ -76,65 +125,74 @@ BucketSnapshotManager::maybeUpdateSnapshot( // First update current snapshot if (!snapshot || - snapshot->getLedgerSeq() != mCurrentSnapshot->getLedgerSeq()) + snapshot->getLedgerSeq() != managerSnapshot->getLedgerSeq()) { // Should only update with a newer snapshot releaseAssert(!snapshot || snapshot->getLedgerSeq() < - mCurrentSnapshot->getLedgerSeq()); - snapshot = - std::make_unique>(*mCurrentSnapshot); + managerSnapshot->getLedgerSeq()); + snapshot = std::make_unique(*managerSnapshot); } // Then update historical snapshots (if any exist) - if (mHistoricalSnapshots.empty()) + if (managerHistoricalSnapshots.empty()) { return; } // If size of manager's history map is different, or if the oldest snapshot // ledger seq is different, we need to update. - if (mHistoricalSnapshots.size() != historicalSnapshots.size() || - mHistoricalSnapshots.begin()->first != + if (managerHistoricalSnapshots.size() != historicalSnapshots.size() || + managerHistoricalSnapshots.begin()->first != historicalSnapshots.begin()->first) { // Copy current snapshot map into historicalSnapshots historicalSnapshots.clear(); - for (auto const& [ledgerSeq, snap] : mHistoricalSnapshots) + for (auto const& [ledgerSeq, snap] : managerHistoricalSnapshots) { - historicalSnapshots.emplace( - ledgerSeq, std::make_unique(*snap)); + historicalSnapshots.emplace(ledgerSeq, + std::make_unique(*snap)); } } } void BucketSnapshotManager::updateCurrentSnapshot( - std::unique_ptr const>&& newSnapshot) + std::unique_ptr const>&& liveSnapshot, + std::unique_ptr const>&& + hotArchiveSnapshot) { - releaseAssert(newSnapshot); releaseAssert(threadIsMain()); - // Updating the BucketSnapshotManager canonical snapshot, must lock - // exclusively for write access. - std::unique_lock lock(mSnapshotMutex); - releaseAssert(!mCurrentSnapshot || newSnapshot->getLedgerSeq() >= - mCurrentSnapshot->getLedgerSeq()); + auto updateSnapshot = [numHistoricalSnapshots = mNumHistoricalSnapshots]( + auto& currentSnapshot, auto& historicalSnapshots, + auto&& newSnapshot) { + releaseAssert(newSnapshot); + releaseAssert(!currentSnapshot || newSnapshot->getLedgerSeq() >= + currentSnapshot->getLedgerSeq()); - // First update historical snapshots - if (mNumHistoricalSnapshots != 0) - { - // If historical snapshots are full, delete the oldest one - if (mHistoricalSnapshots.size() == mNumHistoricalSnapshots) + // First update historical snapshots + if (numHistoricalSnapshots != 0) { - mHistoricalSnapshots.erase(mHistoricalSnapshots.begin()); + // If historical snapshots are full, delete the oldest one + if (historicalSnapshots.size() == numHistoricalSnapshots) + { + historicalSnapshots.erase(historicalSnapshots.begin()); + } + + historicalSnapshots.emplace(currentSnapshot->getLedgerSeq(), + std::move(currentSnapshot)); + currentSnapshot = nullptr; } - mHistoricalSnapshots.emplace(mCurrentSnapshot->getLedgerSeq(), - std::move(mCurrentSnapshot)); - mCurrentSnapshot = nullptr; - } + currentSnapshot.swap(newSnapshot); + }; - mCurrentSnapshot.swap(newSnapshot); + // Updating the BucketSnapshotManager canonical snapshot, must lock + // exclusively for write access. 
+ std::unique_lock lock(mSnapshotMutex); + updateSnapshot(mCurrLiveSnapshot, mLiveHistoricalSnapshots, liveSnapshot); + updateSnapshot(mCurrHotArchiveSnapshot, mHotArchiveHistoricalSnapshots, + hotArchiveSnapshot); } void @@ -171,4 +229,16 @@ BucketSnapshotManager::endPointLoadTimer(LedgerEntryType t, iter->second.Update(duration); } } + +template void +BucketSnapshotManager::maybeUpdateSnapshot>( + std::unique_ptr const>& snapshot, + std::map const>>& + historicalSnapshots) const; +template void BucketSnapshotManager::maybeUpdateSnapshot< + BucketListSnapshot>( + std::unique_ptr const>& snapshot, + std::map const>>& + historicalSnapshots) const; } \ No newline at end of file diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h index 97ccf1ae30..de44f6f165 100644 --- a/src/bucket/BucketSnapshotManager.h +++ b/src/bucket/BucketSnapshotManager.h @@ -4,6 +4,8 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" +#include "bucket/BucketList.h" #include "bucket/BucketManagerImpl.h" #include "util/NonCopyable.h" #include "util/UnorderedMap.h" @@ -26,6 +28,7 @@ class Application; class LiveBucketList; template class BucketListSnapshot; class SearchableLiveBucketListSnapshot; +class SearchableHotArchiveBucketListSnapshot; // This class serves as the boundary between non-threadsafe singleton classes // (BucketManager, BucketList, Metrics, etc) and threadsafe, parallel BucketList @@ -38,16 +41,20 @@ class BucketSnapshotManager : NonMovableOrCopyable // Snapshot that is maintained and periodically updated by BucketManager on // the main thread. When background threads need to generate or refresh a // snapshot, they will copy this snapshot. - std::unique_ptr const> mCurrentSnapshot{}; + std::unique_ptr const> mCurrLiveSnapshot{}; + std::unique_ptr const> + mCurrHotArchiveSnapshot{}; // ledgerSeq that the snapshot is based on -> snapshot std::map const>> - mHistoricalSnapshots; + mLiveHistoricalSnapshots; + std::map const>> + mHotArchiveHistoricalSnapshots; uint32_t const mNumHistoricalSnapshots; - // Lock must be held when accessing mCurrentSnapshot and - // mHistoricalSnapshots + // Lock must be held when accessing any snapshot mutable std::shared_mutex mSnapshotMutex; mutable UnorderedMap mPointTimers{}; @@ -60,27 +67,35 @@ class BucketSnapshotManager : NonMovableOrCopyable mutable std::optional mTimerStart; public: - // Called by main thread to update mCurrentSnapshot whenever the BucketList + // Called by main thread to update snapshots whenever the BucketList // is updated void updateCurrentSnapshot( - std::unique_ptr const>&& newSnapshot); + std::unique_ptr const>&& liveSnapshot, + std::unique_ptr const>&& + hotArchiveSnapshot); // numHistoricalLedgers is the number of historical snapshots that the // snapshot manager will maintain. If numHistoricalLedgers is 5, snapshots // will be capable of querying state from ledger [lcl, lcl - 5]. 
- BucketSnapshotManager(Application& app, - std::unique_ptr const>&& snapshot, - uint32_t numHistoricalLedgers); + BucketSnapshotManager( + Application& app, + std::unique_ptr const>&& snapshot, + std::unique_ptr const>&& + hotArchiveSnapshot, + uint32_t numHistoricalLedgers); std::shared_ptr - copySearchableBucketListSnapshot() const; - - // Checks if snapshot is out of date with mCurrentSnapshot and updates - // it accordingly - void maybeUpdateSnapshot( - std::unique_ptr const>& snapshot, - std::map const>>& - historicalSnapshots) const; + copySearchableLiveBucketListSnapshot() const; + + std::shared_ptr + copySearchableHotArchiveBucketListSnapshot() const; + + // Checks if snapshot is out of date and updates it accordingly + template + void + maybeUpdateSnapshot(std::unique_ptr& snapshot, + std::map>& + historicalSnapshots) const; // All metric recording functions must only be called by the main thread void startPointLoadTimer() const; diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp index 3fa3a24e02..bc2dec6d16 100644 --- a/src/bucket/FutureBucket.cpp +++ b/src/bucket/FutureBucket.cpp @@ -62,8 +62,10 @@ FutureBucket::FutureBucket( if constexpr (!std::is_same_v) { - if (protocolVersionIsBefore(snap->getBucketVersion(), - ProtocolVersion::V_22)) + if (!snap->isEmpty() && + protocolVersionIsBefore( + snap->getBucketVersion(), + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { throw std::runtime_error( "Invalid ArchivalFutureBucket: ledger version doesn't support " @@ -372,7 +374,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, // deserialized. In this case we want to attach to the existing merge, which // will have left a std::shared_future behind in a shared cache in the // bucket manager. - MergeKey mk{BucketListBase::keepDeadEntries(level), + MergeKey mk{BucketListBase::keepTombstoneEntries(level), curr->getHash(), snap->getHash(), shadowHashes}; std::shared_future> f; @@ -414,7 +416,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, auto res = Bucket::merge( bm, maxProtocolVersion, curr, snap, shadows, - BucketListBase::keepDeadEntries(level), + BucketListBase::keepTombstoneEntries(level), countMergeEvents, ctx, doFsync); if (res) diff --git a/src/bucket/LedgerCmp.h b/src/bucket/LedgerCmp.h index 2a84cad211..cc550a4e96 100644 --- a/src/bucket/LedgerCmp.h +++ b/src/bucket/LedgerCmp.h @@ -145,45 +145,45 @@ template struct BucketEntryIdCmp HotArchiveBucketEntryType bty = b.type(); // METAENTRY sorts below all other entries, comes first in buckets. 
-        if (aty == HA_METAENTRY || bty == HA_METAENTRY)
+        if (aty == HOT_ARCHIVE_METAENTRY || bty == HOT_ARCHIVE_METAENTRY)
         {
             return aty < bty;
         }

-        if (aty == HA_ARCHIVED)
+        if (aty == HOT_ARCHIVE_ARCHIVED)
         {
-            if (bty == HA_ARCHIVED)
+            if (bty == HOT_ARCHIVE_ARCHIVED)
             {
                 return LedgerEntryIdCmp{}(a.archivedEntry().data,
                                           b.archivedEntry().data);
             }
             else
             {
-                if (bty != HA_DELETED || bty != HA_RESTORED)
+                if (bty != HOT_ARCHIVE_DELETED && bty != HOT_ARCHIVE_LIVE)
                 {
-                    throw std::runtime_error("Malformed bucket: unexpected "
-                                             "DELETED/RESTORED key.");
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/LIVE key.");
                 }
                 return LedgerEntryIdCmp{}(a.archivedEntry().data, b.key());
             }
         }
         else
         {
-            if (aty != HA_DELETED || aty != HA_RESTORED)
+            if (aty != HOT_ARCHIVE_DELETED && aty != HOT_ARCHIVE_LIVE)
             {
                 throw std::runtime_error(
-                    "Malformed bucket: unexpected DELETED/RESTORED key.");
+                    "Malformed bucket: expected DELETED/LIVE key.");
             }

-            if (bty == HA_ARCHIVED)
+            if (bty == HOT_ARCHIVE_ARCHIVED)
             {
                 return LedgerEntryIdCmp{}(a.key(), b.archivedEntry().data);
             }
             else
             {
-                if (bty != HA_DELETED || bty != HA_RESTORED)
+                if (bty != HOT_ARCHIVE_DELETED && bty != HOT_ARCHIVE_LIVE)
                 {
-                    throw std::runtime_error("Malformed bucket: unexpected "
-                                             "DELETED/RESTORED key.");
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/LIVE key.");
                 }
                 return LedgerEntryIdCmp{}(a.key(), b.key());
diff --git a/src/bucket/MergeKey.cpp b/src/bucket/MergeKey.cpp
index c52eeca08c..f3932195f0 100644
--- a/src/bucket/MergeKey.cpp
+++ b/src/bucket/MergeKey.cpp
@@ -10,9 +10,9 @@
 namespace stellar
 {

-MergeKey::MergeKey(bool keepDeadEntries, Hash const& currHash,
+MergeKey::MergeKey(bool keepTombstoneEntries, Hash const& currHash,
                    Hash const& snapHash, std::vector const& shadowHashes)
-    : mKeepDeadEntries(keepDeadEntries)
+    : mKeepTombstoneEntries(keepTombstoneEntries)
     , mInputCurrBucket(currHash)
     , mInputSnapBucket(snapHash)
     , mInputShadowBuckets(shadowHashes)
@@ -22,7 +22,7 @@ MergeKey::MergeKey(bool keepDeadEntries, Hash const& currHash,
 bool
 MergeKey::operator==(MergeKey const& other) const
 {
-    return mKeepDeadEntries == other.mKeepDeadEntries &&
+    return mKeepTombstoneEntries == other.mKeepTombstoneEntries &&
            mInputCurrBucket == other.mInputCurrBucket &&
            mInputSnapBucket == other.mInputSnapBucket &&
            mInputShadowBuckets == other.mInputShadowBuckets;
@@ -43,7 +43,7 @@ operator<<(std::ostream& out, MergeKey const& b)
         first = false;
         out << hexAbbrev(s);
     }
-    out << fmt::format(FMT_STRING("], keep={}]"), b.mKeepDeadEntries);
+    out << fmt::format(FMT_STRING("], keep={}]"), b.mKeepTombstoneEntries);
     return out;
 }

@@ -62,7 +62,7 @@
 size_t
 hash::operator()(stellar::MergeKey const& key) const noexcept
 {
     std::ostringstream oss;
-    oss << key.mKeepDeadEntries << ','
+    oss << key.mKeepTombstoneEntries << ','
         << stellar::binToHex(key.mInputCurrBucket) << ','
         << stellar::binToHex(key.mInputSnapBucket);
     for (auto const& e : key.mInputShadowBuckets)
diff --git a/src/bucket/MergeKey.h b/src/bucket/MergeKey.h
index 205d4c5d17..d33a73672b 100644
--- a/src/bucket/MergeKey.h
+++ b/src/bucket/MergeKey.h
@@ -17,10 +17,10 @@ namespace stellar
 // pre-resolved std::shared_future containing that output.
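+// Construction sketch (editor's note), mirroring FutureBucket::startMerge
+// elsewhere in this patch: a MergeKey names a merge purely by its inputs,
+// so an identical in-progress or finished merge can be re-attached rather
+// than re-run.
+//
+//   MergeKey mk{BucketListBase::keepTombstoneEntries(level),
+//               curr->getHash(), snap->getHash(), shadowHashes};
+//   // The bucket manager can then hand back the cached
+//   // std::shared_future for that exact merge.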
struct MergeKey { - MergeKey(bool keepDeadEntries, Hash const& currHash, Hash const& snapHash, - std::vector const& shadowHashes); + MergeKey(bool keepTombstoneEntries, Hash const& currHash, + Hash const& snapHash, std::vector const& shadowHashes); - bool mKeepDeadEntries; + bool mKeepTombstoneEntries; Hash mInputCurrBucket; Hash mInputSnapBucket; std::vector mInputShadowBuckets; diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp index 7ad466bbfe..ae3af67dc6 100644 --- a/src/bucket/test/BucketIndexTests.cpp +++ b/src/bucket/test/BucketIndexTests.cpp @@ -16,6 +16,12 @@ #include "main/Config.h" #include "test/test.h" +#include "util/ProtocolVersion.h" +#include "util/UnorderedMap.h" +#include "util/UnorderedSet.h" +#include "util/XDRCereal.h" +#include "util/types.h" + using namespace stellar; using namespace BucketTestUtils; @@ -129,7 +135,7 @@ class BucketIndexTest auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); auto lk = LedgerEntryKey(canonicalEntry); auto currentLoadedEntry = searchableBL->load(lk); @@ -143,20 +149,18 @@ class BucketIndexTest for (uint32_t currLedger = ledger; currLedger > 0; --currLedger) { - auto [loadRes, snapshotExists] = - searchableBL->loadKeysFromLedger({lk}, currLedger); + auto loadRes = searchableBL->loadKeysFromLedger({lk}, currLedger); // If we query an older snapshot, should return if (currLedger < ledger - mApp->getConfig().QUERY_SNAPSHOT_LEDGERS) { - REQUIRE(!snapshotExists); - REQUIRE(loadRes.empty()); + REQUIRE(!loadRes); } else { - REQUIRE(snapshotExists); - REQUIRE(loadRes.size() == 1); - REQUIRE(loadRes[0].lastModifiedLedgerSeq == currLedger - 1); + REQUIRE(loadRes); + REQUIRE(loadRes->size() == 1); + REQUIRE(loadRes->at(0).lastModifiedLedgerSeq == currLedger - 1); } } } @@ -250,7 +254,7 @@ class BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); // Test bulk load lookup auto loadResult = @@ -277,7 +281,7 @@ class BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); for (size_t i = 0; i < n; ++i) { LedgerKeySet searchSubset; @@ -317,7 +321,7 @@ class BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); // Load should return empty vector for keys not in bucket list auto keysNotInBL = @@ -494,7 +498,7 @@ class BucketIndexPoolShareTest : public BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); auto loadResult = searchableBL->loadPoolShareTrustLinesByAccountAndAsset( mAccountToSearch.accountID, mAssetToSearch); @@ -682,4 +686,201 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") REQUIRE((inMemoryIndex == *onDiskIndex)); } } + +// The majority of BucketListDB functionality is shared by all bucketlist types. +// This test is a simple sanity check and tests the interface differences +// between the live bucketlist and the hot archive bucketlist. 
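+// Interface contrast in brief (editor's note): the live snapshot's load()
+// returns std::shared_ptr<LedgerEntry>, while the hot archive snapshot's
+// load() returns std::shared_ptr<HotArchiveBucketEntry>, since DELETED
+// markers carry only a LedgerKey rather than a full entry:
+//
+//   auto le = liveSnap->load(k);      // LedgerEntry or nullptr
+//   auto hae = archiveSnap->load(k);  // HOT_ARCHIVE_ARCHIVED or _DELETED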
+TEST_CASE("hot archive bucket lookups", "[bucket][bucketindex][archive]") +{ + auto f = [&](Config& cfg) { + auto clock = VirtualClock(); + auto app = createTestApplication(clock, cfg); + + UnorderedMap expectedArchiveEntries; + UnorderedSet expectedDeletedEntries; + UnorderedSet expectedRestoredEntries; + UnorderedSet keysToSearch; + + auto ledger = 1; + + // Use snapshot across ledger to test update behavior + auto searchableBL = app->getBucketManager() + .getBucketSnapshotManager() + .copySearchableHotArchiveBucketListSnapshot(); + + auto checkLoad = [&](LedgerKey const& k, + std::shared_ptr entryPtr) { + // Restored entries should be null + if (expectedRestoredEntries.find(k) != + expectedRestoredEntries.end()) + { + REQUIRE(!entryPtr); + } + + // Deleted entries should be HotArchiveBucketEntry of type + // DELETED + else if (expectedDeletedEntries.find(k) != + expectedDeletedEntries.end()) + { + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_DELETED); + REQUIRE(entryPtr->key() == k); + } + + // Archived entries should contain full LedgerEntry + else + { + auto expectedIter = expectedArchiveEntries.find(k); + REQUIRE(expectedIter != expectedArchiveEntries.end()); + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED); + REQUIRE(entryPtr->archivedEntry() == expectedIter->second); + } + }; + + auto checkResult = [&] { + LedgerKeySet bulkLoadKeys; + for (auto const& k : keysToSearch) + { + auto entryPtr = searchableBL->load(k); + checkLoad(k, entryPtr); + bulkLoadKeys.emplace(k); + } + + auto bulkLoadResult = searchableBL->loadKeys(bulkLoadKeys); + for (auto entry : bulkLoadResult) + { + if (entry.type() == HOT_ARCHIVE_DELETED) + { + auto k = entry.key(); + auto iter = expectedDeletedEntries.find(k); + REQUIRE(iter != expectedDeletedEntries.end()); + expectedDeletedEntries.erase(iter); + } + else + { + REQUIRE(entry.type() == HOT_ARCHIVE_ARCHIVED); + auto le = entry.archivedEntry(); + auto k = LedgerEntryKey(le); + auto iter = expectedArchiveEntries.find(k); + REQUIRE(iter != expectedArchiveEntries.end()); + REQUIRE(iter->second == le); + expectedArchiveEntries.erase(iter); + } + } + + REQUIRE(expectedDeletedEntries.empty()); + REQUIRE(expectedArchiveEntries.empty()); + }; + + auto archivedEntries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 10); + for (auto const& e : archivedEntries) + { + auto k = LedgerEntryKey(e); + expectedArchiveEntries.emplace(k, e); + keysToSearch.emplace(k); + } + + // Note: keys to search automatically populated by these functions + auto deletedEntries = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 10, keysToSearch); + for (auto const& k : deletedEntries) + { + expectedDeletedEntries.emplace(k); + } + + auto restoredEntries = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 10, keysToSearch); + for (auto const& k : restoredEntries) + { + expectedRestoredEntries.emplace(k); + } + + auto header = + app->getLedgerManager().getLastClosedLedgerHeader().header; + header.ledgerSeq += 1; + header.ledgerVersion = static_cast( + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); + addHotArchiveBatchAndUpdateSnapshot(*app, header, archivedEntries, + restoredEntries, deletedEntries); + checkResult(); + + // Add a few batches so that entries are no longer in the top bucket + for (auto i = 0; i < 100; ++i) + { + header.ledgerSeq += 1; + 
addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, {}, {}); + } + + // Shadow entries via liveEntry + auto liveShadow1 = LedgerEntryKey(archivedEntries[0]); + auto liveShadow2 = deletedEntries[1]; + + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, + {liveShadow1, liveShadow2}, {}); + + // Point load + for (auto const& k : {liveShadow1, liveShadow2}) + { + auto entryPtr = searchableBL->load(k); + REQUIRE(!entryPtr); + } + + // Bulk load + auto bulkLoadResult = + searchableBL->loadKeys({liveShadow1, liveShadow2}); + REQUIRE(bulkLoadResult.size() == 0); + + // Shadow via deletedEntry + auto deletedShadow = LedgerEntryKey(archivedEntries[1]); + + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, {}, + {deletedShadow}); + + // Point load + auto entryPtr = searchableBL->load(deletedShadow); + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_DELETED); + REQUIRE(entryPtr->key() == deletedShadow); + + // Bulk load + auto bulkLoadResult2 = searchableBL->loadKeys({deletedShadow}); + REQUIRE(bulkLoadResult2.size() == 1); + REQUIRE(bulkLoadResult2[0].type() == HOT_ARCHIVE_DELETED); + REQUIRE(bulkLoadResult2[0].key() == deletedShadow); + + // Shadow via archivedEntry + auto archivedShadow = archivedEntries[3]; + archivedShadow.lastModifiedLedgerSeq = ledger; + + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {archivedShadow}, {}, + {}); + + // Point load + entryPtr = searchableBL->load(LedgerEntryKey(archivedShadow)); + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED); + REQUIRE(entryPtr->archivedEntry() == archivedShadow); + + // Bulk load + auto bulkLoadResult3 = + searchableBL->loadKeys({LedgerEntryKey(archivedShadow)}); + REQUIRE(bulkLoadResult3.size() == 1); + REQUIRE(bulkLoadResult3[0].type() == HOT_ARCHIVE_ARCHIVED); + REQUIRE(bulkLoadResult3[0].archivedEntry() == archivedShadow); + }; + + testAllIndexTypes(f); +} } diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index bd1be99898..d3bb8a2a9b 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -29,6 +29,8 @@ #include "util/Math.h" #include "util/ProtocolVersion.h" #include "util/Timer.h" +#include "util/UnorderedSet.h" +#include "xdr/Stellar-ledger.h" #include "xdrpp/autocheck.h" #include @@ -129,66 +131,104 @@ binarySearchForLedger(uint32_t lbound, uint32_t ubound, using namespace BucketListTests; -TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]") +template +static void +basicBucketListTest() { VirtualClock clock; Config const& cfg = getTestConfig(); - try - { - for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { + + auto test = [&](Config const& cfg) { + try + { Application::pointer app = createTestApplication(clock, cfg); - LiveBucketList bl; + BucketListT bl; CLOG_DEBUG(Bucket, "Adding batches to bucket list"); + + UnorderedSet seenKeys; for (uint32_t i = 1; !app->getClock().getIOContext().stopped() && i < 130; ++i) { app->getClock().crank(false); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, - LedgerTestUtils:: - generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), - LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( - {CONFIG_SETTING}, 5)); + if constexpr (std::is_same_v) + { + bl.addBatch( + *app, i, getAppLedgerVersion(app), {}, 
+ LedgerTestUtils::generateValidUniqueLedgerEntries(8), + LedgerTestUtils:: + generateValidLedgerEntryKeysWithExclusions( + {CONFIG_SETTING}, 5)); + } + else + { + bl.addBatch( + *app, i, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 8, seenKeys), + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 5, seenKeys)); + } + if (i % 10 == 0) CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i, binToHex(bl.getHash())); - for (uint32_t j = 0; j < LiveBucketList::kNumLevels; ++j) + for (uint32_t j = 0; j < BucketListT::kNumLevels; ++j) { auto const& lev = bl.getLevel(j); auto currSz = countEntries(lev.getCurr()); auto snapSz = countEntries(lev.getSnap()); - CHECK(currSz <= LiveBucketList::levelHalf(j) * 100); - CHECK(snapSz <= LiveBucketList::levelHalf(j) * 100); + CHECK(currSz <= BucketListT::levelHalf(j) * 100); + CHECK(snapSz <= BucketListT::levelHalf(j) * 100); } } - }); + } + catch (std::future_error& e) + { + CLOG_DEBUG(Bucket, "Test caught std::future_error {}: {}", + e.code().value(), e.what()); + REQUIRE(false); + } + }; + + if constexpr (std::is_same_v) + { + for_versions_with_differing_bucket_logic(cfg, test); } - catch (std::future_error& e) + else { - CLOG_DEBUG(Bucket, "Test caught std::future_error {}: {}", - e.code().value(), e.what()); - REQUIRE(false); + for_versions_from(23, cfg, test); } } -TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") +TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]") +{ + SECTION("live bl") + { + basicBucketListTest(); + } + + SECTION("hot archive bl") + { + basicBucketListTest(); + } +} + +template +static void +updatePeriodTest() { std::map currCalculatedUpdatePeriods; std::map snapCalculatedUpdatePeriods; - for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListT::kNumLevels; ++i) { currCalculatedUpdatePeriods.emplace( - i, LiveBucketList::bucketUpdatePeriod(i, /*isCurr=*/true)); + i, BucketListT::bucketUpdatePeriod(i, /*isCurr=*/true)); // Last level has no snap - if (i != LiveBucketList::kNumLevels - 1) + if (i != BucketListT::kNumLevels - 1) { snapCalculatedUpdatePeriods.emplace( - i, LiveBucketList::bucketUpdatePeriod(i, /*isSnap=*/false)); + i, BucketListT::bucketUpdatePeriod(i, /*isSnap=*/false)); } } @@ -197,7 +237,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") !snapCalculatedUpdatePeriods.empty(); ++ledgerSeq) { - for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListT::kNumLevels; ++level) { // Check if curr bucket is updated auto currIter = currCalculatedUpdatePeriods.find(level); @@ -213,7 +253,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") { // For all other levels, an update occurs when the level // above spills - if (LiveBucketList::levelShouldSpill(ledgerSeq, level - 1)) + if (BucketListT::levelShouldSpill(ledgerSeq, level - 1)) { REQUIRE(currIter->second == ledgerSeq); currCalculatedUpdatePeriods.erase(currIter); @@ -225,7 +265,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") auto snapIter = snapCalculatedUpdatePeriods.find(level); if (snapIter != snapCalculatedUpdatePeriods.end()) { - if (LiveBucketList::levelShouldSpill(ledgerSeq, level)) + if (BucketListT::levelShouldSpill(ledgerSeq, level)) { // Check that snap bucket calculation is correct REQUIRE(snapIter->second == ledgerSeq); @@ -236,6 +276,19 @@ 
TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") } } +TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") +{ + SECTION("live bl") + { + updatePeriodTest(); + } + + SECTION("hot archive bl") + { + updatePeriodTest(); + } +} + TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", "[bucket][bucketlist]") { @@ -258,8 +311,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", { app->getClock().crank(false); auto liveBatch = - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 5); + LedgerTestUtils::generateValidUniqueLedgerEntries(5); BucketEntry BucketEntryAlice, BucketEntryBob; alice.balance++; @@ -274,11 +326,8 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", BucketEntryBob.liveEntry().data.account() = bob; liveBatch.push_back(BucketEntryBob.liveEntry()); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, liveBatch, + bl.addBatch( + *app, i, getAppLedgerVersion(app), {}, liveBatch, LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); if (i % 100 == 0) @@ -337,7 +386,74 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", }); } -TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", +TEST_CASE_VERSIONS("hot archive bucket tombstones expire at bottom level", + "[bucket][bucketlist][tombstones]") +{ + VirtualClock clock; + Config const& cfg = getTestConfig(); + + testutil::BucketListDepthModifier bldm(5); + auto app = createTestApplication(clock, cfg); + for_versions_from(23, *app, [&] { + HotArchiveBucketList bl; + + auto lastSnapSize = [&] { + auto& level = bl.getLevel(HotArchiveBucketList::kNumLevels - 2); + return countEntries(level.getSnap()); + }; + + auto countNonBottomLevelEntries = [&] { + auto size = 0; + for (uint32_t i = 0; i < HotArchiveBucketList::kNumLevels - 1; ++i) + { + auto& level = bl.getLevel(i); + size += countEntries(level.getCurr()); + size += countEntries(level.getSnap()); + } + return size; + }; + + // Populate a BucketList so everything but the bottom level is full. 
+ UnorderedSet<LedgerKey> keys; + auto numExpectedEntries = 0; + auto ledger = 1; + while (lastSnapSize() == 0) + { + bl.addBatch(*app, ledger, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 5, keys), + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 5, keys)); + + // Once all entries merge to the bottom level, only deleted entries + // should remain + numExpectedEntries += 5; + + ++ledger; + } + + // Close ledgers until all entries have merged into the bottom level + // bucket + while (countNonBottomLevelEntries() != 0) + { + bl.addBatch(*app, ledger, getAppLedgerVersion(app), {}, {}, {}); + ++ledger; + } + + auto bottomCurr = + bl.getLevel(HotArchiveBucketList::kNumLevels - 1).getCurr(); + REQUIRE(countEntries(bottomCurr) == numExpectedEntries); + + for (HotArchiveBucketInputIterator iter(bottomCurr); iter; ++iter) + { + auto be = *iter; + REQUIRE(be.type() == HOT_ARCHIVE_DELETED); + REQUIRE(keys.find(be.key()) != keys.end()); + } + }); +} + +TEST_CASE_VERSIONS("live bucket tombstones expire at bottom level", "[bucket][bucketlist][tombstones]") { VirtualClock clock; @@ -354,16 +470,14 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", auto& level = bl.getLevel(i); level.setCurr(LiveBucket::fresh( bm, getAppLedgerVersion(app), {}, - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), + LedgerTestUtils::generateValidUniqueLedgerEntries(8), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5), /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true)); level.setSnap(LiveBucket::fresh( bm, getAppLedgerVersion(app), {}, - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), + LedgerTestUtils::generateValidUniqueLedgerEntries(8), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5), /*countMergeEvents=*/true, clock.getIOContext(), @@ -377,14 +491,9 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", for (auto j : ledgers) { auto n = mergeTimer.count(); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = j; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, - LedgerTestUtils:: - generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), + bl.addBatch( + *app, j, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerEntries(8), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); app->getClock().crank(false); @@ -423,6 +532,7 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); LiveBucketList bl; + auto vers = getAppLedgerVersion(app); autocheck::generator<bool> flip; std::deque<LedgerEntry> entriesToModify; for (uint32_t i = 1; i < 512; ++i) { @@ -458,11 +568,7 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", deadEntries.push_back(LedgerEntryKey(e)); } } - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot(bl, *app, lh, initEntries, liveEntries, - deadEntries); + bl.addBatch(*app, i, vers, initEntries, liveEntries, deadEntries); app->getClock().crank(false); for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { @@ -507,23 +613,17 @@ TEST_CASE_VERSIONS("single entry bubbling up", std::vector<LedgerEntry>
emptySetEntry; CLOG_DEBUG(Bucket, "Adding single entry in lowest level"); - addBatchAndUpdateSnapshot( - bl, *app, - app->getLedgerManager().getLastClosedLedgerHeader().header, {}, - LedgerTestUtils::generateValidLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 1), - emptySet); + bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidLedgerEntries(1), + emptySet); CLOG_DEBUG(Bucket, "Adding empty batches to bucket list"); for (uint32_t i = 2; !app->getClock().getIOContext().stopped() && i < 300; ++i) { app->getClock().crank(false); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot(bl, *app, lh, {}, emptySetEntry, - emptySet); + bl.addBatch(*app, i, getAppLedgerVersion(app), {}, + emptySetEntry, emptySet); if (i % 10 == 0) CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i, binToHex(bl.getHash())); @@ -562,45 +662,62 @@ TEST_CASE_VERSIONS("single entry bubbling up", } } -TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", - "[bucket][bucketlist][count]") +template <class BucketListT> +static void +sizeOfTests() { stellar::uniform_int_distribution<uint32_t> dist; for (uint32_t i = 0; i < 1000; ++i) { - for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListT::kNumLevels; ++level) { uint32_t ledger = dist(gRandomEngine); - if (LiveBucketList::sizeOfSnap(ledger, level) > 0) + if (BucketListT::sizeOfSnap(ledger, level) > 0) { uint32_t oldestInCurr = - LiveBucketList::oldestLedgerInSnap(ledger, level) + - LiveBucketList::sizeOfSnap(ledger, level); + BucketListT::oldestLedgerInSnap(ledger, level) + + BucketListT::sizeOfSnap(ledger, level); REQUIRE(oldestInCurr == - LiveBucketList::oldestLedgerInCurr(ledger, level)); + BucketListT::oldestLedgerInCurr(ledger, level)); } - if (LiveBucketList::sizeOfCurr(ledger, level) > 0) + if (BucketListT::sizeOfCurr(ledger, level) > 0) { uint32_t newestInCurr = - LiveBucketList::oldestLedgerInCurr(ledger, level) + - LiveBucketList::sizeOfCurr(ledger, level) - 1; - REQUIRE(newestInCurr == - (level == 0 ? ledger - : LiveBucketList::oldestLedgerInSnap( - ledger, level - 1) - - 1)); + BucketListT::oldestLedgerInCurr(ledger, level) + + BucketListT::sizeOfCurr(ledger, level) - 1; + REQUIRE(newestInCurr == (level == 0 + ? ledger + : BucketListT::oldestLedgerInSnap( + ledger, level - 1) - + 1)); } } } } -TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") +TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", + "[bucket][bucketlist][count]") +{ + SECTION("live bl") + { + sizeOfTests<LiveBucketList>(); + } + + SECTION("hot archive bl") + { + sizeOfTests<HotArchiveBucketList>(); + } +} + +template <class BucketListT> +static void +snapSteadyStateTest() { // Deliberately exclude deepest level since snap on the deepest level // is always empty.
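    // (sizeOfSnap() is defined to return 0 for the last level, kNumLevels - 1.)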
- for (uint32_t level = 0; level < LiveBucketList::kNumLevels - 1; ++level) + for (uint32_t level = 0; level < BucketListT::kNumLevels - 1; ++level) { - uint32_t const half = LiveBucketList::levelHalf(level); + uint32_t const half = BucketListT::levelHalf(level); // Use binary search (assuming that it does reach steady state) // to find the ledger where the snap at this level first reaches @@ -608,7 +725,7 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits<uint32_t>::max() / 2, [level, half](uint32_t ledger) { - return (LiveBucketList::sizeOfSnap(ledger, level) == half); + return (BucketListT::sizeOfSnap(ledger, level) == half); }); // Generate random ledgers above and below the split to test that @@ -619,21 +736,36 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(LiveBucketList::sizeOfSnap(low, level) < half); - REQUIRE(LiveBucketList::sizeOfSnap(high, level) == half); + REQUIRE(BucketListT::sizeOfSnap(low, level) < half); + REQUIRE(BucketListT::sizeOfSnap(high, level) == half); } } } -TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") +TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") +{ + SECTION("live bl") + { + snapSteadyStateTest<LiveBucketList>(); + } + + SECTION("hot archive bl") + { + snapSteadyStateTest<HotArchiveBucketList>(); + } +} + +template <class BucketListT> +static void +deepestCurrTest() { - uint32_t const deepest = LiveBucketList::kNumLevels - 1; + uint32_t const deepest = BucketListT::kNumLevels - 1; // Use binary search to find the first ledger where the deepest curr // first is non-empty. uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits<uint32_t>::max() / 2, [deepest](uint32_t ledger) { - return (LiveBucketList::sizeOfCurr(ledger, deepest) > 0); + return (BucketListT::sizeOfCurr(ledger, deepest) > 0); }); stellar::uniform_int_distribution<uint32_t> distLow(1, boundary - 1); stellar::uniform_int_distribution<uint32_t> distHigh(boundary); @@ -641,29 +773,57 @@ TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(LiveBucketList::sizeOfCurr(low, deepest) == 0); - REQUIRE(LiveBucketList::oldestLedgerInCurr(low, deepest) == + REQUIRE(BucketListT::sizeOfCurr(low, deepest) == 0); + REQUIRE(BucketListT::oldestLedgerInCurr(low, deepest) == std::numeric_limits<uint32_t>::max()); - REQUIRE(LiveBucketList::sizeOfCurr(high, deepest) > 0); - REQUIRE(LiveBucketList::oldestLedgerInCurr(high, deepest) == 1); + REQUIRE(BucketListT::sizeOfCurr(high, deepest) > 0); + REQUIRE(BucketListT::oldestLedgerInCurr(high, deepest) == 1); - REQUIRE(LiveBucketList::sizeOfSnap(low, deepest) == 0); - REQUIRE(LiveBucketList::oldestLedgerInSnap(low, deepest) == + REQUIRE(BucketListT::sizeOfSnap(low, deepest) == 0); + REQUIRE(BucketListT::oldestLedgerInSnap(low, deepest) == std::numeric_limits<uint32_t>::max()); - REQUIRE(LiveBucketList::sizeOfSnap(high, deepest) == 0); - REQUIRE(LiveBucketList::oldestLedgerInSnap(high, deepest) == + REQUIRE(BucketListT::sizeOfSnap(high, deepest) == 0); + REQUIRE(BucketListT::oldestLedgerInSnap(high, deepest) == std::numeric_limits<uint32_t>::max()); } } +TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") +{ + SECTION("live bl") + { + deepestCurrTest<LiveBucketList>(); + } + + SECTION("hot archive bl") + { + deepestCurrTest<HotArchiveBucketList>(); + } +} + +template <class BucketListT> +static void
+blSizesAtLedger1Test() +{ + REQUIRE(BucketListT::sizeOfCurr(1, 0) == 1); + REQUIRE(BucketListT::sizeOfSnap(1, 0) == 0); + for (uint32_t level = 1; level < BucketListT::kNumLevels; ++level) + { + REQUIRE(BucketListT::sizeOfCurr(1, level) == 0); + REQUIRE(BucketListT::sizeOfSnap(1, level) == 0); + } +} + TEST_CASE("BucketList sizes at ledger 1", "[bucket][bucketlist][count]") { - REQUIRE(LiveBucketList::sizeOfCurr(1, 0) == 1); - REQUIRE(LiveBucketList::sizeOfSnap(1, 0) == 0); - for (uint32_t level = 1; level < LiveBucketList::kNumLevels; ++level) + SECTION("live bl") + { + blSizesAtLedger1Test<LiveBucketList>(); + } + + SECTION("hot archive bl") { - REQUIRE(LiveBucketList::sizeOfCurr(1, level) == 0); - REQUIRE(LiveBucketList::sizeOfSnap(1, level) == 0); + blSizesAtLedger1Test<HotArchiveBucketList>(); } } @@ -686,8 +846,8 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]") auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq = ledgerSeq; - addBatchAndUpdateSnapshot(bl, *app, lh, {}, - {ledgers[ledgerSeq - 1]}, emptySet); + addLiveBatchAndUpdateSnapshot(*app, lh, {}, + {ledgers[ledgerSeq - 1]}, emptySet); } for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { @@ -1278,7 +1438,7 @@ TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]") entry.data.claimableBalance().amount = 0; auto searchableBL = - bm.getBucketSnapshotManager().copySearchableBucketListSnapshot(); + bm.getBucketSnapshotManager().copySearchableLiveBucketListSnapshot(); // Update entry every 5 ledgers so we can see bucket merge events for (auto ledgerSeq = 1; ledgerSeq < 101; ++ledgerSeq) diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 701e3ca7ff..f2a500f6e2 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -365,8 +365,8 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq = ledger; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, + addLiveBatchAndUpdateSnapshot( + *app, lh, {}, LedgerTestUtils::generateValidLedgerEntriesWithExclusions( {CONFIG_SETTING}, 10), {}); @@ -396,7 +396,7 @@ // Reattach to _finished_ merge future on level. has2.currentBuckets[level].next.makeLive( - *app, vers, LiveBucketList::keepDeadEntries(level)); + *app, vers, LiveBucketList::keepTombstoneEntries(level)); REQUIRE(has2.currentBuckets[level].next.isMerging()); // Resolve reattached future.
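    // (Because the merge has already finished, makeLive reattaches to the stored output bucket rather than starting a new merge.)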
@@ -454,8 +454,8 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq = ledger; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, + addLiveBatchAndUpdateSnapshot( + *app, lh, {}, LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100), {}); @@ -479,7 +479,8 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", if (has2.currentBuckets[level].next.hasHashes()) { has2.currentBuckets[level].next.makeLive( - *app, vers, LiveBucketList::keepDeadEntries(level)); + *app, vers, + LiveBucketList::keepTombstoneEntries(level)); } } } @@ -579,7 +580,6 @@ TEST_CASE_VERSIONS( auto vers = getAppLedgerVersion(app); auto& hm = app->getHistoryManager(); auto& bm = app->getBucketManager(); - auto& bl = bm.getLiveBucketList(); hm.setPublicationEnabled(false); app->getHistoryArchiveManager().initializeHistoryArchive( tcfg.getArchiveDirName()); @@ -595,8 +595,8 @@ TEST_CASE_VERSIONS( auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq++; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, + addLiveBatchAndUpdateSnapshot( + *app, lh, {}, LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100), {}); diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp index 795d9a7a06..d39c5384b2 100644 --- a/src/bucket/test/BucketTestUtils.cpp +++ b/src/bucket/test/BucketTestUtils.cpp @@ -3,6 +3,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "BucketTestUtils.h" +#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" #include "crypto/Hex.h" @@ -10,6 +11,8 @@ #include "ledger/LedgerTxn.h" #include "main/Application.h" #include "test/test.h" +#include "xdr/Stellar-ledger.h" +#include namespace stellar { @@ -30,18 +33,47 @@ getAppLedgerVersion(Application::pointer app) } void -addBatchAndUpdateSnapshot(LiveBucketList& bl, Application& app, - LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { - bl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, - liveEntries, deadEntries); + auto& liveBl = app.getBucketManager().getLiveBucketList(); + liveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, + liveEntries, deadEntries); if (app.getConfig().isUsingBucketListDB()) { + auto liveSnapshot = + std::make_unique>(liveBl, header); + auto hotArchiveSnapshot = + std::make_unique>( + app.getBucketManager().getHotArchiveBucketList(), header); + + app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); + } +} + +void +addHotArchiveBatchAndUpdateSnapshot( + Application& app, LedgerHeader header, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + auto& hotArchiveBl = app.getBucketManager().getHotArchiveBucketList(); + hotArchiveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, + archiveEntries, restoredEntries, deletedEntries); + if (app.getConfig().isUsingBucketListDB()) + { + auto liveSnapshot = std::make_unique>( + app.getBucketManager().getLiveBucketList(), header); + auto hotArchiveSnapshot = + 
std::make_unique>(hotArchiveBl, + header); + app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( - std::make_unique(bl, header)); + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); } } @@ -59,13 +91,6 @@ for_versions_with_differing_bucket_logic( cfg, f); } -size_t -countEntries(std::shared_ptr bucket) -{ - EntryCounts e(bucket); - return e.sum(); -} - Hash closeLedger(Application& app, std::optional skToSignValue, xdr::xvector upgrades) @@ -88,7 +113,8 @@ closeLedger(Application& app) return closeLedger(app, std::nullopt); } -EntryCounts::EntryCounts(std::shared_ptr bucket) +template <> +EntryCounts::EntryCounts(std::shared_ptr bucket) { LiveBucketInputIterator iter(bucket); if (iter.seenMetadata()) @@ -100,7 +126,7 @@ EntryCounts::EntryCounts(std::shared_ptr bucket) switch ((*iter).type()) { case INITENTRY: - ++nInit; + ++nInitOrArchived; break; case LIVEENTRY: ++nLive; @@ -117,6 +143,48 @@ EntryCounts::EntryCounts(std::shared_ptr bucket) } } +template <> +EntryCounts::EntryCounts( + std::shared_ptr bucket) +{ + HotArchiveBucketInputIterator iter(bucket); + if (iter.seenMetadata()) + { + ++nMeta; + } + while (iter) + { + switch ((*iter).type()) + { + case HOT_ARCHIVE_ARCHIVED: + ++nInitOrArchived; + break; + case HOT_ARCHIVE_LIVE: + ++nLive; + break; + case HOT_ARCHIVE_DELETED: + ++nDead; + break; + case HOT_ARCHIVE_METAENTRY: + // This should never happen: only the first record can be METAENTRY + // and it is counted above. + abort(); + } + ++iter; + } +} + +template +size_t +countEntries(std::shared_ptr bucket) +{ + EntryCounts e(bucket); + return e.sum(); +} + +template size_t countEntries(std::shared_ptr bucket); +template size_t countEntries(std::shared_ptr bucket); + void LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( AbstractLedgerTxn& ltx, diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h index 8fff139f37..c77794a80a 100644 --- a/src/bucket/test/BucketTestUtils.h +++ b/src/bucket/test/BucketTestUtils.h @@ -5,17 +5,23 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "test/TestUtils.h" +#include "xdr/Stellar-ledger.h" namespace stellar { namespace BucketTestUtils { -void addBatchAndUpdateSnapshot(LiveBucketList& bl, Application& app, - LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries); +void addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries); + +void addHotArchiveBatchAndUpdateSnapshot( + Application& app, LedgerHeader header, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); uint32_t getAppLedgerVersion(Application& app); @@ -24,27 +30,30 @@ uint32_t getAppLedgerVersion(std::shared_ptr app); void for_versions_with_differing_bucket_logic( Config const& cfg, std::function const& f); -struct EntryCounts +template struct EntryCounts { + static_assert(std::is_same_v || + std::is_same_v); + size_t nMeta{0}; - size_t nInit{0}; + size_t nInitOrArchived{0}; size_t nLive{0}; size_t nDead{0}; size_t sum() const { - return nLive + nInit + nDead; + return nLive + nInitOrArchived + nDead; } size_t sumIncludingMeta() const { - return nLive + nInit + nDead + nMeta; + return nLive + nInitOrArchived + nDead + nMeta; } - EntryCounts(std::shared_ptr bucket); + EntryCounts(std::shared_ptr bucket); }; -size_t countEntries(std::shared_ptr 
bucket); +template size_t countEntries(std::shared_ptr bucket); Hash closeLedger(Application& app, std::optional skToSignValue, xdr::xvector upgrades = emptyUpgradeSteps); diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp index 6f08b4c8bf..841d0ca6e6 100644 --- a/src/bucket/test/BucketTests.cpp +++ b/src/bucket/test/BucketTests.cpp @@ -89,7 +89,7 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]") clock.getIOContext(), /*doFsync=*/true), /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); } @@ -170,7 +170,7 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") clock.getIOContext(), /*doFsync=*/true); auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); @@ -210,7 +210,7 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") /*doFsync=*/true); auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e(b1); @@ -265,7 +265,7 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") std::shared_ptr b3 = Bucket::merge(app->getBucketManager(), app->getConfig().LEDGER_PROTOCOL_VERSION, b1, b2, - /*shadows=*/{}, /*keepDeadEntries=*/true, + /*shadows=*/{}, /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); CHECK(countEntries(b3) == liveCount); @@ -273,6 +273,99 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") }); } +TEST_CASE_VERSIONS("merging hot archive bucket entries", "[bucket][archival]") +{ + VirtualClock clock; + Config const& cfg = getTestConfig(); + + auto app = createTestApplication(clock, cfg); + for_versions_from(23, *app, [&] { + auto& bm = app->getBucketManager(); + auto vers = getAppLedgerVersion(app); + + SECTION("new annihilates old") + { + auto e1 = + LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE); + auto e2 = + LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE); + auto e3 = + LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA); + auto e4 = + LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA); + + // Old bucket: + // e1 -> ARCHIVED + // e2 -> LIVE + // e3 -> DELETED + // e4 -> DELETED + auto b1 = HotArchiveBucket::fresh( + bm, vers, {e1}, {LedgerEntryKey(e2)}, + {LedgerEntryKey(e3), LedgerEntryKey(e4)}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + + // New bucket: + // e1 -> DELETED + // e2 -> ARCHIVED + // e3 -> LIVE + auto b2 = HotArchiveBucket::fresh( + bm, vers, {e2}, {LedgerEntryKey(e3)}, {LedgerEntryKey(e1)}, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + + // Expected result: + // e1 -> DELETED + // e2 -> ARCHIVED + // e3 -> LIVE + // e4 -> DELETED + auto merged = + Bucket::merge(bm, vers, b1, b2, /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + + bool seen1 = false; + bool seen4 = false; + auto count = 0; + for (HotArchiveBucketInputIterator iter(merged); iter; ++iter) + { + ++count; + auto const& e = *iter; + if (e.type() == HOT_ARCHIVE_ARCHIVED) + { + REQUIRE(e.archivedEntry() == e2); + } + else if (e.type() == HOT_ARCHIVE_LIVE) + { + REQUIRE(e.key() == LedgerEntryKey(e3)); + } + else if (e.type() == 
HOT_ARCHIVE_DELETED) + { + if (e.key() == LedgerEntryKey(e1)) + { + REQUIRE(!seen1); + seen1 = true; + } + else if (e.key() == LedgerEntryKey(e4)) + { + REQUIRE(!seen4); + seen4 = true; + } + } + else + { + FAIL(); + } + } + + REQUIRE(seen1); + REQUIRE(seen4); + REQUIRE(count == 4); + } + }); +} + static LedgerEntry generateAccount() { @@ -371,7 +464,7 @@ TEST_CASE("merges proceed old-style despite newer shadows", auto bucket = Bucket::merge(bm, v12, b11first, b11second, /*shadows=*/{b12first}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); REQUIRE(bucket->getBucketVersion() == v11); @@ -383,7 +476,7 @@ TEST_CASE("merges proceed old-style despite newer shadows", auto bucket = Bucket::merge(bm, v12, b10first, b10second, /*shadows=*/{b12first, b11second}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); REQUIRE(bucket->getBucketVersion() == v11); @@ -392,7 +485,7 @@ TEST_CASE("merges proceed old-style despite newer shadows", { REQUIRE_THROWS_AS(Bucket::merge(bm, v12, b12first, b12second, /*shadows=*/{b12first}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true), @@ -428,7 +521,7 @@ TEST_CASE("merges refuse to exceed max protocol version", /*doFsync=*/true); REQUIRE_THROWS_AS(Bucket::merge(bm, vers - 1, bnew1, bnew2, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true), @@ -504,7 +597,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", /*doFsync=*/true); auto b1 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bDead, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); // In initEra, the INIT will make it through fresh() to the bucket, @@ -513,7 +606,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", // fresh(), and that will be killed by the DEAD, leaving 1 // (tombstone) entry. EntryCounts e(b1); - CHECK(e.nInit == 0); + CHECK(e.nInitOrArchived == 0); CHECK(e.nLive == 0); if (initEra) { @@ -544,18 +637,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", /*doFsync=*/true); auto bmerge1 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bLive, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); auto b1 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bDead, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); // The same thing should happen here as above, except that the INIT // will merge-over the LIVE during fresh(). 
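    // (That is, fresh() collapses the INIT and the later LIVE for the same key into a single INIT carrying the LIVE entry's state.)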
EntryCounts e(b1); - CHECK(e.nInit == 0); + CHECK(e.nInitOrArchived == 0); CHECK(e.nLive == 0); if (initEra) { @@ -590,7 +683,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", CHECK(eold.nMeta == 1); CHECK(emed.nMeta == 1); CHECK(enew.nMeta == 1); - CHECK(eold.nInit == 1); + CHECK(eold.nInitOrArchived == 1); CHECK(eold.nLive == 0); } else @@ -598,35 +691,35 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", CHECK(eold.nMeta == 0); CHECK(emed.nMeta == 0); CHECK(enew.nMeta == 0); - CHECK(eold.nInit == 0); + CHECK(eold.nInitOrArchived == 0); CHECK(eold.nLive == 1); } CHECK(eold.nDead == 0); - CHECK(emed.nInit == 0); + CHECK(emed.nInitOrArchived == 0); CHECK(emed.nLive == 4); CHECK(emed.nDead == 0); - CHECK(enew.nInit == 0); + CHECK(enew.nInitOrArchived == 0); CHECK(enew.nLive == 0); CHECK(enew.nDead == 1); auto bmerge1 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bold, bmed, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); auto bmerge2 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bnew, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts emerge1(bmerge1), emerge2(bmerge2); if (initEra) { CHECK(emerge1.nMeta == 1); - CHECK(emerge1.nInit == 1); + CHECK(emerge1.nInitOrArchived == 1); CHECK(emerge1.nLive == 3); CHECK(emerge2.nMeta == 1); @@ -635,14 +728,14 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", else { CHECK(emerge1.nMeta == 0); - CHECK(emerge1.nInit == 0); + CHECK(emerge1.nInitOrArchived == 0); CHECK(emerge1.nLive == 4); CHECK(emerge2.nMeta == 0); CHECK(emerge2.nDead == 1); } CHECK(emerge1.nDead == 0); - CHECK(emerge2.nInit == 0); + CHECK(emerge2.nInitOrArchived == 0); CHECK(emerge2.nLive == 3); } }); @@ -702,21 +795,21 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", auto merged = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, b1, b2, /*shadows=*/{shadow}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e(merged); if (initEra) { CHECK(e.nMeta == 1); - CHECK(e.nInit == 2); + CHECK(e.nInitOrArchived == 2); CHECK(e.nLive == 0); CHECK(e.nDead == 0); } else { CHECK(e.nMeta == 0); - CHECK(e.nInit == 0); + CHECK(e.nInitOrArchived == 0); CHECK(e.nLive == 1); CHECK(e.nDead == 0); } @@ -757,7 +850,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", auto merge43 = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level4, level3, /*shadows=*/{level2, level1}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e43(merge43); @@ -765,7 +858,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // New-style, we preserve the dead entry. CHECK(e43.nMeta == 1); - CHECK(e43.nInit == 0); + CHECK(e43.nInitOrArchived == 0); CHECK(e43.nLive == 0); CHECK(e43.nDead == 1); } @@ -773,7 +866,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // Old-style, we shadowed-out the dead entry. 
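    // (Old-style merges let shadows suppress the DEAD entry outright, so every count below is expected to be zero.)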
CHECK(e43.nMeta == 0); - CHECK(e43.nInit == 0); + CHECK(e43.nInitOrArchived == 0); CHECK(e43.nLive == 0); CHECK(e43.nDead == 0); } @@ -783,7 +876,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", auto merge21 = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level2, level1, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e21(merge21); @@ -791,7 +884,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // New-style, they mutually annihilate. CHECK(e21.nMeta == 1); - CHECK(e21.nInit == 0); + CHECK(e21.nInitOrArchived == 0); CHECK(e21.nLive == 0); CHECK(e21.nDead == 0); } @@ -799,7 +892,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // Old-style, we keep the tombstone around. CHECK(e21.nMeta == 0); - CHECK(e21.nInit == 0); + CHECK(e21.nInitOrArchived == 0); CHECK(e21.nLive == 0); CHECK(e21.nDead == 1); } @@ -809,13 +902,13 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", auto merge4321 = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge43, merge21, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); auto merge54321 = Bucket::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, level5, merge4321, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e54321(merge21); @@ -823,7 +916,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // New-style, we should get a second mutual annihilation. CHECK(e54321.nMeta == 1); - CHECK(e54321.nInit == 0); + CHECK(e54321.nInitOrArchived == 0); CHECK(e54321.nLive == 0); CHECK(e54321.nDead == 0); } @@ -831,7 +924,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // Old-style, the tombstone should clobber the live entry. CHECK(e54321.nMeta == 0); - CHECK(e54321.nInit == 0); + CHECK(e54321.nInitOrArchived == 0); CHECK(e54321.nLive == 0); CHECK(e54321.nDead == 1); } @@ -866,7 +959,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", auto merge32 = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level3, level2, /*shadows=*/{level1}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e32(merge32); @@ -874,7 +967,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // New-style, we preserve the init entry. CHECK(e32.nMeta == 1); - CHECK(e32.nInit == 1); + CHECK(e32.nInitOrArchived == 1); CHECK(e32.nLive == 0); CHECK(e32.nDead == 0); } @@ -882,7 +975,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // Old-style, we shadowed-out the live and init entries. 
CHECK(e32.nMeta == 0); - CHECK(e32.nInit == 0); + CHECK(e32.nInitOrArchived == 0); CHECK(e32.nLive == 0); CHECK(e32.nDead == 0); } @@ -893,7 +986,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", auto merge321 = Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge32, level1, /*shadows=*/{}, - /*keepDeadEntries=*/true, + /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); EntryCounts e321(merge321); @@ -901,7 +994,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", { // New-style, init meets dead and they annihilate. CHECK(e321.nMeta == 1); - CHECK(e321.nInit == 0); + CHECK(e321.nInitOrArchived == 0); CHECK(e321.nLive == 0); CHECK(e321.nDead == 0); } @@ -910,7 +1003,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // Old-style, init was already shadowed-out, so dead // accumulates. CHECK(e321.nMeta == 0); - CHECK(e321.nInit == 0); + CHECK(e321.nInitOrArchived == 0); CHECK(e321.nLive == 0); CHECK(e321.nDead == 1); } diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 8a5ad90936..c8764b5ce2 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -86,9 +86,10 @@ IndexBucketsWork::IndexWork::postWork() if (!self->mIndex) { - self->mIndex = - BucketIndex::createIndex(bm, self->mBucket->getFilename(), - self->mBucket->getHash(), ctx); + // TODO: Fix this when archive BucketLists assume state + self->mIndex = BucketIndex::createIndex( + bm, self->mBucket->getFilename(), self->mBucket->getHash(), + ctx); } app.postOnMainThread( diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp index 002ab4fd00..bf733a00bf 100644 --- a/src/herder/test/UpgradesTests.cpp +++ b/src/herder/test/UpgradesTests.cpp @@ -2007,7 +2007,8 @@ TEST_CASE("upgrade to version 11", "[upgrades]") Bucket, "post-ledger {} close, init counts: level {}, {} in curr, " "{} in snap", - ledgerSeq, level, currCounts.nInit, snapCounts.nInit); + ledgerSeq, level, currCounts.nInitOrArchived, + snapCounts.nInitOrArchived); } if (ledgerSeq < 5) { @@ -2047,14 +2048,14 @@ TEST_CASE("upgrade to version 11", "[upgrades]") default: case 8: REQUIRE(getVers(lev1Curr) == newProto); - REQUIRE(lev1CurrCounts.nInit != 0); + REQUIRE(lev1CurrCounts.nInitOrArchived != 0); case 7: case 6: REQUIRE(getVers(lev0Snap) == newProto); - REQUIRE(lev0SnapCounts.nInit != 0); + REQUIRE(lev0SnapCounts.nInitOrArchived != 0); case 5: REQUIRE(getVers(lev0Curr) == newProto); - REQUIRE(lev0CurrCounts.nInit != 0); + REQUIRE(lev0CurrCounts.nInitOrArchived != 0); } } } diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 875f9f90f6..1f3bf185d8 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -1318,8 +1318,8 @@ TEST_CASE_VERSIONS( { auto lcl = lm.getLastClosedLedgerHeader(); lcl.header.ledgerSeq += 1; - BucketTestUtils::addBatchAndUpdateSnapshot( - bl, *app, lcl.header, {}, + BucketTestUtils::addLiveBatchAndUpdateSnapshot( + *app, lcl.header, {}, LedgerTestUtils::generateValidUniqueLedgerEntries(8), {}); clock.crank(true); } diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp index f95b1660c6..27a7ae7f89 100644 --- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp +++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp @@ -2,6 +2,7 @@ // under the Apache License, 
Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" #include "bucket/BucketOutputIterator.h" @@ -145,8 +146,7 @@ struct BucketListGenerator auto header = ltx.loadHeader().current(); ltx.getAllEntries(initEntries, liveEntries, deadEntries); BucketTestUtils::addLiveBatchAndUpdateSnapshot( - *app, header, initEntries, - liveEntries, deadEntries); + *app, header, initEntries, liveEntries, deadEntries); ltx.commit(); } @@ -214,7 +214,7 @@ struct BucketListGenerator { auto& level = blGenerate.getLevel(i); auto meta = testutil::testBucketMetadata(vers); - auto keepDead = LiveBucketList::keepDeadEntries(i); + auto keepDead = LiveBucketList::keepTombstoneEntries(i); auto writeBucketFile = [&](auto b) { LiveBucketOutputIterator out(bmApply.getTmpDir(), keepDead, @@ -944,7 +944,7 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY", cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true; cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1; - testutil::BucketListDepthModifier bldm(3); + testutil::BucketListDepthModifier bldm(3); for (auto t : xdr::xdr_traits::enum_values()) { if (t == CONFIG_SETTING) diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index 80d551eee3..c5436f841e 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -1298,10 +1298,14 @@ LedgerManagerImpl::advanceLedgerPointers(LedgerHeader const& header, if (mApp.getConfig().isUsingBucketListDB() && header.ledgerSeq != prevLedgerSeq) { - mApp.getBucketManager() - .getBucketSnapshotManager() - .updateCurrentSnapshot(std::make_unique( - mApp.getBucketManager().getBucketList(), header)); + auto& bm = mApp.getBucketManager(); + auto liveSnapshot = std::make_unique>( + bm.getLiveBucketList(), header); + auto hotArchiveSnapshot = + std::make_unique>( + bm.getHotArchiveBucketList(), header); + bm.getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); } } diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp index 6f0228884e..e97864e0d1 100644 --- a/src/ledger/LedgerStateSnapshot.cpp +++ b/src/ledger/LedgerStateSnapshot.cpp @@ -164,7 +164,7 @@ LedgerTxnReadOnly::executeWithMaybeInnerSnapshot( } BucketSnapshotState::BucketSnapshotState(BucketManager& bm) - : mSnapshot(bm.getSearchableBucketListSnapshot()) + : mSnapshot(bm.getSearchableLiveBucketListSnapshot()) , mLedgerHeader(LedgerHeaderWrapper( std::make_shared(mSnapshot->getLedgerHeader()))) { diff --git a/src/ledger/LedgerStateSnapshot.h b/src/ledger/LedgerStateSnapshot.h index 7a57b1c771..dc4f6f76f9 100644 --- a/src/ledger/LedgerStateSnapshot.h +++ b/src/ledger/LedgerStateSnapshot.h @@ -105,7 +105,7 @@ class LedgerTxnReadOnly : public AbstractLedgerStateSnapshot // A concrete implementation of read-only BucketList snapshot wrapper class BucketSnapshotState : public AbstractLedgerStateSnapshot { - std::shared_ptr mSnapshot; + std::shared_ptr mSnapshot; // Store a copy of the header from mSnapshot. 
This is needed for // validation flow where for certain validation scenarios the header needs // to be modified diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index 8cf1e52261..348cb6fc68 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -3103,7 +3103,7 @@ LedgerTxnRoot::Impl::prefetchInternal(UnorderedSet const& keys, { insertIfNotLoaded(keysToSearch, key); } - auto blLoad = getSearchableBucketListSnapshot().loadKeysWithLimits( + auto blLoad = getSearchableLiveBucketListSnapshot().loadKeysWithLimits( keysToSearch, lkMeter); cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter)); } @@ -3487,14 +3487,15 @@ LedgerTxnRoot::Impl::areEntriesMissingInCacheForOffer(OfferEntry const& oe) } SearchableLiveBucketListSnapshot& -LedgerTxnRoot::Impl::getSearchableBucketListSnapshot() const +LedgerTxnRoot::Impl::getSearchableLiveBucketListSnapshot() const { releaseAssert(mApp.getConfig().isUsingBucketListDB()); if (!mSearchableBucketListSnapshot) { - mSearchableBucketListSnapshot = mApp.getBucketManager() - .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + mSearchableBucketListSnapshot = + mApp.getBucketManager() + .getBucketSnapshotManager() + .copySearchableLiveBucketListSnapshot(); } return *mSearchableBucketListSnapshot; @@ -3635,7 +3636,7 @@ LedgerTxnRoot::Impl::getPoolShareTrustLinesByAccountAndAsset( if (mApp.getConfig().isUsingBucketListDB()) { trustLines = - getSearchableBucketListSnapshot() + getSearchableLiveBucketListSnapshot() .loadPoolShareTrustLinesByAccountAndAsset(account, asset); } else @@ -3698,7 +3699,7 @@ LedgerTxnRoot::Impl::getInflationWinners(size_t maxWinners, int64_t minVotes) { if (mApp.getConfig().isUsingBucketListDB()) { - return getSearchableBucketListSnapshot().loadInflationWinners( + return getSearchableLiveBucketListSnapshot().loadInflationWinners( maxWinners, minVotes); } else @@ -3754,7 +3755,7 @@ LedgerTxnRoot::Impl::getNewestVersion(InternalLedgerKey const& gkey) const { if (mApp.getConfig().isUsingBucketListDB() && key.type() != OFFER) { - entry = getSearchableBucketListSnapshot().load(key); + entry = getSearchableLiveBucketListSnapshot().load(key); } else { diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index 42b4571514..5b6d9299e7 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -871,7 +871,8 @@ class LedgerTxnRoot::Impl bool areEntriesMissingInCacheForOffer(OfferEntry const& oe); - SearchableLiveBucketListSnapshot& getSearchableBucketListSnapshot() const; + SearchableLiveBucketListSnapshot& + getSearchableLiveBucketListSnapshot() const; uint32_t prefetchInternal(UnorderedSet const& keys, LedgerKeyMeter* lkMeter = nullptr); diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp index 89db21694b..7edb34ff09 100644 --- a/src/ledger/NetworkConfig.cpp +++ b/src/ledger/NetworkConfig.cpp @@ -1871,9 +1871,8 @@ SorobanNetworkConfig::writeAllSettings(AbstractLedgerTxn& ltx, { auto lcl = app.getLedgerManager().getLastClosedLedgerHeader(); lcl.header.ledgerSeq += 1; - BucketTestUtils::addBatchAndUpdateSnapshot( - app.getBucketManager().getLiveBucketList(), app, lcl.header, {}, - entries, {}); + BucketTestUtils::addLiveBatchAndUpdateSnapshot(app, lcl.header, {}, + entries, {}); } } #endif diff --git a/src/ledger/test/LedgerTestUtils.cpp b/src/ledger/test/LedgerTestUtils.cpp index 6835e3445f..930b7334da 100644 --- a/src/ledger/test/LedgerTestUtils.cpp +++ b/src/ledger/test/LedgerTestUtils.cpp @@ -15,6 +15,7 @@ #include "util/types.h" #include 
"xdr/Stellar-contract.h" #include "xdr/Stellar-ledger-entries.h" +#include "xdr/Stellar-types.h" #include #include #include @@ -741,6 +742,29 @@ generateValidLedgerEntryWithTypes( } } +std::vector +generateValidUniqueLedgerKeysWithTypes( + std::unordered_set const& types, size_t n, + UnorderedSet& seenKeys) +{ + std::vector res; + res.reserve(n); + while (res.size() < n) + { + + auto entry = generateValidLedgerEntryWithTypes(types); + auto key = LedgerEntryKey(entry); + if (seenKeys.find(key) != seenKeys.end()) + { + continue; + } + + seenKeys.insert(key); + res.emplace_back(key); + } + return res; +} + std::vector generateValidUniqueLedgerEntriesWithTypes( std::unordered_set const& types, size_t n) diff --git a/src/ledger/test/LedgerTestUtils.h b/src/ledger/test/LedgerTestUtils.h index 27277b0ac3..ca85ea1d75 100644 --- a/src/ledger/test/LedgerTestUtils.h +++ b/src/ledger/test/LedgerTestUtils.h @@ -6,6 +6,8 @@ #include "history/HistoryManager.h" #include "overlay/StellarXDR.h" +#include "util/UnorderedSet.h" +#include "util/types.h" namespace stellar { @@ -45,6 +47,10 @@ std::vector generateValidUniqueLedgerEntries(size_t n); std::vector generateValidLedgerEntryKeysWithExclusions( std::unordered_set const& excludedTypes, size_t n); +std::vector generateValidUniqueLedgerKeysWithTypes( + std::unordered_set const& types, size_t n, + UnorderedSet& seenKeys); + std::vector generateUniqueValidSorobanLedgerEntryKeys(size_t n); std::vector generateValidUniqueLedgerEntryKeysWithExclusions( diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp index e98ad0acce..10200eea2d 100644 --- a/src/ledger/test/LedgerTxnTests.cpp +++ b/src/ledger/test/LedgerTxnTests.cpp @@ -2747,9 +2747,8 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]") .getLastClosedLedgerHeader() .header.ledgerVersion; lh.ledgerSeq = 2; - BucketTestUtils::addBatchAndUpdateSnapshot( - app->getBucketManager().getLiveBucketList(), *app, lh, {}, - ledgerVect, {}); + BucketTestUtils::addLiveBatchAndUpdateSnapshot(*app, lh, {}, + ledgerVect, {}); } ltx.commit(); @@ -2980,9 +2979,8 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]") .getLastClosedLedgerHeader() .header.ledgerVersion; lh.ledgerSeq = 2; - BucketTestUtils::addBatchAndUpdateSnapshot( - app->getBucketManager().getLiveBucketList(), *app, lh, {}, ledgerVect, - deadKeyVect); + BucketTestUtils::addLiveBatchAndUpdateSnapshot(*app, lh, {}, ledgerVect, + deadKeyVect); ltx.commit(); auto addTxn = [&](bool enoughQuota, std::vector entries, diff --git a/src/main/QueryServer.cpp b/src/main/QueryServer.cpp index 97657105a1..95f1d80a44 100644 --- a/src/main/QueryServer.cpp +++ b/src/main/QueryServer.cpp @@ -66,8 +66,8 @@ QueryServer::QueryServer(const std::string& address, unsigned short port, auto workerPids = mServer.start(); for (auto pid : workerPids) { - mBucketListSnapshots[pid] = - std::move(bucketSnapshotManager.copySearchableBucketListSnapshot()); + mBucketListSnapshots[pid] = std::move( + bucketSnapshotManager.copySearchableLiveBucketListSnapshot()); } } @@ -149,16 +149,17 @@ QueryServer::getLedgerEntryRaw(std::string const& params, { root["ledgerSeq"] = *snapshotLedger; - bool snapshotExists; - std::tie(loadedKeys, snapshotExists) = + auto loadedKeysOp = bl.loadKeysFromLedger(orderedKeys, *snapshotLedger); // Return 404 if ledgerSeq not found - if (!snapshotExists) + if (!loadedKeysOp) { retStr = "LedgerSeq not found"; return false; } + + loadedKeys = std::move(*loadedKeysOp); } // Otherwise default to current ledger 
else diff --git a/src/main/QueryServer.h b/src/main/QueryServer.h index f16a79c945..53ee434087 100644 --- a/src/main/QueryServer.h +++ b/src/main/QueryServer.h @@ -14,7 +14,7 @@ namespace stellar { -class SearchableBucketListSnapshot; +class SearchableLiveBucketListSnapshot; class BucketSnapshotManager; class QueryServer @@ -26,7 +26,7 @@ class QueryServer httpThreaded::server::server mServer; std::unordered_map> + std::shared_ptr> mBucketListSnapshots; bool safeRouter(HandlerRoute route, std::string const& params, diff --git a/src/overlay/test/FloodTests.cpp b/src/overlay/test/FloodTests.cpp index e224530931..e2f9b50ac1 100644 --- a/src/overlay/test/FloodTests.cpp +++ b/src/overlay/test/FloodTests.cpp @@ -73,9 +73,8 @@ TEST_CASE("Flooding", "[flood][overlay][acceptance]") auto const& header = n->getLedgerManager() .getLastClosedLedgerHeader() .header; - BucketTestUtils::addBatchAndUpdateSnapshot( - n->getBucketManager().getBucketList(), *n, header, {}, - {gen}, {}); + BucketTestUtils::addLiveBatchAndUpdateSnapshot( + *n, header, {}, {gen}, {}); } } } diff --git a/src/simulation/CoreTests.cpp b/src/simulation/CoreTests.cpp index b3304dcdfd..423b77e211 100644 --- a/src/simulation/CoreTests.cpp +++ b/src/simulation/CoreTests.cpp @@ -687,8 +687,7 @@ TEST_CASE("Bucket list entries vs write throughput", "[scalability][!hide]") lh.ledgerVersion = Config::CURRENT_LEDGER_PROTOCOL_VERSION; lh.ledgerSeq = i; BucketTestUtils::addLiveBatchAndUpdateSnapshot( - *app, lh, - LedgerTestUtils::generateValidLedgerEntries(100), + *app, lh, LedgerTestUtils::generateValidLedgerEntries(100), LedgerTestUtils::generateValidLedgerEntries(20), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index 89eecff723..3cf7cef02c 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -3,6 +3,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "TestUtils.h" +#include "bucket/BucketList.h" #include "overlay/test/LoopbackPeer.h" #include "simulation/LoadGenerator.h" #include "simulation/Simulation.h" @@ -111,16 +112,21 @@ computeMultiplier(LedgerEntry const& le) } } -BucketListDepthModifier::BucketListDepthModifier(uint32_t newDepth) - : mPrevDepth(LiveBucketList::kNumLevels) +template +BucketListDepthModifier::BucketListDepthModifier(uint32_t newDepth) + : mPrevDepth(BucketListBase::kNumLevels) { - LiveBucketList::kNumLevels = newDepth; + BucketListBase::kNumLevels = newDepth; } -BucketListDepthModifier::~BucketListDepthModifier() +template +BucketListDepthModifier::~BucketListDepthModifier() { - LiveBucketList::kNumLevels = mPrevDepth; + BucketListBase::kNumLevels = mPrevDepth; } + +template class BucketListDepthModifier; +template class BucketListDepthModifier; } TestInvariantManager::TestInvariantManager(medida::MetricsRegistry& registry) diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h index f134956125..e2360b10f8 100644 --- a/src/test/TestUtils.h +++ b/src/test/TestUtils.h @@ -29,8 +29,11 @@ std::vector getInvalidAssets(SecretKey const& issuer); int32_t computeMultiplier(LedgerEntry const& le); -class BucketListDepthModifier +template class BucketListDepthModifier { + static_assert(std::is_same_v || + std::is_same_v); + uint32_t const mPrevDepth; public: @@ -44,6 +47,14 @@ testBucketMetadata(uint32_t protocolVersion) { BucketMetadata meta; meta.ledgerVersion = protocolVersion; + if (protocolVersionStartsFrom( + protocolVersion, + 
Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::LIVE; + } + return meta; } } diff --git a/src/test/test.cpp b/src/test/test.cpp index 1227314e78..9fbb2e7ae4 100644 --- a/src/test/test.cpp +++ b/src/test/test.cpp @@ -516,6 +516,13 @@ for_versions_from(std::vector const& versions, Application& app, for_versions_from(versions.back() + 1, app, f); } +void +for_versions_from(uint32 from, Config const& cfg, + std::function const& f) +{ + for_versions(from, Config::CURRENT_LEDGER_PROTOCOL_VERSION, cfg, f); +} + void for_all_versions(Application& app, std::function const& f) { diff --git a/src/test/test.h b/src/test/test.h index ad41a1f5f1..75a98adbaa 100644 --- a/src/test/test.h +++ b/src/test/test.h @@ -55,6 +55,9 @@ void for_versions_from(uint32 from, Application& app, void for_versions_from(std::vector const& versions, Application& app, std::function const& f); +void for_versions_from(uint32 from, Config const& cfg, + std::function const& f); + void for_all_versions(Application& app, std::function const& f); void for_all_versions(Config const& cfg, diff --git a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json index c3893b7f4d..dce4c10068 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "6ee5024b08095c9d7e2df55efca112c670a09c433bb21614c360d6be556ae57f", + "hash": "b38e640c116c5c20e6f29bb3263c3958838612894976459bcc3919f20a810f17", "header": { "ledgerVersion": 23, - "previousLedgerHash": "40667fafcadeb2372487f935ce89bfc018f9e60e68bbe01e6de6ad4e607c9645", + "previousLedgerHash": "5f811c592f6e64c3c1887ac5e87a800705e24973f2a421e73b195a9b536ce16e", "scpValue": { - "txSetHash": "22533247312c90534ebcd9691c93d9e61dc443dd15c522cc913d1d939af90fd1", + "txSetHash": "52700cda108028294a84b828eb308a308bfa92e9c22ddd7b232e1528377182de", "closeTime": 1451692800, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "c9437f7d1bb05bd94f8fa3fc5b1d1d7f6cd59a36bbc26b315fda47b87d62fc37464053aac9ed547dd834c3b4f1707bdfe000c932ae151118e7a1fc1dfe839b0e" + "signature": "3f162636d634ea16f9c120aa7868a0dbfd5ec9159af00a2f86f086009f280b70ee53ace9710f4e72ed96b79c2431620eb4823989e1ae4d54f1e1c675f2490903" } } }, - "txSetResultHash": "65b6fe91abfe43ed98fa2163f08fdf3f2f3231101bba05102521186c25a1cc4b", - "bucketListHash": "42be10fa8d2ad869f309db62c432d94231a89533f7671be4b759f24aa0ee2234", + "txSetResultHash": "665c93b57fa49e038bb55b65055f7e3e11474e925aa7a5ad22aa77d07151eace", + "bucketListHash": "1cc6467cfd1511d8dc3a5e83d5fee466693801eea4f992b779e3ec6df7a5698f", "ledgerSeq": 28, "totalCoins": 1000000000000000000, "feePool": 804520, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "40667fafcadeb2372487f935ce89bfc018f9e60e68bbe01e6de6ad4e607c9645", + "previousLedgerHash": "5f811c592f6e64c3c1887ac5e87a800705e24973f2a421e73b195a9b536ce16e", "phases": [ { "v": 0, @@ -503,9 +503,9 @@ "txProcessing": [ { "result": { - "transactionHash": "62d28c373389d447341e9d75bc84e2c91437169a2a70d3606c8b3aa7d198ef5c", + "transactionHash": "bb0a6b13caea6b015555dfd332aca1099e8654896bf7d1bcce8432e833a2572a", "result": { - "feeCharged": 42954, + "feeCharged": 61612, "result": { "code": "txFAILED", "results": [ @@ -514,7 +514,7 @@ "tr": { "type": 
"INVOKE_HOST_FUNCTION", "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED" + "code": "INVOKE_HOST_FUNCTION_TRAPPED" } } } @@ -529,13 +529,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 12, + "lastModifiedLedgerSeq": 11, "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", "balance": 400000000, - "seqNum": 51539607552, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -559,9 +559,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607552, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -593,9 +593,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607552, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -619,9 +619,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607553, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -672,9 +672,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607553, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -722,9 +722,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 399957046, - "seqNum": 51539607553, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399938388, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -782,9 +782,9 @@ }, { "result": { - "transactionHash": "bb0a6b13caea6b015555dfd332aca1099e8654896bf7d1bcce8432e833a2572a", + "transactionHash": "62d28c373389d447341e9d75bc84e2c91437169a2a70d3606c8b3aa7d198ef5c", "result": { - "feeCharged": 61612, + "feeCharged": 42954, "result": { "code": "txFAILED", "results": [ @@ -793,7 +793,7 @@ "tr": { "type": "INVOKE_HOST_FUNCTION", "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_TRAPPED" + "code": "INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED" } } } @@ -808,13 +808,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 11, + "lastModifiedLedgerSeq": 12, "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", "balance": 400000000, - "seqNum": 47244640256, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -838,9 +838,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - 
"seqNum": 47244640256, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -872,9 +872,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640256, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -898,9 +898,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640257, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -951,9 +951,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640257, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1001,9 +1001,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399938388, - "seqNum": 47244640257, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 399957046, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1061,19 +1061,18 @@ }, { "result": { - "transactionHash": "e310227a8c0d8d1f78632e65ebca281cd60d8619c9afc64491bcce98e7cd7ee3", + "transactionHash": "ee68d27257fa137933de22b3fdfbc4a736ec01af29a9e25e5b807252b1a1ca0a", "result": { - "feeCharged": 106775, + "feeCharged": 51547, "result": { "code": "txSUCCESS", "results": [ { "code": "opINNER", "tr": { - "type": "INVOKE_HOST_FUNCTION", - "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_SUCCESS", - "success": "cbbc48750debb8535093b3deaf88ac7f4cff87425576a58de2bac754acdb4616" + "type": "RESTORE_FOOTPRINT", + "restoreFootprintResult": { + "code": "RESTORE_FOOTPRINT_SUCCESS" } } } @@ -1088,13 +1087,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 10, + "lastModifiedLedgerSeq": 8, "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", "balance": 400000000, - "seqNum": 42949672960, + "seqNum": 34359738368, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1118,9 +1117,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672960, + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "balance": 398999900, + "seqNum": 34359738368, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1152,9 +1151,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672960, + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "balance": 398999900, + "seqNum": 34359738368, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1178,9 +1177,9 @@ "data": { "type": "ACCOUNT", "account": { 
- "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672961, + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "balance": 398999900, + "seqNum": 34359738369, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1226,25 +1225,14 @@ { "changes": [ { - "type": "LEDGER_ENTRY_CREATED", - "created": { - "lastModifiedLedgerSeq": 28, + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 7, "data": { - "type": "CONTRACT_DATA", - "contractData": { - "ext": { - "v": 0 - }, - "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", - "key": { - "type": "SCV_SYMBOL", - "sym": "key" - }, - "durability": "PERSISTENT", - "val": { - "type": "SCV_U64", - "u64": 42 - } + "type": "TTL", + "ttl": { + "keyHash": "4791962cd1e2c7b8f8af3f96514f9777f0156a48261fb885a571a7f69b33a058", + "liveUntilLedgerSeq": 26 } }, "ext": { @@ -1253,13 +1241,13 @@ } }, { - "type": "LEDGER_ENTRY_CREATED", - "created": { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { "lastModifiedLedgerSeq": 28, "data": { "type": "TTL", "ttl": { - "keyHash": "764f4e59e20ac1a357f9f26ab0eaf46d196ab74822db44f039353a6f114864aa", + "keyHash": "4791962cd1e2c7b8f8af3f96514f9777f0156a48261fb885a571a7f69b33a058", "liveUntilLedgerSeq": 47 } }, @@ -1279,9 +1267,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672961, + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "balance": 398999900, + "seqNum": 34359738369, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1329,9 +1317,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399893225, - "seqNum": 42949672961, + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "balance": 399948453, + "seqNum": 34359738369, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1379,7 +1367,8 @@ }, "events": [], "returnValue": { - "type": "SCV_VOID" + "type": "SCV_BOOL", + "b": "FALSE" }, "diagnosticEvents": [] } @@ -1388,18 +1377,19 @@ }, { "result": { - "transactionHash": "364ec41dce0a678476ea3ebfc5caa28165ef3bf0976071d858b1c4044f187d25", + "transactionHash": "e310227a8c0d8d1f78632e65ebca281cd60d8619c9afc64491bcce98e7cd7ee3", "result": { - "feeCharged": 60559, + "feeCharged": 106775, "result": { "code": "txSUCCESS", "results": [ { "code": "opINNER", "tr": { - "type": "EXTEND_FOOTPRINT_TTL", - "extendFootprintTTLResult": { - "code": "EXTEND_FOOTPRINT_TTL_SUCCESS" + "type": "INVOKE_HOST_FUNCTION", + "invokeHostFunctionResult": { + "code": "INVOKE_HOST_FUNCTION_SUCCESS", + "success": "cbbc48750debb8535093b3deaf88ac7f4cff87425576a58de2bac754acdb4616" } } } @@ -1414,13 +1404,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 9, + "lastModifiedLedgerSeq": 10, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", "balance": 400000000, - "seqNum": 38654705664, + "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1444,9 +1434,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705664, + "accountID": 
"GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1478,9 +1468,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705664, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1504,9 +1494,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705665, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1552,46 +1542,25 @@ { "changes": [ { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 6, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", - "liveUntilLedgerSeq": 10006 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { + "type": "LEDGER_ENTRY_CREATED", + "created": { "lastModifiedLedgerSeq": 28, "data": { - "type": "TTL", - "ttl": { - "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", - "liveUntilLedgerSeq": 10028 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 6, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", - "liveUntilLedgerSeq": 10006 + "type": "CONTRACT_DATA", + "contractData": { + "ext": { + "v": 0 + }, + "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", + "key": { + "type": "SCV_SYMBOL", + "sym": "key" + }, + "durability": "PERSISTENT", + "val": { + "type": "SCV_U64", + "u64": 42 + } } }, "ext": { @@ -1600,14 +1569,14 @@ } }, { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { + "type": "LEDGER_ENTRY_CREATED", + "created": { "lastModifiedLedgerSeq": 28, "data": { "type": "TTL", "ttl": { - "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", - "liveUntilLedgerSeq": 10028 + "keyHash": "764f4e59e20ac1a357f9f26ab0eaf46d196ab74822db44f039353a6f114864aa", + "liveUntilLedgerSeq": 47 } }, "ext": { @@ -1626,9 +1595,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705665, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1676,9 +1645,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399939441, - "seqNum": 38654705665, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399893225, + "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1726,8 +1695,7 @@ }, "events": [], "returnValue": { - "type": "SCV_BOOL", - "b": "FALSE" + "type": "SCV_VOID" }, "diagnosticEvents": [] } @@ -1736,18 +1704,18 @@ }, { "result": { - "transactionHash": "ee68d27257fa137933de22b3fdfbc4a736ec01af29a9e25e5b807252b1a1ca0a", + "transactionHash": 
"364ec41dce0a678476ea3ebfc5caa28165ef3bf0976071d858b1c4044f187d25", "result": { - "feeCharged": 51547, + "feeCharged": 60559, "result": { "code": "txSUCCESS", "results": [ { "code": "opINNER", "tr": { - "type": "RESTORE_FOOTPRINT", - "restoreFootprintResult": { - "code": "RESTORE_FOOTPRINT_SUCCESS" + "type": "EXTEND_FOOTPRINT_TTL", + "extendFootprintTTLResult": { + "code": "EXTEND_FOOTPRINT_TTL_SUCCESS" } } } @@ -1762,13 +1730,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 8, + "lastModifiedLedgerSeq": 9, "data": { "type": "ACCOUNT", "account": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 400000000, - "seqNum": 34359738368, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1792,9 +1760,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 398999900, - "seqNum": 34359738368, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1826,9 +1794,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 398999900, - "seqNum": 34359738368, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1852,9 +1820,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 398999900, - "seqNum": 34359738369, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1902,12 +1870,12 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 7, + "lastModifiedLedgerSeq": 6, "data": { "type": "TTL", "ttl": { - "keyHash": "4791962cd1e2c7b8f8af3f96514f9777f0156a48261fb885a571a7f69b33a058", - "liveUntilLedgerSeq": 26 + "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", + "liveUntilLedgerSeq": 10006 } }, "ext": { @@ -1922,8 +1890,40 @@ "data": { "type": "TTL", "ttl": { - "keyHash": "4791962cd1e2c7b8f8af3f96514f9777f0156a48261fb885a571a7f69b33a058", - "liveUntilLedgerSeq": 47 + "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", + "liveUntilLedgerSeq": 10028 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 6, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", + "liveUntilLedgerSeq": 10006 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", + "liveUntilLedgerSeq": 10028 } }, "ext": { @@ -1942,9 +1942,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 398999900, - "seqNum": 34359738369, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1992,9 +1992,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": 
"GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 399948453, - "seqNum": 34359738369, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399939441, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, diff --git a/src/testdata/ledger-close-meta-v1-protocol-23.json b/src/testdata/ledger-close-meta-v1-protocol-23.json index 8ff9a283c1..164bfe461b 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "955521810ce73e205a011f6f7313ff47da2d9ff3779ade702ded8654d6382211", + "hash": "df0caa7841395c80b0f7b3dec4f13e5058a406fd0cb0959375c6830ea311e32e", "header": { "ledgerVersion": 23, - "previousLedgerHash": "7a215a87c49cfb4d83abd92241b2059cb2270f2e7d93bd672fa28f700ef7dc20", + "previousLedgerHash": "238186da4a6e457877adec84246cbb50dd054cc81cd913ef97cffb492ff6ac74", "scpValue": { - "txSetHash": "6319480f16c93c4a9b31b0b4dc3646275418630d507a6ec6a2a0b2527a0c88ba", + "txSetHash": "6755cddd3f4b967d42930b3eb84bbd991ccf2f0ddec05fc85f24e77dcd6746d7", "closeTime": 0, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "0372a515f08fb718defc47f5bfb1236dc4cbff9bccdab5514ef71b3bbbe51c49c1bf47abfc166c5782bd42ea20b01dc2614fde91b8fb887b5d4fc43e1dd8c408" + "signature": "2d46696da63265a28b3055ccc9cdb2aa32d1008f4732f130dcda8842235a2419ac5432290892243f636563e42fa9b3829f31b8daef8b1e1142de52a770c3130f" } } }, "txSetResultHash": "249b974bacf8b5c4a8f0b5598194c1b9eca64af0b5c1506daa871c1533b6baac", - "bucketListHash": "486d15e891ed49429510506d016999b3a8b74349c3104000bc0a102e7d34ed81", + "bucketListHash": "5ba9bbd81fb831cf30cf89b221629d376e563373bc6b56e1c44e82adca5e427f", "ledgerSeq": 7, "totalCoins": 1000000000000000000, "feePool": 800, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "7a215a87c49cfb4d83abd92241b2059cb2270f2e7d93bd672fa28f700ef7dc20", + "previousLedgerHash": "238186da4a6e457877adec84246cbb50dd054cc81cd913ef97cffb492ff6ac74", "phases": [ { "v": 0, @@ -981,7 +981,7 @@ ], "upgradesProcessing": [], "scpInfo": [], - "totalByteSizeOfBucketList": 1021, + "totalByteSizeOfBucketList": 1023, "evictedTemporaryLedgerKeys": [], "evictedPersistentLedgerEntries": [] } diff --git a/src/util/ProtocolVersion.h b/src/util/ProtocolVersion.h index b908b8f4a9..32341840c9 100644 --- a/src/util/ProtocolVersion.h +++ b/src/util/ProtocolVersion.h @@ -34,7 +34,8 @@ enum class ProtocolVersion : uint32_t V_19, V_20, V_21, - V_22 + V_22, + V_23 }; // Checks whether provided protocolVersion is before (i.e. 
strictly lower than) diff --git a/src/util/types.h b/src/util/types.h index 6ead1c844c..e646a5c35a 100644 --- a/src/util/types.h +++ b/src/util/types.h @@ -133,12 +133,12 @@ getBucketLedgerKey(HotArchiveBucketEntry const& be) { switch (be.type()) { - case HA_RESTORED: - case HA_DELETED: + case HOT_ARCHIVE_LIVE: + case HOT_ARCHIVE_DELETED: return be.key(); - case HA_ARCHIVED: + case HOT_ARCHIVE_ARCHIVED: return LedgerEntryKey(be.archivedEntry()); - case HA_METAENTRY: + case HOT_ARCHIVE_METAENTRY: default: throw std::invalid_argument("Tried to get key for METAENTRY"); } From 0218ebedf97369e0f0789f89594efd85c0eaf675 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Fri, 1 Nov 2024 16:39:46 -0700 Subject: [PATCH 4/5] Manage bucket types separately in BucketManager --- src/bucket/Bucket.h | 14 +- src/bucket/BucketIndex.h | 2 +- src/bucket/BucketIndexImpl.cpp | 25 +- src/bucket/BucketInputIterator.cpp | 51 ++-- src/bucket/BucketInputIterator.h | 13 +- src/bucket/BucketList.cpp | 156 +++------- src/bucket/BucketList.h | 29 +- src/bucket/BucketListSnapshot.h | 15 +- src/bucket/BucketManager.h | 58 ++-- src/bucket/BucketManagerImpl.cpp | 372 +++++++++++++++--------- src/bucket/BucketManagerImpl.h | 74 +++-- src/bucket/BucketOutputIterator.cpp | 27 +- src/bucket/BucketOutputIterator.h | 11 +- src/bucket/BucketSnapshot.cpp | 33 +-- src/bucket/BucketSnapshot.h | 19 +- src/bucket/BucketSnapshotManager.cpp | 92 +++--- src/bucket/BucketSnapshotManager.h | 49 ++-- src/bucket/BucketUtils.h | 14 + src/bucket/FutureBucket.cpp | 60 +--- src/bucket/FutureBucket.h | 4 +- src/bucket/LedgerCmp.h | 14 +- src/bucket/test/BucketIndexTests.cpp | 6 +- src/bucket/test/BucketTestUtils.h | 3 +- src/catchup/ApplyBucketsWork.cpp | 8 +- src/catchup/AssumeStateWork.cpp | 12 +- src/catchup/IndexBucketsWork.cpp | 3 +- src/history/HistoryArchive.cpp | 8 +- src/history/StateSnapshot.cpp | 3 +- src/history/test/HistoryTestsUtils.cpp | 6 +- src/historywork/DownloadBucketsWork.cpp | 4 +- src/main/ApplicationUtils.cpp | 6 +- src/main/test/ApplicationUtilsTests.cpp | 3 +- src/test/TestUtils.h | 3 +- 33 files changed, 584 insertions(+), 613 deletions(-) create mode 100644 src/bucket/BucketUtils.h diff --git a/src/bucket/Bucket.h b/src/bucket/Bucket.h index 0f65acffa6..48de2cf497 100644 --- a/src/bucket/Bucket.h +++ b/src/bucket/Bucket.h @@ -5,10 +5,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketIndex.h" -#include "bucket/BucketSnapshot.h" #include "util/NonCopyable.h" #include "util/ProtocolVersion.h" -#include "xdr/Stellar-ledger.h" #include #include #include @@ -150,6 +148,12 @@ class LiveBucket : public Bucket, public std::enable_shared_from_this { public: + // Entry type that this bucket stores + using EntryT = BucketEntry; + + // Entry type returned by loadKeys + using LoadT = LedgerEntry; + LiveBucket(); virtual ~LiveBucket() { @@ -238,6 +242,12 @@ class HotArchiveBucket : public Bucket, std::vector const& deletedEntries); public: + // Entry type that this bucket stores + using EntryT = HotArchiveBucketEntry; + + // Entry type returned by loadKeys + using LoadT = HotArchiveBucketEntry; + HotArchiveBucket(); virtual ~HotArchiveBucket() { diff --git a/src/bucket/BucketIndex.h b/src/bucket/BucketIndex.h index f4b343c839..ee15031fc1 100644 --- a/src/bucket/BucketIndex.h +++ b/src/bucket/BucketIndex.h @@ -89,7 +89,7 @@ class BucketIndex : public NonMovableOrCopyable // the largest buckets) and should only be called once.
If pageSize == 0 or // if file size is less than the cutoff, individual key index is used. // Otherwise range index is used, with the range defined by pageSize. - template + template static std::unique_ptr createIndex(BucketManager& bm, std::filesystem::path const& filename, Hash const& hash, asio::io_context& ctx); diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp index 1677670e70..2c032f00a2 100644 --- a/src/bucket/BucketIndexImpl.cpp +++ b/src/bucket/BucketIndexImpl.cpp @@ -5,6 +5,7 @@ #include "bucket/BucketIndexImpl.h" #include "bucket/Bucket.h" #include "bucket/BucketManager.h" +#include "bucket/BucketUtils.h" #include "crypto/Hex.h" #include "crypto/ShortHash.h" #include "ledger/LedgerTypeUtils.h" @@ -135,7 +136,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, } auto isMeta = [](auto const& be) { - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { return be.type() == METAENTRY; } @@ -207,7 +208,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, mData.keysToOffset.emplace_back(key, pos); } - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { countEntry(be); } @@ -355,14 +356,13 @@ upper_bound_pred(LedgerKey const& key, IndexEntryT const& indexEntry) } } -template +template std::unique_ptr BucketIndex::createIndex(BucketManager& bm, std::filesystem::path const& filename, Hash const& hash, asio::io_context& ctx) { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; auto const& cfg = bm.getConfig(); @@ -379,8 +379,8 @@ BucketIndex::createIndex(BucketManager& bm, "bucket {}", filename); return std::unique_ptr const>( - new BucketIndexImpl(bm, filename, 0, hash, ctx, - BucketEntryT{})); + new BucketIndexImpl( + bm, filename, 0, hash, ctx, typename BucketT::EntryT{})); } else { @@ -391,7 +391,8 @@ BucketIndex::createIndex(BucketManager& bm, pageSize, filename); return std::unique_ptr const>( new BucketIndexImpl(bm, filename, pageSize, hash, - ctx, BucketEntryT{})); + ctx, + typename BucketT::EntryT{})); } } // BucketIndexImpl throws if BucketManager shuts down before index finishes, @@ -640,10 +641,10 @@ BucketIndexImpl::getBucketEntryCounters() const } template std::unique_ptr -BucketIndex::createIndex(BucketManager& bm, - std::filesystem::path const& filename, - Hash const& hash); +BucketIndex::createIndex(BucketManager& bm, + std::filesystem::path const& filename, + Hash const& hash); template std::unique_ptr -BucketIndex::createIndex( +BucketIndex::createIndex( BucketManager& bm, std::filesystem::path const& filename, Hash const& hash); } diff --git a/src/bucket/BucketInputIterator.cpp b/src/bucket/BucketInputIterator.cpp index da3b4a97eb..8716bdee52 100644 --- a/src/bucket/BucketInputIterator.cpp +++ b/src/bucket/BucketInputIterator.cpp @@ -14,16 +14,16 @@ namespace stellar * Helper class that reads from the file underlying a bucket, keeping the bucket * alive for the duration of its existence. 
*/ -template +template void -BucketInputIterator::loadEntry() +BucketInputIterator::loadEntry() { ZoneScoped; if (mIn.readOne(mEntry)) { mEntryPtr = &mEntry; bool isMeta; - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { isMeta = mEntry.type() == METAENTRY; } @@ -48,7 +48,7 @@ BucketInputIterator::loadEntry() } mMetadata = mEntry.metaEntry(); - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { if (mMetadata.ext.v() != 1 || mMetadata.ext.bucketListType() != HOT_ARCHIVE) @@ -67,7 +67,7 @@ BucketInputIterator::loadEntry() mSeenOtherEntries = true; if (mSeenMetadata) { - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { LiveBucket::checkProtocolLegality(mEntry, mMetadata.ledgerVersion); @@ -81,48 +81,49 @@ BucketInputIterator::loadEntry() } } -template +template std::streamoff -BucketInputIterator::pos() +BucketInputIterator::pos() { return mIn.pos(); } -template +template size_t -BucketInputIterator::size() const +BucketInputIterator::size() const { return mIn.size(); } -template BucketInputIterator::operator bool() const +template BucketInputIterator::operator bool() const { return mEntryPtr != nullptr; } -template -typename BucketInputIterator::BucketEntryT const& -BucketInputIterator::operator*() +template +typename BucketT::EntryT const& +BucketInputIterator::operator*() { return *mEntryPtr; } -template +template bool -BucketInputIterator::seenMetadata() const +BucketInputIterator::seenMetadata() const { return mSeenMetadata; } -template +template BucketMetadata const& -BucketInputIterator::getMetadata() const +BucketInputIterator::getMetadata() const { return mMetadata; } -template -BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) +template +BucketInputIterator::BucketInputIterator( + std::shared_ptr bucket) : mBucket(bucket), mEntryPtr(nullptr), mSeenMetadata(false) { // In absence of metadata, we treat every bucket as though it is from ledger @@ -141,14 +142,14 @@ BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) } } -template BucketInputIterator::~BucketInputIterator() +template BucketInputIterator::~BucketInputIterator() { mIn.close(); } -template -BucketInputIterator& -BucketInputIterator::operator++() +template +BucketInputIterator& +BucketInputIterator::operator++() { if (mIn) { @@ -161,9 +162,9 @@ BucketInputIterator::operator++() return *this; } -template +template void -BucketInputIterator::seek(std::streamoff offset) +BucketInputIterator::seek(std::streamoff offset) { mIn.seek(offset); loadEntry(); diff --git a/src/bucket/BucketInputIterator.h b/src/bucket/BucketInputIterator.h index ffccf33cd0..4f13d76f36 100644 --- a/src/bucket/BucketInputIterator.h +++ b/src/bucket/BucketInputIterator.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/BucketUtils.h" #include "util/XDRStream.h" #include "xdr/Stellar-ledger.h" @@ -20,20 +21,16 @@ class HotArchiveBucket; // Helper class that reads through the entries in a bucket. template class BucketInputIterator { - static_assert(std::is_same_v || - std::is_same_v); - - using BucketEntryT = std::conditional_t, - BucketEntry, HotArchiveBucketEntry>; + BUCKET_TYPE_ASSERT(BucketT); std::shared_ptr mBucket; // Validity and current-value of the iterator is funneled into a // pointer. If // non-null, it points to mEntry. 
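BUCKET_TYPE_ASSERT, pulled in from the new bucket/BucketUtils.h, replaces the static_assert pair that each of these templates previously repeated. The header's body falls outside these hunks, so the macro below is only a plausible reconstruction, inferred from the asserts it replaces:

#include <type_traits>

// Plausible form of BUCKET_TYPE_ASSERT (assumption: the real
// definition lives in bucket/BucketUtils.h, created by this patch but
// not shown in these hunks). It rejects any instantiation whose
// BucketT is not one of the two concrete bucket classes.
#define BUCKET_TYPE_ASSERT(BucketT)                              \
    static_assert(std::is_same_v<BucketT, LiveBucket> ||         \
                      std::is_same_v<BucketT, HotArchiveBucket>, \
                  "BucketT must be LiveBucket or HotArchiveBucket")

Centralizing the constraint keeps the diagnostic uniform across BucketInputIterator, BucketLevel, BucketListBase, and the other templates that assert it.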
- BucketEntryT const* mEntryPtr{nullptr}; + typename BucketT::EntryT const* mEntryPtr{nullptr}; XDRInputFileStream mIn; - BucketEntryT mEntry; + typename BucketT::EntryT mEntry; bool mSeenMetadata{false}; bool mSeenOtherEntries{false}; BucketMetadata mMetadata; @@ -52,7 +49,7 @@ template class BucketInputIterator bool seenMetadata() const; BucketMetadata const& getMetadata() const; - BucketEntryT const& operator*(); + typename BucketT::EntryT const& operator*(); BucketInputIterator(std::shared_ptr bucket); diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketList.cpp index 59358d1bb6..357912f28f 100644 --- a/src/bucket/BucketList.cpp +++ b/src/bucket/BucketList.cpp @@ -196,26 +196,14 @@ BucketLevel::prepare( ? std::make_shared() : mCurr; - if constexpr (std::is_same_v) - { - auto shadowsBasedOnProtocol = - protocolVersionStartsFrom( - snap->getBucketVersion(), - LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - ? std::vector>() - : shadows; - mNextCurr = - FutureBucket(app, curr, snap, shadowsBasedOnProtocol, - currLedgerProtocol, countMergeEvents, mLevel); - } - else - { - // HotArchive only exists for protocol > 21, should never have shadows - mNextCurr = - FutureBucket(app, curr, snap, /*shadows=*/{}, - currLedgerProtocol, countMergeEvents, mLevel); - } - + auto shadowsBasedOnProtocol = + protocolVersionStartsFrom(snap->getBucketVersion(), + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) + ? std::vector>() + : shadows; + mNextCurr = + FutureBucket(app, curr, snap, shadowsBasedOnProtocol, + currLedgerProtocol, countMergeEvents, mLevel); releaseAssert(mNextCurr.isMerging()); } @@ -581,93 +569,17 @@ BucketListBase::getSize() const return sum; } +template +template void -HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& archiveEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries) -{ - ZoneScoped; - releaseAssert(currLedger > 0); - - for (uint32_t i = static_cast(mLevels.size()) - 1; i != 0; --i) - { - if (levelShouldSpill(currLedger, i - 1)) - { - /** - * At every ledger, level[0] prepares the new batch and commits - * it. - * - * At ledger multiples of 2, level[0] snaps, level[1] commits - * existing (promotes next to curr) and "prepares" by starting a - * merge of that new level[1] curr with the new level[0] snap. This - * is "level 0 spilling". - * - * At ledger multiples of 8, level[1] snaps, level[2] commits - * existing (promotes next to curr) and "prepares" by starting a - * merge of that new level[2] curr with the new level[1] snap. This - * is "level 1 spilling". - * - * At ledger multiples of 32, level[2] snaps, level[3] commits - * existing (promotes next to curr) and "prepares" by starting a - * merge of that new level[3] curr with the new level[2] snap. This - * is "level 2 spilling". - * - * All these have to be done in _reverse_ order (counting down - * levels) because we want a 'curr' to be pulled out of the way into - * a 'snap' the moment it's half-a-level full, not have anything - * else spilled/added to it. - */ - - auto snap = mLevels[i - 1].snap(); - mLevels[i].commit(); - mLevels[i].prepare(app, currLedger, currLedgerProtocol, snap, - /*shadows=*/{}, - /*countMergeEvents=*/true); - } - } - - // In some testing scenarios, we want to inhibit counting level 0 merges - // because they are not repeated when restarting merges on app startup, - // and we are checking for an expected number of merge events on restart. 
- bool countMergeEvents = - !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; - bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; - mLevels[0].prepare( - app, currLedger, currLedgerProtocol, - HotArchiveBucket::fresh(app.getBucketManager(), currLedgerProtocol, - archiveEntries, restoredEntries, deletedEntries, - countMergeEvents, app.getClock().getIOContext(), - doFsync), - /*shadows=*/{}, countMergeEvents); - mLevels[0].commit(); - - // We almost always want to try to resolve completed merges to single - // buckets, as it makes restarts less fragile: fewer saved/restored shadows, - // fewer buckets for the user to accidentally delete from their buckets - // dir. Also makes publication less likely to redo a merge that was already - // complete (but not resolved) when the snapshot gets taken. - // - // But we support the option of not-doing so, only for the sake of - // testing. Note: this is nonblocking in any case. - if (!app.getConfig().ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING) - { - resolveAnyReadyFutures(); - } -} - -void -LiveBucketList::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +BucketListBase::addBatchInternal(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + VectorT const&... inputVectors) { ZoneScoped; releaseAssert(currLedger > 0); - std::vector> shadows; + std::vector> shadows; for (auto& level : mLevels) { shadows.push_back(level.getCurr()); @@ -757,13 +669,12 @@ LiveBucketList::addBatch(Application& app, uint32_t currLedger, !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; releaseAssert(shadows.size() == 0); - mLevels[0].prepare( - app, currLedger, currLedgerProtocol, - LiveBucket::fresh(app.getBucketManager(), currLedgerProtocol, - initEntries, liveEntries, deadEntries, - countMergeEvents, app.getClock().getIOContext(), - doFsync), - shadows, countMergeEvents); + mLevels[0].prepare(app, currLedger, currLedgerProtocol, + BucketT::fresh(app.getBucketManager(), + currLedgerProtocol, inputVectors..., + countMergeEvents, + app.getClock().getIOContext(), doFsync), + shadows, countMergeEvents); mLevels[0].commit(); // We almost always want to try to resolve completed merges to single @@ -780,6 +691,33 @@ LiveBucketList::addBatch(Application& app, uint32_t currLedger, } } +void +HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + ZoneScoped; + releaseAssertOrThrow(protocolVersionStartsFrom( + currLedgerProtocol, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + addBatchInternal(app, currLedger, currLedgerProtocol, archiveEntries, + restoredEntries, deletedEntries); +} + +void +LiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) +{ + ZoneScoped; + addBatchInternal(app, currLedger, currLedgerProtocol, initEntries, + liveEntries, deadEntries); +} + BucketEntryCounters LiveBucketList::sumBucketEntryCounters() const { diff --git a/src/bucket/BucketList.h b/src/bucket/BucketList.h index 5ffcf5d9a8..cb7d90980b 100644 --- a/src/bucket/BucketList.h +++ b/src/bucket/BucketList.h @@ -5,6 +5,7 @@ // of this distribution or at 
http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/Bucket.h" +#include "bucket/BucketUtils.h" #include "bucket/FutureBucket.h" namespace medida { @@ -357,8 +358,7 @@ template class BucketListDepthModifier; template class BucketLevel { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); uint32_t mLevel; FutureBucket mNextCurr; @@ -402,19 +402,36 @@ class BucketListDepth }; // While every BucketList shares the same high level structure wrt to spill -// schedules, merges at the bucket level, etc, each BucketList type hold +// schedules, merges at the bucket level, etc, each BucketList type holds // different types of entries and has different merge logic at the individual -// entry level. This pure virtual base class defines the shared structure of all +// entry level. This abstract base class defines the shared structure of all // BucketLists. It must be extended for each specific BucketList type, where the // template parameter BucketT refers to the underlying Bucket type. template class BucketListBase { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); protected: std::vector> mLevels; + // Add a batch of entries to the + // bucketlist, representing the entries affected by closing + // `currLedger`. The bucketlist will incorporate these into the smallest + // (0th) level, as well as commit or prepare merges for any levels that + // should have spilled due to passing through `currLedger`. The `currLedger` + // and `currLedgerProtocol` values should be taken from the ledger at which + // this batch is being added. `inputVectors` should contain a vector of + // entries to insert for each corresponding BucketEntry type, i.e. + // initEntry, liveEntry, and deadEntry for the LiveBucketList. These must be + // the same input vector types that the corresponding BucketT::fresh + // function takes. + // This is an internal function; derived classes should define a + // public addBatch function with explicit input vector types. + template + void addBatchInternal(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + VectorT const&...
inputVectors); + public: // Trivial pure virtual destructor to make this an abstract class virtual ~BucketListBase() = 0; diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshot.h index c4cd98450f..e767ce002f 100644 --- a/src/bucket/BucketListSnapshot.h +++ b/src/bucket/BucketListSnapshot.h @@ -9,6 +9,7 @@ #include "bucket/BucketManagerImpl.h" #include "bucket/BucketSnapshot.h" #include "bucket/BucketSnapshotManager.h" +#include "bucket/BucketUtils.h" namespace medida { @@ -20,8 +21,7 @@ namespace stellar template struct BucketLevelSnapshot { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); using BucketSnapshotT = std::conditional_t, @@ -35,8 +35,7 @@ template struct BucketLevelSnapshot template class BucketListSnapshot : public NonMovable { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); using BucketSnapshotT = std::conditional_t, LiveBucketSnapshot, HotArchiveBucketSnapshot>; @@ -75,8 +74,7 @@ template class BucketListSnapshot : public NonMovable template class SearchableBucketListSnapshotBase : public NonMovableOrCopyable { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); using BucketSnapshotT = std::conditional_t, @@ -88,9 +86,8 @@ class SearchableBucketListSnapshotBase : public NonMovableOrCopyable BucketSnapshotManager const& mSnapshotManager; // Snapshot managed by SnapshotManager - std::unique_ptr const> mSnapshot{}; - std::map const>> - mHistoricalSnapshots; + SnapshotPtrT mSnapshot{}; + std::map> mHistoricalSnapshots; // Loops through all buckets, starting with curr at level 0, then snap at // level 0, etc. Calls f on each bucket. Exits early if function diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 88e6280a46..039610ed77 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -30,6 +30,7 @@ class LiveBucketList; class HotArchiveBucketList; class BucketSnapshotManager; class Config; +class SearchableLiveBucketListSnapshot; class TmpDirManager; struct HistoryArchiveState; struct InflationWinner; @@ -216,14 +217,13 @@ class BucketManager : NonMovableOrCopyable // This method is mostly-threadsafe -- assuming you don't destruct the // BucketManager mid-call -- and is intended to be called from both main and // worker threads. Very carefully. - virtual std::shared_ptr - adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index) = 0; - virtual std::shared_ptr - adoptFileAsHotArchiveBucket(std::string const& filename, - uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index) = 0; + // Unfortunately, virtual methods cannot be templated, so we use this weird + // static interface to allow for a templated return type. + template + static std::shared_ptr + adoptFileAsBucket(BucketManager& bm, std::string const& filename, + uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index); // Companion method to `adoptFileAsLiveBucket` also called from the // `BucketOutputIterator::getBucket` merge-completion path. This method @@ -231,39 +231,49 @@ class BucketManager : NonMovableOrCopyable // doesn't correspond to a file on disk; the method forgets about the // `FutureBucket` associated with the in-progress merge, allowing the merge // inputs to be GC'ed. 
- virtual void noteEmptyMergeOutput(MergeKey const& mergeKey) = 0; + // Unfortunately, virtual methods cannot be templated, so we use this weird + // static interface to allow for a templated return type. + template + static void noteEmptyMergeOutput(BucketManager& bm, + MergeKey const& mergeKey); // Returns a bucket by hash if it exists and is currently managed by the // bucket list. - virtual std::shared_ptr getBucketIfExists(uint256 const& hash) = 0; + // Unfortunately, virtual methods cannot be templated, so we use this weird + // static interface to allow for a templated return type. + template + static std::shared_ptr getBucketIfExists(BucketManager const& bm, + uint256 const& hash); // Return a bucket by hash if we have it, else return nullptr. - virtual std::shared_ptr - getLiveBucketByHash(uint256 const& hash) = 0; - virtual std::shared_ptr - getHotArchiveBucketByHash(uint256 const& hash) = 0; + // Unfortunately, virtual methods cannot be templated, so we use this weird + // static getter interface to allow for a templated return type. + template + static std::shared_ptr getBucketByHash(BucketManager& bm, + uint256 const& hash); // Get a reference to a merge-future that's either running (or finished // somewhat recently) from either a map of the std::shared_futures doing the // merges and/or a set of records mapping merge inputs to outputs and the // set of outputs held in the BucketManager. Returns an invalid future if no // such future can be found or synthesized. - virtual std::shared_future> - getLiveMergeFuture(MergeKey const& key) = 0; - virtual std::shared_future> - getHotArchiveMergeFuture(MergeKey const& key) = 0; + // Unfortunately, virtual methods cannot be templated, so we use this weird + // static getter interface to allow for a templated return type. + template + static std::shared_future> + getMergeFuture(BucketManager& bucketManager, MergeKey const& key); // Add a reference to a merge _in progress_ (not yet adopted as a file) to // the BucketManager's internal map of std::shared_futures doing merges. // There is no corresponding entry-removal API: the std::shared_future will // be removed from the map when the merge completes and the output file is // adopted. - virtual void - putLiveMergeFuture(MergeKey const& key, - std::shared_future>) = 0; - virtual void putHotArchiveMergeFuture( - MergeKey const& key, - std::shared_future>) = 0; + // Unfortunately, virtual methods cannot be templated, so we use this weird + // static interface to allow for a templated return type. + template + static void + putMergeFuture(BucketManager& bm, MergeKey const& key, + std::shared_future> future); #ifdef BUILD_TESTS // Drop all references to merge futures in progress. 
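The interface above leans on one idiom several times: because C++ does not allow virtual function templates, BucketManager declares static function templates, and BucketManagerImpl.cpp provides explicit specializations that downcast to the impl and select the per-type container. A minimal standalone sketch of that idiom, with hypothetical names rather than the real stellar-core declarations:

#include <map>
#include <memory>

struct LiveBucket
{
};
struct HotArchiveBucket
{
};

class Manager
{
  public:
    virtual ~Manager() = default;
    // Stands in for the virtual template we cannot write; each
    // explicit specialization below dispatches to the impl.
    template <typename BucketT>
    static std::shared_ptr<BucketT> getByHash(Manager& m, int hash);
};

class ManagerImpl : public Manager
{
  public:
    // One container per bucket type -- the point of this refactor.
    std::map<int, std::shared_ptr<LiveBucket>> mLive;
    std::map<int, std::shared_ptr<HotArchiveBucket>> mHot;
};

template <>
std::shared_ptr<LiveBucket>
Manager::getByHash<LiveBucket>(Manager& m, int hash)
{
    auto& impl = static_cast<ManagerImpl&>(m);
    auto it = impl.mLive.find(hash);
    return it == impl.mLive.end() ? nullptr : it->second;
}

template <>
std::shared_ptr<HotArchiveBucket>
Manager::getByHash<HotArchiveBucket>(Manager& m, int hash)
{
    auto& impl = static_cast<ManagerImpl&>(m);
    auto it = impl.mHot.find(hash);
    return it == impl.mHot.end() ? nullptr : it->second;
}

A caller writes Manager::getByHash<LiveBucket>(mgr, h) and gets back a correctly typed shared_ptr with no dynamic_pointer_cast, which is exactly the cast the BucketManagerImpl changes below delete.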
diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManagerImpl.cpp index 5c27cd4615..90556a89cd 100644 --- a/src/bucket/BucketManagerImpl.cpp +++ b/src/bucket/BucketManagerImpl.cpp @@ -8,8 +8,10 @@ #include "bucket/BucketInputIterator.h" #include "bucket/BucketList.h" #include "bucket/BucketListSnapshot.h" +#include "bucket/BucketManager.h" #include "bucket/BucketOutputIterator.h" #include "bucket/BucketSnapshotManager.h" +#include "bucket/BucketUtils.h" #include "crypto/BLAKE2.h" #include "crypto/Hex.h" #include "crypto/SHA.h" @@ -256,6 +258,13 @@ extractFromFilename(std::string const& name) }; } +void +BucketManagerImpl::updateSharedBucketSize() +{ + mSharedBucketsSize.set_count(mSharedHotArchiveBuckets.size() + + mSharedLiveBuckets.size()); +} + std::string BucketManagerImpl::bucketFilename(std::string const& bucketHexHash) { @@ -507,30 +516,39 @@ BucketManagerImpl::renameBucketDirFile(std::filesystem::path const& src, } } +template <> std::shared_ptr -BucketManagerImpl::adoptFileAsLiveBucket( - std::string const& filename, uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index) +BucketManager::adoptFileAsBucket(BucketManager& bm, std::string const& filename, + uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) { - return adoptFileAsBucket(filename, hash, mergeKey, - std::move(index)); + auto& bmImpl = static_cast(bm); + return bmImpl.adoptFileAsBucket(filename, hash, mergeKey, std::move(index), + bmImpl.mSharedLiveBuckets, + bmImpl.mLiveBucketFutures); } +template <> std::shared_ptr -BucketManagerImpl::adoptFileAsHotArchiveBucket( - std::string const& filename, uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index) +BucketManager::adoptFileAsBucket(BucketManager& bm, std::string const& filename, + uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) { - return adoptFileAsBucket(filename, hash, mergeKey, - std::move(index)); + auto& bmImpl = static_cast(bm); + return bmImpl.adoptFileAsBucket(filename, hash, mergeKey, std::move(index), + bmImpl.mSharedHotArchiveBuckets, + bmImpl.mHotArchiveBucketFutures); } template std::shared_ptr BucketManagerImpl::adoptFileAsBucket(std::string const& filename, uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index) + std::unique_ptr index, + BucketMapT& bucketMap, + FutureMapT& futureMap) { + BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); std::lock_guard lock(mBucketMutex); @@ -549,11 +567,11 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, "BucketManager::adoptFileAsLiveBucket switching merge {} from " "live to finished for output={}", *mergeKey, hexAbbrev(hash)); - mLiveFutures.erase(*mergeKey); + futureMap.erase(*mergeKey); } // Check to see if we have an existing bucket (either in-memory or on-disk) - std::shared_ptr b = getBucketByHash(hash); + std::shared_ptr b = getBucketByHash(hash, bucketMap); if (b) { CLOG_DEBUG( @@ -590,8 +608,8 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, b = std::make_shared(canonicalName, hash, std::move(index)); { - mSharedBuckets.emplace(hash, b); - mSharedBucketsSize.set_count(mSharedBuckets.size()); + bucketMap.emplace(hash, b); + updateSharedBucketSize(); } } releaseAssert(b); @@ -604,9 +622,30 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, return b; } +template <> +void +BucketManager::noteEmptyMergeOutput(BucketManager& bm, + MergeKey const& mergeKey) +{ + auto& bmImpl = static_cast(bm); + bmImpl.noteEmptyMergeOutput(mergeKey, 
bmImpl.mLiveBucketFutures); +} + +template <> +void +BucketManager::noteEmptyMergeOutput(BucketManager& bm, + MergeKey const& mergeKey) +{ + auto& bmImpl = static_cast(bm); + bmImpl.noteEmptyMergeOutput(mergeKey, bmImpl.mHotArchiveBucketFutures); +} + +template void -BucketManagerImpl::noteEmptyMergeOutput(MergeKey const& mergeKey) +BucketManagerImpl::noteEmptyMergeOutput(MergeKey const& mergeKey, + FutureMapT& futureMap) { + BUCKET_TYPE_ASSERT(BucketT); releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); // We _do_ want to remove the mergeKey from mLiveFutures, both so that that @@ -620,16 +659,35 @@ BucketManagerImpl::noteEmptyMergeOutput(MergeKey const& mergeKey) // mergeKeys result in an empty output. std::lock_guard lock(mBucketMutex); CLOG_TRACE(Bucket, "BucketManager::noteEmptyMergeOutput({})", mergeKey); - mLiveFutures.erase(mergeKey); + futureMap.erase(mergeKey); +} + +template <> +std::shared_ptr +BucketManager::getBucketIfExists(BucketManager const& bm, uint256 const& hash) +{ + auto const& bmImpl = static_cast(bm); + return bmImpl.getBucketIfExists(hash, bmImpl.mSharedLiveBuckets); +} + +template <> +std::shared_ptr +BucketManager::getBucketIfExists(BucketManager const& bm, uint256 const& hash) +{ + auto const& bmImpl = static_cast(bm); + return bmImpl.getBucketIfExists(hash, bmImpl.mSharedHotArchiveBuckets); } -std::shared_ptr -BucketManagerImpl::getBucketIfExists(uint256 const& hash) +template +std::shared_ptr +BucketManagerImpl::getBucketIfExists(uint256 const& hash, + BucketMapT const& bucketMap) const { + BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; std::lock_guard lock(mBucketMutex); - auto i = mSharedBuckets.find(hash); - if (i != mSharedBuckets.end()) + auto i = bucketMap.find(hash); + if (i != bucketMap.end()) { CLOG_TRACE(Bucket, "BucketManager::getBucketIfExists({}) found bucket {}", @@ -640,41 +698,40 @@ BucketManagerImpl::getBucketIfExists(uint256 const& hash) return nullptr; } +template <> std::shared_ptr -BucketManagerImpl::getLiveBucketByHash(uint256 const& hash) +BucketManager::getBucketByHash(BucketManager& bm, uint256 const& hash) { - return getBucketByHash(hash); + auto& bmImpl = static_cast(bm); + return bmImpl.getBucketByHash(hash, bmImpl.mSharedLiveBuckets); } +template <> std::shared_ptr -BucketManagerImpl::getHotArchiveBucketByHash(uint256 const& hash) +BucketManager::getBucketByHash(BucketManager& bm, uint256 const& hash) { - return getBucketByHash(hash); + auto& bmImpl = static_cast(bm); + return bmImpl.getBucketByHash(hash, bmImpl.mSharedHotArchiveBuckets); } template std::shared_ptr -BucketManagerImpl::getBucketByHash(uint256 const& hash) +BucketManagerImpl::getBucketByHash(uint256 const& hash, + BucketMapT& bucketMap) { + BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; std::lock_guard lock(mBucketMutex); if (isZero(hash)) { return std::make_shared(); } - auto i = mSharedBuckets.find(hash); - if (i != mSharedBuckets.end()) + auto i = bucketMap.find(hash); + if (i != bucketMap.end()) { CLOG_TRACE(Bucket, "BucketManager::getBucketByHash({}) found bucket {}", binToHex(hash), i->second->getFilename()); - - // Because BucketManger has an impl class, no public templated functions - // can be declared. This means we have to manually enforce types via - // `getLiveBucketByHash` and `getHotBucketByHash`, leading to this ugly - // cast. 
- auto ret = std::dynamic_pointer_cast(i->second); - releaseAssertOrThrow(ret); - return ret; + return i->second; } std::string canonicalName = bucketFilename(hash); if (fs::exists(canonicalName)) @@ -686,41 +743,48 @@ BucketManagerImpl::getBucketByHash(uint256 const& hash) auto p = std::make_shared(canonicalName, hash, /*index=*/nullptr); - mSharedBuckets.emplace(hash, p); - mSharedBucketsSize.set_count(mSharedBuckets.size()); + bucketMap.emplace(hash, p); + updateSharedBucketSize(); return p; } return std::shared_ptr(); } +template <> std::shared_future> -BucketManagerImpl::getLiveMergeFuture(MergeKey const& key) +BucketManager::getMergeFuture(BucketManager& bucketManager, MergeKey const& key) { - return getMergeFuture(key); + auto& bmImpl = static_cast(bucketManager); + return bmImpl.getMergeFuture(key, bmImpl.mLiveBucketFutures); } +template <> std::shared_future> -BucketManagerImpl::getHotArchiveMergeFuture(MergeKey const& key) +BucketManager::getMergeFuture(BucketManager& bucketManager, MergeKey const& key) { - return getMergeFuture(key); + auto& bmImpl = static_cast(bucketManager); + return bmImpl.getMergeFuture(key, bmImpl.mHotArchiveBucketFutures); } template std::shared_future> -BucketManagerImpl::getMergeFuture(MergeKey const& key) +BucketManagerImpl::getMergeFuture(MergeKey const& key, + FutureMapT& futureMap) { + BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; std::lock_guard lock(mBucketMutex); MergeCounters mc; - auto i = mLiveFutures.find(key); - if (i == mLiveFutures.end()) + auto i = futureMap.find(key); + if (i == futureMap.end()) { // If there's no live (running) future, we might be able to _make_ one // for a retained bucket, if we still know its inputs. Hash bucketHash; if (mFinishedMerges.findMergeFor(key, bucketHash)) { - auto bucket = getBucketByHash(bucketHash); + auto bucket = + BucketManager::getBucketByHash(*this, bucketHash); if (bucket) { CLOG_TRACE(Bucket, @@ -747,33 +811,36 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key) key); mc.mRunningMergeReattachments++; incrMergeCounters(mc); - - // Because BucketManger has an impl class, no public templated functions - // can be declared. This means we have to manually enforce types via - // leading to this ugly variadic get that throws if the type is not correct. 
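This removed lookup is where the single-map design paid its tax: with both future types stored behind one variant-valued map, every reader needed a std::get that throws std::bad_variant_access on a type mismatch at runtime. Keeping one correctly typed map per bucket type (mLiveBucketFutures / mHotArchiveBucketFutures) turns that failure into a compile error. A compressed before/after sketch, using std::map and an int key as stand-ins for the real UnorderedMap and MergeKey:

#include <future>
#include <map>
#include <memory>
#include <variant>

struct LiveBucket
{
};
struct HotArchiveBucket
{
};
using MergeKeyT = int; // stand-in for the real MergeKey

// Before: one map of variants; readers must std::get the right
// alternative, and a wrong guess throws at runtime.
using AnyFutureT =
    std::variant<std::shared_future<std::shared_ptr<LiveBucket>>,
                 std::shared_future<std::shared_ptr<HotArchiveBucket>>>;
std::map<MergeKeyT, AnyFutureT> oldFutures;

// After: one map per bucket type; a mismatched bucket type is now a
// compile-time error instead of a bad_variant_access.
template <typename BucketT>
using FutureMapT =
    std::map<MergeKeyT, std::shared_future<std::shared_ptr<BucketT>>>;
FutureMapT<LiveBucket> liveFutures;
FutureMapT<HotArchiveBucket> hotArchiveFutures;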
- return std::get>>(i->second); + return i->second; } +template <> void -BucketManagerImpl::putLiveMergeFuture( - MergeKey const& key, std::shared_future> wp) +BucketManager::putMergeFuture( + BucketManager& bm, MergeKey const& key, + std::shared_future> future) { - putMergeFuture(key, wp); + auto& bmImpl = static_cast(bm); + bmImpl.putMergeFuture(key, future, bmImpl.mLiveBucketFutures); } +template <> void -BucketManagerImpl::putHotArchiveMergeFuture( - MergeKey const& key, - std::shared_future> wp) +BucketManager::putMergeFuture( + BucketManager& bm, MergeKey const& key, + std::shared_future> future) { - putMergeFuture(key, wp); + auto& bmImpl = static_cast(bm); + bmImpl.putMergeFuture(key, future, bmImpl.mHotArchiveBucketFutures); } template void BucketManagerImpl::putMergeFuture( - MergeKey const& key, std::shared_future> wp) + MergeKey const& key, std::shared_future> future, + FutureMapT& futureMap) { + BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); std::lock_guard lock(mBucketMutex); @@ -781,7 +848,7 @@ BucketManagerImpl::putMergeFuture( Bucket, "BucketManager::putMergeFuture storing future for running merge {}", key); - mLiveFutures.emplace(key, wp); + futureMap.emplace(key, future); } #ifdef BUILD_TESTS @@ -789,7 +856,8 @@ void BucketManagerImpl::clearMergeFuturesForTesting() { std::lock_guard lock(mBucketMutex); - mLiveFutures.clear(); + mLiveBucketFutures.clear(); + mHotArchiveBucketFutures.clear(); } #endif @@ -895,7 +963,14 @@ BucketManagerImpl::cleanupStaleFiles() std::lock_guard lock(mBucketMutex); auto referenced = getAllReferencedBuckets(); - std::transform(std::begin(mSharedBuckets), std::end(mSharedBuckets), + std::transform(std::begin(mSharedLiveBuckets), std::end(mSharedLiveBuckets), + std::inserter(referenced, std::end(referenced)), + [](std::pair> const& p) { + return p.first; + }); + + std::transform(std::begin(mSharedHotArchiveBuckets), + std::end(mSharedHotArchiveBuckets), std::inserter(referenced, std::end(referenced)), [](std::pair> const& p) { return p.first; @@ -927,89 +1002,96 @@ BucketManagerImpl::forgetUnreferencedBuckets() auto referenced = getAllReferencedBuckets(); auto blReferenced = getBucketListReferencedBuckets(); - for (auto i = mSharedBuckets.begin(); i != mSharedBuckets.end();) - { - // Standard says map iterators other than the one you're erasing - // remain valid. - auto j = i; - ++i; - - // Delete indexes for buckets no longer in bucketlist. There is a race - // condition on startup where future buckets for a level will be - // finished and have an index but will not yet be referred to by the - // bucket level's next pointer. Checking use_count == 1 makes sure no - // other in-progress structures will add bucket to bucket list after - // deleting index - if (j->second->isIndexed() && j->second.use_count() == 1 && - blReferenced.find(j->first) == blReferenced.end()) - { - CLOG_TRACE(Bucket, - "BucketManager::forgetUnreferencedBuckets deleting " - "index for {}", - j->second->getFilename()); - j->second->freeIndex(); - } - - // Only drop buckets if the bucketlist has forgotten them _and_ - // no other in-progress structures (worker threads, shadow lists) - // have references to them, just us. It's ok to retain a few too - // many buckets, a little longer than necessary. - // - // This conservatism is important because we want to enforce that - // only one bucket ever exists in memory with a given filename, and - // that we're the first and last to know about it. 
Otherwise buckets - // might race on deleting the underlying file from one another. - - if (referenced.find(j->first) == referenced.end() && - j->second.use_count() == 1) + auto bucketMapLoop = [&](auto& bucketMap, auto& futureMap) { + for (auto i = bucketMap.begin(); i != bucketMap.end();) { - auto filename = j->second->getFilename(); - CLOG_TRACE(Bucket, - "BucketManager::forgetUnreferencedBuckets dropping {}", - filename); - if (!filename.empty() && !mConfig.DISABLE_BUCKET_GC) + // Standard says map iterators other than the one you're erasing + // remain valid. + auto j = i; + ++i; + + // Delete indexes for buckets no longer in bucketlist. There is a + // race condition on startup where future buckets for a level will + // be finished and have an index but will not yet be referred to by + // the bucket level's next pointer. Checking use_count == 1 makes + // sure no other in-progress structures will add bucket to bucket + // list after deleting index + if (j->second->isIndexed() && j->second.use_count() == 1 && + blReferenced.find(j->first) == blReferenced.end()) { - CLOG_TRACE(Bucket, "removing bucket file: {}", filename); - std::filesystem::remove(filename); - auto gzfilename = filename.string() + ".gz"; - std::remove(gzfilename.c_str()); - auto indexFilename = bucketIndexFilename(j->second->getHash()); - std::remove(indexFilename.c_str()); + CLOG_TRACE(Bucket, + "BucketManager::forgetUnreferencedBuckets deleting " + "index for {}", + j->second->getFilename()); + j->second->freeIndex(); } - // Dropping this bucket means we'll no longer be able to - // resynthesize a std::shared_future pointing directly to it - // as a short-cut to performing a merge we've already seen. - // Therefore we should forget it from the weak map we use - // for that resynthesis. - for (auto const& forgottenMergeKey : - mFinishedMerges.forgetAllMergesProducing(j->first)) + // Only drop buckets if the bucketlist has forgotten them _and_ + // no other in-progress structures (worker threads, shadow lists) + // have references to them, just us. It's ok to retain a few too + // many buckets, a little longer than necessary. + // + // This conservatism is important because we want to enforce that + // only one bucket ever exists in memory with a given filename, and + // that we're the first and last to know about it. Otherwise buckets + // might race on deleting the underlying file from one another. + + if (referenced.find(j->first) == referenced.end() && + j->second.use_count() == 1) { - // There should be no futures alive with this output: we - // switched to storing only weak input/output mappings - // when any merge producing the bucket completed (in - // adoptFileAsLiveBucket), and we believe there's only one - // reference to the bucket anyways -- our own in - // mSharedBuckets. But there might be a race we missed, - // so double check & mop up here. Worst case we prevent - // a slow memory leak at the cost of redoing merges we - // might have been able to reattach to. 
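The rewritten loop keeps the subtle point its first comment cites: std::map::erase invalidates only the erased element's iterator, so saving the candidate in j and advancing i before any erase keeps the traversal valid. The same idiom in isolation, as a hypothetical helper rather than anything in this patch:

#include <map>

// Walk the map once, dropping every element that satisfies
// shouldDrop. `auto j = i++` saves the candidate and steps past it,
// so erasing through j never touches the iterator the loop still
// uses.
template <typename MapT, typename PredT>
void
eraseIf(MapT& m, PredT shouldDrop)
{
    for (auto i = m.begin(); i != m.end();)
    {
        auto j = i++;
        if (shouldDrop(*j))
        {
            m.erase(j);
        }
    }
}

The generic lambda bucketMapLoop plays the role of the MapT parameter here, letting one body serve both the live and hot-archive maps without naming their distinct value types.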
- auto f = mLiveFutures.find(forgottenMergeKey); - if (f != mLiveFutures.end()) + auto filename = j->second->getFilename(); + CLOG_TRACE( + Bucket, + "BucketManager::forgetUnreferencedBuckets dropping {}", + filename); + if (!filename.empty() && !mApp.getConfig().DISABLE_BUCKET_GC) { - CLOG_WARNING(Bucket, - "Unexpected live future for unreferenced " - "bucket: {}", - binToHex(i->first)); - mLiveFutures.erase(f); + CLOG_TRACE(Bucket, "removing bucket file: {}", filename); + std::filesystem::remove(filename); + auto gzfilename = filename.string() + ".gz"; + std::remove(gzfilename.c_str()); + auto indexFilename = + bucketIndexFilename(j->second->getHash()); + std::remove(indexFilename.c_str()); + } + + // Dropping this bucket means we'll no longer be able to + // resynthesize a std::shared_future pointing directly to it + // as a short-cut to performing a merge we've already seen. + // Therefore we should forget it from the weak map we use + // for that resynthesis. + for (auto const& forgottenMergeKey : + mFinishedMerges.forgetAllMergesProducing(j->first)) + { + // There should be no futures alive with this output: we + // switched to storing only weak input/output mappings + // when any merge producing the bucket completed (in + // adoptFileAsLiveBucket), and we believe there's only one + // reference to the bucket anyways -- our own in + // mSharedBuckets. But there might be a race we missed, + // so double check & mop up here. Worst case we prevent + // a slow memory leak at the cost of redoing merges we + // might have been able to reattach to. + auto f = futureMap.find(forgottenMergeKey); + if (f != futureMap.end()) + { + CLOG_WARNING(Bucket, + "Unexpected live future for unreferenced " + "bucket: {}", + binToHex(i->first)); + futureMap.erase(f); + } } - } - // All done, delete the bucket from the shared map. - mSharedBuckets.erase(j); + // All done, delete the bucket from the shared map. 
+ bucketMap.erase(j); + } } - } - mSharedBucketsSize.set_count(mSharedBuckets.size()); + }; + + bucketMapLoop(mSharedLiveBuckets, mLiveBucketFutures); + bucketMapLoop(mSharedHotArchiveBuckets, mHotArchiveBucketFutures); + updateSharedBucketSize(); } void @@ -1326,10 +1408,10 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, // Dependency: HAS supports Hot Archive BucketList for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - auto curr = - getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).curr)); - auto snap = - getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).snap)); + auto curr = getBucketByHash(hexToBin256(has.currentBuckets.at(i).curr), + mSharedLiveBuckets); + auto snap = getBucketByHash(hexToBin256(has.currentBuckets.at(i).snap), + mSharedLiveBuckets); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while assuming " @@ -1340,8 +1422,8 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, std::shared_ptr nextBucket = nullptr; if (nextFuture.hasOutputHash()) { - nextBucket = - getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash())); + nextBucket = getBucketByHash( + hexToBin256(nextFuture.getOutputHash()), mSharedLiveBuckets); if (!nextBucket) { throw std::runtime_error( @@ -1458,7 +1540,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) { continue; } - auto b = getLiveBucketByHash(pair.first); + auto b = getBucketByHash(pair.first, mSharedLiveBuckets); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1637,7 +1719,7 @@ BucketManagerImpl::visitLedgerEntries( { continue; } - auto b = getLiveBucketByHash(pair.first); + auto b = getBucketByHash(pair.first, mSharedLiveBuckets); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1676,7 +1758,7 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork() // TODO: Update verify to for ArchiveBucket // Dependency: HAS supports Hot Archive BucketList - auto b = getBucketByHash(h); + auto b = getBucketByHash(h, mSharedLiveBuckets); if (!b) { throw std::runtime_error(fmt::format( diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h index 9fb4415b2e..b3de18c8c2 100644 --- a/src/bucket/BucketManagerImpl.h +++ b/src/bucket/BucketManagerImpl.h @@ -1,5 +1,6 @@ #pragma once +#include "bucket/Bucket.h" #include "bucket/BucketList.h" #include "bucket/BucketManager.h" #include "bucket/BucketMergeMap.h" @@ -39,6 +40,13 @@ struct HistoryArchiveState; class BucketManagerImpl : public BucketManager { + template + using BucketMapT = std::map>; + + template + using FutureMapT = + UnorderedMap>>; + static std::string const kLockFilename; Application& mApp; @@ -47,7 +55,8 @@ class BucketManagerImpl : public BucketManager std::unique_ptr mSnapshotManager; std::unique_ptr mTmpDirManager; std::unique_ptr mWorkDir; - std::map> mSharedBuckets; + BucketMapT mSharedLiveBuckets; + BucketMapT mSharedHotArchiveBuckets; std::shared_ptr mSearchableBucketListSnapshot{}; @@ -85,14 +94,8 @@ class BucketManagerImpl : public BucketManager // FutureBucket being resolved). Entries in this map will be cleared when // the FutureBucket is _cleared_ (typically when the owning BucketList level // is committed). - - using LiveBucketFutureT = std::shared_future>; - using HotArchiveBucketFutureT = - std::shared_future>; - using BucketFutureT = - std::variant; - - UnorderedMap mLiveFutures; + FutureMapT mLiveBucketFutures; + FutureMapT mHotArchiveBucketFutures; // Records bucket-merges that are _finished_, i.e. 
have been adopted as // (possibly redundant) bucket files. This is a "weak" (bi-multi-)map of @@ -112,21 +115,33 @@ class BucketManagerImpl : public BucketManager medida::Timer& getPointLoadTimer(LedgerEntryType t) const; template - std::shared_ptr - adoptFileAsBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index); + std::shared_ptr adoptFileAsBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index, + BucketMapT& bucketMap, FutureMapT& futureMap); template - std::shared_ptr getBucketByHash(uint256 const& hash); + std::shared_ptr getBucketByHash(uint256 const& hash, + BucketMapT& bucketMap); + template + std::shared_ptr + getBucketIfExists(uint256 const& hash, + BucketMapT const& bucketMap) const; template std::shared_future> - getMergeFuture(MergeKey const& key); + getMergeFuture(MergeKey const& key, FutureMapT& futureMap); template void putMergeFuture(MergeKey const& key, - std::shared_future>); + std::shared_future> future, + FutureMapT& futureMap); + + template + void noteEmptyMergeOutput(MergeKey const& mergeKey, + FutureMapT& futureMap); + + void updateSharedBucketSize(); #ifdef BUILD_TESTS bool mUseFakeTestValuesForNextClose{false}; @@ -156,30 +171,7 @@ class BucketManagerImpl : public BucketManager TmpDirManager& getTmpDirManager() override; bool renameBucketDirFile(std::filesystem::path const& src, std::filesystem::path const& dst) override; - std::shared_ptr - adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index) override; - std::shared_ptr adoptFileAsHotArchiveBucket( - std::string const& filename, uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index) override; - void noteEmptyMergeOutput(MergeKey const& mergeKey) override; - std::shared_ptr getBucketIfExists(uint256 const& hash) override; - std::shared_ptr - getLiveBucketByHash(uint256 const& hash) override; - std::shared_ptr - getHotArchiveBucketByHash(uint256 const& hash) override; - - std::shared_future> - getLiveMergeFuture(MergeKey const& key) override; - std::shared_future> - getHotArchiveMergeFuture(MergeKey const& key) override; - void putLiveMergeFuture( - MergeKey const& key, - std::shared_future>) override; - void putHotArchiveMergeFuture( - MergeKey const& key, - std::shared_future>) override; + #ifdef BUILD_TESTS void clearMergeFuturesForTesting() override; #endif @@ -249,6 +241,8 @@ class BucketManagerImpl : public BucketManager getSearchableLiveBucketListSnapshot() override; void reportBucketEntryCountMetrics() override; + + friend class BucketManager; }; #define SKIP_1 50 diff --git a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp index a5ae8a9efb..0f8e3f2c81 100644 --- a/src/bucket/BucketOutputIterator.cpp +++ b/src/bucket/BucketOutputIterator.cpp @@ -71,7 +71,7 @@ BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, template void -BucketOutputIterator::put(BucketEntryT const& e) +BucketOutputIterator::put(typename BucketT::EntryT const& e) { ZoneScoped; @@ -150,7 +150,7 @@ BucketOutputIterator::put(BucketEntryT const& e) } else { - mBuf = std::make_unique(); + mBuf = std::make_unique(); } // In any case, replace *mBuf with e. 
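// [Illustrative aside, not part of the patch] put() can now name its
// argument type as `typename BucketT::EntryT` because each bucket class
// exports its entry type as a nested alias. A minimal sketch of that
// associated-type pattern; the *Sketch names and empty entry structs are
// stand-ins for the real XDR types:
#include <memory>

struct BucketEntrySketch
{
    // stand-in for the live BucketList XDR entry
};

struct HotArchiveBucketEntrySketch
{
    // stand-in for the hot archive XDR entry
};

struct LiveBucketSketch
{
    using EntryT = BucketEntrySketch;
};

struct HotArchiveBucketSketch
{
    using EntryT = HotArchiveBucketEntrySketch;
};

template <typename BucketT>
std::unique_ptr<typename BucketT::EntryT>
bufferEntrySketch(typename BucketT::EntryT const& e)
{
    // One template body handles both entry types, replacing the old
    // std::conditional_t dispatch on the bucket type.
    return std::make_unique<typename BucketT::EntryT>(e);
}

// Usage: bufferEntrySketch<LiveBucketSketch>(BucketEntrySketch{});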
@@ -181,7 +181,8 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, std::filesystem::remove(mFilename); if (mergeKey) { - bucketManager.noteEmptyMergeOutput(*mergeKey); + BucketManager::noteEmptyMergeOutput(bucketManager, + *mergeKey); } return std::make_shared(); } @@ -194,25 +195,17 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, { // either it's a new bucket or we just reconstructed a bucket // we already have, in any case ensure we have an index - if (auto b = bucketManager.getBucketIfExists(hash); + if (auto b = + BucketManager::getBucketIfExists(bucketManager, hash); !b || !b->isIndexed()) { - index = BucketIndex::createIndex( - bucketManager, mFilename, hash, mCtx); + index = BucketIndex::createIndex(bucketManager, mFilename, + hash, mCtx); } } - if constexpr (std::is_same_v) - { - return bucketManager.adoptFileAsLiveBucket(mFilename.string(), hash, - mergeKey, std::move(index)); - } - else - { - - return bucketManager.adoptFileAsHotArchiveBucket( - mFilename.string(), hash, mergeKey, std::move(index)); - } + return BucketManager::adoptFileAsBucket( + bucketManager, mFilename.string(), hash, mergeKey, std::move(index)); } template class BucketOutputIterator; diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h index 911b638914..7757ac56cf 100644 --- a/src/bucket/BucketOutputIterator.h +++ b/src/bucket/BucketOutputIterator.h @@ -6,6 +6,7 @@ #include "bucket/Bucket.h" #include "bucket/BucketManager.h" +#include "bucket/BucketUtils.h" #include "bucket/LedgerCmp.h" #include "util/XDRStream.h" #include "xdr/Stellar-ledger.h" @@ -23,18 +24,14 @@ class BucketManager; // when finished. template class BucketOutputIterator { - static_assert(std::is_same_v || - std::is_same_v); - - using BucketEntryT = std::conditional_t, - BucketEntry, HotArchiveBucketEntry>; + BUCKET_TYPE_ASSERT(BucketT); protected: std::filesystem::path mFilename; XDROutputFileStream mOut; BucketEntryIdCmp mCmp; asio::io_context& mCtx; - std::unique_ptr mBuf; + std::unique_ptr mBuf; SHA256 mHasher; size_t mBytesPut{0}; size_t mObjectsPut{0}; @@ -55,7 +52,7 @@ template class BucketOutputIterator BucketMetadata const& meta, MergeCounters& mc, asio::io_context& ctx, bool doFsync); - void put(BucketEntryT const& e); + void put(typename BucketT::EntryT const& e); std::shared_ptr getBucket(BucketManager& bucketManager, bool shouldSynchronouslyIndex, diff --git a/src/bucket/BucketSnapshot.cpp b/src/bucket/BucketSnapshot.cpp index 50dbe30b58..a419c84f00 100644 --- a/src/bucket/BucketSnapshot.cpp +++ b/src/bucket/BucketSnapshot.cpp @@ -37,8 +37,7 @@ BucketSnapshotBase::isEmpty() const } template -std::pair::BucketEntryT>, - bool> +std::pair, bool> BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, std::streamoff pos, size_t pageSize) const @@ -52,17 +51,17 @@ BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, auto& stream = getStream(); stream.seek(pos); - BucketEntryT be; + typename BucketT::EntryT be; if (pageSize == 0) { if (stream.readOne(be)) { - return {std::make_shared(be), false}; + return {std::make_shared(be), false}; } } else if (stream.readPage(be, k, pageSize)) { - return {std::make_shared(be), false}; + return {std::make_shared(be), false}; } // Mark entry miss for metrics @@ -71,8 +70,7 @@ BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, } template -std::pair::BucketEntryT>, - bool> +std::pair, bool> BucketSnapshotBase::getBucketEntry(LedgerKey const& k) const { ZoneScoped; @@ -100,7 +98,7 @@ template void BucketSnapshotBase::loadKeys( 
std::set& keys, - std::vector& result, LedgerKeyMeter* lkMeter) const + std::vector& result, LedgerKeyMeter* lkMeter) const { ZoneScoped; if (isEmpty()) @@ -113,25 +111,6 @@ BucketSnapshotBase::loadKeys( auto indexIter = index.begin(); while (currKeyIt != keys.end() && indexIter != index.end()) { - // lkMeter only supported for LiveBucketList - if (std::is_same_v && lkMeter) - { - auto keySize = xdr::xdr_size(*currKeyIt); - if (!lkMeter->canLoad(*currKeyIt, keySize)) - { - // If the transactions containing this key have a remaining - // quota less than the size of the key, we cannot load the - // entry, as xdr_size(key) <= xdr_size(entry). Here we consume - // keySize bytes from the quotas of transactions containing the - // key so that they will have zero remaining quota and - // additional entries belonging to only those same transactions - // will not be loaded even if they would fit in the remaining - // quota before this update. - lkMeter->updateReadQuotasForKey(*currKeyIt, keySize); - currKeyIt = keys.erase(currKeyIt); - continue; - } - } auto [offOp, newIndexIter] = index.scan(indexIter, *currKeyIt); indexIter = newIndexIter; if (offOp) diff --git a/src/bucket/BucketSnapshot.h b/src/bucket/BucketSnapshot.h index 6da683ea6e..71f33e448f 100644 --- a/src/bucket/BucketSnapshot.h +++ b/src/bucket/BucketSnapshot.h @@ -5,6 +5,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/Bucket.h" +#include "bucket/BucketUtils.h" #include "bucket/LedgerCmp.h" #include "util/NonCopyable.h" #include "xdr/Stellar-ledger-entries.h" @@ -22,19 +23,9 @@ class SearchableLiveBucketListSnapshot; // A lightweight wrapper around Bucket for thread safe BucketListDB lookups template class BucketSnapshotBase : public NonMovable { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); protected: - using BucketEntryT = std::conditional_t, - BucketEntry, HotArchiveBucketEntry>; - - // LiveBucket returns LedgerEntry vector on call to loadKeys, - // HotArchiveBucket returns HotArchiveBucketEntry - using BulkLoadReturnT = - std::conditional_t, LedgerEntry, - HotArchiveBucketEntry>; - std::shared_ptr const mBucket; // Lazily-constructed and retained for read path. @@ -49,7 +40,7 @@ template class BucketSnapshotBase : public NonMovable // reads until key is found or the end of the page. Returns , where bloomMiss is true if a bloomMiss occurred during the // load. - std::pair, bool> + std::pair, bool> getEntryAtOffset(LedgerKey const& k, std::streamoff pos, size_t pageSize) const; @@ -65,7 +56,7 @@ template class BucketSnapshotBase : public NonMovable // Loads bucket entry for LedgerKey k. Returns , // where bloomMiss is true if a bloomMiss occurred during the load. - std::pair, bool> + std::pair, bool> getBucketEntry(LedgerKey const& k) const; // Loads LedgerEntry's for given keys. When a key is found, the @@ -74,7 +65,7 @@ template class BucketSnapshotBase : public NonMovable // if the meter has a transaction with sufficient read quota for the key. // If Bucket is not of type LiveBucket, lkMeter is ignored. 
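// [Illustrative aside, not part of the patch] The contract above - a found
// key is added to `result` and erased from `keys` - is what lets a walk
// over bucket levels stop probing older buckets for entries that newer
// ones already resolved. A sketch of a caller-side loop relying on that
// contract; the function and parameter names here are assumptions:
#include <set>
#include <vector>

template <typename SnapshotT, typename KeyT, typename EntryT>
void
loadAcrossLevelsSketch(std::vector<SnapshotT> const& newestToOldest,
                       std::set<KeyT>& keys, std::vector<EntryT>& result)
{
    for (auto const& snap : newestToOldest)
    {
        if (keys.empty())
        {
            // Every requested key was resolved by a newer bucket.
            break;
        }
        // loadKeys erases each key it resolves, so an older (stale)
        // version of the same entry can never overwrite a newer one.
        snap.loadKeys(keys, result, /*lkMeter=*/nullptr);
    }
}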
void loadKeys(std::set& keys, - std::vector& result, + std::vector& result, LedgerKeyMeter* lkMeter) const; }; diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index 703da5c21f..36faf9519a 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -5,6 +5,7 @@ #include "bucket/BucketSnapshotManager.h" #include "bucket/Bucket.h" #include "bucket/BucketListSnapshot.h" +#include "bucket/BucketUtils.h" #include "main/Application.h" #include "util/GlobalChecks.h" #include "util/XDRStream.h" // IWYU pragma: keep @@ -17,10 +18,8 @@ namespace stellar { BucketSnapshotManager::BucketSnapshotManager( - Application& app, - std::unique_ptr const>&& snapshot, - std::unique_ptr const>&& - hotArchiveSnapshot, + Application& app, SnapshotPtrT&& snapshot, + SnapshotPtrT&& hotArchiveSnapshot, uint32_t numLiveHistoricalSnapshots) : mApp(app) , mCurrLiveSnapshot(std::move(snapshot)) @@ -81,42 +80,38 @@ BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label, return iter->second; } -template +template <> void -BucketSnapshotManager::maybeUpdateSnapshot( - std::unique_ptr& snapshot, - std::map>& historicalSnapshots) - const +BucketSnapshotManager::maybeUpdateSnapshot( + SnapshotPtrT& snapshot, + std::map>& historicalSnapshots) const { - static_assert( - std::is_same_v> || - std::is_same_v>); + maybeUpdateSnapshotInternal(snapshot, historicalSnapshots, + mCurrLiveSnapshot, mLiveHistoricalSnapshots); +} - auto const& managerSnapshot = [&]() -> auto const& - { - if constexpr (std::is_same_v>) - { - return mCurrLiveSnapshot; - } - else - { - return mCurrHotArchiveSnapshot; - } - } - (); +template <> +void +BucketSnapshotManager::maybeUpdateSnapshot( + SnapshotPtrT& snapshot, + std::map>& historicalSnapshots) + const +{ + maybeUpdateSnapshotInternal(snapshot, historicalSnapshots, + mCurrHotArchiveSnapshot, + mHotArchiveHistoricalSnapshots); +} - auto const& managerHistoricalSnapshots = [&]() -> auto const& - { - if constexpr (std::is_same_v>) - { - return mLiveHistoricalSnapshots; - } - else - { - return mHotArchiveHistoricalSnapshots; - } - } - (); +template +void +BucketSnapshotManager::maybeUpdateSnapshotInternal( + SnapshotPtrT& snapshot, + std::map>& historicalSnapshots, + SnapshotPtrT const& managerSnapshot, + std::map> const& managerHistoricalSnapshots) + const +{ + BUCKET_TYPE_ASSERT(BucketT); // The canonical snapshot held by the BucketSnapshotManager is not being // modified. 
Rather, a thread is checking it's copy against the canonical @@ -130,7 +125,8 @@ BucketSnapshotManager::maybeUpdateSnapshot( // Should only update with a newer snapshot releaseAssert(!snapshot || snapshot->getLedgerSeq() < managerSnapshot->getLedgerSeq()); - snapshot = std::make_unique(*managerSnapshot); + snapshot = std::make_unique const>( + *managerSnapshot); } // Then update historical snapshots (if any exist) @@ -149,17 +145,17 @@ BucketSnapshotManager::maybeUpdateSnapshot( historicalSnapshots.clear(); for (auto const& [ledgerSeq, snap] : managerHistoricalSnapshots) { - historicalSnapshots.emplace(ledgerSeq, - std::make_unique(*snap)); + historicalSnapshots.emplace( + ledgerSeq, + std::make_unique const>(*snap)); } } } void BucketSnapshotManager::updateCurrentSnapshot( - std::unique_ptr const>&& liveSnapshot, - std::unique_ptr const>&& - hotArchiveSnapshot) + SnapshotPtrT&& liveSnapshot, + SnapshotPtrT&& hotArchiveSnapshot) { releaseAssert(threadIsMain()); @@ -229,16 +225,4 @@ BucketSnapshotManager::endPointLoadTimer(LedgerEntryType t, iter->second.Update(duration); } } - -template void -BucketSnapshotManager::maybeUpdateSnapshot>( - std::unique_ptr const>& snapshot, - std::map const>>& - historicalSnapshots) const; -template void BucketSnapshotManager::maybeUpdateSnapshot< - BucketListSnapshot>( - std::unique_ptr const>& snapshot, - std::map const>>& - historicalSnapshots) const; } \ No newline at end of file diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h index de44f6f165..af66e918b6 100644 --- a/src/bucket/BucketSnapshotManager.h +++ b/src/bucket/BucketSnapshotManager.h @@ -30,6 +30,9 @@ template class BucketListSnapshot; class SearchableLiveBucketListSnapshot; class SearchableHotArchiveBucketListSnapshot; +template +using SnapshotPtrT = std::unique_ptr const>; + // This class serves as the boundary between non-threadsafe singleton classes // (BucketManager, BucketList, Metrics, etc) and threadsafe, parallel BucketList // snapshots. @@ -41,20 +44,17 @@ class BucketSnapshotManager : NonMovableOrCopyable // Snapshot that is maintained and periodically updated by BucketManager on // the main thread. When background threads need to generate or refresh a // snapshot, they will copy this snapshot. 
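// [Illustrative aside, not part of the patch] A minimal sketch of the
// copy-on-refresh scheme described above: a reader compares its snapshot's
// ledger sequence against the canonical one under the shared lock and
// copies the canonical snapshot when it is stale. The *Sketch names and
// the single-member snapshot are assumptions:
#include <cstdint>
#include <memory>
#include <shared_mutex>

struct SnapshotSketch
{
    uint32_t ledgerSeq{0};
};

class SnapshotManagerSketch
{
    mutable std::shared_mutex mMutex;
    std::unique_ptr<SnapshotSketch const> mCanonical{
        std::make_unique<SnapshotSketch const>()};

  public:
    void
    maybeRefresh(std::unique_ptr<SnapshotSketch const>& threadCopy) const
    {
        std::shared_lock<std::shared_mutex> lock(mMutex);
        if (!threadCopy || threadCopy->ledgerSeq < mCanonical->ledgerSeq)
        {
            // Copy rather than share: once this lock is released the main
            // thread may swap mCanonical for a newer snapshot at any time.
            threadCopy = std::make_unique<SnapshotSketch const>(*mCanonical);
        }
    }
};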
- std::unique_ptr const> mCurrLiveSnapshot{}; - std::unique_ptr const> - mCurrHotArchiveSnapshot{}; + SnapshotPtrT mCurrLiveSnapshot{}; + SnapshotPtrT mCurrHotArchiveSnapshot{}; // ledgerSeq that the snapshot is based on -> snapshot - std::map const>> - mLiveHistoricalSnapshots; - std::map const>> + std::map> mLiveHistoricalSnapshots; + std::map> mHotArchiveHistoricalSnapshots; uint32_t const mNumHistoricalSnapshots; - // Lock must be held when accessing any snapshot + // Lock must be held when accessing any member variables holding snapshots mutable std::shared_mutex mSnapshotMutex; mutable UnorderedMap mPointTimers{}; @@ -66,23 +66,27 @@ class BucketSnapshotManager : NonMovableOrCopyable mutable std::optional mTimerStart; + template + void maybeUpdateSnapshotInternal( + SnapshotPtrT& snapshot, + std::map>& historicalSnapshots, + SnapshotPtrT const& managerSnapshot, + std::map> const& + managerHistoricalSnapshots) const; + public: // Called by main thread to update snapshots whenever the BucketList // is updated - void updateCurrentSnapshot( - std::unique_ptr const>&& liveSnapshot, - std::unique_ptr const>&& - hotArchiveSnapshot); + void + updateCurrentSnapshot(SnapshotPtrT&& liveSnapshot, + SnapshotPtrT&& hotArchiveSnapshot); // numHistoricalLedgers is the number of historical snapshots that the // snapshot manager will maintain. If numHistoricalLedgers is 5, snapshots // will be capable of querying state from ledger [lcl, lcl - 5]. - BucketSnapshotManager( - Application& app, - std::unique_ptr const>&& snapshot, - std::unique_ptr const>&& - hotArchiveSnapshot, - uint32_t numHistoricalLedgers); + BucketSnapshotManager(Application& app, SnapshotPtrT&& snapshot, + SnapshotPtrT&& hotArchiveSnapshot, + uint32_t numHistoricalLedgers); std::shared_ptr copySearchableLiveBucketListSnapshot() const; @@ -91,11 +95,10 @@ class BucketSnapshotManager : NonMovableOrCopyable copySearchableHotArchiveBucketListSnapshot() const; // Checks if snapshot is out of date and updates it accordingly - template - void - maybeUpdateSnapshot(std::unique_ptr& snapshot, - std::map>& - historicalSnapshots) const; + template + void maybeUpdateSnapshot( + SnapshotPtrT& snapshot, + std::map>& historicalSnapshots) const; // All metric recording functions must only be called by the main thread void startPointLoadTimer() const; diff --git a/src/bucket/BucketUtils.h b/src/bucket/BucketUtils.h new file mode 100644 index 0000000000..0186671edc --- /dev/null +++ b/src/bucket/BucketUtils.h @@ -0,0 +1,14 @@ +#pragma once + +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +namespace stellar +{ + +#define BUCKET_TYPE_ASSERT(BucketT) \ + static_assert(std::is_same_v || \ + std::is_same_v, \ + "BucketT must be a Bucket type") +} \ No newline at end of file diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp index bc2dec6d16..8aa6720b9e 100644 --- a/src/bucket/FutureBucket.cpp +++ b/src/bucket/FutureBucket.cpp @@ -378,14 +378,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, curr->getHash(), snap->getHash(), shadowHashes}; std::shared_future> f; - if constexpr (std::is_same_v) - { - f = bm.getLiveMergeFuture(mk); - } - else - { - f = bm.getHotArchiveMergeFuture(mk); - } + f = BucketManager::getMergeFuture(bm, mk); if (f.valid()) { @@ -447,15 +440,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, }); mOutputBucketFuture = task->get_future().share(); - if constexpr (std::is_same_v) - { - bm.putLiveMergeFuture(mk, mOutputBucketFuture); - } - else - { - bm.putHotArchiveMergeFuture(mk, mOutputBucketFuture); - } - + BucketManager::putMergeFuture(bm, mk, mOutputBucketFuture); app.postOnBackgroundThread(bind(&task_t::operator(), task), "FutureBucket: merge"); checkState(); @@ -473,47 +458,24 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, auto& bm = app.getBucketManager(); if (hasOutputHash()) { - std::shared_ptr b; - if constexpr (std::is_same_v) - { - b = bm.getLiveBucketByHash(hexToBin256(getOutputHash())); - } - else - { - b = bm.getHotArchiveBucketByHash(hexToBin256(getOutputHash())); - } + auto b = BucketManager::getBucketByHash( + bm, hexToBin256(getOutputHash())); setLiveOutput(b); } else { releaseAssert(mState == FB_HASH_INPUTS); - if constexpr (std::is_same_v) - { - mInputCurrBucket = - bm.getLiveBucketByHash(hexToBin256(mInputCurrBucketHash)); - mInputSnapBucket = - bm.getLiveBucketByHash(hexToBin256(mInputSnapBucketHash)); - } - else - { - mInputCurrBucket = - bm.getHotArchiveBucketByHash(hexToBin256(mInputCurrBucketHash)); - mInputSnapBucket = - bm.getHotArchiveBucketByHash(hexToBin256(mInputSnapBucketHash)); - } + mInputCurrBucket = BucketManager::getBucketByHash( + bm, hexToBin256(mInputCurrBucketHash)); + mInputSnapBucket = BucketManager::getBucketByHash( + bm, hexToBin256(mInputSnapBucketHash)); + releaseAssert(mInputShadowBuckets.empty()); for (auto const& h : mInputShadowBucketHashes) { - std::shared_ptr b; - if constexpr (std::is_same_v) - { - b = bm.getLiveBucketByHash(hexToBin256(h)); - } - else - { - b = bm.getHotArchiveBucketByHash(hexToBin256(h)); - } + auto b = + BucketManager::getBucketByHash(bm, hexToBin256(h)); releaseAssert(b); CLOG_DEBUG(Bucket, "Reconstituting shadow {}", h); diff --git a/src/bucket/FutureBucket.h b/src/bucket/FutureBucket.h index cda7e6b61c..b8d64742bd 100644 --- a/src/bucket/FutureBucket.h +++ b/src/bucket/FutureBucket.h @@ -5,6 +5,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/Bucket.h" +#include "bucket/BucketUtils.h" #include "util/GlobalChecks.h" #include #include @@ -36,8 +37,7 @@ class HotArchiveBucket; */ template class FutureBucket { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); // There are two lifecycles of a FutureBucket: // diff --git a/src/bucket/LedgerCmp.h b/src/bucket/LedgerCmp.h index cc550a4e96..01d6de81b9 100644 --- a/src/bucket/LedgerCmp.h +++ b/src/bucket/LedgerCmp.h @@ -6,6 +6,8 @@ #include +#include 
"bucket/Bucket.h" +#include "bucket/BucketUtils.h" #include "util/XDROperators.h" // IWYU pragma: keep #include "xdr/Stellar-ledger-entries.h" #include "xdr/Stellar-ledger.h" @@ -13,9 +15,6 @@ namespace stellar { -class LiveBucket; -class HotArchiveBucket; - template bool lexCompare(T&& lhs1, T&& rhs1) @@ -131,11 +130,7 @@ struct LedgerEntryIdCmp */ template struct BucketEntryIdCmp { - static_assert(std::is_same_v || - std::is_same_v); - - using BucketEntryT = std::conditional_t, - BucketEntry, HotArchiveBucketEntry>; + BUCKET_TYPE_ASSERT(BucketT); bool compareHotArchive(HotArchiveBucketEntry const& a, @@ -244,7 +239,8 @@ template struct BucketEntryIdCmp } bool - operator()(BucketEntryT const& a, BucketEntryT const& b) const + operator()(typename BucketT::EntryT const& a, + typename BucketT::EntryT const& b) const { if constexpr (std::is_same_v) { diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp index ae3af67dc6..584f0d8a51 100644 --- a/src/bucket/test/BucketIndexTests.cpp +++ b/src/bucket/test/BucketIndexTests.cpp @@ -635,7 +635,8 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") auto indexFilename = test.getBM().bucketIndexFilename(bucketHash); REQUIRE(fs::exists(indexFilename)); - auto b = test.getBM().getLiveBucketByHash(bucketHash); + auto b = BucketManager::getBucketByHash(test.getBM(), + bucketHash); REQUIRE(b->isIndexed()); auto onDiskIndex = @@ -661,7 +662,8 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") } // Check if in-memory index has correct params - auto b = test.getBM().getLiveBucketByHash(bucketHash); + auto b = BucketManager::getBucketByHash(test.getBM(), + bucketHash); REQUIRE(!b->isEmpty()); REQUIRE(b->isIndexed()); diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h index c77794a80a..62aa7265b5 100644 --- a/src/bucket/test/BucketTestUtils.h +++ b/src/bucket/test/BucketTestUtils.h @@ -32,8 +32,7 @@ void for_versions_with_differing_bucket_logic( template struct EntryCounts { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); size_t nMeta{0}; size_t nInitOrArchived{0}; diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index a71b672f0b..e5ff014a86 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -88,10 +88,10 @@ std::shared_ptr ApplyBucketsWork::getBucket(std::string const& hash) { auto i = mBuckets.find(hash); - auto b = - (i != mBuckets.end()) - ? i->second - : mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash)); + auto b = (i != mBuckets.end()) + ? 
i->second + : BucketManager::getBucketByHash( + mApp.getBucketManager(), hexToBin256(hash)); releaseAssert(b); return b; } diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp index 0325180c23..adc2d47adf 100644 --- a/src/catchup/AssumeStateWork.cpp +++ b/src/catchup/AssumeStateWork.cpp @@ -28,10 +28,10 @@ AssumeStateWork::AssumeStateWork(Application& app, auto& bm = mApp.getBucketManager(); for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - auto curr = - bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr)); - auto snap = - bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap)); + auto curr = BucketManager::getBucketByHash( + bm, hexToBin256(mHas.currentBuckets.at(i).curr)); + auto snap = BucketManager::getBucketByHash( + bm, hexToBin256(mHas.currentBuckets.at(i).snap)); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while " @@ -43,8 +43,8 @@ AssumeStateWork::AssumeStateWork(Application& app, auto& nextFuture = mHas.currentBuckets.at(i).next; if (nextFuture.hasOutputHash()) { - auto nextBucket = - bm.getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash())); + auto nextBucket = BucketManager::getBucketByHash( + bm, hexToBin256(nextFuture.getOutputHash())); if (!nextBucket) { throw std::runtime_error("Missing future bucket files while " diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index c8764b5ce2..900e0d0386 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -3,6 +3,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "IndexBucketsWork.h" +#include "bucket/Bucket.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" #include "util/HashOfHash.h" @@ -87,7 +88,7 @@ IndexBucketsWork::IndexWork::postWork() if (!self->mIndex) { // TODO: Fix this when archive BucketLists assume state - self->mIndex = BucketIndex::createIndex( + self->mIndex = BucketIndex::createIndex( bm, self->mBucket->getFilename(), self->mBucket->getHash(), ctx); } diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp index a2f8992547..235d4f6206 100644 --- a/src/history/HistoryArchive.cpp +++ b/src/history/HistoryArchive.cpp @@ -306,8 +306,8 @@ HistoryArchiveState::containsValidBuckets(Application& app) const // Process bucket, return version auto processBucket = [&](std::string const& bucketHash) { - auto bucket = - app.getBucketManager().getLiveBucketByHash(hexToBin256(bucketHash)); + auto bucket = BucketManager::getBucketByHash( + app.getBucketManager(), hexToBin256(bucketHash)); releaseAssert(bucket); int32_t version = 0; if (!bucket->isEmpty()) @@ -390,8 +390,8 @@ HistoryArchiveState::prepareForPublish(Application& app) auto& level = currentBuckets[i]; auto& prev = currentBuckets[i - 1]; - auto snap = - app.getBucketManager().getLiveBucketByHash(hexToBin256(prev.snap)); + auto snap = BucketManager::getBucketByHash( + app.getBucketManager(), hexToBin256(prev.snap)); if (!level.next.isClear() && protocolVersionStartsFrom( snap->getBucketVersion(), diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp index 8e28261745..1520d9168d 100644 --- a/src/history/StateSnapshot.cpp +++ b/src/history/StateSnapshot.cpp @@ -121,7 +121,8 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) for (auto const& hash : mLocalState.differingBuckets(other)) { - auto b = mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash)); + auto b = BucketManager::getBucketByHash( + 
mApp.getBucketManager(), hexToBin256(hash)); releaseAssert(b); addIfExists(std::make_shared(*b)); } diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp index 6f7a880638..f510be9d59 100644 --- a/src/history/test/HistoryTestsUtils.cpp +++ b/src/history/test/HistoryTestsUtils.cpp @@ -1021,8 +1021,10 @@ CatchupSimulation::validateCatchup(Application::pointer app) CHECK(wantBucketListHash == haveBucketListHash); CHECK(wantHash == haveHash); - CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket0Hash)); - CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket1Hash)); + CHECK(BucketManager::getBucketByHash(app->getBucketManager(), + wantBucket0Hash)); + CHECK(BucketManager::getBucketByHash(app->getBucketManager(), + wantBucket1Hash)); CHECK(wantBucket0Hash == haveBucket0Hash); CHECK(wantBucket1Hash == haveBucket1Hash); diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp index 0ef542af58..c31f71f75d 100644 --- a/src/historywork/DownloadBucketsWork.cpp +++ b/src/historywork/DownloadBucketsWork.cpp @@ -95,8 +95,8 @@ DownloadBucketsWork::yieldMoreWork() if (self) { auto bucketPath = ft.localPath_nogz(); - auto b = app.getBucketManager().adoptFileAsLiveBucket( - bucketPath, hexToBin256(hash), + auto b = BucketManager::adoptFileAsBucket( + app.getBucketManager(), bucketPath, hexToBin256(hash), /*mergeKey=*/nullptr, /*index=*/nullptr); self->mBuckets[hash] = b; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 6e1c2e4a49..2e41fe21b3 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -240,8 +240,8 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, std::set> retained; for (auto const& b : has.allBuckets()) { - auto bPtr = - app->getBucketManager().getLiveBucketByHash(hexToBin256(b)); + auto bPtr = BucketManager::getBucketByHash( + app->getBucketManager(), hexToBin256(b)); releaseAssert(bPtr); retained.insert(bPtr); } @@ -665,7 +665,7 @@ dumpStateArchivalStatistics(Config cfg) { continue; } - auto b = bm.getLiveBucketByHash(hash); + auto b = BucketManager::getBucketByHash(bm, hash); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index a2897175c4..9484d39543 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -131,7 +131,8 @@ checkState(Application& app) if (nextFuture.hasOutputHash()) { auto hash = hexToBin256(nextFuture.getOutputHash()); - checkBucket(bm.getLiveBucketByHash(hash)); + checkBucket( + BucketManager::getBucketByHash(bm, hash)); } } } diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h index e2360b10f8..3588f21d1a 100644 --- a/src/test/TestUtils.h +++ b/src/test/TestUtils.h @@ -31,8 +31,7 @@ int32_t computeMultiplier(LedgerEntry const& le); template class BucketListDepthModifier { - static_assert(std::is_same_v || - std::is_same_v); + BUCKET_TYPE_ASSERT(BucketT); uint32_t const mPrevDepth; From 167388898f3ac4e10f6800fbbe997c8706230d8f Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Tue, 5 Nov 2024 01:46:28 -0800 Subject: [PATCH 5/5] Aditional BucketList refactoring --- src/bucket/Bucket.cpp | 1116 ----------------- src/bucket/Bucket.h | 289 ----- src/bucket/BucketApplicator.cpp | 4 +- src/bucket/BucketApplicator.h | 2 +- src/bucket/BucketBase.cpp | 419 +++++++ src/bucket/BucketBase.h | 135 ++ src/bucket/BucketIndexImpl.cpp | 44 +- 
src/bucket/BucketIndexImpl.h | 2 +- src/bucket/BucketInputIterator.cpp | 5 +- src/bucket/BucketInputIterator.h | 5 - .../{BucketList.cpp => BucketListBase.cpp} | 234 +--- src/bucket/{BucketList.h => BucketListBase.h} | 74 +- src/bucket/BucketListSnapshotBase.cpp | 162 +++ ...istSnapshot.h => BucketListSnapshotBase.h} | 74 +- ...ucketManagerImpl.cpp => BucketManager.cpp} | 414 ++---- src/bucket/BucketManager.h | 441 ++++--- src/bucket/BucketManagerImpl.h | 252 ---- src/bucket/BucketOutputIterator.cpp | 22 +- src/bucket/BucketOutputIterator.h | 5 - src/bucket/BucketSnapshot.cpp | 20 +- src/bucket/BucketSnapshot.h | 7 +- src/bucket/BucketSnapshotManager.cpp | 5 +- src/bucket/BucketSnapshotManager.h | 6 +- src/bucket/BucketUtils.cpp | 153 +++ src/bucket/BucketUtils.h | 132 ++ src/bucket/FutureBucket.cpp | 26 +- src/bucket/FutureBucket.h | 1 - src/bucket/HotArchiveBucket.cpp | 149 +++ src/bucket/HotArchiveBucket.h | 96 ++ src/bucket/HotArchiveBucketList.cpp | 25 + src/bucket/HotArchiveBucketList.h | 24 + src/bucket/LedgerCmp.h | 4 +- src/bucket/LiveBucket.cpp | 578 +++++++++ src/bucket/LiveBucket.h | 147 +++ src/bucket/LiveBucketList.cpp | 201 +++ src/bucket/LiveBucketList.h | 59 + src/bucket/MergeKey.h | 1 - ...tSnapshot.cpp => SearchableBucketList.cpp} | 275 +--- src/bucket/SearchableBucketList.h | 54 + src/bucket/test/BucketIndexTests.cpp | 12 +- src/bucket/test/BucketListTests.cpp | 9 +- src/bucket/test/BucketManagerTests.cpp | 10 +- src/bucket/test/BucketTestUtils.cpp | 3 +- src/bucket/test/BucketTests.cpp | 175 +-- src/catchup/ApplyBucketsWork.cpp | 8 +- src/catchup/ApplyBufferedLedgersWork.cpp | 2 +- src/catchup/ApplyCheckpointWork.cpp | 5 +- src/catchup/AssumeStateWork.cpp | 14 +- src/catchup/CatchupWork.cpp | 2 - src/catchup/DownloadApplyTxsWork.cpp | 2 +- src/catchup/IndexBucketsWork.cpp | 7 +- src/herder/test/UpgradesTests.cpp | 4 +- src/history/FileTransferInfo.h | 3 +- src/history/HistoryArchive.cpp | 15 +- src/history/HistoryManagerImpl.cpp | 10 +- src/history/StateSnapshot.cpp | 9 +- src/history/test/HistoryTestsUtils.cpp | 6 +- src/history/test/HistoryTestsUtils.h | 3 +- src/historywork/DownloadBucketsWork.cpp | 4 +- src/historywork/DownloadBucketsWork.h | 2 +- .../BucketListIsConsistentWithDatabase.cpp | 5 +- src/invariant/InvariantManagerImpl.cpp | 4 +- ...ucketListIsConsistentWithDatabaseTests.cpp | 2 +- src/invariant/test/InvariantTests.cpp | 1 - src/ledger/LedgerManagerImpl.cpp | 6 +- src/ledger/LedgerStateSnapshot.cpp | 1 - src/ledger/LedgerStateSnapshot.h | 3 +- src/ledger/LedgerTxn.cpp | 9 +- src/ledger/LedgerTxnImpl.h | 1 - src/ledger/NetworkConfig.cpp | 2 +- .../test/LedgerCloseMetaStreamTests.cpp | 1 - src/main/ApplicationImpl.cpp | 5 - src/main/ApplicationUtils.cpp | 10 +- src/main/CommandHandler.cpp | 10 - src/main/CommandLine.cpp | 6 +- src/main/Config.cpp | 4 - src/main/QueryServer.cpp | 2 +- src/main/test/ApplicationUtilsTests.cpp | 3 +- src/simulation/CoreTests.cpp | 8 - src/simulation/test/LoadGeneratorTests.cpp | 4 +- src/test/TestUtils.cpp | 1 - src/test/TestUtils.h | 5 +- ...ger-close-meta-v1-protocol-23-soroban.json | 448 +++---- .../ledger-close-meta-v1-protocol-23.json | 818 ++++++------ src/transactions/TransactionUtils.cpp | 3 - src/util/test/XDRStreamTests.cpp | 1 - 86 files changed, 3613 insertions(+), 3717 deletions(-) delete mode 100644 src/bucket/Bucket.cpp delete mode 100644 src/bucket/Bucket.h create mode 100644 src/bucket/BucketBase.cpp create mode 100644 src/bucket/BucketBase.h rename src/bucket/{BucketList.cpp => BucketListBase.cpp} (76%) 
rename src/bucket/{BucketList.h => BucketListBase.h} (88%) create mode 100644 src/bucket/BucketListSnapshotBase.cpp rename src/bucket/{BucketListSnapshot.h => BucketListSnapshotBase.h} (59%) rename src/bucket/{BucketManagerImpl.cpp => BucketManager.cpp} (76%) delete mode 100644 src/bucket/BucketManagerImpl.h create mode 100644 src/bucket/BucketUtils.cpp create mode 100644 src/bucket/HotArchiveBucket.cpp create mode 100644 src/bucket/HotArchiveBucket.h create mode 100644 src/bucket/HotArchiveBucketList.cpp create mode 100644 src/bucket/HotArchiveBucketList.h create mode 100644 src/bucket/LiveBucket.cpp create mode 100644 src/bucket/LiveBucket.h create mode 100644 src/bucket/LiveBucketList.cpp create mode 100644 src/bucket/LiveBucketList.h rename src/bucket/{BucketListSnapshot.cpp => SearchableBucketList.cpp} (52%) create mode 100644 src/bucket/SearchableBucketList.h diff --git a/src/bucket/Bucket.cpp b/src/bucket/Bucket.cpp deleted file mode 100644 index d533f688b6..0000000000 --- a/src/bucket/Bucket.cpp +++ /dev/null @@ -1,1116 +0,0 @@ -// Copyright 2015 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -// ASIO is somewhat particular about when it gets included -- it wants to be the -// first to include -- so we try to include it before everything -// else. -#include "util/asio.h" // IWYU pragma: keep -#include "bucket/Bucket.h" -#include "bucket/BucketApplicator.h" -#include "bucket/BucketInputIterator.h" -#include "bucket/BucketList.h" -#include "bucket/BucketListSnapshot.h" -#include "bucket/BucketManager.h" -#include "bucket/BucketOutputIterator.h" -#include "bucket/LedgerCmp.h" -#include "bucket/MergeKey.h" -#include "crypto/Hex.h" -#include "crypto/Random.h" -#include "database/Database.h" -#include "ledger/LedgerTxn.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "medida/timer.h" -#include "util/Fs.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/ProtocolVersion.h" -#include "util/XDRStream.h" -#include "util/types.h" -#include - -#include "medida/counter.h" -#include "xdr/Stellar-ledger.h" - -namespace stellar -{ - -BucketIndex const& -Bucket::getIndex() const -{ - ZoneScoped; - releaseAssertOrThrow(!mFilename.empty()); - releaseAssertOrThrow(mIndex); - return *mIndex; -} - -bool -Bucket::isIndexed() const -{ - return static_cast(mIndex); -} - -std::optional> -Bucket::getOfferRange() const -{ - return getIndex().getOfferRange(); -} - -void -Bucket::setIndex(std::unique_ptr&& index) -{ - releaseAssertOrThrow(!mIndex); - mIndex = std::move(index); -} - -Bucket::Bucket(std::string const& filename, Hash const& hash, - std::unique_ptr&& index) - : mFilename(filename), mHash(hash), mIndex(std::move(index)) -{ - releaseAssert(filename.empty() || fs::exists(filename)); - if (!filename.empty()) - { - CLOG_TRACE(Bucket, "Bucket::Bucket() created, file exists : {}", - mFilename); - mSize = fs::size(filename); - } -} - -Bucket::Bucket() -{ -} - -Hash const& -Bucket::getHash() const -{ - return mHash; -} - -std::filesystem::path const& -Bucket::getFilename() const -{ - return mFilename; -} - -size_t -Bucket::getSize() const -{ - return mSize; -} - -bool -LiveBucket::containsBucketIdentity(BucketEntry const& id) const -{ - BucketEntryIdCmp cmp; - LiveBucketInputIterator iter(shared_from_this()); - while (iter) - { - if (!(cmp(*iter, id) || cmp(id, *iter))) - { - return true; - 
} - ++iter; - } - return false; -} - -bool -Bucket::isEmpty() const -{ - if (mFilename.empty() || isZero(mHash)) - { - releaseAssertOrThrow(mFilename.empty() && isZero(mHash)); - return true; - } - - return false; -} - -void -Bucket::freeIndex() -{ - mIndex.reset(nullptr); -} - -#ifdef BUILD_TESTS -void -LiveBucket::apply(Application& app) const -{ - ZoneScoped; - - auto filter = [&](LedgerEntryType t) { - if (app.getConfig().isUsingBucketListDB()) - { - return t == OFFER; - } - - return true; - }; - - std::unordered_set emptySet; - BucketApplicator applicator( - app, app.getConfig().LEDGER_PROTOCOL_VERSION, - 0 /*set to 0 so we always load from the parent to check state*/, - 0 /*set to a level that's not the bottom so we don't treat live entries - as init*/ - , - shared_from_this(), filter, emptySet); - BucketApplicator::Counters counters(app.getClock().now()); - while (applicator) - { - applicator.advance(counters); - } - counters.logInfo("direct", 0, app.getClock().now()); -} -#endif // BUILD_TESTS - -std::vector -LiveBucket::convertToBucketEntry(bool useInit, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) -{ - std::vector bucket; - for (auto const& e : initEntries) - { - BucketEntry ce; - ce.type(useInit ? INITENTRY : LIVEENTRY); - ce.liveEntry() = e; - bucket.push_back(ce); - } - for (auto const& e : liveEntries) - { - BucketEntry ce; - ce.type(LIVEENTRY); - ce.liveEntry() = e; - bucket.push_back(ce); - } - for (auto const& e : deadEntries) - { - BucketEntry ce; - ce.type(DEADENTRY); - ce.deadEntry() = e; - bucket.push_back(ce); - } - - BucketEntryIdCmp cmp; - std::sort(bucket.begin(), bucket.end(), cmp); - releaseAssert(std::adjacent_find( - bucket.begin(), bucket.end(), - [&cmp](BucketEntry const& lhs, BucketEntry const& rhs) { - return !cmp(lhs, rhs); - }) == bucket.end()); - return bucket; -} - -std::string -Bucket::randomFileName(std::string const& tmpDir, std::string ext) -{ - ZoneScoped; - for (;;) - { - std::string name = - tmpDir + "/tmp-bucket-" + binToHex(randomBytes(8)) + ext; - std::ifstream ifile(name); - if (!ifile) - { - return name; - } - } -} - -std::string -Bucket::randomBucketName(std::string const& tmpDir) -{ - return randomFileName(tmpDir, ".xdr"); -} - -std::string -Bucket::randomBucketIndexName(std::string const& tmpDir) -{ - return randomFileName(tmpDir, ".index"); -} - -std::vector -HotArchiveBucket::convertToBucketEntry( - std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries) -{ - std::vector bucket; - for (auto const& e : archivedEntries) - { - HotArchiveBucketEntry be; - be.type(HOT_ARCHIVE_ARCHIVED); - be.archivedEntry() = e; - bucket.push_back(be); - } - for (auto const& k : restoredEntries) - { - HotArchiveBucketEntry be; - be.type(HOT_ARCHIVE_LIVE); - be.key() = k; - bucket.push_back(be); - } - for (auto const& k : deletedEntries) - { - HotArchiveBucketEntry be; - be.type(HOT_ARCHIVE_DELETED); - be.key() = k; - bucket.push_back(be); - } - - BucketEntryIdCmp cmp; - std::sort(bucket.begin(), bucket.end(), cmp); - releaseAssert(std::adjacent_find(bucket.begin(), bucket.end(), - [&cmp](HotArchiveBucketEntry const& lhs, - HotArchiveBucketEntry const& rhs) { - return !cmp(lhs, rhs); - }) == bucket.end()); - return bucket; -} - -std::shared_ptr -HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries, - bool 
countMergeEvents, asio::io_context& ctx, - bool doFsync) -{ - ZoneScoped; - BucketMetadata meta; - meta.ledgerVersion = protocolVersion; - meta.ext.v(1); - meta.ext.bucketListType() = BucketListType::HOT_ARCHIVE; - auto entries = - convertToBucketEntry(archivedEntries, restoredEntries, deletedEntries); - - MergeCounters mc; - HotArchiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, - mc, ctx, doFsync); - for (auto const& e : entries) - { - out.put(e); - } - - if (countMergeEvents) - { - bucketManager.incrMergeCounters(mc); - } - - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB()); -} - -std::shared_ptr -LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries, - bool countMergeEvents, asio::io_context& ctx, bool doFsync) -{ - ZoneScoped; - // When building fresh buckets after protocol version 10 (i.e. version - // 11-or-after) we differentiate INITENTRY from LIVEENTRY. In older - // protocols, for compatibility sake, we mark both cases as LIVEENTRY. - bool useInit = protocolVersionStartsFrom( - protocolVersion, FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); - - BucketMetadata meta; - meta.ledgerVersion = protocolVersion; - - if (protocolVersionStartsFrom( - protocolVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) - { - meta.ext.v(1); - meta.ext.bucketListType() = BucketListType::LIVE; - } - - auto entries = - convertToBucketEntry(useInit, initEntries, liveEntries, deadEntries); - - MergeCounters mc; - LiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx, - doFsync); - for (auto const& e : entries) - { - out.put(e); - } - - if (countMergeEvents) - { - bucketManager.incrMergeCounters(mc); - } - - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB()); -} - -static void -countShadowedEntryType(MergeCounters& mc, BucketEntry const& e) -{ - switch (e.type()) - { - case METAENTRY: - ++mc.mMetaEntryShadowElisions; - break; - case INITENTRY: - ++mc.mInitEntryShadowElisions; - break; - case LIVEENTRY: - ++mc.mLiveEntryShadowElisions; - break; - case DEADENTRY: - ++mc.mDeadEntryShadowElisions; - break; - } -} - -void -LiveBucket::checkProtocolLegality(BucketEntry const& entry, - uint32_t protocolVersion) -{ - if (protocolVersionIsBefore( - protocolVersion, - FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) && - (entry.type() == INITENTRY || entry.type() == METAENTRY)) - { - throw std::runtime_error(fmt::format( - FMT_STRING("unsupported entry type {} in protocol {:d} bucket"), - (entry.type() == INITENTRY ? "INIT" : "META"), protocolVersion)); - } -} - -inline void -maybePut(HotArchiveBucketOutputIterator& out, - HotArchiveBucketEntry const& entry, - std::vector& shadowIterators, - bool keepShadowedLifecycleEntries, MergeCounters& mc) -{ - // Archived BucketList is only present after protocol 21, so shadows are - // never supported - out.put(entry); -} - -inline void -maybePut(LiveBucketOutputIterator& out, BucketEntry const& entry, - std::vector& shadowIterators, - bool keepShadowedLifecycleEntries, MergeCounters& mc) -{ - // In ledgers before protocol 11, keepShadowedLifecycleEntries will be - // `false` and we will drop all shadowed entries here. 
- // - // In ledgers at-or-after protocol 11, it will be `true` which means that we - // only elide 'put'ing an entry if it is in LIVEENTRY state; we keep entries - // in DEADENTRY and INITENTRY states, for two reasons: - // - // - DEADENTRY is preserved to ensure that old live-or-init entries that - // were killed remain dead, are not brought back to life accidentally by - // having a newer shadow eliding their later DEADENTRY (tombstone). This - // is possible because newer shadowing entries may both refer to the - // same key as an older dead entry, and may occur as an INIT/DEAD pair - // that subsequently annihilate one another. - // - // IOW we want to prevent the following scenario: - // - // lev1:DEAD, lev2:INIT, lev3:DEAD, lev4:INIT - // - // from turning into the following by shadowing: - // - // lev1:DEAD, lev2:INIT, -elided-, lev4:INIT - // - // and then the following by pairwise annihilation: - // - // -annihilated-, -elided-, lev4:INIT - // - // - INITENTRY is preserved to ensure that a DEADENTRY preserved by the - // previous rule does not itself shadow-out its own INITENTRY, but - // rather eventually ages and encounters (and is annihilated-by) that - // INITENTRY in an older level. Thus preventing the accumulation of - // redundant tombstones. - // - // Note that this decision only controls whether to elide dead entries due - // to _shadows_. There is a secondary elision of dead entries at the _oldest - // level_ of the bucketlist that is accomplished through filtering at the - // LiveBucketOutputIterator level, and happens independent of ledger - // protocol version. - - if (keepShadowedLifecycleEntries && - (entry.type() == INITENTRY || entry.type() == DEADENTRY)) - { - // Never shadow-out entries in this case; no point scanning shadows. - out.put(entry); - return; - } - - BucketEntryIdCmp cmp; - for (auto& si : shadowIterators) - { - // Advance the shadowIterator while it's less than the candidate - while (si && cmp(*si, entry)) - { - ++mc.mShadowScanSteps; - ++si; - } - // We have stepped si forward to the point that either si is exhausted, - // or else *si >= entry; we now check the opposite direction to see if - // we have equality. - if (si && !cmp(entry, *si)) - { - // If so, then entry is shadowed in at least one level. - countShadowedEntryType(mc, entry); - return; - } - } - // Nothing shadowed. - out.put(entry); -} - -static void -countOldEntryType(MergeCounters& mc, BucketEntry const& e) -{ - switch (e.type()) - { - case METAENTRY: - ++mc.mOldMetaEntries; - break; - case INITENTRY: - ++mc.mOldInitEntries; - break; - case LIVEENTRY: - ++mc.mOldLiveEntries; - break; - case DEADENTRY: - ++mc.mOldDeadEntries; - break; - } -} - -static void -countNewEntryType(MergeCounters& mc, BucketEntry const& e) -{ - switch (e.type()) - { - case METAENTRY: - ++mc.mNewMetaEntries; - break; - case INITENTRY: - ++mc.mNewInitEntries; - break; - case LIVEENTRY: - ++mc.mNewLiveEntries; - break; - case DEADENTRY: - ++mc.mNewDeadEntries; - break; - } -} - -// The protocol used in a merge is the maximum of any of the protocols used in -// its input buckets, _including_ any of its shadows. We need to be strict about -// this for the same reason we change shadow algorithms along with merge -// algorithms: because once _any_ newer bucket levels have cut-over to merging -// with the new INITENTRY-supporting merge algorithm, there may be "INIT + DEAD -// => nothing" mutual annihilations occurring, which can "revive" the state of -// an entry on older levels. 
It's imperative then that older levels' -// lifecycle-event-pairing structure be preserved -- that the state-before INIT -// is in fact DEAD or nonexistent -- from the instant we begin using the new -// merge protocol: that the old lifecycle-event-eliding shadowing behaviour be -// disabled, and we switch to the more conservative shadowing behaviour that -// preserves lifecycle-events. -// -// IOW we want to prevent the following scenario -// (assuming lev1 and lev2 are on the new protocol, but 3 and 4 -// are on the old protocol): -// -// lev1:DEAD, lev2:INIT, lev3:DEAD, lev4:LIVE -// -// from turning into the following by shadowing -// (using the old shadow algorithm on a lev3 merge): -// -// lev1:DEAD, lev2:INIT, -elided-, lev4:LIVE -// -// and then the following by pairwise annihilation -// (using the new merge algorithm on new lev1 and lev2): -// -// -annihilated-, -elided-, lev4:LIVE -// -// To prevent this, we cut over _all_ levels of the bucket list to the new merge -// and shadowing protocol simultaneously, the moment the first new-protocol -// bucket enters the youngest level. At least one new bucket is in every merge's -// shadows from then on in, so they all upgrade (and preserve lifecycle events). -template -static void -calculateMergeProtocolVersion( - MergeCounters& mc, uint32_t maxProtocolVersion, - BucketInputIterator const& oi, - BucketInputIterator const& ni, - std::vector> const& shadowIterators, - uint32& protocolVersion, bool& keepShadowedLifecycleEntries) -{ - protocolVersion = std::max(oi.getMetadata().ledgerVersion, - ni.getMetadata().ledgerVersion); - - // Starting with FIRST_PROTOCOL_SHADOWS_REMOVED, - // protocol version is determined as a max of curr, snap, and any shadow of - // version < FIRST_PROTOCOL_SHADOWS_REMOVED. This means that a bucket may - // still perform an old style merge despite the presence of the new protocol - // shadows. - for (auto const& si : shadowIterators) - { - auto version = si.getMetadata().ledgerVersion; - if (protocolVersionIsBefore(version, - LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) - { - protocolVersion = std::max(version, protocolVersion); - } - } - - CLOG_TRACE(Bucket, "Bucket merge protocolVersion={}, maxProtocolVersion={}", - protocolVersion, maxProtocolVersion); - - if (protocolVersion > maxProtocolVersion) - { - throw std::runtime_error(fmt::format( - FMT_STRING( - "bucket protocol version {:d} exceeds maxProtocolVersion {:d}"), - protocolVersion, maxProtocolVersion)); - } - - // When merging buckets after protocol version 10 (i.e. version 11-or-after) - // we switch shadowing-behaviour to a more conservative mode, in order to - // support annihilation of INITENTRY and DEADENTRY pairs. See commentary - // above in `maybePut`. 
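// [Illustrative aside, not part of the patch] The shadow scan inside
// maybePut reduces to: advance each shadow iterator until it reaches a key
// >= the candidate, then test the opposite direction for equality. A
// self-contained restatement; the iterator and comparator shapes are
// assumptions (the real ones are bucket input iterators and
// BucketEntryIdCmp):
#include <vector>

template <typename Iter, typename Entry, typename Cmp>
bool
isShadowedSketch(Entry const& entry, std::vector<Iter>& shadowIterators,
                 Cmp const& cmp)
{
    for (auto& si : shadowIterators)
    {
        // Advance while *si < entry; iterators are never rewound, so each
        // shadow bucket is scanned at most once across a whole merge.
        while (si && cmp(*si, entry))
        {
            ++si;
        }
        // Here si is exhausted or *si >= entry; if entry is not < *si
        // either, the keys are equal and the entry is shadowed.
        if (si && !cmp(entry, *si))
        {
            return true;
        }
    }
    return false;
}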
- keepShadowedLifecycleEntries = true; - - // Don't count shadow metrics for Hot Archive BucketList - if constexpr (std::is_same_v) - { - return; - } - - if (protocolVersionIsBefore( - protocolVersion, - LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) - { - ++mc.mPreInitEntryProtocolMerges; - keepShadowedLifecycleEntries = false; - } - else - { - ++mc.mPostInitEntryProtocolMerges; - } - - if (protocolVersionIsBefore(protocolVersion, - LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) - { - ++mc.mPreShadowRemovalProtocolMerges; - } - else - { - if (!shadowIterators.empty()) - { - throw std::runtime_error("Shadows are not supported"); - } - ++mc.mPostShadowRemovalProtocolMerges; - } -} - -// There are 4 "easy" cases for merging: exhausted iterators on either -// side, or entries that compare non-equal. In all these cases we just -// take the lesser (or existing) entry and advance only one iterator, -// not scrutinizing the entry type further. -template -static bool -mergeCasesWithDefaultAcceptance( - BucketEntryIdCmp const& cmp, MergeCounters& mc, - BucketInputIterator& oi, BucketInputIterator& ni, - BucketOutputIterator& out, - std::vector>& shadowIterators, - uint32_t protocolVersion, bool keepShadowedLifecycleEntries) -{ - static_assert(std::is_same_v || - std::is_same_v); - - if (!ni || (oi && ni && cmp(*oi, *ni))) - { - // Either of: - // - // - Out of new entries. - // - Old entry has smaller key. - // - // In both cases: take old entry. - ++mc.mOldEntriesDefaultAccepted; - if constexpr (std::is_same_v) - { - LiveBucket::checkProtocolLegality(*oi, protocolVersion); - countOldEntryType(mc, *oi); - } - maybePut(out, *oi, shadowIterators, keepShadowedLifecycleEntries, mc); - ++oi; - return true; - } - else if (!oi || (oi && ni && cmp(*ni, *oi))) - { - // Either of: - // - // - Out of old entries. - // - New entry has smaller key. - // - // In both cases: take new entry. - ++mc.mNewEntriesDefaultAccepted; - if constexpr (std::is_same_v) - { - LiveBucket::checkProtocolLegality(*ni, protocolVersion); - countNewEntryType(mc, *ni); - } - maybePut(out, *ni, shadowIterators, keepShadowedLifecycleEntries, mc); - ++ni; - return true; - } - return false; -} - -// The remaining cases happen when keys are equal and we have to reason -// through the relationships of their bucket lifecycle states. Trickier. -static void -mergeCasesWithEqualKeys( - MergeCounters& mc, HotArchiveBucketInputIterator& oi, - HotArchiveBucketInputIterator& ni, HotArchiveBucketOutputIterator& out, - std::vector& shadowIterators, - uint32_t protocolVersion, bool keepShadowedLifecycleEntries) -{ - // If two identical keys have the same type, throw an error. Otherwise, - // take the newer key. - HotArchiveBucketEntry const& oldEntry = *oi; - HotArchiveBucketEntry const& newEntry = *ni; - if (oldEntry.type() == newEntry.type()) - { - throw std::runtime_error( - "Malformed Hot Archive bucket: two identical keys with " - "the same type."); - } - - out.put(newEntry); - ++ni; - ++oi; -} - -static void -mergeCasesWithEqualKeys(MergeCounters& mc, LiveBucketInputIterator& oi, - LiveBucketInputIterator& ni, - LiveBucketOutputIterator& out, - std::vector& shadowIterators, - uint32_t protocolVersion, - bool keepShadowedLifecycleEntries) -{ - // Old and new are for the same key and neither is INIT, take the new - // key. 
If either key is INIT, we have to make some adjustments: - // - // old | new | result - // ---------+---------+----------- - // INIT | INIT | error - // LIVE | INIT | error - // DEAD | INIT=x | LIVE=x - // INIT=x | LIVE=y | INIT=y - // INIT | DEAD | empty - // - // - // What does this mean / why is it correct? - // - // Performing a merge between two same-key entries is about maintaining two - // invariants: - // - // 1. From the perspective of a reader (eg. the database) the pre-merge - // pair of entries and post-merge single entry are indistinguishable, - // at least in terms that the reader/database cares about (liveness & - // value). This is the most important invariant since it's what makes - // the database have the right values! - // - // 2. From the perspective of chronological _sequences_ of lifecycle - // transitions, if an entry is in INIT state then its (chronological) - // predecessor state is DEAD either by the next-oldest state being an - // _explicit_ DEAD tombstone, or by the INIT being the oldest state in - // the bucket list. This invariant allows us to assume that INIT - // followed by DEAD can be safely merged to empty (eliding the record) - // without revealing and reviving the key in some older non-DEAD state - // preceding the INIT. - // - // When merging a pair of non-INIT entries and taking the 'new' value, - // invariant #1 is easy to see as preserved (an LSM tree is defined as - // returning the newest value for an entry, so preserving the newest of any - // pair is correct), and by assumption neither entry is INIT-state so - // invariant #2 isn't relevant / is unaffected. - // - // When merging a pair with an INIT, we can go case-by-case through the - // table above and see that both invariants are preserved: - // - // - INIT,INIT and LIVE,INIT violate invariant #2, so by assumption should - // never be occurring. - // - // - DEAD,INIT=x are indistinguishable from LIVE=x from the perspective of - // the reader, satisfying invariant #1. And since LIVE=x is not - // INIT-state anymore invariant #2 is trivially preserved (does not - // apply). - // - // - INIT=x,LIVE=y is indistinguishable from INIT=y from the perspective - // of the reader, satisfying invariant #1. And assuming invariant #2 - // holds for INIT=x,LIVE=y, then it holds for INIT=y. - // - // - INIT,DEAD is indistinguishable from absence-of-an-entry from the - // perspective of a reader, maintaining invariant #1, _if_ invariant #2 - // also holds (the predecessor state _before_ INIT was - // absent-or-DEAD). And invariant #2 holds trivially _locally_ for this - // merge because there is no resulting state (i.e. it's not in - // INIT-state); and it holds slightly-less-trivially non-locally, - // because even if there is a subsequent (newer) INIT entry, the - // invariant is maintained for that newer entry too (it is still - // preceded by a DEAD state). - - BucketEntry const& oldEntry = *oi; - BucketEntry const& newEntry = *ni; - LiveBucket::checkProtocolLegality(oldEntry, protocolVersion); - LiveBucket::checkProtocolLegality(newEntry, protocolVersion); - countOldEntryType(mc, oldEntry); - countNewEntryType(mc, newEntry); - - if (newEntry.type() == INITENTRY) - { - // The only legal new-is-INIT case is merging a delete+create to an - // update. 
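The table and the two invariants condense to a small decision function. A sketch over a bare lifecycle enum, matching the branch structure of the code that follows (illustrative; the real code also rewrites entry payloads and bumps merge counters):

#include <optional>
#include <stdexcept>

enum class Lifecycle { INIT, LIVE, DEAD };

// Merged state for two equal keys, per the table above.
// std::nullopt means the pair annihilates (INIT + DEAD => empty).
std::optional<Lifecycle>
mergeEqualKeys(Lifecycle oldState, Lifecycle newState)
{
    if (newState == Lifecycle::INIT)
    {
        if (oldState != Lifecycle::DEAD)
        {
            // INIT,INIT and LIVE,INIT violate invariant #2.
            throw std::runtime_error("old non-DEAD + new INIT");
        }
        return Lifecycle::LIVE; // DEAD + INIT=x   => LIVE=x
    }
    if (oldState == Lifecycle::INIT)
    {
        if (newState == Lifecycle::LIVE)
        {
            return Lifecycle::INIT; // INIT=x + LIVE=y => INIT=y
        }
        return std::nullopt;        // INIT + DEAD     => empty
    }
    return newState;                // neither INIT    => newer wins
}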
- if (oldEntry.type() != DEADENTRY) - { - throw std::runtime_error( - "Malformed bucket: old non-DEAD + new INIT."); - } - BucketEntry newLive; - newLive.type(LIVEENTRY); - newLive.liveEntry() = newEntry.liveEntry(); - ++mc.mNewInitEntriesMergedWithOldDead; - maybePut(out, newLive, shadowIterators, keepShadowedLifecycleEntries, - mc); - } - else if (oldEntry.type() == INITENTRY) - { - // If we get here, new is not INIT; may be LIVE or DEAD. - if (newEntry.type() == LIVEENTRY) - { - // Merge a create+update to a fresher create. - BucketEntry newInit; - newInit.type(INITENTRY); - newInit.liveEntry() = newEntry.liveEntry(); - ++mc.mOldInitEntriesMergedWithNewLive; - maybePut(out, newInit, shadowIterators, - keepShadowedLifecycleEntries, mc); - } - else - { - // Merge a create+delete to nothingness. - ++mc.mOldInitEntriesMergedWithNewDead; - } - } - else - { - // Neither is in INIT state, take the newer one. - ++mc.mNewEntriesMergedWithOldNeitherInit; - maybePut(out, newEntry, shadowIterators, keepShadowedLifecycleEntries, - mc); - } - ++oi; - ++ni; -} - -bool -LiveBucket::scanForEvictionLegacy( - AbstractLedgerTxn& ltx, EvictionIterator& iter, uint32_t& bytesToScan, - uint32_t& remainingEntriesToEvict, uint32_t ledgerSeq, - medida::Counter& entriesEvictedCounter, - medida::Counter& bytesScannedForEvictionCounter, - std::shared_ptr stats) const -{ - ZoneScoped; - releaseAssert(stats); - - if (isEmpty() || - protocolVersionIsBefore(getBucketVersion(), SOROBAN_PROTOCOL_VERSION)) - { - // EOF, skip to next bucket - return false; - } - - if (remainingEntriesToEvict == 0 || bytesToScan == 0) - { - // Reached end of scan region - return true; - } - - XDRInputFileStream stream{}; - stream.open(mFilename.string()); - stream.seek(iter.bucketFileOffset); - - BucketEntry be; - while (stream.readOne(be)) - { - if (be.type() == INITENTRY || be.type() == LIVEENTRY) - { - auto const& le = be.liveEntry(); - if (isTemporaryEntry(le.data)) - { - ZoneNamedN(maybeEvict, "maybe evict entry", true); - - auto ttlKey = getTTLKey(le); - uint32_t liveUntilLedger = 0; - auto shouldEvict = [&] { - auto ttlLtxe = ltx.loadWithoutRecord(ttlKey); - if (!ttlLtxe) - { - // Entry was already deleted either manually or by an - // earlier eviction scan, do nothing - return false; - } - - releaseAssert(ttlLtxe); - liveUntilLedger = - ttlLtxe.current().data.ttl().liveUntilLedgerSeq; - return !isLive(ttlLtxe.current(), ledgerSeq); - }; - - if (shouldEvict()) - { - ZoneNamedN(evict, "evict entry", true); - auto age = ledgerSeq - liveUntilLedger; - stats->recordEvictedEntry(age); - - ltx.erase(ttlKey); - ltx.erase(LedgerEntryKey(le)); - entriesEvictedCounter.inc(); - --remainingEntriesToEvict; - } - } - } - - auto newPos = stream.pos(); - auto bytesRead = newPos - iter.bucketFileOffset; - iter.bucketFileOffset = newPos; - bytesScannedForEvictionCounter.inc(bytesRead); - if (bytesRead >= bytesToScan) - { - // Reached end of scan region - bytesToScan = 0; - return true; - } - else if (remainingEntriesToEvict == 0) - { - return true; - } - - bytesToScan -= bytesRead; - } - - // Hit eof - return false; -} - -template -std::shared_ptr -Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepTombstoneEntries, bool countMergeEvents, - asio::io_context& ctx, bool doFsync) -{ - static_assert(std::is_same_v || - std::is_same_v); - - ZoneScoped; - // This is the key operation in the scheme: merging two (read-only) - 
// buckets together into a new 3rd bucket, while calculating its hash, - // in a single pass. - - releaseAssert(oldBucket); - releaseAssert(newBucket); - - MergeCounters mc; - BucketInputIterator oi(oldBucket); - BucketInputIterator ni(newBucket); - std::vector> shadowIterators(shadows.begin(), - shadows.end()); - - uint32_t protocolVersion; - bool keepShadowedLifecycleEntries; - calculateMergeProtocolVersion(mc, maxProtocolVersion, oi, ni, - shadowIterators, protocolVersion, - keepShadowedLifecycleEntries); - - auto timer = bucketManager.getMergeTimer().TimeScope(); - BucketMetadata meta; - meta.ledgerVersion = protocolVersion; - - // If any inputs use the new extension of BucketMeta, the output should as - // well - if (ni.getMetadata().ext.v() == 1) - { - releaseAssertOrThrow(protocolVersionStartsFrom( - maxProtocolVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); - meta.ext = ni.getMetadata().ext; - } - else if (oi.getMetadata().ext.v() == 1) - { - releaseAssertOrThrow(protocolVersionStartsFrom( - maxProtocolVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); - meta.ext = oi.getMetadata().ext; - } - - BucketOutputIterator out(bucketManager.getTmpDir(), - keepTombstoneEntries, meta, mc, ctx, - doFsync); - - BucketEntryIdCmp cmp; - size_t iter = 0; - - while (oi || ni) - { - // Check if the merge should be stopped every few entries - if (++iter >= 1000) - { - iter = 0; - if (bucketManager.isShutdown()) - { - // Stop merging, as BucketManager is now shutdown - // This is safe as temp file has not been adopted yet, - // so it will be removed with the tmp dir - throw std::runtime_error( - "Incomplete bucket merge due to BucketManager shutdown"); - } - } - - if (!mergeCasesWithDefaultAcceptance(cmp, mc, oi, ni, out, - shadowIterators, protocolVersion, - keepShadowedLifecycleEntries)) - { - mergeCasesWithEqualKeys(mc, oi, ni, out, shadowIterators, - protocolVersion, - keepShadowedLifecycleEntries); - } - } - if (countMergeEvents) - { - bucketManager.incrMergeCounters(mc); - } - - std::vector shadowHashes; - shadowHashes.reserve(shadows.size()); - for (auto const& s : shadows) - { - shadowHashes.push_back(s->getHash()); - } - - MergeKey mk{keepTombstoneEntries, oldBucket->getHash(), - newBucket->getHash(), shadowHashes}; - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB(), &mk); -} - -LiveBucket::LiveBucket(std::string const& filename, Hash const& hash, - std::unique_ptr&& index) - : Bucket(filename, hash, std::move(index)) -{ -} - -LiveBucket::LiveBucket() : Bucket() -{ -} - -uint32_t -LiveBucket::getBucketVersion() const -{ - LiveBucketInputIterator it(shared_from_this()); - return it.getMetadata().ledgerVersion; -} - -uint32_t -HotArchiveBucket::getBucketVersion() const -{ - HotArchiveBucketInputIterator it(shared_from_this()); - return it.getMetadata().ledgerVersion; -} - -BucketEntryCounters const& -LiveBucket::getBucketEntryCounters() const -{ - releaseAssert(mIndex); - return mIndex->getBucketEntryCounters(); -} - -HotArchiveBucket::HotArchiveBucket(std::string const& filename, - Hash const& hash, - std::unique_ptr&& index) - : Bucket(filename, hash, std::move(index)) -{ -} - -HotArchiveBucket::HotArchiveBucket() : Bucket() -{ -} - -bool -LiveBucket::isTombstoneEntry(BucketEntry const& e) -{ - return e.type() == DEADENTRY; -} - -bool -HotArchiveBucket::isTombstoneEntry(HotArchiveBucketEntry const& e) -{ - return e.type() == HOT_ARCHIVE_LIVE; -} - -BucketEntryCounters& 
-BucketEntryCounters::operator+=(BucketEntryCounters const& other) -{ - for (auto [type, count] : other.entryTypeCounts) - { - this->entryTypeCounts[type] += count; - } - for (auto [type, size] : other.entryTypeSizes) - { - this->entryTypeSizes[type] += size; - } - return *this; -} - -bool -BucketEntryCounters::operator==(BucketEntryCounters const& other) const -{ - return this->entryTypeCounts == other.entryTypeCounts && - this->entryTypeSizes == other.entryTypeSizes; -} - -bool -BucketEntryCounters::operator!=(BucketEntryCounters const& other) const -{ - return !(*this == other); -} - -template std::shared_ptr Bucket::merge( - BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); - -template std::shared_ptr Bucket::merge( - BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); -} \ No newline at end of file diff --git a/src/bucket/Bucket.h b/src/bucket/Bucket.h deleted file mode 100644 index 48de2cf497..0000000000 --- a/src/bucket/Bucket.h +++ /dev/null @@ -1,289 +0,0 @@ -#pragma once - -// Copyright 2015 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "bucket/BucketIndex.h" -#include "util/NonCopyable.h" -#include "util/ProtocolVersion.h" -#include -#include -#include -#include - -namespace asio -{ -class io_context; -} - -namespace medida -{ -class Counter; -class Meter; -} - -namespace stellar -{ - -/** - * Bucket is an immutable container for a sorted set of "Entries" (object ID, - * hash, xdr-message tuples) which is designed to be held in a shared_ptr<> - * which is referenced between threads, to minimize copying. It is therefore - * imperative that it be _really_ immutable, not just faking it. - * - * Two buckets can be merged together efficiently (in a single pass): elements - * from the newer bucket overwrite elements from the older bucket, the rest are - * merged in sorted order, and all elements are hashed while being added. - * - * Different types of BucketList vary on the type of entries they contain and by - * extension the merge logic of those entries. Additionally, some types of - * BucketList may have special operations only relevant to that specific type. - * This pure virtual base class provides the core functionality of a BucketList - * container and must be extened for each specific BucketList type. In - * particular, the fresh and merge functions must be defined for the specific - * type, while other functionality can be shared. 
- */ - -class AbstractLedgerTxn; -class Application; -class BucketManager; -struct EvictionResultEntry; -class EvictionStatistics; -struct BucketEntryCounters; -template class SearchableBucketListSnapshot; -enum class LedgerEntryTypeAndDurability : uint32_t; - -class Bucket : public NonMovableOrCopyable -{ - protected: - std::filesystem::path const mFilename; - Hash const mHash; - size_t mSize{0}; - - std::unique_ptr mIndex{}; - - // Returns index, throws if index not yet initialized - BucketIndex const& getIndex() const; - - static std::string randomFileName(std::string const& tmpDir, - std::string ext); - - public: - static constexpr ProtocolVersion - FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION = ProtocolVersion::V_23; - - // Create an empty bucket. The empty bucket has hash '000000...' and its - // filename is the empty string. - Bucket(); - - // Construct a bucket with a given filename and hash. Asserts that the file - // exists, but does not check that the hash is the bucket's hash. Caller - // needs to ensure that. - Bucket(std::string const& filename, Hash const& hash, - std::unique_ptr&& index); - - Hash const& getHash() const; - std::filesystem::path const& getFilename() const; - size_t getSize() const; - - bool isEmpty() const; - - // Delete index and close file stream - void freeIndex(); - - // Returns true if bucket is indexed, false otherwise - bool isIndexed() const; - - // Returns [lowerBound, upperBound) of file offsets for all offers in the - // bucket, or std::nullopt if no offers exist - std::optional> - getOfferRange() const; - - // Sets index, throws if index is already set - void setIndex(std::unique_ptr&& index); - - // Merge two buckets together, producing a fresh one. Entries in `oldBucket` - // are overridden in the fresh bucket by keywise-equal entries in - // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal - // entries in any of the buckets in the provided `shadows` vector. - // - // Each bucket is self-describing in terms of the ledger protocol version it - // was constructed under, and the merge algorithm adjusts to the maximum of - // the versions attached to each input or shadow bucket. The provided - // `maxProtocolVersion` bounds this (for error checking) and should usually - // be the protocol of the ledger header at which the merge is starting. An - // exception will be thrown if any provided bucket versions exceed it. - template - static std::shared_ptr - merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepTombstoneEntries, bool countMergeEvents, - asio::io_context& ctx, bool doFsync); - - static std::string randomBucketName(std::string const& tmpDir); - static std::string randomBucketIndexName(std::string const& tmpDir); - -#ifdef BUILD_TESTS - BucketIndex const& - getIndexForTesting() const - { - return getIndex(); - } - -#endif // BUILD_TESTS - - virtual uint32_t getBucketVersion() const = 0; - - template friend class BucketSnapshotBase; -}; - -/* - * Live Buckets are used by the LiveBucketList to store the current canonical - * state of the ledger. They contain entries of type BucketEntry. 
- */ -class LiveBucket : public Bucket, - public std::enable_shared_from_this -{ - public: - // Entry type that this bucket stores - using EntryT = BucketEntry; - - // Entry type returned by loadKeys - using LoadT = LedgerEntry; - - LiveBucket(); - virtual ~LiveBucket() - { - } - LiveBucket(std::string const& filename, Hash const& hash, - std::unique_ptr&& index); - - // Returns true if a BucketEntry that is key-wise identical to the given - // BucketEntry exists in the bucket. For testing. - bool containsBucketIdentity(BucketEntry const& id) const; - - // At version 11, we added support for INITENTRY and METAENTRY. Before this - // we were only supporting LIVEENTRY and DEADENTRY. - static constexpr ProtocolVersion - FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY = - ProtocolVersion::V_11; - static constexpr ProtocolVersion FIRST_PROTOCOL_SHADOWS_REMOVED = - ProtocolVersion::V_12; - - static void checkProtocolLegality(BucketEntry const& entry, - uint32_t protocolVersion); - - static std::vector - convertToBucketEntry(bool useInit, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries); - -#ifdef BUILD_TESTS - // "Applies" the bucket to the database. For each entry in the bucket, - // if the entry is init or live, creates or updates the corresponding - // entry in the database (respectively; if the entry is dead (a - // tombstone), deletes the corresponding entry in the database. - void apply(Application& app) const; -#endif - - // Returns false if eof reached, true otherwise. Modifies iter as the bucket - // is scanned. Also modifies bytesToScan and maxEntriesToEvict such that - // after this function returns: - // bytesToScan -= amount_bytes_scanned - // maxEntriesToEvict -= entries_evicted - bool scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter, - uint32_t& bytesToScan, - uint32_t& remainingEntriesToEvict, - uint32_t ledgerSeq, - medida::Counter& entriesEvictedCounter, - medida::Counter& bytesScannedForEvictionCounter, - std::shared_ptr stats) const; - - bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, - uint32_t ledgerSeq, - std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const; - - // Create a fresh bucket from given vectors of init (created) and live - // (updated) LedgerEntries, and dead LedgerEntryKeys. The bucket will - // be sorted, hashed, and adopted in the provided BucketManager. - static std::shared_ptr - fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries, bool countMergeEvents, - asio::io_context& ctx, bool doFsync); - - // Returns true if the given BucketEntry should be dropped in the bottom - // level bucket (i.e. DEADENTRY) - static bool isTombstoneEntry(BucketEntry const& e); - - uint32_t getBucketVersion() const override; - - BucketEntryCounters const& getBucketEntryCounters() const; - - friend class LiveBucketSnapshot; -}; - -/* - * Hot Archive Buckets are used by the HotBucketList to store recently evicted - * entries. They contain entries of type HotArchiveBucketEntry. 
- */ -class HotArchiveBucket : public Bucket, - public std::enable_shared_from_this -{ - static std::vector - convertToBucketEntry(std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries); - - public: - // Entry type that this bucket stores - using EntryT = HotArchiveBucketEntry; - - // Entry type returned by loadKeys - using LoadT = HotArchiveBucketEntry; - - HotArchiveBucket(); - virtual ~HotArchiveBucket() - { - } - HotArchiveBucket(std::string const& filename, Hash const& hash, - std::unique_ptr&& index); - uint32_t getBucketVersion() const override; - - static std::shared_ptr - fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries, bool countMergeEvents, - asio::io_context& ctx, bool doFsync); - - // Returns true if the given BucketEntry should be dropped in the bottom - // level bucket (i.e. HOT_ARCHIVE_LIVE) - static bool isTombstoneEntry(HotArchiveBucketEntry const& e); - - friend class HotArchiveBucketSnapshot; -}; - -struct BucketEntryCounters -{ - std::map entryTypeCounts; - std::map entryTypeSizes; - - BucketEntryCounters& operator+=(BucketEntryCounters const& other); - bool operator==(BucketEntryCounters const& other) const; - bool operator!=(BucketEntryCounters const& other) const; - - template - void - serialize(Archive& ar) - { - ar(entryTypeCounts, entryTypeSizes); - } -}; -} diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp index 8fab1ed483..f9001d3113 100644 --- a/src/bucket/BucketApplicator.cpp +++ b/src/bucket/BucketApplicator.cpp @@ -4,8 +4,8 @@ #include "util/asio.h" // IWYU pragma: keep #include "bucket/BucketApplicator.h" -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnEntry.h" #include "main/Application.h" diff --git a/src/bucket/BucketApplicator.h b/src/bucket/BucketApplicator.h index 5218ac162f..7495d35a02 100644 --- a/src/bucket/BucketApplicator.h +++ b/src/bucket/BucketApplicator.h @@ -4,8 +4,8 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" +#include "bucket/LiveBucket.h" #include "util/Timer.h" #include diff --git a/src/bucket/BucketBase.cpp b/src/bucket/BucketBase.cpp new file mode 100644 index 0000000000..becc41d501 --- /dev/null +++ b/src/bucket/BucketBase.cpp @@ -0,0 +1,419 @@ +// Copyright 2015 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +// ASIO is somewhat particular about when it gets included -- it wants to be the +// first to include -- so we try to include it before everything +// else. 
+#include "util/asio.h" // IWYU pragma: keep +#include "bucket/BucketBase.h" +#include "bucket/BucketIndex.h" +#include "bucket/BucketInputIterator.h" +#include "bucket/BucketManager.h" +#include "bucket/BucketOutputIterator.h" +#include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LedgerCmp.h" +#include "bucket/LiveBucket.h" +#include "bucket/MergeKey.h" +#include "crypto/Hex.h" +#include "crypto/Random.h" +#include "main/Application.h" +#include "medida/timer.h" +#include "util/Fs.h" +#include "util/GlobalChecks.h" +#include "util/Logging.h" +#include "util/ProtocolVersion.h" +#include "util/types.h" +#include + +namespace stellar +{ + +BucketIndex const& +BucketBase::getIndex() const +{ + ZoneScoped; + releaseAssertOrThrow(!mFilename.empty()); + releaseAssertOrThrow(mIndex); + return *mIndex; +} + +bool +BucketBase::isIndexed() const +{ + return static_cast(mIndex); +} + +std::optional> +BucketBase::getOfferRange() const +{ + return getIndex().getOfferRange(); +} + +void +BucketBase::setIndex(std::unique_ptr&& index) +{ + releaseAssertOrThrow(!mIndex); + mIndex = std::move(index); +} + +BucketBase::BucketBase(std::string const& filename, Hash const& hash, + std::unique_ptr&& index) + : mFilename(filename), mHash(hash), mIndex(std::move(index)) +{ + releaseAssert(filename.empty() || fs::exists(filename)); + if (!filename.empty()) + { + CLOG_TRACE(Bucket, "BucketBase::Bucket() created, file exists : {}", + mFilename); + mSize = fs::size(filename); + } +} + +BucketBase::BucketBase() +{ +} + +Hash const& +BucketBase::getHash() const +{ + return mHash; +} + +std::filesystem::path const& +BucketBase::getFilename() const +{ + return mFilename; +} + +size_t +BucketBase::getSize() const +{ + return mSize; +} + +bool +BucketBase::isEmpty() const +{ + if (mFilename.empty() || isZero(mHash)) + { + releaseAssertOrThrow(mFilename.empty() && isZero(mHash)); + return true; + } + + return false; +} + +void +BucketBase::freeIndex() +{ + mIndex.reset(nullptr); +} + +std::string +BucketBase::randomFileName(std::string const& tmpDir, std::string ext) +{ + ZoneScoped; + for (;;) + { + std::string name = + tmpDir + "/tmp-bucket-" + binToHex(randomBytes(8)) + ext; + std::ifstream ifile(name); + if (!ifile) + { + return name; + } + } +} + +std::string +BucketBase::randomBucketName(std::string const& tmpDir) +{ + return randomFileName(tmpDir, ".xdr"); +} + +std::string +BucketBase::randomBucketIndexName(std::string const& tmpDir) +{ + return randomFileName(tmpDir, ".index"); +} + +// The protocol used in a merge is the maximum of any of the protocols used in +// its input buckets, _including_ any of its shadows. We need to be strict about +// this for the same reason we change shadow algorithms along with merge +// algorithms: because once _any_ newer bucket levels have cut-over to merging +// with the new INITENTRY-supporting merge algorithm, there may be "INIT + DEAD +// => nothing" mutual annihilations occurring, which can "revive" the state of +// an entry on older levels. It's imperative then that older levels' +// lifecycle-event-pairing structure be preserved -- that the state-before INIT +// is in fact DEAD or nonexistent -- from the instant we begin using the new +// merge protocol: that the old lifecycle-event-eliding shadowing behaviour be +// disabled, and we switch to the more conservative shadowing behaviour that +// preserves lifecycle-events. 
+// +// IOW we want to prevent the following scenario +// (assuming lev1 and lev2 are on the new protocol, but 3 and 4 +// are on the old protocol): +// +// lev1:DEAD, lev2:INIT, lev3:DEAD, lev4:LIVE +// +// from turning into the following by shadowing +// (using the old shadow algorithm on a lev3 merge): +// +// lev1:DEAD, lev2:INIT, -elided-, lev4:LIVE +// +// and then the following by pairwise annihilation +// (using the new merge algorithm on new lev1 and lev2): +// +// -annihilated-, -elided-, lev4:LIVE +// +// To prevent this, we cut over _all_ levels of the bucket list to the new merge +// and shadowing protocol simultaneously, the moment the first new-protocol +// bucket enters the youngest level. At least one new bucket is in every merge's +// shadows from then on in, so they all upgrade (and preserve lifecycle events). +template +static void +calculateMergeProtocolVersion( + MergeCounters& mc, uint32_t maxProtocolVersion, + BucketInputIterator const& oi, + BucketInputIterator const& ni, + std::vector> const& shadowIterators, + uint32& protocolVersion, bool& keepShadowedLifecycleEntries) +{ + protocolVersion = std::max(oi.getMetadata().ledgerVersion, + ni.getMetadata().ledgerVersion); + + // Starting with FIRST_PROTOCOL_SHADOWS_REMOVED, + // protocol version is determined as a max of curr, snap, and any shadow of + // version < FIRST_PROTOCOL_SHADOWS_REMOVED. This means that a bucket may + // still perform an old style merge despite the presence of the new protocol + // shadows. + for (auto const& si : shadowIterators) + { + auto version = si.getMetadata().ledgerVersion; + if (protocolVersionIsBefore(version, + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + { + protocolVersion = std::max(version, protocolVersion); + } + } + + CLOG_TRACE(Bucket, "Bucket merge protocolVersion={}, maxProtocolVersion={}", + protocolVersion, maxProtocolVersion); + + if (protocolVersion > maxProtocolVersion) + { + throw std::runtime_error(fmt::format( + FMT_STRING( + "bucket protocol version {:d} exceeds maxProtocolVersion {:d}"), + protocolVersion, maxProtocolVersion)); + } + + // When merging buckets after protocol version 10 (i.e. version 11-or-after) + // we switch shadowing-behaviour to a more conservative mode, in order to + // support annihilation of INITENTRY and DEADENTRY pairs. See commentary + // above in `maybePut`. + keepShadowedLifecycleEntries = true; + + // Don't count shadow metrics for Hot Archive BucketList + if constexpr (std::is_same_v) + { + return; + } + + if (protocolVersionIsBefore( + protocolVersion, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + { + ++mc.mPreInitEntryProtocolMerges; + keepShadowedLifecycleEntries = false; + } + else + { + ++mc.mPostInitEntryProtocolMerges; + } + + if (protocolVersionIsBefore(protocolVersion, + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + { + ++mc.mPreShadowRemovalProtocolMerges; + } + else + { + if (!shadowIterators.empty()) + { + throw std::runtime_error("Shadows are not supported"); + } + ++mc.mPostShadowRemovalProtocolMerges; + } +} + +// There are 4 "easy" cases for merging: exhausted iterators on either +// side, or entries that compare non-equal. In all these cases we just +// take the lesser (or existing) entry and advance only one iterator, +// not scrutinizing the entry type further. 
+template +static bool +mergeCasesWithDefaultAcceptance( + BucketEntryIdCmp const& cmp, MergeCounters& mc, + BucketInputIterator& oi, BucketInputIterator& ni, + BucketOutputIterator& out, + std::vector>& shadowIterators, + uint32_t protocolVersion, bool keepShadowedLifecycleEntries) +{ + BUCKET_TYPE_ASSERT(BucketT); + + if (!ni || (oi && ni && cmp(*oi, *ni))) + { + // Either of: + // + // - Out of new entries. + // - Old entry has smaller key. + // + // In both cases: take old entry. + ++mc.mOldEntriesDefaultAccepted; + BucketT::checkProtocolLegality(*oi, protocolVersion); + BucketT::countOldEntryType(mc, *oi); + BucketT::maybePut(out, *oi, shadowIterators, + keepShadowedLifecycleEntries, mc); + ++oi; + return true; + } + else if (!oi || (oi && ni && cmp(*ni, *oi))) + { + // Either of: + // + // - Out of old entries. + // - New entry has smaller key. + // + // In both cases: take new entry. + ++mc.mNewEntriesDefaultAccepted; + BucketT::checkProtocolLegality(*ni, protocolVersion); + BucketT::countNewEntryType(mc, *ni); + BucketT::maybePut(out, *ni, shadowIterators, + keepShadowedLifecycleEntries, mc); + ++ni; + return true; + } + return false; +} + +template +std::shared_ptr +BucketBase::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, + std::shared_ptr const& oldBucket, + std::shared_ptr const& newBucket, + std::vector> const& shadows, + bool keepTombstoneEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync) +{ + BUCKET_TYPE_ASSERT(BucketT); + + ZoneScoped; + // This is the key operation in the scheme: merging two (read-only) + // buckets together into a new 3rd bucket, while calculating its hash, + // in a single pass. + + releaseAssert(oldBucket); + releaseAssert(newBucket); + + MergeCounters mc; + BucketInputIterator oi(oldBucket); + BucketInputIterator ni(newBucket); + std::vector> shadowIterators(shadows.begin(), + shadows.end()); + + uint32_t protocolVersion; + bool keepShadowedLifecycleEntries; + calculateMergeProtocolVersion(mc, maxProtocolVersion, oi, ni, + shadowIterators, protocolVersion, + keepShadowedLifecycleEntries); + + auto timer = bucketManager.getMergeTimer().TimeScope(); + BucketMetadata meta; + meta.ledgerVersion = protocolVersion; + + // If any inputs use the new extension of BucketMeta, the output should as + // well + if (ni.getMetadata().ext.v() == 1) + { + releaseAssertOrThrow(protocolVersionStartsFrom( + maxProtocolVersion, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + meta.ext = ni.getMetadata().ext; + } + else if (oi.getMetadata().ext.v() == 1) + { + releaseAssertOrThrow(protocolVersionStartsFrom( + maxProtocolVersion, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + meta.ext = oi.getMetadata().ext; + } + + BucketOutputIterator out(bucketManager.getTmpDir(), + keepTombstoneEntries, meta, mc, ctx, + doFsync); + + BucketEntryIdCmp cmp; + size_t iter = 0; + + while (oi || ni) + { + // Check if the merge should be stopped every few entries + if (++iter >= 1000) + { + iter = 0; + if (bucketManager.isShutdown()) + { + // Stop merging, as BucketManager is now shutdown + // This is safe as temp file has not been adopted yet, + // so it will be removed with the tmp dir + throw std::runtime_error( + "Incomplete bucket merge due to BucketManager shutdown"); + } + } + + if (!mergeCasesWithDefaultAcceptance(cmp, mc, oi, ni, out, + shadowIterators, protocolVersion, + keepShadowedLifecycleEntries)) + { + BucketT::mergeCasesWithEqualKeys(mc, oi, ni, out, shadowIterators, + protocolVersion, + 
keepShadowedLifecycleEntries);
+        }
+    }
+    if (countMergeEvents)
+    {
+        bucketManager.incrMergeCounters(mc);
+    }
+
+    std::vector shadowHashes;
+    shadowHashes.reserve(shadows.size());
+    for (auto const& s : shadows)
+    {
+        shadowHashes.push_back(s->getHash());
+    }
+
+    MergeKey mk{keepTombstoneEntries, oldBucket->getHash(),
+                newBucket->getHash(), shadowHashes};
+    return out.getBucket(bucketManager,
+                         bucketManager.getConfig().isUsingBucketListDB(), &mk);
+}
+
+template std::shared_ptr BucketBase::merge(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr const& oldBucket,
+    std::shared_ptr const& newBucket,
+    std::vector> const& shadows,
+    bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
+
+template std::shared_ptr BucketBase::merge(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr const& oldBucket,
+    std::shared_ptr const& newBucket,
+    std::vector> const& shadows,
+    bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
+}
\ No newline at end of file
diff --git a/src/bucket/BucketBase.h b/src/bucket/BucketBase.h
new file mode 100644
index 0000000000..bd472f6fef
--- /dev/null
+++ b/src/bucket/BucketBase.h
@@ -0,0 +1,135 @@
+#pragma once
+
+// Copyright 2015 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "bucket/BucketIndex.h"
+#include "util/NonCopyable.h"
+#include "util/ProtocolVersion.h"
+#include "xdr/Stellar-types.h"
+#include 
+#include 
+#include 
+
+namespace asio
+{
+class io_context;
+}
+
+namespace stellar
+{
+
+/**
+ * Bucket is an immutable container for a sorted set of "Entries" (object ID,
+ * hash, xdr-message tuples) which is designed to be held in a shared_ptr<>
+ * which is referenced between threads, to minimize copying. It is therefore
+ * imperative that it be _really_ immutable, not just faking it.
+ *
+ * Two buckets can be merged together efficiently (in a single pass): elements
+ * from the newer bucket overwrite elements from the older bucket, the rest are
+ * merged in sorted order, and all elements are hashed while being added.
+ *
+ * Different types of BucketList vary on the type of entries they contain and by
+ * extension the merge logic of those entries. Additionally, some types of
+ * BucketList may have special operations only relevant to that specific type.
+ * This pure virtual base class provides the core functionality of a BucketList
+ * container and must be extended for each specific BucketList type. In
+ * particular, the fresh and merge functions must be defined for the specific
+ * type, while other functionality can be shared.
+ */
+
+class BucketManager;
+
+enum class Loop
+{
+    COMPLETE,
+    INCOMPLETE
+};
+
+class BucketBase : public NonMovableOrCopyable
+{
+  protected:
+    std::filesystem::path const mFilename;
+    Hash const mHash;
+    size_t mSize{0};
+
+    std::unique_ptr mIndex{};
+
+    // Returns index, throws if index not yet initialized
+    BucketIndex const& getIndex() const;
+
+    static std::string randomFileName(std::string const& tmpDir,
+                                      std::string ext);
+
+  public:
+    static constexpr ProtocolVersion
+        FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION = ProtocolVersion::V_23;
+
+    // Create an empty bucket. The empty bucket has hash '000000...' and its
+    // filename is the empty string.
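The empty-bucket convention documented here, for the default constructor declared just below, pairs with the isEmpty() implementation in BucketBase.cpp above: an empty filename and a zero hash must occur together. A tiny sketch of that invariant (illustrative types, not part of this patch):

#include <cassert>
#include <string>

// An empty bucket must have *both* an empty filename and a zero hash;
// holding just one of the two would be a corrupt state.
bool
isEmptyBucket(std::string const& filename, bool hashIsZero)
{
    if (filename.empty() || hashIsZero)
    {
        assert(filename.empty() && hashIsZero);
        return true;
    }
    return false;
}

So isEmptyBucket("", true) returns true, while isEmptyBucket("bucket.xdr", true) trips the assert, flagging the corrupt half-empty state the real code release-asserts against.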
+ BucketBase(); + + // Construct a bucket with a given filename and hash. Asserts that the file + // exists, but does not check that the hash is the bucket's hash. Caller + // needs to ensure that. + BucketBase(std::string const& filename, Hash const& hash, + std::unique_ptr&& index); + + Hash const& getHash() const; + std::filesystem::path const& getFilename() const; + size_t getSize() const; + + bool isEmpty() const; + + // Delete index and close file stream + void freeIndex(); + + // Returns true if bucket is indexed, false otherwise + bool isIndexed() const; + + // Returns [lowerBound, upperBound) of file offsets for all offers in the + // bucket, or std::nullopt if no offers exist + std::optional> + getOfferRange() const; + + // Sets index, throws if index is already set + void setIndex(std::unique_ptr&& index); + + // Merge two buckets together, producing a fresh one. Entries in `oldBucket` + // are overridden in the fresh bucket by keywise-equal entries in + // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal + // entries in any of the buckets in the provided `shadows` vector. + // + // Each bucket is self-describing in terms of the ledger protocol version it + // was constructed under, and the merge algorithm adjusts to the maximum of + // the versions attached to each input or shadow bucket. The provided + // `maxProtocolVersion` bounds this (for error checking) and should usually + // be the protocol of the ledger header at which the merge is starting. An + // exception will be thrown if any provided bucket versions exceed it. + template + static std::shared_ptr + merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, + std::shared_ptr const& oldBucket, + std::shared_ptr const& newBucket, + std::vector> const& shadows, + bool keepTombstoneEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync); + + static std::string randomBucketName(std::string const& tmpDir); + static std::string randomBucketIndexName(std::string const& tmpDir); + +#ifdef BUILD_TESTS + BucketIndex const& + getIndexForTesting() const + { + return getIndex(); + } + +#endif // BUILD_TESTS + + virtual uint32_t getBucketVersion() const = 0; + + template friend class BucketSnapshotBase; +}; +} diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp index 2c032f00a2..2255785f3d 100644 --- a/src/bucket/BucketIndexImpl.cpp +++ b/src/bucket/BucketIndexImpl.cpp @@ -3,9 +3,11 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketIndexImpl.h" -#include "bucket/Bucket.h" +#include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" #include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "crypto/Hex.h" #include "crypto/ShortHash.h" #include "ledger/LedgerTypeUtils.h" @@ -142,6 +144,9 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, } else { + static_assert( + std::is_same_v, + "unexpected bucket type"); return be.type() == HOT_ARCHIVE_METAENTRY; } }; @@ -151,7 +156,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, ++count; LedgerKey key = getBucketLedgerKey(be); - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { // We need an asset to poolID mapping for // loadPoolshareTrustlineByAccountAndAsset queries. 
For this @@ -182,7 +187,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, } } - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { auto keyBuf = xdr::xdr_to_opaque(key); SipHash24 hasher(seed.data()); @@ -205,6 +210,8 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, } else { + static_assert(std::is_same_v, + "unexpected index type"); mData.keysToOffset.emplace_back(key, pos); } @@ -217,7 +224,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, pos = in.pos(); } - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { // Binary Fuse filter requires at least 2 elements if (keyHashes.size() > 1) @@ -259,7 +266,7 @@ BucketIndexImpl::saveToDisk( "took", std::chrono::milliseconds(100)); std::filesystem::path tmpFilename = - Bucket::randomBucketIndexName(bm.getTmpDir()); + BucketBase::randomBucketIndexName(bm.getTmpDir()); CLOG_DEBUG(Bucket, "Saving bucket index for {}: {}", hexAbbrev(hash), tmpFilename); @@ -306,12 +313,14 @@ template static bool keyNotInIndexEntry(LedgerKey const& key, IndexEntryT const& indexEntry) { - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { return key < indexEntry.lowerBound || indexEntry.upperBound < key; } else { + static_assert(std::is_same_v, + "unexpected index entry type"); return !(key == indexEntry); } } @@ -325,13 +334,16 @@ template static bool lower_bound_pred(IndexEntryT const& indexEntry, LedgerKey const& key) { - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { return indexEntry.first.upperBound < key; } else { + static_assert(std::is_same_v, + "unexpected index entry type"); return indexEntry.first < key; } } @@ -345,13 +357,16 @@ template static bool upper_bound_pred(LedgerKey const& key, IndexEntryT const& indexEntry) { - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { return key < indexEntry.first.lowerBound; } else { + static_assert(std::is_same_v, + "unexpected index entry type"); return key < indexEntry.first; } } @@ -573,7 +588,7 @@ BucketIndexImpl::operator==(BucketIndex const& inRaw) const return false; } - if constexpr (std::is_same::value) + if constexpr (std::is_same_v) { releaseAssert(mData.filter); releaseAssert(in.mData.filter); @@ -584,6 +599,8 @@ BucketIndexImpl::operator==(BucketIndex const& inRaw) const } else { + static_assert(std::is_same_v, + "unexpected index type"); releaseAssert(!mData.filter); releaseAssert(!in.mData.filter); } @@ -643,8 +660,9 @@ BucketIndexImpl::getBucketEntryCounters() const template std::unique_ptr BucketIndex::createIndex(BucketManager& bm, std::filesystem::path const& filename, - Hash const& hash); + Hash const& hash, asio::io_context& ctx); template std::unique_ptr BucketIndex::createIndex( - BucketManager& bm, std::filesystem::path const& filename, Hash const& hash); + BucketManager& bm, std::filesystem::path const& filename, Hash const& hash, + asio::io_context& ctx); } diff --git a/src/bucket/BucketIndexImpl.h b/src/bucket/BucketIndexImpl.h index eec9fb3837..52630a70e6 100644 --- a/src/bucket/BucketIndexImpl.h +++ b/src/bucket/BucketIndexImpl.h @@ -4,8 +4,8 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketIndex.h" +#include "bucket/LiveBucket.h" #include "medida/meter.h" #include "util/BinaryFuseFilter.h" #include "xdr/Stellar-types.h" diff --git a/src/bucket/BucketInputIterator.cpp b/src/bucket/BucketInputIterator.cpp index 8716bdee52..9f11dcef8d 100644 --- a/src/bucket/BucketInputIterator.cpp +++ b/src/bucket/BucketInputIterator.cpp @@ -3,7 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketInputIterator.h" -#include "bucket/Bucket.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "xdr/Stellar-ledger.h" #include #include @@ -29,6 +30,8 @@ BucketInputIterator::loadEntry() } else { + static_assert(std::is_same_v, + "unexpected bucket type"); isMeta = mEntry.type() == HOT_ARCHIVE_METAENTRY; } diff --git a/src/bucket/BucketInputIterator.h b/src/bucket/BucketInputIterator.h index 4f13d76f36..393671d428 100644 --- a/src/bucket/BucketInputIterator.h +++ b/src/bucket/BucketInputIterator.h @@ -6,10 +6,8 @@ #include "bucket/BucketUtils.h" #include "util/XDRStream.h" -#include "xdr/Stellar-ledger.h" #include -#include namespace stellar { @@ -61,7 +59,4 @@ template class BucketInputIterator size_t size() const; void seek(std::streamoff offset); }; - -typedef BucketInputIterator LiveBucketInputIterator; -typedef BucketInputIterator HotArchiveBucketInputIterator; } diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketListBase.cpp similarity index 76% rename from src/bucket/BucketList.cpp rename to src/bucket/BucketListBase.cpp index 357912f28f..86daf45421 100644 --- a/src/bucket/BucketList.cpp +++ b/src/bucket/BucketListBase.cpp @@ -1,15 +1,13 @@ -// Copyright 2014 Stellar Development Foundation and contributors. Licensed +// Copyright 2024 Stellar Development Foundation and contributors. Licensed // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "BucketList.h" -#include "bucket/Bucket.h" -#include "bucket/BucketIndexImpl.h" +#include "bucket/BucketListBase.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" -#include "bucket/BucketSnapshot.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "crypto/SHA.h" -#include "ledger/LedgerManager.h" #include "ledger/LedgerTxn.h" #include "main/Application.h" #include "util/GlobalChecks.h" @@ -17,20 +15,12 @@ #include "util/ProtocolVersion.h" #include "util/types.h" -#include "medida/counter.h" - #include #include namespace stellar { -template <> BucketListDepth BucketListBase::kNumLevels = 11; - -// TODO: This is an arbitrary number. Do some analysis and pick a better value -// or make this a configurable network config. 
-template <> BucketListDepth BucketListBase::kNumLevels = 9; - template BucketLevel::BucketLevel(uint32_t i) : mLevel(i) @@ -691,208 +681,6 @@ BucketListBase::addBatchInternal(Application& app, uint32_t currLedger, } } -void -HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& archiveEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries) -{ - ZoneScoped; - releaseAssertOrThrow(protocolVersionStartsFrom( - currLedgerProtocol, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); - addBatchInternal(app, currLedger, currLedgerProtocol, archiveEntries, - restoredEntries, deletedEntries); -} - -void -LiveBucketList::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) -{ - ZoneScoped; - addBatchInternal(app, currLedger, currLedgerProtocol, initEntries, - liveEntries, deadEntries); -} - -BucketEntryCounters -LiveBucketList::sumBucketEntryCounters() const -{ - BucketEntryCounters counters; - for (auto const& lev : mLevels) - { - for (auto const& b : {lev.getCurr(), lev.getSnap()}) - { - if (b->isIndexed()) - { - auto c = b->getBucketEntryCounters(); - counters += c; - } - } - } - return counters; -} - -void -LiveBucketList::updateStartingEvictionIterator(EvictionIterator& iter, - uint32_t firstScanLevel, - uint32_t ledgerSeq) -{ - // Check if an upgrade has changed the starting scan level to below the - // current iterator level - if (iter.bucketListLevel < firstScanLevel) - { - // Reset iterator to the new minimum level - iter.bucketFileOffset = 0; - iter.isCurrBucket = true; - iter.bucketListLevel = firstScanLevel; - } - - // Whenever a Bucket changes (spills or receives an incoming spill), the - // iterator offset in that bucket is invalidated. After scanning, we - // must write the iterator to the BucketList then close the ledger. - // Bucket spills occur on ledger close after we've already written the - // iterator, so the iterator may be invalidated. Because of this, we - // must check if the Bucket the iterator currently points to changed on - // the previous ledger, indicating the current iterator is invalid. - if (iter.isCurrBucket) - { - // Check if bucket received an incoming spill - releaseAssert(iter.bucketListLevel != 0); - if (BucketListBase::levelShouldSpill(ledgerSeq - 1, - iter.bucketListLevel - 1)) - { - // If Bucket changed, reset to start of bucket - iter.bucketFileOffset = 0; - } - } - else - { - if (BucketListBase::levelShouldSpill(ledgerSeq - 1, - iter.bucketListLevel)) - { - // If Bucket changed, reset to start of bucket - iter.bucketFileOffset = 0; - } - } -} - -bool -LiveBucketList::updateEvictionIterAndRecordStats( - EvictionIterator& iter, EvictionIterator startIter, - uint32_t configFirstScanLevel, uint32_t ledgerSeq, - std::shared_ptr stats, EvictionCounters& counters) -{ - releaseAssert(stats); - - // If we reached eof in curr bucket, start scanning snap. - // Last level has no snap so cycle back to the initial level. 
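The curr-to-snap, level-to-level walk implemented just below is easier to follow in isolation. A sketch of one advance step (field and constant names mirror, but are not, the real types; the real code also resets the intra-bucket file offset at each transition):

#include <cstdint>

struct EvictionPos
{
    uint32_t level;
    bool isCurr;
};

// Advance curr -> snap within a level, then to the next level's curr;
// the last level has no snap, so its curr advances straight to the
// next-level check, which wraps back to the first scan level.
EvictionPos
advance(EvictionPos it, uint32_t firstScanLevel, uint32_t kNumLevels)
{
    if (it.isCurr && it.level != kNumLevels - 1)
    {
        it.isCurr = false; // curr exhausted: scan this level's snap
    }
    else
    {
        ++it.level; // snap (or last-level curr) exhausted: next level
        it.isCurr = true;
        if (it.level == kNumLevels)
        {
            it.level = firstScanLevel; // wrap: new eviction cycle
        }
    }
    return it;
}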
- if (iter.isCurrBucket && iter.bucketListLevel != kNumLevels - 1) - { - iter.isCurrBucket = false; - iter.bucketFileOffset = 0; - } - else - { - // If we reached eof in snap, move to next level - ++iter.bucketListLevel; - iter.isCurrBucket = true; - iter.bucketFileOffset = 0; - - // If we have scanned the last level, cycle back to initial - // level - if (iter.bucketListLevel == kNumLevels) - { - iter.bucketListLevel = configFirstScanLevel; - - // Record then reset metrics at beginning of new eviction cycle - stats->submitMetricsAndRestartCycle(ledgerSeq, counters); - } - } - - // If we are back to the bucket we started at, break - if (iter.bucketListLevel == startIter.bucketListLevel && - iter.isCurrBucket == startIter.isCurrBucket) - { - return true; - } - - return false; -} - -void -LiveBucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, - uint32_t scanSize, - std::shared_ptr b, - EvictionCounters& counters) -{ - // Check to see if we can finish scanning the new bucket before it - // receives an update - uint64_t period = bucketUpdatePeriod(evictionIter.bucketListLevel, - evictionIter.isCurrBucket); - if (period * scanSize < b->getSize()) - { - CLOG_WARNING(Bucket, - "Bucket too large for current eviction scan size."); - counters.incompleteBucketScan.inc(); - } -} - -// To avoid noisy data, only count metrics that encompass a complete -// eviction cycle. If a node joins the network mid cycle, metrics will be -// nullopt and be initialized at the start of the next cycle. -void -LiveBucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, - uint32_t ledgerSeq, - EvictionCounters& counters, - std::shared_ptr stats) -{ - releaseAssert(stats); - - auto getBucketFromIter = [&levels = mLevels](EvictionIterator const& iter) { - auto& level = levels.at(iter.bucketListLevel); - return iter.isCurrBucket ? 
level.getCurr() : level.getSnap(); - }; - - auto const& networkConfig = - app.getLedgerManager().getSorobanNetworkConfig(); - auto const firstScanLevel = - networkConfig.stateArchivalSettings().startingEvictionScanLevel; - auto evictionIter = networkConfig.evictionIterator(); - auto scanSize = networkConfig.stateArchivalSettings().evictionScanSize; - auto maxEntriesToEvict = - networkConfig.stateArchivalSettings().maxEntriesToArchive; - - updateStartingEvictionIterator(evictionIter, firstScanLevel, ledgerSeq); - - auto startIter = evictionIter; - auto b = getBucketFromIter(evictionIter); - - while (!b->scanForEvictionLegacy( - ltx, evictionIter, scanSize, maxEntriesToEvict, ledgerSeq, - counters.entriesEvicted, counters.bytesScannedForEviction, stats)) - { - - if (updateEvictionIterAndRecordStats(evictionIter, startIter, - firstScanLevel, ledgerSeq, stats, - counters)) - { - break; - } - - b = getBucketFromIter(evictionIter); - checkIfEvictionScanIsStuck( - evictionIter, - networkConfig.stateArchivalSettings().evictionScanSize, b, - counters); - } - - networkConfig.updateEvictionIterator(ltx, evictionIter); -} - template void BucketListBase::restartMerges(Application& app, @@ -979,4 +767,18 @@ template class BucketListBase; template class BucketListBase; template class BucketLevel; template class BucketLevel; + +template void BucketListBase::addBatchInternal< + std::vector, std::vector, std::vector>( + Application& app, uint32_t currLedger, uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); + +template void BucketListBase::addBatchInternal< + std::vector, std::vector, std::vector>( + Application& app, uint32_t currLedger, uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries); } diff --git a/src/bucket/BucketList.h b/src/bucket/BucketListBase.h similarity index 88% rename from src/bucket/BucketList.h rename to src/bucket/BucketListBase.h index cb7d90980b..ac4bfc0ccb 100644 --- a/src/bucket/BucketList.h +++ b/src/bucket/BucketListBase.h @@ -4,7 +4,6 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketUtils.h" #include "bucket/FutureBucket.h" @@ -439,7 +438,11 @@ template class BucketListBase // Number of bucket levels in the bucketlist. Every bucketlist in the system // will have this many levels and it effectively gets wired-in to the // protocol. Careful about changing it. - static BucketListDepth kNumLevels; + // LiveBucketList = 11 levels + // HotArchiveBucketList = 11 levels + // Note: this is temporary, HotArchiveBucketList will need to have a runtime + // defined number of levels in the future. + static inline BucketListDepth kNumLevels = 11; static bool shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level); @@ -531,71 +534,4 @@ template class BucketListBase // FutureBuckets uint64_t getSize() const; }; - -// The LiveBucketList stores the current canonical state of the ledger. It is -// made up of LiveBucket buckets, which in turn store individual entries of type -// BucketEntry. When an entry is "evicted" from the ledger, it is removed from -// the LiveBucketList. Depending on the evicted entry type, it may then be added -// to the HotArchiveBucketList. 
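scanForEvictionLegacy earlier decides evictability from an entry's companion TTL entry. A sketch of that liveness test and of the age recorded in eviction stats (inclusive >= semantics assumed here, consistent with the age computation in the scan; names are illustrative):

#include <cstdint>

struct TTLData
{
    uint32_t liveUntilLedgerSeq;
};

// An entry is live through its liveUntilLedgerSeq, inclusive; once the
// current ledger passes it, the entry becomes evictable.
bool
isLive(TTLData const& ttl, uint32_t ledgerSeq)
{
    return ttl.liveUntilLedgerSeq >= ledgerSeq;
}

// Age recorded when an entry is evicted, matching the
// `ledgerSeq - liveUntilLedger` computation in the scan above.
uint32_t
evictionAge(TTLData const& ttl, uint32_t ledgerSeq)
{
    return ledgerSeq - ttl.liveUntilLedgerSeq;
}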
-class LiveBucketList : public BucketListBase -{ - public: - // Reset Eviction Iterator position if an incoming spill or upgrade has - // invalidated the previous position - static void updateStartingEvictionIterator(EvictionIterator& iter, - uint32_t firstScanLevel, - uint32_t ledgerSeq); - - // Update eviction iter and record stats after scanning a region in one - // bucket. Returns true if scan has looped back to startIter, false - // otherwise. - static bool updateEvictionIterAndRecordStats( - EvictionIterator& iter, EvictionIterator startIter, - uint32_t configFirstScanLevel, uint32_t ledgerSeq, - std::shared_ptr stats, EvictionCounters& counters); - - static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, - uint32_t scanSize, - std::shared_ptr b, - EvictionCounters& counters); - - void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, - uint32_t ledgerSeq, EvictionCounters& counters, - std::shared_ptr stats); - - // Add a batch of initial (created), live (updated) and dead entries to the - // bucketlist, representing the entries effected by closing - // `currLedger`. The bucketlist will incorporate these into the smallest - // (0th) level, as well as commit or prepare merges for any levels that - // should have spilled due to passing through `currLedger`. The `currLedger` - // and `currProtocolVersion` values should be taken from the ledger at which - // this batch is being added. - void addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries); - - BucketEntryCounters sumBucketEntryCounters() const; -}; - -// The HotArchiveBucketList stores recently evicted entries. It contains Buckets -// of type HotArchiveBucket, which store individual entries of type -// HotArchiveBucketEntry. -class HotArchiveBucketList : public BucketListBase -{ - private: - // For now, this class is identical to LiveBucketList. Later PRs will add - // additional functionality. - - // Merge result future - // This should be the result of merging this entire list into a single file. - // The MerkleBucketList is then initalized with this result - public: - void addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& archiveEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries); -}; } diff --git a/src/bucket/BucketListSnapshotBase.cpp b/src/bucket/BucketListSnapshotBase.cpp new file mode 100644 index 0000000000..2dcfcee435 --- /dev/null +++ b/src/bucket/BucketListSnapshotBase.cpp @@ -0,0 +1,162 @@ +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/BucketListSnapshotBase.h" +#include "bucket/BucketListBase.h" +#include "bucket/LiveBucket.h" +#include "crypto/SecretKey.h" // IWYU pragma: keep +#include "ledger/LedgerTxn.h" + +#include "util/GlobalChecks.h" +#include +#include + +namespace stellar +{ +template +BucketListSnapshot::BucketListSnapshot( + BucketListBase const& bl, LedgerHeader header) + : mHeader(std::move(header)) +{ + releaseAssert(threadIsMain()); + + for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) + { + auto const& level = bl.getLevel(i); + mLevels.emplace_back(BucketLevelSnapshot(level)); + } +} + +template +BucketListSnapshot::BucketListSnapshot( + BucketListSnapshot const& snapshot) + : mLevels(snapshot.mLevels), mHeader(snapshot.mHeader) +{ +} + +template +std::vector> const& +BucketListSnapshot::getLevels() const +{ + return mLevels; +} + +template +uint32_t +BucketListSnapshot::getLedgerSeq() const +{ + return mHeader.ledgerSeq; +} + +template +LedgerHeader const& +SearchableBucketListSnapshotBase::getLedgerHeader() +{ + releaseAssert(mSnapshot); + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + return mSnapshot->getLedgerHeader(); +} + +template +void +SearchableBucketListSnapshotBase::loopAllBuckets( + std::function f, + BucketListSnapshot const& snapshot) const +{ + for (auto const& lev : snapshot.getLevels()) + { + auto processBucket = [f](BucketSnapshotT const& b) { + if (b.isEmpty()) + { + return Loop::INCOMPLETE; + } + + return f(b); + }; + + if (processBucket(lev.curr) == Loop::COMPLETE || + processBucket(lev.snap) == Loop::COMPLETE) + { + return; + } + } +} + +template +std::shared_ptr +SearchableBucketListSnapshotBase::load(LedgerKey const& k) +{ + ZoneScoped; + + std::shared_ptr result{}; + auto sawBloomMiss = false; + + // Search function called on each Bucket in BucketList until we find the key + auto loadKeyBucketLoop = [&](auto const& b) { + auto [be, bloomMiss] = b.getBucketEntry(k); + sawBloomMiss = sawBloomMiss || bloomMiss; + + if (be) + { + result = BucketT::bucketEntryToLoadResult(be); + return Loop::COMPLETE; + } + else + { + return Loop::INCOMPLETE; + } + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + if (threadIsMain()) + { + mSnapshotManager.startPointLoadTimer(); + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + mSnapshotManager.endPointLoadTimer(k.type(), sawBloomMiss); + return result; + } + else + { + // TODO: Background metrics + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + return result; + } +} + +template +std::optional> +SearchableBucketListSnapshotBase::loadKeysFromLedger( + std::set const& inKeys, uint32_t ledgerSeq) +{ + return loadKeysInternal(inKeys, /*lkMeter=*/nullptr, ledgerSeq); +} + +template +BucketLevelSnapshot::BucketLevelSnapshot( + BucketLevel const& level) + : curr(level.getCurr()), snap(level.getSnap()) +{ +} + +template +SearchableBucketListSnapshotBase::SearchableBucketListSnapshotBase( + BucketSnapshotManager const& snapshotManager) + : mSnapshotManager(snapshotManager), mHistoricalSnapshots() +{ + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); +} + +template +SearchableBucketListSnapshotBase::~SearchableBucketListSnapshotBase() +{ +} + +template struct BucketLevelSnapshot; +template struct BucketLevelSnapshot; +template class BucketListSnapshot; +template class BucketListSnapshot; +template class SearchableBucketListSnapshotBase; 
+template class SearchableBucketListSnapshotBase; +} \ No newline at end of file diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshotBase.h similarity index 59% rename from src/bucket/BucketListSnapshot.h rename to src/bucket/BucketListSnapshotBase.h index e767ce002f..1a8aef9b31 100644 --- a/src/bucket/BucketListSnapshot.h +++ b/src/bucket/BucketListSnapshotBase.h @@ -4,12 +4,13 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" -#include "bucket/BucketManagerImpl.h" +#include "bucket/BucketListBase.h" +#include "bucket/BucketManager.h" #include "bucket/BucketSnapshot.h" #include "bucket/BucketSnapshotManager.h" #include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" namespace medida { @@ -91,13 +92,18 @@ class SearchableBucketListSnapshotBase : public NonMovableOrCopyable // Loops through all buckets, starting with curr at level 0, then snap at // level 0, etc. Calls f on each bucket. Exits early if function - // returns true - void loopAllBuckets(std::function f, + // returns Loop::COMPLETE. + void loopAllBuckets(std::function f, BucketListSnapshot const& snapshot) const; SearchableBucketListSnapshotBase( BucketSnapshotManager const& snapshotManager); + std::optional> + loadKeysInternal(std::set const& inKeys, + LedgerKeyMeter* lkMeter, + std::optional ledgerSeq); + public: uint32_t getLedgerSeq() const @@ -106,59 +112,6 @@ class SearchableBucketListSnapshotBase : public NonMovableOrCopyable } LedgerHeader const& getLedgerHeader(); -}; - -class SearchableLiveBucketListSnapshot - : public SearchableBucketListSnapshotBase -{ - SearchableLiveBucketListSnapshot( - BucketSnapshotManager const& snapshotManager); - - public: - std::shared_ptr load(LedgerKey const& k); - - std::vector - loadKeysWithLimits(std::set const& inKeys, - LedgerKeyMeter* lkMeter = nullptr); - - std::vector - loadPoolShareTrustLinesByAccountAndAsset(AccountID const& accountID, - Asset const& asset); - - std::vector loadInflationWinners(size_t maxWinners, - int64_t minBalance); - - // Loads inKeys from the specified historical snapshot. Returns - // load_result_vec if the snapshot for the given ledger is - // available, std::nullopt otherwise. Note that ledgerSeq is defined - // as the state of the BucketList at the beginning of the ledger. This means - // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry - // in the BucketList is N - 1. - std::optional> - loadKeysFromLedger(std::set const& inKeys, - uint32_t ledgerSeq); - - EvictionResult scanForEviction(uint32_t ledgerSeq, - EvictionCounters& counters, - EvictionIterator evictionIter, - std::shared_ptr stats, - StateArchivalSettings const& sas); - - friend std::shared_ptr - BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const; -}; - -class SearchableHotArchiveBucketListSnapshot - : public SearchableBucketListSnapshotBase -{ - SearchableHotArchiveBucketListSnapshot( - BucketSnapshotManager const& snapshotManager); - - public: - std::shared_ptr load(LedgerKey const& k); - - std::vector - loadKeys(std::set const& inKeys); // Loads inKeys from the specified historical snapshot. Returns // load_result_vec if the snapshot for the given ledger is @@ -166,11 +119,10 @@ class SearchableHotArchiveBucketListSnapshot // as the state of the BucketList at the beginning of the ledger. 
This means // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry // in the BucketList is N - 1. - std::optional> + std::optional> loadKeysFromLedger(std::set const& inKeys, uint32_t ledgerSeq); - friend std::shared_ptr - BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const; + std::shared_ptr load(LedgerKey const& k); }; } \ No newline at end of file diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManager.cpp similarity index 76% rename from src/bucket/BucketManagerImpl.cpp rename to src/bucket/BucketManager.cpp index 90556a89cd..e3e5f5f524 100644 --- a/src/bucket/BucketManagerImpl.cpp +++ b/src/bucket/BucketManager.cpp @@ -2,19 +2,17 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/BucketManagerImpl.h" -#include "bucket/Bucket.h" -#include "bucket/BucketIndexImpl.h" +#include "bucket/BucketManager.h" #include "bucket/BucketInputIterator.h" -#include "bucket/BucketList.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" #include "bucket/BucketOutputIterator.h" #include "bucket/BucketSnapshotManager.h" #include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" +#include "bucket/SearchableBucketList.h" #include "crypto/BLAKE2.h" #include "crypto/Hex.h" -#include "crypto/SHA.h" #include "history/HistoryManager.h" #include "historywork/VerifyBucketWork.h" #include "ledger/LedgerManager.h" @@ -51,49 +49,17 @@ namespace stellar { -void -EvictionStatistics::recordEvictedEntry(uint64_t age) -{ - std::lock_guard l(mLock); - ++mNumEntriesEvicted; - mEvictedEntriesAgeSum += age; -} - -void -EvictionStatistics::submitMetricsAndRestartCycle(uint32_t currLedgerSeq, - EvictionCounters& counters) -{ - std::lock_guard l(mLock); - - // Only record metrics if we've seen a complete cycle to avoid noise - if (mCompleteCycle) - { - counters.evictionCyclePeriod.set_count(currLedgerSeq - - mEvictionCycleStartLedger); - - auto averageAge = mNumEntriesEvicted == 0 - ? 
0 - : mEvictedEntriesAgeSum / mNumEntriesEvicted; - counters.averageEvictedEntryAge.set_count(averageAge); - } - - // Reset to start new cycle - mCompleteCycle = true; - mEvictedEntriesAgeSum = 0; - mNumEntriesEvicted = 0; - mEvictionCycleStartLedger = currLedgerSeq; -} - std::unique_ptr BucketManager::create(Application& app) { - auto bucketManagerPtr = std::make_unique(app); + auto bucketManagerPtr = + std::unique_ptr(new BucketManager(app)); bucketManagerPtr->initialize(); return bucketManagerPtr; } void -BucketManagerImpl::initialize() +BucketManager::initialize() { ZoneScoped; std::string d = mConfig.BUCKET_DIR_PATH; @@ -156,7 +122,7 @@ BucketManagerImpl::initialize() } void -BucketManagerImpl::dropAll() +BucketManager::dropAll() { ZoneScoped; deleteEntireBucketDir(); @@ -164,26 +130,12 @@ BucketManagerImpl::dropAll() } TmpDirManager& -BucketManagerImpl::getTmpDirManager() +BucketManager::getTmpDirManager() { return *mTmpDirManager; } -EvictionCounters::EvictionCounters(Application& app) - : entriesEvicted(app.getMetrics().NewCounter( - {"state-archival", "eviction", "entries-evicted"})) - , bytesScannedForEviction(app.getMetrics().NewCounter( - {"state-archival", "eviction", "bytes-scanned"})) - , incompleteBucketScan(app.getMetrics().NewCounter( - {"state-archival", "eviction", "incomplete-scan"})) - , evictionCyclePeriod( - app.getMetrics().NewCounter({"state-archival", "eviction", "period"})) - , averageEvictedEntryAge( - app.getMetrics().NewCounter({"state-archival", "eviction", "age"})) -{ -} - -BucketManagerImpl::BucketManagerImpl(Application& app) +BucketManager::BucketManager(Application& app) : mApp(app) , mLiveBucketList(nullptr) , mHotArchiveBucketList(nullptr) @@ -234,7 +186,7 @@ BucketManagerImpl::BucketManagerImpl(Application& app) } } -const std::string BucketManagerImpl::kLockFilename = "stellar-core.lock"; +const std::string BucketManager::kLockFilename = "stellar-core.lock"; namespace { @@ -259,27 +211,27 @@ extractFromFilename(std::string const& name) } void -BucketManagerImpl::updateSharedBucketSize() +BucketManager::updateSharedBucketSize() { mSharedBucketsSize.set_count(mSharedHotArchiveBuckets.size() + mSharedLiveBuckets.size()); } std::string -BucketManagerImpl::bucketFilename(std::string const& bucketHexHash) +BucketManager::bucketFilename(std::string const& bucketHexHash) { std::string basename = bucketBasename(bucketHexHash); return getBucketDir() + "/" + basename; } std::string -BucketManagerImpl::bucketFilename(Hash const& hash) +BucketManager::bucketFilename(Hash const& hash) { return bucketFilename(binToHex(hash)); } std::string -BucketManagerImpl::bucketIndexFilename(Hash const& hash) const +BucketManager::bucketIndexFilename(Hash const& hash) const { auto hashStr = binToHex(hash); auto basename = "bucket-" + hashStr + ".index"; @@ -287,7 +239,7 @@ BucketManagerImpl::bucketIndexFilename(Hash const& hash) const } std::string const& -BucketManagerImpl::getTmpDir() +BucketManager::getTmpDir() { ZoneScoped; std::lock_guard lock(mBucketMutex); @@ -300,12 +252,12 @@ BucketManagerImpl::getTmpDir() } std::string const& -BucketManagerImpl::getBucketDir() const +BucketManager::getBucketDir() const { return *(mLockedBucketDir); } -BucketManagerImpl::~BucketManagerImpl() +BucketManager::~BucketManager() { ZoneScoped; if (mDeleteEntireBucketDirInDtor) @@ -319,7 +271,7 @@ BucketManagerImpl::~BucketManagerImpl() } void -BucketManagerImpl::deleteEntireBucketDir() +BucketManager::deleteEntireBucketDir() { ZoneScoped; std::string d = mConfig.BUCKET_DIR_PATH; @@ -338,7 
+290,7 @@ BucketManagerImpl::deleteEntireBucketDir() } void -BucketManagerImpl::deleteTmpDirAndUnlockBucketDir() +BucketManager::deleteTmpDirAndUnlockBucketDir() { ZoneScoped; @@ -364,21 +316,21 @@ BucketManagerImpl::deleteTmpDirAndUnlockBucketDir() } LiveBucketList& -BucketManagerImpl::getLiveBucketList() +BucketManager::getLiveBucketList() { releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); - return *LiveBucketList; + return *mLiveBucketList; } HotArchiveBucketList& -BucketManagerImpl::getHotArchiveBucketList() +BucketManager::getHotArchiveBucketList() { releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); return *mHotArchiveBucketList; } BucketSnapshotManager& -BucketManagerImpl::getBucketSnapshotManager() const +BucketManager::getBucketSnapshotManager() const { releaseAssertOrThrow(mConfig.isUsingBucketListDB()); releaseAssert(mSnapshotManager); @@ -386,124 +338,28 @@ BucketManagerImpl::getBucketSnapshotManager() const } medida::Timer& -BucketManagerImpl::getMergeTimer() +BucketManager::getMergeTimer() { return mBucketSnapMerge; } MergeCounters -BucketManagerImpl::readMergeCounters() +BucketManager::readMergeCounters() { std::lock_guard lock(mBucketMutex); return mMergeCounters; } -// Check that eviction scan is based off of current ledger snapshot and that -// archival settings have not changed -bool -EvictionResult::isValid(uint32_t currLedger, - StateArchivalSettings const& currSas) const -{ - return initialLedger == currLedger && - initialSas.maxEntriesToArchive == currSas.maxEntriesToArchive && - initialSas.evictionScanSize == currSas.evictionScanSize && - initialSas.startingEvictionScanLevel == - currSas.startingEvictionScanLevel; -} - -MergeCounters& -MergeCounters::operator+=(MergeCounters const& delta) -{ - mPreInitEntryProtocolMerges += delta.mPreInitEntryProtocolMerges; - mPostInitEntryProtocolMerges += delta.mPostInitEntryProtocolMerges; - - mRunningMergeReattachments += delta.mRunningMergeReattachments; - mFinishedMergeReattachments += delta.mFinishedMergeReattachments; - - mPreShadowRemovalProtocolMerges += delta.mPreShadowRemovalProtocolMerges; - mPostShadowRemovalProtocolMerges += delta.mPostShadowRemovalProtocolMerges; - - mNewMetaEntries += delta.mNewMetaEntries; - mNewInitEntries += delta.mNewInitEntries; - mNewLiveEntries += delta.mNewLiveEntries; - mNewDeadEntries += delta.mNewDeadEntries; - mOldMetaEntries += delta.mOldMetaEntries; - mOldInitEntries += delta.mOldInitEntries; - mOldLiveEntries += delta.mOldLiveEntries; - mOldDeadEntries += delta.mOldDeadEntries; - - mOldEntriesDefaultAccepted += delta.mOldEntriesDefaultAccepted; - mNewEntriesDefaultAccepted += delta.mNewEntriesDefaultAccepted; - mNewInitEntriesMergedWithOldDead += delta.mNewInitEntriesMergedWithOldDead; - mOldInitEntriesMergedWithNewLive += delta.mOldInitEntriesMergedWithNewLive; - mOldInitEntriesMergedWithNewDead += delta.mOldInitEntriesMergedWithNewDead; - mNewEntriesMergedWithOldNeitherInit += - delta.mNewEntriesMergedWithOldNeitherInit; - - mShadowScanSteps += delta.mShadowScanSteps; - mMetaEntryShadowElisions += delta.mMetaEntryShadowElisions; - mLiveEntryShadowElisions += delta.mLiveEntryShadowElisions; - mInitEntryShadowElisions += delta.mInitEntryShadowElisions; - mDeadEntryShadowElisions += delta.mDeadEntryShadowElisions; - - mOutputIteratorTombstoneElisions += delta.mOutputIteratorTombstoneElisions; - mOutputIteratorBufferUpdates += delta.mOutputIteratorBufferUpdates; - mOutputIteratorActualWrites += delta.mOutputIteratorActualWrites; - return *this; -} - -bool 
-MergeCounters::operator==(MergeCounters const& other) const -{ - return ( - mPreInitEntryProtocolMerges == other.mPreInitEntryProtocolMerges && - mPostInitEntryProtocolMerges == other.mPostInitEntryProtocolMerges && - - mRunningMergeReattachments == other.mRunningMergeReattachments && - mFinishedMergeReattachments == other.mFinishedMergeReattachments && - - mNewMetaEntries == other.mNewMetaEntries && - mNewInitEntries == other.mNewInitEntries && - mNewLiveEntries == other.mNewLiveEntries && - mNewDeadEntries == other.mNewDeadEntries && - mOldMetaEntries == other.mOldMetaEntries && - mOldInitEntries == other.mOldInitEntries && - mOldLiveEntries == other.mOldLiveEntries && - mOldDeadEntries == other.mOldDeadEntries && - - mOldEntriesDefaultAccepted == other.mOldEntriesDefaultAccepted && - mNewEntriesDefaultAccepted == other.mNewEntriesDefaultAccepted && - mNewInitEntriesMergedWithOldDead == - other.mNewInitEntriesMergedWithOldDead && - mOldInitEntriesMergedWithNewLive == - other.mOldInitEntriesMergedWithNewLive && - mOldInitEntriesMergedWithNewDead == - other.mOldInitEntriesMergedWithNewDead && - mNewEntriesMergedWithOldNeitherInit == - other.mNewEntriesMergedWithOldNeitherInit && - - mShadowScanSteps == other.mShadowScanSteps && - mMetaEntryShadowElisions == other.mMetaEntryShadowElisions && - mLiveEntryShadowElisions == other.mLiveEntryShadowElisions && - mInitEntryShadowElisions == other.mInitEntryShadowElisions && - mDeadEntryShadowElisions == other.mDeadEntryShadowElisions && - - mOutputIteratorTombstoneElisions == - other.mOutputIteratorTombstoneElisions && - mOutputIteratorBufferUpdates == other.mOutputIteratorBufferUpdates && - mOutputIteratorActualWrites == other.mOutputIteratorActualWrites); -} - void -BucketManagerImpl::incrMergeCounters(MergeCounters const& delta) +BucketManager::incrMergeCounters(MergeCounters const& delta) { std::lock_guard lock(mBucketMutex); mMergeCounters += delta; } bool -BucketManagerImpl::renameBucketDirFile(std::filesystem::path const& src, - std::filesystem::path const& dst) +BucketManager::renameBucketDirFile(std::filesystem::path const& src, + std::filesystem::path const& dst) { ZoneScoped; if (mConfig.DISABLE_XDR_FSYNC) @@ -518,35 +374,31 @@ BucketManagerImpl::renameBucketDirFile(std::filesystem::path const& src, template <> std::shared_ptr -BucketManager::adoptFileAsBucket(BucketManager& bm, std::string const& filename, +BucketManager::adoptFileAsBucket(std::string const& filename, uint256 const& hash, MergeKey* mergeKey, std::unique_ptr index) { - auto& bmImpl = static_cast(bm); - return bmImpl.adoptFileAsBucket(filename, hash, mergeKey, std::move(index), - bmImpl.mSharedLiveBuckets, - bmImpl.mLiveBucketFutures); + return adoptFileAsBucketInternal(filename, hash, mergeKey, std::move(index), + mSharedLiveBuckets, mLiveBucketFutures); } template <> std::shared_ptr -BucketManager::adoptFileAsBucket(BucketManager& bm, std::string const& filename, +BucketManager::adoptFileAsBucket(std::string const& filename, uint256 const& hash, MergeKey* mergeKey, std::unique_ptr index) { - auto& bmImpl = static_cast(bm); - return bmImpl.adoptFileAsBucket(filename, hash, mergeKey, std::move(index), - bmImpl.mSharedHotArchiveBuckets, - bmImpl.mHotArchiveBucketFutures); + return adoptFileAsBucketInternal(filename, hash, mergeKey, std::move(index), + mSharedHotArchiveBuckets, + mHotArchiveBucketFutures); } template std::shared_ptr -BucketManagerImpl::adoptFileAsBucket(std::string const& filename, - uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index, - 
BucketMapT& bucketMap, - FutureMapT& futureMap) +BucketManager::adoptFileAsBucketInternal( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index, BucketMapT& bucketMap, + FutureMapT& futureMap) { BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; @@ -571,7 +423,7 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, } // Check to see if we have an existing bucket (either in-memory or on-disk) - std::shared_ptr b = getBucketByHash(hash, bucketMap); + std::shared_ptr b = getBucketByHashInternal(hash, bucketMap); if (b) { CLOG_DEBUG( @@ -624,26 +476,22 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, template <> void -BucketManager::noteEmptyMergeOutput(BucketManager& bm, - MergeKey const& mergeKey) +BucketManager::noteEmptyMergeOutput(MergeKey const& mergeKey) { - auto& bmImpl = static_cast(bm); - bmImpl.noteEmptyMergeOutput(mergeKey, bmImpl.mLiveBucketFutures); + noteEmptyMergeOutputInternal(mergeKey, mLiveBucketFutures); } template <> void -BucketManager::noteEmptyMergeOutput(BucketManager& bm, - MergeKey const& mergeKey) +BucketManager::noteEmptyMergeOutput(MergeKey const& mergeKey) { - auto& bmImpl = static_cast(bm); - bmImpl.noteEmptyMergeOutput(mergeKey, bmImpl.mHotArchiveBucketFutures); + noteEmptyMergeOutputInternal(mergeKey, mHotArchiveBucketFutures); } template void -BucketManagerImpl::noteEmptyMergeOutput(MergeKey const& mergeKey, - FutureMapT& futureMap) +BucketManager::noteEmptyMergeOutputInternal(MergeKey const& mergeKey, + FutureMapT& futureMap) { BUCKET_TYPE_ASSERT(BucketT); releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); @@ -664,24 +512,22 @@ BucketManagerImpl::noteEmptyMergeOutput(MergeKey const& mergeKey, template <> std::shared_ptr -BucketManager::getBucketIfExists(BucketManager const& bm, uint256 const& hash) +BucketManager::getBucketIfExists(uint256 const& hash) { - auto const& bmImpl = static_cast(bm); - return bmImpl.getBucketIfExists(hash, bmImpl.mSharedLiveBuckets); + return getBucketIfExistsInternal(hash, mSharedLiveBuckets); } template <> std::shared_ptr -BucketManager::getBucketIfExists(BucketManager const& bm, uint256 const& hash) +BucketManager::getBucketIfExists(uint256 const& hash) { - auto const& bmImpl = static_cast(bm); - return bmImpl.getBucketIfExists(hash, bmImpl.mSharedHotArchiveBuckets); + return getBucketIfExistsInternal(hash, mSharedHotArchiveBuckets); } template std::shared_ptr -BucketManagerImpl::getBucketIfExists(uint256 const& hash, - BucketMapT const& bucketMap) const +BucketManager::getBucketIfExistsInternal( + uint256 const& hash, BucketMapT const& bucketMap) const { BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; @@ -700,24 +546,22 @@ BucketManagerImpl::getBucketIfExists(uint256 const& hash, template <> std::shared_ptr -BucketManager::getBucketByHash(BucketManager& bm, uint256 const& hash) +BucketManager::getBucketByHash(uint256 const& hash) { - auto& bmImpl = static_cast(bm); - return bmImpl.getBucketByHash(hash, bmImpl.mSharedLiveBuckets); + return getBucketByHashInternal(hash, mSharedLiveBuckets); } template <> std::shared_ptr -BucketManager::getBucketByHash(BucketManager& bm, uint256 const& hash) +BucketManager::getBucketByHash(uint256 const& hash) { - auto& bmImpl = static_cast(bm); - return bmImpl.getBucketByHash(hash, bmImpl.mSharedHotArchiveBuckets); + return getBucketByHashInternal(hash, mSharedHotArchiveBuckets); } template std::shared_ptr -BucketManagerImpl::getBucketByHash(uint256 const& hash, - BucketMapT& bucketMap) 
+BucketManager::getBucketByHashInternal(uint256 const& hash, + BucketMapT& bucketMap) { BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; @@ -752,24 +596,22 @@ BucketManagerImpl::getBucketByHash(uint256 const& hash, template <> std::shared_future> -BucketManager::getMergeFuture(BucketManager& bucketManager, MergeKey const& key) +BucketManager::getMergeFuture(MergeKey const& key) { - auto& bmImpl = static_cast(bucketManager); - return bmImpl.getMergeFuture(key, bmImpl.mLiveBucketFutures); + return getMergeFutureInternal(key, mLiveBucketFutures); } template <> std::shared_future> -BucketManager::getMergeFuture(BucketManager& bucketManager, MergeKey const& key) +BucketManager::getMergeFuture(MergeKey const& key) { - auto& bmImpl = static_cast(bucketManager); - return bmImpl.getMergeFuture(key, bmImpl.mHotArchiveBucketFutures); + return getMergeFutureInternal(key, mHotArchiveBucketFutures); } template std::shared_future> -BucketManagerImpl::getMergeFuture(MergeKey const& key, - FutureMapT& futureMap) +BucketManager::getMergeFutureInternal(MergeKey const& key, + FutureMapT& futureMap) { BUCKET_TYPE_ASSERT(BucketT); ZoneScoped; @@ -783,8 +625,7 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key, Hash bucketHash; if (mFinishedMerges.findMergeFor(key, bucketHash)) { - auto bucket = - BucketManager::getBucketByHash(*this, bucketHash); + auto bucket = BucketManager::getBucketByHash(bucketHash); if (bucket) { CLOG_TRACE(Bucket, @@ -817,26 +658,23 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key, template <> void BucketManager::putMergeFuture( - BucketManager& bm, MergeKey const& key, - std::shared_future> future) + MergeKey const& key, std::shared_future> future) { - auto& bmImpl = static_cast(bm); - bmImpl.putMergeFuture(key, future, bmImpl.mLiveBucketFutures); + putMergeFutureInternal(key, future, mLiveBucketFutures); } template <> void BucketManager::putMergeFuture( - BucketManager& bm, MergeKey const& key, + MergeKey const& key, std::shared_future> future) { - auto& bmImpl = static_cast(bm); - bmImpl.putMergeFuture(key, future, bmImpl.mHotArchiveBucketFutures); + putMergeFutureInternal(key, future, mHotArchiveBucketFutures); } template void -BucketManagerImpl::putMergeFuture( +BucketManager::putMergeFutureInternal( MergeKey const& key, std::shared_future> future, FutureMapT& futureMap) { @@ -853,7 +691,7 @@ BucketManagerImpl::putMergeFuture( #ifdef BUILD_TESTS void -BucketManagerImpl::clearMergeFuturesForTesting() +BucketManager::clearMergeFuturesForTesting() { std::lock_guard lock(mBucketMutex); mLiveBucketFutures.clear(); @@ -862,7 +700,7 @@ BucketManagerImpl::clearMergeFuturesForTesting() #endif std::set -BucketManagerImpl::getBucketListReferencedBuckets() const +BucketManager::getBucketListReferencedBuckets() const { ZoneScoped; std::set referenced; @@ -907,7 +745,7 @@ BucketManagerImpl::getBucketListReferencedBuckets() const } std::set -BucketManagerImpl::getAllReferencedBuckets() const +BucketManager::getAllReferencedBuckets() const { ZoneScoped; auto referenced = getBucketListReferencedBuckets(); @@ -953,7 +791,7 @@ BucketManagerImpl::getAllReferencedBuckets() const } void -BucketManagerImpl::cleanupStaleFiles() +BucketManager::cleanupStaleFiles() { ZoneScoped; if (mConfig.DISABLE_BUCKET_GC) @@ -965,16 +803,17 @@ BucketManagerImpl::cleanupStaleFiles() auto referenced = getAllReferencedBuckets(); std::transform(std::begin(mSharedLiveBuckets), std::end(mSharedLiveBuckets), std::inserter(referenced, std::end(referenced)), - [](std::pair> const& p) { + [](std::pair> const& p) { return 
p.first; }); - std::transform(std::begin(mSharedHotArchiveBuckets), - std::end(mSharedHotArchiveBuckets), - std::inserter(referenced, std::end(referenced)), - [](std::pair> const& p) { - return p.first; - }); + std::transform( + std::begin(mSharedHotArchiveBuckets), + std::end(mSharedHotArchiveBuckets), + std::inserter(referenced, std::end(referenced)), + [](std::pair> const& p) { + return p.first; + }); for (auto f : fs::findfiles(getBucketDir(), isBucketFile)) { @@ -995,7 +834,7 @@ BucketManagerImpl::cleanupStaleFiles() } void -BucketManagerImpl::forgetUnreferencedBuckets() +BucketManager::forgetUnreferencedBuckets() { ZoneScoped; std::lock_guard lock(mBucketMutex); @@ -1095,10 +934,10 @@ BucketManagerImpl::forgetUnreferencedBuckets() } void -BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +BucketManager::addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { ZoneScoped; releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); @@ -1122,7 +961,7 @@ BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header, } void -BucketManagerImpl::addHotArchiveBatch( +BucketManager::addHotArchiveBatch( Application& app, LedgerHeader header, std::vector const& archivedEntries, std::vector const& restoredEntries, @@ -1132,7 +971,7 @@ BucketManagerImpl::addHotArchiveBatch( releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); releaseAssertOrThrow(protocolVersionStartsFrom( header.ledgerVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); #ifdef BUILD_TESTS if (mUseFakeTestValuesForNextClose) { @@ -1154,8 +993,8 @@ BucketManagerImpl::addHotArchiveBatch( #ifdef BUILD_TESTS void -BucketManagerImpl::setNextCloseVersionAndHashForTesting(uint32_t protocolVers, - uint256 const& hash) +BucketManager::setNextCloseVersionAndHashForTesting(uint32_t protocolVers, + uint256 const& hash) { mUseFakeTestValuesForNextClose = true; mFakeTestProtocolVersion = protocolVers; @@ -1163,7 +1002,7 @@ BucketManagerImpl::setNextCloseVersionAndHashForTesting(uint32_t protocolVers, } std::set -BucketManagerImpl::getBucketHashesInBucketDirForTesting() const +BucketManager::getBucketHashesInBucketDirForTesting() const { std::set hashes; for (auto f : fs::findfiles(getBucketDir(), isBucketFile)) @@ -1174,7 +1013,7 @@ BucketManagerImpl::getBucketHashesInBucketDirForTesting() const } medida::Counter& -BucketManagerImpl::getEntriesEvictedCounter() const +BucketManager::getEntriesEvictedCounter() const { return mBucketListEvictionCounters.entriesEvicted; } @@ -1183,7 +1022,7 @@ BucketManagerImpl::getEntriesEvictedCounter() const // updates the given LedgerHeader to reflect the current state of the bucket // list void -BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) +BucketManager::snapshotLedger(LedgerHeader& currentHeader) { ZoneScoped; Hash hash; @@ -1191,7 +1030,7 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) { if (protocolVersionStartsFrom( currentHeader.ledgerVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { // TODO: Hash Archive Bucket // Dependency: HAS supports Hot Archive BucketList @@ -1217,8 +1056,8 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) } void 
-BucketManagerImpl::maybeSetIndex(std::shared_ptr b, - std::unique_ptr&& index) +BucketManager::maybeSetIndex(std::shared_ptr b, + std::unique_ptr&& index) { ZoneScoped; @@ -1229,8 +1068,7 @@ BucketManagerImpl::maybeSetIndex(std::shared_ptr b, } void -BucketManagerImpl::scanForEvictionLegacy(AbstractLedgerTxn& ltx, - uint32_t ledgerSeq) +BucketManager::scanForEvictionLegacy(AbstractLedgerTxn& ltx, uint32_t ledgerSeq) { ZoneScoped; releaseAssert(protocolVersionStartsFrom(ltx.getHeader().ledgerVersion, @@ -1240,7 +1078,7 @@ BucketManagerImpl::scanForEvictionLegacy(AbstractLedgerTxn& ltx, } void -BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq) +BucketManager::startBackgroundEvictionScan(uint32_t ledgerSeq) { releaseAssert(mConfig.isUsingBucketListDB()); releaseAssert(mSnapshotManager); @@ -1269,9 +1107,9 @@ BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq) } void -BucketManagerImpl::resolveBackgroundEvictionScan( - AbstractLedgerTxn& ltx, uint32_t ledgerSeq, - LedgerKeySet const& modifiedKeys) +BucketManager::resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, + uint32_t ledgerSeq, + LedgerKeySet const& modifiedKeys) { ZoneScoped; releaseAssert(threadIsMain()); @@ -1345,19 +1183,19 @@ BucketManagerImpl::resolveBackgroundEvictionScan( } medida::Meter& -BucketManagerImpl::getBloomMissMeter() const +BucketManager::getBloomMissMeter() const { return mBucketListDBBloomMisses; } medida::Meter& -BucketManagerImpl::getBloomLookupMeter() const +BucketManager::getBloomLookupMeter() const { return mBucketListDBBloomLookups; } void -BucketManagerImpl::calculateSkipValues(LedgerHeader& currentHeader) +BucketManager::calculateSkipValues(LedgerHeader& currentHeader) { if ((currentHeader.ledgerSeq % SKIP_1) == 0) @@ -1383,7 +1221,7 @@ BucketManagerImpl::calculateSkipValues(LedgerHeader& currentHeader) } std::vector -BucketManagerImpl::checkForMissingBucketsFiles(HistoryArchiveState const& has) +BucketManager::checkForMissingBucketsFiles(HistoryArchiveState const& has) { ZoneScoped; std::vector buckets = has.allBuckets(); @@ -1398,8 +1236,8 @@ BucketManagerImpl::checkForMissingBucketsFiles(HistoryArchiveState const& has) } void -BucketManagerImpl::assumeState(HistoryArchiveState const& has, - uint32_t maxProtocolVersion, bool restartMerges) +BucketManager::assumeState(HistoryArchiveState const& has, + uint32_t maxProtocolVersion, bool restartMerges) { ZoneScoped; releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); @@ -1408,10 +1246,10 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, // Dependency: HAS supports Hot Archive BucketList for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - auto curr = getBucketByHash(hexToBin256(has.currentBuckets.at(i).curr), - mSharedLiveBuckets); - auto snap = getBucketByHash(hexToBin256(has.currentBuckets.at(i).snap), - mSharedLiveBuckets); + auto curr = getBucketByHashInternal( + hexToBin256(has.currentBuckets.at(i).curr), mSharedLiveBuckets); + auto snap = getBucketByHashInternal( + hexToBin256(has.currentBuckets.at(i).snap), mSharedLiveBuckets); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while assuming " @@ -1422,7 +1260,7 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, std::shared_ptr nextBucket = nullptr; if (nextFuture.hasOutputHash()) { - nextBucket = getBucketByHash( + nextBucket = getBucketByHashInternal( hexToBin256(nextFuture.getOutputHash()), mSharedLiveBuckets); if (!nextBucket) { @@ -1458,13 +1296,13 @@ BucketManagerImpl::assumeState(HistoryArchiveState 
const& has, } void -BucketManagerImpl::shutdown() +BucketManager::shutdown() { mIsShutdown = true; } bool -BucketManagerImpl::isShutdown() const +BucketManager::isShutdown() const { return mIsShutdown; } @@ -1520,7 +1358,7 @@ loadEntriesFromBucket(std::shared_ptr b, std::string const& name, } std::map -BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) +BucketManager::loadCompleteLedgerState(HistoryArchiveState const& has) { ZoneScoped; @@ -1540,7 +1378,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) { continue; } - auto b = getBucketByHash(pair.first, mSharedLiveBuckets); + auto b = getBucketByHashInternal(pair.first, mSharedLiveBuckets); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1552,7 +1390,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) } std::shared_ptr -BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) +BucketManager::mergeBuckets(HistoryArchiveState const& has) { ZoneScoped; @@ -1693,7 +1531,7 @@ visitAllEntriesInBucket( } void -BucketManagerImpl::visitLedgerEntries( +BucketManager::visitLedgerEntries( HistoryArchiveState const& has, std::optional minLedger, std::function const& filterEntry, std::function const& acceptEntry, @@ -1719,7 +1557,7 @@ BucketManagerImpl::visitLedgerEntries( { continue; } - auto b = getBucketByHash(pair.first, mSharedLiveBuckets); + auto b = getBucketByHashInternal(pair.first, mSharedLiveBuckets); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1745,7 +1583,7 @@ BucketManagerImpl::visitLedgerEntries( } std::shared_ptr -BucketManagerImpl::scheduleVerifyReferencedBucketsWork() +BucketManager::scheduleVerifyReferencedBucketsWork() { std::set hashes = getAllReferencedBuckets(); std::vector> seq; @@ -1758,7 +1596,7 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork() // TODO: Update verify to for ArchiveBucket // Dependency: HAS supports Hot Archive BucketList - auto b = getBucketByHash(h, mSharedLiveBuckets); + auto b = getBucketByHashInternal(h, mSharedLiveBuckets); if (!b) { throw std::runtime_error(fmt::format( @@ -1772,13 +1610,13 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork() } Config const& -BucketManagerImpl::getConfig() const +BucketManager::getConfig() const { return mConfig; } std::shared_ptr -BucketManagerImpl::getSearchableLiveBucketListSnapshot() +BucketManager::getSearchableLiveBucketListSnapshot() { releaseAssert(mConfig.isUsingBucketListDB()); // Any other threads must maintain their own snapshot @@ -1793,7 +1631,7 @@ BucketManagerImpl::getSearchableLiveBucketListSnapshot() } void -BucketManagerImpl::reportBucketEntryCountMetrics() +BucketManager::reportBucketEntryCountMetrics() { if (!mConfig.isUsingBucketListDB()) { diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 039610ed77..3b5f85a65b 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -1,161 +1,43 @@ #pragma once -// Copyright 2014 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "bucket/Bucket.h" -#include "util/NonCopyable.h" +#include "bucket/BucketMergeMap.h" +#include "bucket/HotArchiveBucketList.h" +#include "bucket/LiveBucketList.h" +#include "main/Config.h" #include "util/types.h" -#include +#include "xdr/Stellar-ledger.h" + #include #include -#include +#include #include +#include -#include "medida/timer_context.h" +// Copyright 2015 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 namespace medida { +class Timer; class Meter; +class Counter; } namespace stellar { +class TmpDir; class AbstractLedgerTxn; class Application; -class BasicWork; +class Bucket; class LiveBucketList; -class HotArchiveBucketList; class BucketSnapshotManager; -class Config; class SearchableLiveBucketListSnapshot; -class TmpDirManager; -struct HistoryArchiveState; -struct InflationWinner; -struct LedgerHeader; -struct MergeKey; - -// A fine-grained merge-operation-counter structure for tracking various -// events during merges. These are not medida counters because we do not -// want or need to publish this level of granularity outside of testing, and -// we do want merges to run as quickly as possible. -struct MergeCounters -{ - uint64_t mPreInitEntryProtocolMerges{0}; - uint64_t mPostInitEntryProtocolMerges{0}; - - uint64_t mRunningMergeReattachments{0}; - uint64_t mFinishedMergeReattachments{0}; - - uint64_t mPreShadowRemovalProtocolMerges{0}; - uint64_t mPostShadowRemovalProtocolMerges{0}; - - uint64_t mNewMetaEntries{0}; - uint64_t mNewInitEntries{0}; - uint64_t mNewLiveEntries{0}; - uint64_t mNewDeadEntries{0}; - uint64_t mOldMetaEntries{0}; - uint64_t mOldInitEntries{0}; - uint64_t mOldLiveEntries{0}; - uint64_t mOldDeadEntries{0}; - - uint64_t mOldEntriesDefaultAccepted{0}; - uint64_t mNewEntriesDefaultAccepted{0}; - uint64_t mNewInitEntriesMergedWithOldDead{0}; - uint64_t mOldInitEntriesMergedWithNewLive{0}; - uint64_t mOldInitEntriesMergedWithNewDead{0}; - uint64_t mNewEntriesMergedWithOldNeitherInit{0}; - - uint64_t mShadowScanSteps{0}; - uint64_t mMetaEntryShadowElisions{0}; - uint64_t mLiveEntryShadowElisions{0}; - uint64_t mInitEntryShadowElisions{0}; - uint64_t mDeadEntryShadowElisions{0}; - - uint64_t mOutputIteratorTombstoneElisions{0}; - uint64_t mOutputIteratorBufferUpdates{0}; - uint64_t mOutputIteratorActualWrites{0}; - MergeCounters& operator+=(MergeCounters const& delta); - bool operator==(MergeCounters const& other) const; -}; - -// Stores key that is eligible for eviction and the position of the eviction -// iterator as if that key was the last entry evicted -struct EvictionResultEntry -{ - LedgerKey key; - EvictionIterator iter; - uint32_t liveUntilLedger; - - EvictionResultEntry(LedgerKey const& key, EvictionIterator const& iter, - uint32_t liveUntilLedger) - : key(key), iter(iter), liveUntilLedger(liveUntilLedger) - { - } -}; - -struct EvictionResult -{ - // List of keys eligible for eviction in the order in which they occur in - // the bucket - std::list eligibleKeys{}; - - // Eviction iterator at the end of the scan region - EvictionIterator endOfRegionIterator; - - // LedgerSeq which this scan is based on - uint32_t initialLedger{}; - - // State archival settings that this scan is based on - StateArchivalSettings initialSas; - - EvictionResult(StateArchivalSettings const& sas) : 
initialSas(sas) - { - } - - // Returns true if this is a valid archival scan for the current ledger - // and archival settings. This is necessary because we start the scan - // for ledger N immediately after N - 1 closes. However, ledger N may - // contain a network upgrade changing eviction scan settings. Legacy SQL - // scans will run based on the changes that occurred during ledger N, - // meaning the scan we started at ledger N - 1 is invalid since it was based - // off of older settings. - bool isValid(uint32_t currLedger, - StateArchivalSettings const& currSas) const; -}; - -struct EvictionCounters -{ - medida::Counter& entriesEvicted; - medida::Counter& bytesScannedForEviction; - medida::Counter& incompleteBucketScan; - medida::Counter& evictionCyclePeriod; - medida::Counter& averageEvictedEntryAge; - - EvictionCounters(Application& app); -}; - -class EvictionStatistics -{ - private: - std::mutex mLock{}; - - // Only record metrics if we've seen a complete cycle to avoid noise - bool mCompleteCycle{false}; - uint64_t mEvictedEntriesAgeSum{}; - uint64_t mNumEntriesEvicted{}; - uint32_t mEvictionCycleStartLedger{}; - - public: - // Evicted entry "age" is the delta between its liveUntilLedger and the - // ledger when the entry is actually evicted - void recordEvictedEntry(uint64_t age); +struct BucketEntryCounters; +enum class LedgerEntryTypeAndDurability : uint32_t; - void submitMetricsAndRestartCycle(uint32_t currLedgerSeq, - EvictionCounters& counters); -}; +struct HistoryArchiveState; /** * BucketManager is responsible for maintaining a collection of Buckets of @@ -177,34 +59,147 @@ class EvictionStatistics * in temporary directories -- and then "adopted" by the BucketManager, moved * into its directory and managed by it. */ - class BucketManager : NonMovableOrCopyable { + template + using BucketMapT = std::map>; + + template + using FutureMapT = + UnorderedMap>>; + + static std::string const kLockFilename; + + Application& mApp; + std::unique_ptr mLiveBucketList; + std::unique_ptr mHotArchiveBucketList; + std::unique_ptr mSnapshotManager; + std::unique_ptr mTmpDirManager; + std::unique_ptr mWorkDir; + BucketMapT mSharedLiveBuckets; + BucketMapT mSharedHotArchiveBuckets; + std::shared_ptr + mSearchableBucketListSnapshot{}; + + // Lock for managing raw Bucket files or the bucket directory. This lock is + // only required for file access, but is not required for logical changes to + // a BucketList (i.e. addLiveBatch). + mutable std::recursive_mutex mBucketMutex; + std::unique_ptr mLockedBucketDir; + medida::Meter& mBucketLiveObjectInsertBatch; + medida::Meter& mBucketArchiveObjectInsertBatch; + medida::Timer& mBucketAddLiveBatch; + medida::Timer& mBucketAddArchiveBatch; + medida::Timer& mBucketSnapMerge; + medida::Counter& mSharedBucketsSize; + medida::Meter& mBucketListDBBloomMisses; + medida::Meter& mBucketListDBBloomLookups; + medida::Counter& mLiveBucketListSizeCounter; + medida::Counter& mArchiveBucketListSizeCounter; + EvictionCounters mBucketListEvictionCounters; + MergeCounters mMergeCounters; + std::shared_ptr mEvictionStatistics{}; + std::map + mBucketListEntryCountCounters; + std::map + mBucketListEntrySizeCounters; + + std::future mEvictionFuture{}; + + bool const mDeleteEntireBucketDirInDtor; + // Copy app's config for thread-safe access + Config const mConfig; + + // Records bucket-merges that are currently _live_ in some FutureBucket, in + // the sense of either running, or finished (with or without the + // FutureBucket being resolved). 
Entries in this map will be cleared when + // the FutureBucket is _cleared_ (typically when the owning BucketList level + // is committed). + FutureMapT mLiveBucketFutures; + FutureMapT mHotArchiveBucketFutures; + + // Records bucket-merges that are _finished_, i.e. have been adopted as + // (possibly redundant) bucket files. This is a "weak" (bi-multi-)map of + // hashes, that does not count towards std::shared_ptr refcounts, i.e. does + // not keep either the output bucket or any of its input buckets + // alive. Needs to be queried and updated on mSharedBuckets GC events. + BucketMergeMap mFinishedMerges; + + std::atomic mIsShutdown{false}; + + void cleanupStaleFiles(); + void deleteTmpDirAndUnlockBucketDir(); + void deleteEntireBucketDir(); + + medida::Timer& recordBulkLoadMetrics(std::string const& label, + size_t numEntries) const; + medida::Timer& getPointLoadTimer(LedgerEntryType t) const; + + void updateSharedBucketSize(); + + template + std::shared_ptr adoptFileAsBucketInternal( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index, + BucketMapT& bucketMap, FutureMapT& futureMap); + + template + std::shared_ptr + getBucketByHashInternal(uint256 const& hash, + BucketMapT& bucketMap); + template + std::shared_ptr + getBucketIfExistsInternal(uint256 const& hash, + BucketMapT const& bucketMap) const; + + template + std::shared_future> + getMergeFutureInternal(MergeKey const& key, FutureMapT& futureMap); + + template + void + putMergeFutureInternal(MergeKey const& key, + std::shared_future> future, + FutureMapT& futureMap); + + template + void noteEmptyMergeOutputInternal(MergeKey const& mergeKey, + FutureMapT& futureMap); + +#ifdef BUILD_TESTS + bool mUseFakeTestValuesForNextClose{false}; + uint32_t mFakeTestProtocolVersion; + uint256 mFakeTestBucketListHash; +#endif + + protected: + BucketManager(Application& app); + void calculateSkipValues(LedgerHeader& currentHeader); + std::string bucketFilename(std::string const& bucketHexHash); + std::string bucketFilename(Hash const& hash); public: - static std::unique_ptr create(Application&); - - virtual ~BucketManager() - { - } - virtual void initialize() = 0; - virtual void dropAll() = 0; - virtual std::string bucketIndexFilename(Hash const& hash) const = 0; - virtual std::string const& getTmpDir() = 0; - virtual TmpDirManager& getTmpDirManager() = 0; - virtual std::string const& getBucketDir() const = 0; - virtual LiveBucketList& getLiveBucketList() = 0; - virtual HotArchiveBucketList& getHotArchiveBucketList() = 0; - virtual BucketSnapshotManager& getBucketSnapshotManager() const = 0; - virtual bool renameBucketDirFile(std::filesystem::path const& src, - std::filesystem::path const& dst) = 0; - - virtual medida::Timer& getMergeTimer() = 0; + static std::unique_ptr create(Application& app); + virtual ~BucketManager(); + + void initialize(); + void dropAll(); + std::string bucketIndexFilename(Hash const& hash) const; + std::string const& getTmpDir(); + TmpDirManager& getTmpDirManager(); + std::string const& getBucketDir() const; + LiveBucketList& getLiveBucketList(); + HotArchiveBucketList& getHotArchiveBucketList(); + BucketSnapshotManager& getBucketSnapshotManager() const; + bool renameBucketDirFile(std::filesystem::path const& src, + std::filesystem::path const& dst); + + medida::Timer& getMergeTimer(); // Reading and writing the merge counters is done in bulk, and takes a lock // briefly; this can be done from any thread. 
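The bulk counter API described above is intended for the following pattern; the worker function is an assumed example, not code from this patch:

#include "bucket/BucketManager.h"

namespace stellar
{
// Sketch: merge work accumulates a thread-local MergeCounters with no lock
// held, then publishes the whole delta through one brief locked call.
void
mergeWorkerSketch(BucketManager& bm)
{
    MergeCounters local;
    ++local.mNewLiveEntries; // per-entry bookkeeping, lock-free
    ++local.mOldDeadEntries;
    bm.incrMergeCounters(local);                  // single locked bulk update
    MergeCounters total = bm.readMergeCounters(); // bulk read, any thread
    (void)total;
}
}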
- virtual MergeCounters readMergeCounters() = 0; - virtual void incrMergeCounters(MergeCounters const& delta) = 0; + MergeCounters readMergeCounters(); + void incrMergeCounters(MergeCounters const& delta); // Get a reference to a persistent bucket (in the BucketManager's bucket // directory), from the BucketManager's shared bucket-set. @@ -217,12 +212,10 @@ class BucketManager : NonMovableOrCopyable // This method is mostly-threadsafe -- assuming you don't destruct the // BucketManager mid-call -- and is intended to be called from both main and // worker threads. Very carefully. - // Unfortunately, virtual methods cannot be templated, so we use this weird - // static interface to allow for a templated return type. template - static std::shared_ptr - adoptFileAsBucket(BucketManager& bm, std::string const& filename, - uint256 const& hash, MergeKey* mergeKey, + std::shared_ptr + adoptFileAsBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, std::unique_ptr index); // Companion method to `adoptFileAsLiveBucket` also called from the @@ -231,137 +224,120 @@ class BucketManager : NonMovableOrCopyable // doesn't correspond to a file on disk; the method forgets about the // `FutureBucket` associated with the in-progress merge, allowing the merge // inputs to be GC'ed. - // Unfortunately, virtual methods cannot be templated, so we use this weird - // static interface to allow for a templated return type. template - static void noteEmptyMergeOutput(BucketManager& bm, - MergeKey const& mergeKey); + void noteEmptyMergeOutput(MergeKey const& mergeKey); // Returns a bucket by hash if it exists and is currently managed by the // bucket list. - // Unfortunately, virtual methods cannot be templated, so we use this weird - // static interface to allow for a templated return type. template - static std::shared_ptr getBucketIfExists(BucketManager const& bm, - uint256 const& hash); + std::shared_ptr getBucketIfExists(uint256 const& hash); // Return a bucket by hash if we have it, else return nullptr. - // Unfortunately, virtual methods cannot be templated, so we use this weird - // static getter interface to allow for a templated return type. template - static std::shared_ptr getBucketByHash(BucketManager& bm, - uint256 const& hash); + std::shared_ptr getBucketByHash(uint256 const& hash); // Get a reference to a merge-future that's either running (or finished // somewhat recently) from either a map of the std::shared_futures doing the // merges and/or a set of records mapping merge inputs to outputs and the // set of outputs held in the BucketManager. Returns an invalid future if no // such future can be found or synthesized. - // Unfortunately, virtual methods cannot be templated, so we use this weird - // static getter interface to allow for a templated return type. template - static std::shared_future> - getMergeFuture(BucketManager& bucketManager, MergeKey const& key); + std::shared_future> + getMergeFuture(MergeKey const& key); // Add a reference to a merge _in progress_ (not yet adopted as a file) to // the BucketManager's internal map of std::shared_futures doing merges. // There is no corresponding entry-removal API: the std::shared_future will // be removed from the map when the merge completes and the output file is // adopted. - // Unfortunately, virtual methods cannot be templated, so we use this weird - // static interface to allow for a templated return type. 
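How putMergeFuture and getMergeFuture relate, sketched for the live case; the running future is assumed to come from an in-progress FutureBucket merge, and the function is illustrative, not part of this patch:

#include "bucket/BucketManager.h"
#include "bucket/LiveBucket.h"

namespace stellar
{
// Sketch: publish a running merge, then re-attach to it later by MergeKey
// instead of re-running the same merge.
void
mergeReattachSketch(BucketManager& bm, MergeKey const& key,
                    std::shared_future<std::shared_ptr<LiveBucket>> running)
{
    bm.putMergeFuture<LiveBucket>(key, running);
    // Later, a FutureBucket with identical inputs asks for the same merge:
    auto fut = bm.getMergeFuture<LiveBucket>(key);
    if (fut.valid()) // invalid if nothing could be found or synthesized
    {
        std::shared_ptr<LiveBucket> out = fut.get(); // waits for the merge
        (void)out;
    }
}
}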
template - static void - putMergeFuture(BucketManager& bm, MergeKey const& key, - std::shared_future> future); + void putMergeFuture(MergeKey const& key, + std::shared_future> future); #ifdef BUILD_TESTS // Drop all references to merge futures in progress. - virtual void clearMergeFuturesForTesting() = 0; + void clearMergeFuturesForTesting(); #endif // Forget any buckets not referenced by the current BucketList. This will // not immediately cause the buckets to delete themselves, if someone else // is using them via a shared_ptr<>, but the BucketManager will no longer // independently keep them alive. - virtual void forgetUnreferencedBuckets() = 0; + void forgetUnreferencedBuckets(); // Feed a new batch of entries to the bucket list. This interface expects to // be given separate init (created) and live (updated) entry vectors. The // `header` value should be taken from the ledger at which this batch is // being added. - virtual void addLiveBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) = 0; - virtual void - addHotArchiveBatch(Application& app, LedgerHeader header, - std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries) = 0; + void addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries); + void addHotArchiveBatch(Application& app, LedgerHeader header, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); // Update the given LedgerHeader's bucketListHash to reflect the current // state of the bucket list. - virtual void snapshotLedger(LedgerHeader& currentHeader) = 0; + void snapshotLedger(LedgerHeader& currentHeader); // Sets index for bucket b if b is not already indexed and if BucketManager // is not shutting down. In most cases, there should only be a single index // for each bucket. However, during startup there are race conditions where // a bucket may be indexed twice. If there is an index race, set index with - // this function, otherwise use Bucket::setIndex(). - virtual void maybeSetIndex(std::shared_ptr b, - std::unique_ptr&& index) = 0; + // this function, otherwise use BucketBase::setIndex(). + void maybeSetIndex(std::shared_ptr b, + std::unique_ptr&& index); // Scans BucketList for non-live entries to evict starting at the entry // pointed to by EvictionIterator. Scans until `maxEntriesToEvict` entries // have been evicted or maxEvictionScanSize bytes have been scanned. - virtual void scanForEvictionLegacy(AbstractLedgerTxn& ltx, - uint32_t ledgerSeq) = 0; + void scanForEvictionLegacy(AbstractLedgerTxn& ltx, uint32_t ledgerSeq); - virtual void startBackgroundEvictionScan(uint32_t ledgerSeq) = 0; - virtual void - resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq, - LedgerKeySet const& modifiedKeys) = 0; + void startBackgroundEvictionScan(uint32_t ledgerSeq); + void resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, + uint32_t ledgerSeq, + LedgerKeySet const& modifiedKeys); - virtual medida::Meter& getBloomMissMeter() const = 0; - virtual medida::Meter& getBloomLookupMeter() const = 0; + medida::Meter& getBloomMissMeter() const; + medida::Meter& getBloomLookupMeter() const; #ifdef BUILD_TESTS // Install a fake/assumed ledger version and bucket list hash to use in next // call to addLiveBatch and snapshotLedger. 
This interface exists only for // testing in a specific type of history replay. - virtual void setNextCloseVersionAndHashForTesting(uint32_t protocolVers, - uint256 const& hash) = 0; + void setNextCloseVersionAndHashForTesting(uint32_t protocolVers, + uint256 const& hash); // Return the set of buckets in the current `getBucketDir()` directory. // This interface exists only for checking that the BucketDir isn't // leaking buckets, in tests. - virtual std::set getBucketHashesInBucketDirForTesting() const = 0; + std::set getBucketHashesInBucketDirForTesting() const; - virtual medida::Counter& getEntriesEvictedCounter() const = 0; + medida::Counter& getEntriesEvictedCounter() const; #endif // Return the set of buckets referenced by the BucketList - virtual std::set getBucketListReferencedBuckets() const = 0; + std::set getBucketListReferencedBuckets() const; // Return the set of buckets referenced by the BucketList, LCL HAS, // and publish queue. - virtual std::set getAllReferencedBuckets() const = 0; + std::set getAllReferencedBuckets() const; // Check for missing bucket files that would prevent `assumeState` from // succeeding - virtual std::vector - checkForMissingBucketsFiles(HistoryArchiveState const& has) = 0; + std::vector + checkForMissingBucketsFiles(HistoryArchiveState const& has); // Assume state from `has` in BucketList: find and attach all buckets in // `has`, set current BL. - virtual void assumeState(HistoryArchiveState const& has, - uint32_t maxProtocolVersion, - bool restartMerges) = 0; + void assumeState(HistoryArchiveState const& has, + uint32_t maxProtocolVersion, bool restartMerges); - virtual void shutdown() = 0; + void shutdown(); - virtual bool isShutdown() const = 0; + bool isShutdown() const; // Load the complete state of the ledger from the provided HAS. Throws if // any of the buckets referenced in the HAS do not exist. @@ -372,13 +348,12 @@ class BucketManager : NonMovableOrCopyable // // Also note: this returns a large map -- likely multiple GB of memory on // public nodes. The whole ledger. Call carefully, and only offline. - virtual std::map - loadCompleteLedgerState(HistoryArchiveState const& has) = 0; + std::map + loadCompleteLedgerState(HistoryArchiveState const& has); // Merge the bucket list of the provided HAS into a single "super bucket" // consisting of only live entries, and return it. - virtual std::shared_ptr - mergeBuckets(HistoryArchiveState const& has) = 0; + std::shared_ptr mergeBuckets(HistoryArchiveState const& has); // Visits all the active ledger entries or subset thereof. // @@ -397,23 +372,27 @@ class BucketManager : NonMovableOrCopyable // When `filterEntry` and `acceptEntry` always return `true`, this is // equivalent to iterating over `loadCompleteLedgerState`, so the same // memory/runtime implications apply. - virtual void visitLedgerEntries( + void visitLedgerEntries( HistoryArchiveState const& has, std::optional minLedger, std::function const& filterEntry, std::function const& acceptEntry, - bool includeAllStates) = 0; + bool includeAllStates); // Schedule a Work class that verifies the hashes of all referenced buckets // on background threads. 
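The expected call shape for the verification hook above; the status check is assumed usage of stellar-core's BasicWork API, not code from this patch:

#include "bucket/BucketManager.h"
#include "util/Logging.h"
#include "work/BasicWork.h"

namespace stellar
{
// Sketch: schedule hash verification of every referenced bucket on
// background threads and observe the resulting Work's state.
void
verifyBucketsSketch(BucketManager& bm)
{
    auto work = bm.scheduleVerifyReferencedBucketsWork();
    if (work && work->getState() == BasicWork::State::WORK_SUCCESS)
    {
        CLOG_INFO(Bucket, "all referenced buckets verified");
    }
}
}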
- virtual std::shared_ptr - scheduleVerifyReferencedBucketsWork() = 0; + std::shared_ptr scheduleVerifyReferencedBucketsWork(); - virtual Config const& getConfig() const = 0; + Config const& getConfig() const; // Get bucketlist snapshot - virtual std::shared_ptr - getSearchableLiveBucketListSnapshot() = 0; + std::shared_ptr + getSearchableLiveBucketListSnapshot(); - virtual void reportBucketEntryCountMetrics() = 0; + void reportBucketEntryCountMetrics(); }; + +#define SKIP_1 50 +#define SKIP_2 5000 +#define SKIP_3 50000 +#define SKIP_4 500000 } diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h deleted file mode 100644 index b3de18c8c2..0000000000 --- a/src/bucket/BucketManagerImpl.h +++ /dev/null @@ -1,252 +0,0 @@ -#pragma once - -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" -#include "bucket/BucketManager.h" -#include "bucket/BucketMergeMap.h" -#include "main/Config.h" -#include "xdr/Stellar-ledger.h" - -#include -#include -#include -#include -#include - -// Copyright 2015 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -namespace medida -{ -class Timer; -class Meter; -class Counter; -} - -namespace stellar -{ - -class TmpDir; -class AbstractLedgerTxn; -class Application; -class Bucket; -class LiveBucketList; -class BucketSnapshotManager; -struct BucketEntryCounters; -enum class LedgerEntryTypeAndDurability : uint32_t; - -struct HistoryArchiveState; - -class BucketManagerImpl : public BucketManager -{ - template - using BucketMapT = std::map>; - - template - using FutureMapT = - UnorderedMap>>; - - static std::string const kLockFilename; - - Application& mApp; - std::unique_ptr mLiveBucketList; - std::unique_ptr mHotArchiveBucketList; - std::unique_ptr mSnapshotManager; - std::unique_ptr mTmpDirManager; - std::unique_ptr mWorkDir; - BucketMapT mSharedLiveBuckets; - BucketMapT mSharedHotArchiveBuckets; - std::shared_ptr - mSearchableBucketListSnapshot{}; - - // Lock for managing raw Bucket files or the bucket directory. This lock is - // only required for file access, but is not required for logical changes to - // a BucketList (i.e. addLiveBatch). - mutable std::recursive_mutex mBucketMutex; - std::unique_ptr mLockedBucketDir; - medida::Meter& mBucketLiveObjectInsertBatch; - medida::Meter& mBucketArchiveObjectInsertBatch; - medida::Timer& mBucketAddLiveBatch; - medida::Timer& mBucketAddArchiveBatch; - medida::Timer& mBucketSnapMerge; - medida::Counter& mSharedBucketsSize; - medida::Meter& mBucketListDBBloomMisses; - medida::Meter& mBucketListDBBloomLookups; - medida::Counter& mLiveBucketListSizeCounter; - medida::Counter& mArchiveBucketListSizeCounter; - EvictionCounters mBucketListEvictionCounters; - MergeCounters mMergeCounters; - std::shared_ptr mEvictionStatistics{}; - std::map - mBucketListEntryCountCounters; - std::map - mBucketListEntrySizeCounters; - - std::future mEvictionFuture{}; - - bool const mDeleteEntireBucketDirInDtor; - // Copy app's config for thread-safe access - Config const mConfig; - - // Records bucket-merges that are currently _live_ in some FutureBucket, in - // the sense of either running, or finished (with or without the - // FutureBucket being resolved). Entries in this map will be cleared when - // the FutureBucket is _cleared_ (typically when the owning BucketList level - // is committed). 
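(Aside, not part of the patch: the merge-future maps documented in the hunk above back the reattachment path that FutureBucket::startMerge uses later in this patch: before starting a merge, the manager looks for an in-progress shared_future under the same MergeKey and reattaches to it instead of redoing the work. A minimal standalone sketch of that lookup-or-start pattern, with FakeMergeKey and an int payload as illustrative stand-ins for the real types:)

    #include <future>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-in for MergeKey (the real key hashes the merge inputs).
    using FakeMergeKey = std::string;

    std::unordered_map<FakeMergeKey, std::shared_future<int>> liveFutures;

    // Returns an invalid (default-constructed) future when no merge is running.
    std::shared_future<int>
    getMergeFuture(FakeMergeKey const& k)
    {
        auto it = liveFutures.find(k);
        return it == liveFutures.end() ? std::shared_future<int>{} : it->second;
    }

    int
    main()
    {
        FakeMergeKey mk = "curr+snap";
        auto f = getMergeFuture(mk);
        if (!f.valid())
        {
            // No running merge for this key: start one and record it so a
            // later caller with the same key reattaches instead of restarting.
            f = std::async(std::launch::async, [] { return 42; }).share();
            liveFutures.emplace(mk, f);
        }
        std::cout << f.get() << "\n"; // 42
    }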
- FutureMapT mLiveBucketFutures; - FutureMapT mHotArchiveBucketFutures; - - // Records bucket-merges that are _finished_, i.e. have been adopted as - // (possibly redundant) bucket files. This is a "weak" (bi-multi-)map of - // hashes, that does not count towards std::shared_ptr refcounts, i.e. does - // not keep either the output bucket or any of its input buckets - // alive. Needs to be queried and updated on mSharedBuckets GC events. - BucketMergeMap mFinishedMerges; - - std::atomic mIsShutdown{false}; - - void cleanupStaleFiles(); - void deleteTmpDirAndUnlockBucketDir(); - void deleteEntireBucketDir(); - - medida::Timer& recordBulkLoadMetrics(std::string const& label, - size_t numEntries) const; - medida::Timer& getPointLoadTimer(LedgerEntryType t) const; - - template - std::shared_ptr adoptFileAsBucket( - std::string const& filename, uint256 const& hash, MergeKey* mergeKey, - std::unique_ptr index, - BucketMapT& bucketMap, FutureMapT& futureMap); - - template - std::shared_ptr getBucketByHash(uint256 const& hash, - BucketMapT& bucketMap); - template - std::shared_ptr - getBucketIfExists(uint256 const& hash, - BucketMapT const& bucketMap) const; - - template - std::shared_future> - getMergeFuture(MergeKey const& key, FutureMapT& futureMap); - - template - void putMergeFuture(MergeKey const& key, - std::shared_future> future, - FutureMapT& futureMap); - - template - void noteEmptyMergeOutput(MergeKey const& mergeKey, - FutureMapT& futureMap); - - void updateSharedBucketSize(); - -#ifdef BUILD_TESTS - bool mUseFakeTestValuesForNextClose{false}; - uint32_t mFakeTestProtocolVersion; - uint256 mFakeTestBucketListHash; -#endif - - protected: - void calculateSkipValues(LedgerHeader& currentHeader); - std::string bucketFilename(std::string const& bucketHexHash); - std::string bucketFilename(Hash const& hash); - - public: - BucketManagerImpl(Application& app); - ~BucketManagerImpl() override; - void initialize() override; - void dropAll() override; - std::string bucketIndexFilename(Hash const& hash) const override; - std::string const& getTmpDir() override; - std::string const& getBucketDir() const override; - LiveBucketList& getLiveBucketList() override; - HotArchiveBucketList& getHotArchiveBucketList() override; - BucketSnapshotManager& getBucketSnapshotManager() const override; - medida::Timer& getMergeTimer() override; - MergeCounters readMergeCounters() override; - void incrMergeCounters(MergeCounters const&) override; - TmpDirManager& getTmpDirManager() override; - bool renameBucketDirFile(std::filesystem::path const& src, - std::filesystem::path const& dst) override; - -#ifdef BUILD_TESTS - void clearMergeFuturesForTesting() override; -#endif - - void forgetUnreferencedBuckets() override; - void addLiveBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) override; - void - addHotArchiveBatch(Application& app, LedgerHeader header, - std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries) override; - void snapshotLedger(LedgerHeader& currentHeader) override; - void maybeSetIndex(std::shared_ptr b, - std::unique_ptr&& index) override; - void scanForEvictionLegacy(AbstractLedgerTxn& ltx, - uint32_t ledgerSeq) override; - void startBackgroundEvictionScan(uint32_t ledgerSeq) override; - void - resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq, - LedgerKeySet const& modifiedKeys) override; - - medida::Meter& 
getBloomMissMeter() const override; - medida::Meter& getBloomLookupMeter() const override; - -#ifdef BUILD_TESTS - // Install a fake/assumed ledger version and bucket list hash to use in next - // call to addLiveBatch and snapshotLedger. This interface exists only for - // testing in a specific type of history replay. - void setNextCloseVersionAndHashForTesting(uint32_t protocolVers, - uint256 const& hash) override; - - std::set getBucketHashesInBucketDirForTesting() const override; - - medida::Counter& getEntriesEvictedCounter() const override; -#endif - - std::set getBucketListReferencedBuckets() const override; - std::set getAllReferencedBuckets() const override; - std::vector - checkForMissingBucketsFiles(HistoryArchiveState const& has) override; - void assumeState(HistoryArchiveState const& has, - uint32_t maxProtocolVersion, bool restartMerges) override; - void shutdown() override; - - bool isShutdown() const override; - - std::map - loadCompleteLedgerState(HistoryArchiveState const& has) override; - - std::shared_ptr - mergeBuckets(HistoryArchiveState const& has) override; - - void visitLedgerEntries( - HistoryArchiveState const& has, std::optional minLedger, - std::function const& filterEntry, - std::function const& acceptEntry, - bool includeAllStates) override; - - std::shared_ptr scheduleVerifyReferencedBucketsWork() override; - - Config const& getConfig() const override; - - std::shared_ptr - getSearchableLiveBucketListSnapshot() override; - - void reportBucketEntryCountMetrics() override; - - friend class BucketManager; -}; - -#define SKIP_1 50 -#define SKIP_2 5000 -#define SKIP_3 50000 -#define SKIP_4 500000 -} diff --git a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp index 0f8e3f2c81..dbc9e7c159 100644 --- a/src/bucket/BucketOutputIterator.cpp +++ b/src/bucket/BucketOutputIterator.cpp @@ -3,13 +3,13 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketOutputIterator.h" -#include "bucket/Bucket.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "ledger/LedgerTypeUtils.h" #include "util/GlobalChecks.h" #include "util/ProtocolVersion.h" -#include "xdr/Stellar-ledger.h" #include #include @@ -27,7 +27,7 @@ BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, MergeCounters& mc, asio::io_context& ctx, bool doFsync) - : mFilename(Bucket::randomBucketName(tmpDir)) + : mFilename(BucketBase::randomBucketName(tmpDir)) , mOut(ctx, doFsync) , mCtx(ctx) , mBuf(nullptr) @@ -55,9 +55,11 @@ BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, } else { + static_assert(std::is_same_v, + "unexpected bucket type"); releaseAssertOrThrow(protocolVersionStartsFrom( meta.ledgerVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); HotArchiveBucketEntry bme; bme.type(HOT_ARCHIVE_METAENTRY); @@ -95,6 +97,8 @@ BucketOutputIterator::put(typename BucketT::EntryT const& e) } else { + static_assert(std::is_same_v, + "unexpected bucket type"); if (e.type() == HOT_ARCHIVE_METAENTRY) { if (mPutMeta) @@ -181,8 +185,7 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, std::filesystem::remove(mFilename); if (mergeKey) { - BucketManager::noteEmptyMergeOutput(bucketManager, - *mergeKey); + bucketManager.noteEmptyMergeOutput(*mergeKey); } return std::make_shared(); } @@ -195,8 +198,7 @@ 
BucketOutputIterator::getBucket(BucketManager& bucketManager, { // either it's a new bucket or we just reconstructed a bucket // we already have, in any case ensure we have an index - if (auto b = - BucketManager::getBucketIfExists(bucketManager, hash); + if (auto b = bucketManager.getBucketIfExists(hash); !b || !b->isIndexed()) { index = BucketIndex::createIndex(bucketManager, mFilename, @@ -204,8 +206,8 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, } } - return BucketManager::adoptFileAsBucket( - bucketManager, mFilename.string(), hash, mergeKey, std::move(index)); + return bucketManager.adoptFileAsBucket(mFilename.string(), hash, + mergeKey, std::move(index)); } template class BucketOutputIterator; diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h index 7757ac56cf..9e72bebacf 100644 --- a/src/bucket/BucketOutputIterator.h +++ b/src/bucket/BucketOutputIterator.h @@ -4,12 +4,10 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketManager.h" #include "bucket/BucketUtils.h" #include "bucket/LedgerCmp.h" #include "util/XDRStream.h" -#include "xdr/Stellar-ledger.h" #include #include @@ -58,7 +56,4 @@ template class BucketOutputIterator bool shouldSynchronouslyIndex, MergeKey* mergeKey = nullptr); }; - -typedef BucketOutputIterator LiveBucketOutputIterator; -typedef BucketOutputIterator HotArchiveBucketOutputIterator; } diff --git a/src/bucket/BucketSnapshot.cpp b/src/bucket/BucketSnapshot.cpp index a419c84f00..a87c359ad8 100644 --- a/src/bucket/BucketSnapshot.cpp +++ b/src/bucket/BucketSnapshot.cpp @@ -3,8 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketSnapshot.h" -#include "bucket/Bucket.h" -#include "bucket/BucketListSnapshot.h" +#include "bucket/BucketIndex.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" +#include "bucket/SearchableBucketList.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTypeUtils.h" #include "util/XDRStream.h" @@ -147,6 +149,8 @@ BucketSnapshotBase::loadKeys( } else { + static_assert(std::is_same_v, + "unexpected bucket type"); result.push_back(*entryOp); } } @@ -172,7 +176,7 @@ LiveBucketSnapshot::getPoolIDsByAsset(Asset const& asset) const return mBucket->getIndex().getPoolIDsByAsset(asset); } -bool +Loop LiveBucketSnapshot::scanForEviction( EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, std::list& evictableKeys, @@ -183,13 +187,13 @@ LiveBucketSnapshot::scanForEviction( SOROBAN_PROTOCOL_VERSION)) { // EOF, skip to next bucket - return false; + return Loop::INCOMPLETE; } if (bytesToScan == 0) { // Reached end of scan region - return true; + return Loop::COMPLETE; } std::list maybeEvictQueue; @@ -197,7 +201,7 @@ LiveBucketSnapshot::scanForEviction( auto processQueue = [&]() { auto loadResult = populateLoadedEntries( - keysToSearch, bl.loadKeysWithLimits(keysToSearch)); + keysToSearch, bl.loadKeysWithLimits(keysToSearch, nullptr)); for (auto& e : maybeEvictQueue) { // If TTL entry has not yet been deleted @@ -253,7 +257,7 @@ LiveBucketSnapshot::scanForEviction( // Reached end of scan region bytesToScan = 0; processQueue(); - return true; + return Loop::COMPLETE; } bytesToScan -= bytesRead; @@ -261,7 +265,7 @@ LiveBucketSnapshot::scanForEviction( // Hit eof processQueue(); - return false; + return Loop::INCOMPLETE; } template diff --git 
a/src/bucket/BucketSnapshot.h b/src/bucket/BucketSnapshot.h index 71f33e448f..65b00022a1 100644 --- a/src/bucket/BucketSnapshot.h +++ b/src/bucket/BucketSnapshot.h @@ -4,10 +4,12 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" #include "bucket/LedgerCmp.h" +#include "bucket/LiveBucket.h" #include "util/NonCopyable.h" +#include "util/XDRStream.h" #include "xdr/Stellar-ledger-entries.h" #include #include @@ -15,7 +17,6 @@ namespace stellar { -class XDRInputFileStream; struct EvictionResultEntry; class LedgerKeyMeter; class SearchableLiveBucketListSnapshot; @@ -81,7 +82,7 @@ class LiveBucketSnapshot : public BucketSnapshotBase // pool std::vector const& getPoolIDsByAsset(Asset const& asset) const; - bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, + Loop scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, std::list& evictableKeys, SearchableLiveBucketListSnapshot& bl) const; diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index 36faf9519a..0cad98addf 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -3,9 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketSnapshotManager.h" -#include "bucket/Bucket.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" +#include "bucket/SearchableBucketList.h" #include "main/Application.h" #include "util/GlobalChecks.h" #include "util/XDRStream.h" // IWYU pragma: keep diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h index af66e918b6..d2c797238f 100644 --- a/src/bucket/BucketSnapshotManager.h +++ b/src/bucket/BucketSnapshotManager.h @@ -4,9 +4,9 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" -#include "bucket/BucketManagerImpl.h" +#include "bucket/BucketManager.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "util/NonCopyable.h" #include "util/UnorderedMap.h" diff --git a/src/bucket/BucketUtils.cpp b/src/bucket/BucketUtils.cpp new file mode 100644 index 0000000000..830d01545c --- /dev/null +++ b/src/bucket/BucketUtils.cpp @@ -0,0 +1,153 @@ +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/BucketUtils.h" +#include + +namespace stellar +{ + +MergeCounters& +MergeCounters::operator+=(MergeCounters const& delta) +{ + mPreInitEntryProtocolMerges += delta.mPreInitEntryProtocolMerges; + mPostInitEntryProtocolMerges += delta.mPostInitEntryProtocolMerges; + + mRunningMergeReattachments += delta.mRunningMergeReattachments; + mFinishedMergeReattachments += delta.mFinishedMergeReattachments; + + mPreShadowRemovalProtocolMerges += delta.mPreShadowRemovalProtocolMerges; + mPostShadowRemovalProtocolMerges += delta.mPostShadowRemovalProtocolMerges; + + mNewMetaEntries += delta.mNewMetaEntries; + mNewInitEntries += delta.mNewInitEntries; + mNewLiveEntries += delta.mNewLiveEntries; + mNewDeadEntries += delta.mNewDeadEntries; + mOldMetaEntries += delta.mOldMetaEntries; + mOldInitEntries += delta.mOldInitEntries; + mOldLiveEntries += delta.mOldLiveEntries; + mOldDeadEntries += delta.mOldDeadEntries; + + mOldEntriesDefaultAccepted += delta.mOldEntriesDefaultAccepted; + mNewEntriesDefaultAccepted += delta.mNewEntriesDefaultAccepted; + mNewInitEntriesMergedWithOldDead += delta.mNewInitEntriesMergedWithOldDead; + mOldInitEntriesMergedWithNewLive += delta.mOldInitEntriesMergedWithNewLive; + mOldInitEntriesMergedWithNewDead += delta.mOldInitEntriesMergedWithNewDead; + mNewEntriesMergedWithOldNeitherInit += + delta.mNewEntriesMergedWithOldNeitherInit; + + mShadowScanSteps += delta.mShadowScanSteps; + mMetaEntryShadowElisions += delta.mMetaEntryShadowElisions; + mLiveEntryShadowElisions += delta.mLiveEntryShadowElisions; + mInitEntryShadowElisions += delta.mInitEntryShadowElisions; + mDeadEntryShadowElisions += delta.mDeadEntryShadowElisions; + + mOutputIteratorTombstoneElisions += delta.mOutputIteratorTombstoneElisions; + mOutputIteratorBufferUpdates += delta.mOutputIteratorBufferUpdates; + mOutputIteratorActualWrites += delta.mOutputIteratorActualWrites; + return *this; +} + +bool +MergeCounters::operator==(MergeCounters const& other) const +{ + return ( + mPreInitEntryProtocolMerges == other.mPreInitEntryProtocolMerges && + mPostInitEntryProtocolMerges == other.mPostInitEntryProtocolMerges && + + mRunningMergeReattachments == other.mRunningMergeReattachments && + mFinishedMergeReattachments == other.mFinishedMergeReattachments && + + mNewMetaEntries == other.mNewMetaEntries && + mNewInitEntries == other.mNewInitEntries && + mNewLiveEntries == other.mNewLiveEntries && + mNewDeadEntries == other.mNewDeadEntries && + mOldMetaEntries == other.mOldMetaEntries && + mOldInitEntries == other.mOldInitEntries && + mOldLiveEntries == other.mOldLiveEntries && + mOldDeadEntries == other.mOldDeadEntries && + + mOldEntriesDefaultAccepted == other.mOldEntriesDefaultAccepted && + mNewEntriesDefaultAccepted == other.mNewEntriesDefaultAccepted && + mNewInitEntriesMergedWithOldDead == + other.mNewInitEntriesMergedWithOldDead && + mOldInitEntriesMergedWithNewLive == + other.mOldInitEntriesMergedWithNewLive && + mOldInitEntriesMergedWithNewDead == + other.mOldInitEntriesMergedWithNewDead && + mNewEntriesMergedWithOldNeitherInit == + other.mNewEntriesMergedWithOldNeitherInit && + + mShadowScanSteps == other.mShadowScanSteps && + mMetaEntryShadowElisions == other.mMetaEntryShadowElisions && + mLiveEntryShadowElisions == other.mLiveEntryShadowElisions && + mInitEntryShadowElisions == other.mInitEntryShadowElisions && + mDeadEntryShadowElisions == other.mDeadEntryShadowElisions && + + 
mOutputIteratorTombstoneElisions == + other.mOutputIteratorTombstoneElisions && + mOutputIteratorBufferUpdates == other.mOutputIteratorBufferUpdates && + mOutputIteratorActualWrites == other.mOutputIteratorActualWrites); +} + +// Check that eviction scan is based off of current ledger snapshot and that +// archival settings have not changed +bool +EvictionResult::isValid(uint32_t currLedger, + StateArchivalSettings const& currSas) const +{ + return initialLedger == currLedger && + initialSas.maxEntriesToArchive == currSas.maxEntriesToArchive && + initialSas.evictionScanSize == currSas.evictionScanSize && + initialSas.startingEvictionScanLevel == + currSas.startingEvictionScanLevel; +} + +EvictionCounters::EvictionCounters(Application& app) + : entriesEvicted(app.getMetrics().NewCounter( + {"state-archival", "eviction", "entries-evicted"})) + , bytesScannedForEviction(app.getMetrics().NewCounter( + {"state-archival", "eviction", "bytes-scanned"})) + , incompleteBucketScan(app.getMetrics().NewCounter( + {"state-archival", "eviction", "incomplete-scan"})) + , evictionCyclePeriod( + app.getMetrics().NewCounter({"state-archival", "eviction", "period"})) + , averageEvictedEntryAge( + app.getMetrics().NewCounter({"state-archival", "eviction", "age"})) +{ +} + +void +EvictionStatistics::recordEvictedEntry(uint64_t age) +{ + std::lock_guard l(mLock); + ++mNumEntriesEvicted; + mEvictedEntriesAgeSum += age; +} + +void +EvictionStatistics::submitMetricsAndRestartCycle(uint32_t currLedgerSeq, + EvictionCounters& counters) +{ + std::lock_guard l(mLock); + + // Only record metrics if we've seen a complete cycle to avoid noise + if (mCompleteCycle) + { + counters.evictionCyclePeriod.set_count(currLedgerSeq - + mEvictionCycleStartLedger); + + auto averageAge = mNumEntriesEvicted == 0 + ? 0 + : mEvictedEntriesAgeSum / mNumEntriesEvicted; + counters.averageEvictedEntryAge.set_count(averageAge); + } + + // Reset to start new cycle + mCompleteCycle = true; + mEvictedEntriesAgeSum = 0; + mNumEntriesEvicted = 0; + mEvictionCycleStartLedger = currLedgerSeq; +} +} diff --git a/src/bucket/BucketUtils.h b/src/bucket/BucketUtils.h index 0186671edc..ebc98f79dd 100644 --- a/src/bucket/BucketUtils.h +++ b/src/bucket/BucketUtils.h @@ -4,11 +4,143 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "main/Application.h" +#include "xdr/Stellar-ledger-entries.h" +#include +#include +#include + +namespace medida +{ +class Counter; +} namespace stellar { +class Application; + #define BUCKET_TYPE_ASSERT(BucketT) \ static_assert(std::is_same_v || \ std::is_same_v, \ "BucketT must be a Bucket type") + +// A fine-grained merge-operation-counter structure for tracking various +// events during merges. These are not medida counters because we do not +// want or need to publish this level of granularity outside of testing, and +// we do want merges to run as quickly as possible. 
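(Aside, not part of the patch: MergeCounters, declared just below, is deliberately a plain struct of uint64_t fields folded together with operator+=; each merge produces a delta that callers accumulate into a running total via incrMergeCounters. A trimmed-down standalone illustration of that fold-a-delta pattern, with TinyCounters as an invented two-field stand-in:)

    #include <cstdint>
    #include <iostream>

    // Invented two-field stand-in for the much larger MergeCounters below.
    struct TinyCounters
    {
        uint64_t newLiveEntries{0};
        uint64_t oldDeadEntries{0};

        TinyCounters&
        operator+=(TinyCounters const& delta)
        {
            newLiveEntries += delta.newLiveEntries;
            oldDeadEntries += delta.oldDeadEntries;
            return *this;
        }
    };

    int
    main()
    {
        TinyCounters total;
        total += TinyCounters{3, 1}; // delta from one merge
        total += TinyCounters{2, 4}; // delta from another
        std::cout << total.newLiveEntries << " " << total.oldDeadEntries
                  << "\n"; // prints: 5 5
    }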
+struct MergeCounters +{ + uint64_t mPreInitEntryProtocolMerges{0}; + uint64_t mPostInitEntryProtocolMerges{0}; + + uint64_t mRunningMergeReattachments{0}; + uint64_t mFinishedMergeReattachments{0}; + + uint64_t mPreShadowRemovalProtocolMerges{0}; + uint64_t mPostShadowRemovalProtocolMerges{0}; + + uint64_t mNewMetaEntries{0}; + uint64_t mNewInitEntries{0}; + uint64_t mNewLiveEntries{0}; + uint64_t mNewDeadEntries{0}; + uint64_t mOldMetaEntries{0}; + uint64_t mOldInitEntries{0}; + uint64_t mOldLiveEntries{0}; + uint64_t mOldDeadEntries{0}; + + uint64_t mOldEntriesDefaultAccepted{0}; + uint64_t mNewEntriesDefaultAccepted{0}; + uint64_t mNewInitEntriesMergedWithOldDead{0}; + uint64_t mOldInitEntriesMergedWithNewLive{0}; + uint64_t mOldInitEntriesMergedWithNewDead{0}; + uint64_t mNewEntriesMergedWithOldNeitherInit{0}; + + uint64_t mShadowScanSteps{0}; + uint64_t mMetaEntryShadowElisions{0}; + uint64_t mLiveEntryShadowElisions{0}; + uint64_t mInitEntryShadowElisions{0}; + uint64_t mDeadEntryShadowElisions{0}; + + uint64_t mOutputIteratorTombstoneElisions{0}; + uint64_t mOutputIteratorBufferUpdates{0}; + uint64_t mOutputIteratorActualWrites{0}; + MergeCounters& operator+=(MergeCounters const& delta); + bool operator==(MergeCounters const& other) const; +}; + +// Stores a key that is eligible for eviction and the position of the eviction +// iterator as if that key were the last entry evicted +struct EvictionResultEntry +{ + LedgerKey key; + EvictionIterator iter; + uint32_t liveUntilLedger; + + EvictionResultEntry(LedgerKey const& key, EvictionIterator const& iter, + uint32_t liveUntilLedger) + : key(key), iter(iter), liveUntilLedger(liveUntilLedger) + { + } +}; + +struct EvictionResult +{ + // List of keys eligible for eviction in the order in which they occur in + // the bucket + std::list eligibleKeys{}; + + // Eviction iterator at the end of the scan region + EvictionIterator endOfRegionIterator; + + // LedgerSeq which this scan is based on + uint32_t initialLedger{}; + + // State archival settings that this scan is based on + StateArchivalSettings initialSas; + + EvictionResult(StateArchivalSettings const& sas) : initialSas(sas) + { + } + + // Returns true if this is a valid archival scan for the current ledger + // and archival settings. This is necessary because we start the scan + // for ledger N immediately after N - 1 closes. However, ledger N may + // contain a network upgrade changing eviction scan settings. Legacy SQL + // scans will run based on the changes that occurred during ledger N, + // meaning the scan we started at ledger N - 1 is invalid since it was based + // off of older settings.
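(Aside, not part of the patch: a standalone sketch of the validity check described above, using an invented FakeSas subset of StateArchivalSettings. A scan keyed to ledger N and the pre-upgrade settings is reusable only if both still match when the result is applied:)

    #include <cstdint>
    #include <iostream>

    // Invented subset of StateArchivalSettings, for illustration only.
    struct FakeSas
    {
        uint32_t evictionScanSize;
        uint32_t startingEvictionScanLevel;
    };

    // Mirrors EvictionResult::isValid: a background scan result is usable only
    // if neither the ledger it was keyed to nor the settings have changed.
    bool
    isScanValid(uint32_t scanLedger, FakeSas const& scanSas, uint32_t currLedger,
                FakeSas const& currSas)
    {
        return scanLedger == currLedger &&
               scanSas.evictionScanSize == currSas.evictionScanSize &&
               scanSas.startingEvictionScanLevel ==
                   currSas.startingEvictionScanLevel;
    }

    int
    main()
    {
        FakeSas before{100000, 6};
        FakeSas upgraded{50000, 6}; // network upgrade during ledger N
        // Scan started as ledger N-1 closed, checked while closing ledger N:
        std::cout << isScanValid(10, before, 10, before) << "\n";   // 1: reuse
        std::cout << isScanValid(10, before, 10, upgraded) << "\n"; // 0: redo
    }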
+ bool isValid(uint32_t currLedger, + StateArchivalSettings const& currSas) const; +}; + +struct EvictionCounters +{ + medida::Counter& entriesEvicted; + medida::Counter& bytesScannedForEviction; + medida::Counter& incompleteBucketScan; + medida::Counter& evictionCyclePeriod; + medida::Counter& averageEvictedEntryAge; + + EvictionCounters(Application& app); +}; + +class EvictionStatistics +{ + private: + std::mutex mLock{}; + + // Only record metrics if we've seen a complete cycle to avoid noise + bool mCompleteCycle{false}; + uint64_t mEvictedEntriesAgeSum{}; + uint64_t mNumEntriesEvicted{}; + uint32_t mEvictionCycleStartLedger{}; + + public: + // Evicted entry "age" is the delta between its liveUntilLedger and the + // ledger when the entry is actually evicted + void recordEvictedEntry(uint64_t age); + + void submitMetricsAndRestartCycle(uint32_t currLedgerSeq, + EvictionCounters& counters); +}; } \ No newline at end of file diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp index 8aa6720b9e..acbd13a758 100644 --- a/src/bucket/FutureBucket.cpp +++ b/src/bucket/FutureBucket.cpp @@ -7,10 +7,10 @@ // else. #include "util/asio.h" // IWYU pragma: keep -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" #include "bucket/FutureBucket.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "bucket/MergeKey.h" #include "crypto/Hex.h" #include "main/Application.h" @@ -65,7 +65,7 @@ FutureBucket::FutureBucket( if (!snap->isEmpty() && protocolVersionIsBefore( snap->getBucketVersion(), - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { throw std::runtime_error( "Invalid ArchivalFutureBucket: ledger version doesn't support " @@ -378,7 +378,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, curr->getHash(), snap->getHash(), shadowHashes}; std::shared_future> f; - f = BucketManager::getMergeFuture(bm, mk); + f = bm.getMergeFuture(mk); if (f.valid()) { @@ -407,7 +407,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, ZoneNamedN(mergeZone, "Merge task", true); ZoneValueV(mergeZone, static_cast(level)); - auto res = Bucket::merge( + auto res = BucketBase::merge( bm, maxProtocolVersion, curr, snap, shadows, BucketListBase::keepTombstoneEntries(level), countMergeEvents, ctx, doFsync); @@ -440,7 +440,7 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, }); mOutputBucketFuture = task->get_future().share(); - BucketManager::putMergeFuture(bm, mk, mOutputBucketFuture); + bm.putMergeFuture(mk, mOutputBucketFuture); app.postOnBackgroundThread(bind(&task_t::operator(), task), "FutureBucket: merge"); checkState(); @@ -458,24 +458,22 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, auto& bm = app.getBucketManager(); if (hasOutputHash()) { - auto b = BucketManager::getBucketByHash( - bm, hexToBin256(getOutputHash())); + auto b = bm.getBucketByHash(hexToBin256(getOutputHash())); setLiveOutput(b); } else { releaseAssert(mState == FB_HASH_INPUTS); - mInputCurrBucket = BucketManager::getBucketByHash( - bm, hexToBin256(mInputCurrBucketHash)); - mInputSnapBucket = BucketManager::getBucketByHash( - bm, hexToBin256(mInputSnapBucketHash)); + mInputCurrBucket = + bm.getBucketByHash(hexToBin256(mInputCurrBucketHash)); + mInputSnapBucket = + bm.getBucketByHash(hexToBin256(mInputSnapBucketHash)); releaseAssert(mInputShadowBuckets.empty()); for (auto const& h : 
mInputShadowBucketHashes) { - auto b = - BucketManager::getBucketByHash(bm, hexToBin256(h)); + auto b = bm.getBucketByHash(hexToBin256(h)); releaseAssert(b); CLOG_DEBUG(Bucket, "Reconstituting shadow {}", h); diff --git a/src/bucket/FutureBucket.h b/src/bucket/FutureBucket.h index b8d64742bd..c79891e759 100644 --- a/src/bucket/FutureBucket.h +++ b/src/bucket/FutureBucket.h @@ -4,7 +4,6 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketUtils.h" #include "util/GlobalChecks.h" #include diff --git a/src/bucket/HotArchiveBucket.cpp b/src/bucket/HotArchiveBucket.cpp new file mode 100644 index 0000000000..b96cc98da1 --- /dev/null +++ b/src/bucket/HotArchiveBucket.cpp @@ -0,0 +1,149 @@ +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/HotArchiveBucket.h" +#include "bucket/BucketInputIterator.h" +#include "bucket/BucketOutputIterator.h" +#include "bucket/BucketUtils.h" + +namespace stellar +{ + +std::shared_ptr +HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries, + bool countMergeEvents, asio::io_context& ctx, + bool doFsync) +{ + ZoneScoped; + BucketMetadata meta; + meta.ledgerVersion = protocolVersion; + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::HOT_ARCHIVE; + auto entries = + convertToBucketEntry(archivedEntries, restoredEntries, deletedEntries); + + MergeCounters mc; + HotArchiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, + mc, ctx, doFsync); + for (auto const& e : entries) + { + out.put(e); + } + + if (countMergeEvents) + { + bucketManager.incrMergeCounters(mc); + } + + return out.getBucket(bucketManager, + bucketManager.getConfig().isUsingBucketListDB()); +} + +std::vector +HotArchiveBucket::convertToBucketEntry( + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + std::vector bucket; + for (auto const& e : archivedEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_ARCHIVED); + be.archivedEntry() = e; + bucket.push_back(be); + } + for (auto const& k : restoredEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_LIVE); + be.key() = k; + bucket.push_back(be); + } + for (auto const& k : deletedEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_DELETED); + be.key() = k; + bucket.push_back(be); + } + + BucketEntryIdCmp cmp; + std::sort(bucket.begin(), bucket.end(), cmp); + releaseAssert(std::adjacent_find(bucket.begin(), bucket.end(), + [&cmp](HotArchiveBucketEntry const& lhs, + HotArchiveBucketEntry const& rhs) { + return !cmp(lhs, rhs); + }) == bucket.end()); + return bucket; +} + +void +HotArchiveBucket::maybePut( + HotArchiveBucketOutputIterator& out, HotArchiveBucketEntry const& entry, + std::vector& shadowIterators, + bool keepShadowedLifecycleEntries, MergeCounters& mc) +{ + // Archived BucketList is only present after protocol 21, so shadows are + // never supported + out.put(entry); +} + +void +HotArchiveBucket::mergeCasesWithEqualKeys( + MergeCounters& mc, HotArchiveBucketInputIterator& oi, + HotArchiveBucketInputIterator& ni, HotArchiveBucketOutputIterator& out, + 
std::vector& shadowIterators, + uint32_t protocolVersion, bool keepShadowedLifecycleEntries) +{ + // If two identical keys have the same type, throw an error. Otherwise, + // take the newer key. + HotArchiveBucketEntry const& oldEntry = *oi; + HotArchiveBucketEntry const& newEntry = *ni; + if (oldEntry.type() == newEntry.type()) + { + throw std::runtime_error( + "Malformed Hot Archive bucket: two identical keys with " + "the same type."); + } + + out.put(newEntry); + ++ni; + ++oi; +} + +uint32_t +HotArchiveBucket::getBucketVersion() const +{ + HotArchiveBucketInputIterator it(shared_from_this()); + return it.getMetadata().ledgerVersion; +} + +HotArchiveBucket::HotArchiveBucket(std::string const& filename, + Hash const& hash, + std::unique_ptr&& index) + : BucketBase(filename, hash, std::move(index)) +{ +} + +HotArchiveBucket::HotArchiveBucket() : BucketBase() +{ +} + +bool +HotArchiveBucket::isTombstoneEntry(HotArchiveBucketEntry const& e) +{ + return e.type() == HOT_ARCHIVE_LIVE; +} + +std::shared_ptr +HotArchiveBucket::bucketEntryToLoadResult(std::shared_ptr const& be) +{ + return isTombstoneEntry(*be) ? nullptr : be; +} + +} \ No newline at end of file diff --git a/src/bucket/HotArchiveBucket.h b/src/bucket/HotArchiveBucket.h new file mode 100644 index 0000000000..772ec0c22d --- /dev/null +++ b/src/bucket/HotArchiveBucket.h @@ -0,0 +1,96 @@ +#pragma once + +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/BucketBase.h" +#include "bucket/BucketUtils.h" +#include "xdr/Stellar-ledger-entries.h" + +namespace stellar +{ + +class HotArchiveBucket; +template class BucketOutputIterator; +template class BucketInputIterator; + +typedef BucketInputIterator HotArchiveBucketInputIterator; +typedef BucketOutputIterator HotArchiveBucketOutputIterator; + +/* + * Hot Archive Buckets are used by the HotArchiveBucketList to store recently + * evicted entries. They contain entries of type HotArchiveBucketEntry. + */ +class HotArchiveBucket : public BucketBase, + public std::enable_shared_from_this +{ + static std::vector + convertToBucketEntry(std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); + + public: + // Entry type that this bucket stores + using EntryT = HotArchiveBucketEntry; + + // Entry type returned by loadKeys + using LoadT = HotArchiveBucketEntry; + + HotArchiveBucket(); + virtual ~HotArchiveBucket() + { + } + HotArchiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index); + uint32_t getBucketVersion() const override; + + static std::shared_ptr + fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync); + + // Returns true if the given BucketEntry should be dropped in the bottom + // level bucket (i.e. HOT_ARCHIVE_LIVE) + static bool isTombstoneEntry(HotArchiveBucketEntry const& e); + + // Note: this function is called maybePut for interoperability with + // LiveBucket. This function always writes the given entry to the output + // iterator.
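(Aside, not part of the patch: the equal-key merge rule shown above for hot-archive buckets, where two states of the same type for one key is a malformed bucket and otherwise the newer state supersedes the older, is small enough to sketch standalone. FakeType is an illustrative stand-in for the HotArchiveBucketEntry discriminant:)

    #include <iostream>
    #include <stdexcept>

    // Illustrative stand-in for the HotArchiveBucketEntry type tag.
    enum class FakeType
    {
        Archived,
        Live,
        Deleted
    };

    // Mirrors HotArchiveBucket::mergeCasesWithEqualKeys: same type twice for
    // one key is a malformed bucket; otherwise the newer state simply wins.
    FakeType
    mergeEqualKeys(FakeType oldE, FakeType newE)
    {
        if (oldE == newE)
        {
            throw std::runtime_error("malformed: identical key, same type");
        }
        return newE;
    }

    int
    main()
    {
        // An entry archived earlier and then restored: the restore wins.
        std::cout << (mergeEqualKeys(FakeType::Archived, FakeType::Live) ==
                      FakeType::Live)
                  << "\n"; // 1
    }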
+ static void + maybePut(HotArchiveBucketOutputIterator& out, + HotArchiveBucketEntry const& entry, + std::vector& shadowIterators, + bool keepShadowedLifecycleEntries, MergeCounters& mc); + + // For now, we only count LiveBucket merge events + static void + countOldEntryType(MergeCounters& mc, HotArchiveBucketEntry const& e) + { + } + + static void + countNewEntryType(MergeCounters& mc, HotArchiveBucketEntry const& e) + { + } + + static void + checkProtocolLegality(HotArchiveBucketEntry const& entry, + uint32_t protocolVersion) + { + } + + static void mergeCasesWithEqualKeys( + MergeCounters& mc, HotArchiveBucketInputIterator& oi, + HotArchiveBucketInputIterator& ni, HotArchiveBucketOutputIterator& out, + std::vector& shadowIterators, + uint32_t protocolVersion, bool keepShadowedLifecycleEntries); + + static std::shared_ptr + bucketEntryToLoadResult(std::shared_ptr const& be); + + friend class HotArchiveBucketSnapshot; +}; +} \ No newline at end of file diff --git a/src/bucket/HotArchiveBucketList.cpp b/src/bucket/HotArchiveBucketList.cpp new file mode 100644 index 0000000000..ce180a734a --- /dev/null +++ b/src/bucket/HotArchiveBucketList.cpp @@ -0,0 +1,25 @@ +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/HotArchiveBucketList.h" +#include "bucket/BucketListBase.h" + +namespace stellar +{ + +void +HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + ZoneScoped; + releaseAssertOrThrow(protocolVersionStartsFrom( + currLedgerProtocol, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + addBatchInternal(app, currLedger, currLedgerProtocol, archiveEntries, + restoredEntries, deletedEntries); +} +} \ No newline at end of file diff --git a/src/bucket/HotArchiveBucketList.h b/src/bucket/HotArchiveBucketList.h new file mode 100644 index 0000000000..74a467435f --- /dev/null +++ b/src/bucket/HotArchiveBucketList.h @@ -0,0 +1,24 @@ +#pragma once + +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/BucketListBase.h" +#include "bucket/HotArchiveBucket.h" + +namespace stellar +{ +// The HotArchiveBucketList stores recently evicted entries. It contains Buckets +// of type HotArchiveBucket, which store individual entries of type +// HotArchiveBucketEntry. 
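(Aside, not part of the patch: both convertToBucketEntry implementations in this patch end the same way: sort with BucketEntryIdCmp, then assert key-uniqueness by running std::adjacent_find with the negated comparator, since adjacent equivalent keys would violate the strict ordering. The idiom, shown standalone over plain ints:)

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int
    main()
    {
        std::vector<int> keys{3, 1, 2};
        auto cmp = [](int a, int b) { return a < b; };
        std::sort(keys.begin(), keys.end(), cmp);

        // After sorting under a strict weak ordering, any adjacent pair with
        // !cmp(lhs, rhs) is a duplicate (equivalent keys), which the bucket
        // builders treat as a hard error.
        assert(std::adjacent_find(keys.begin(), keys.end(),
                                  [&](int lhs, int rhs) {
                                      return !cmp(lhs, rhs);
                                  }) == keys.end());
    }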
+class HotArchiveBucketList : public BucketListBase +{ + public: + void addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); +}; +} \ No newline at end of file diff --git a/src/bucket/LedgerCmp.h b/src/bucket/LedgerCmp.h index 01d6de81b9..76abf0909d 100644 --- a/src/bucket/LedgerCmp.h +++ b/src/bucket/LedgerCmp.h @@ -6,14 +6,14 @@ #include -#include "bucket/Bucket.h" #include "bucket/BucketUtils.h" #include "util/XDROperators.h" // IWYU pragma: keep #include "xdr/Stellar-ledger-entries.h" -#include "xdr/Stellar-ledger.h" namespace stellar { +class HotArchiveBucket; +class LiveBucket; template bool diff --git a/src/bucket/LiveBucket.cpp b/src/bucket/LiveBucket.cpp new file mode 100644 index 0000000000..f640422fcd --- /dev/null +++ b/src/bucket/LiveBucket.cpp @@ -0,0 +1,578 @@ +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/LiveBucket.h" +#include "bucket/BucketApplicator.h" +#include "bucket/BucketInputIterator.h" +#include "bucket/BucketOutputIterator.h" +#include "bucket/BucketUtils.h" +#include "bucket/LedgerCmp.h" +#include "ledger/LedgerTypeUtils.h" +#include + +namespace stellar +{ +namespace +{ +void +countShadowedEntryType(MergeCounters& mc, BucketEntry const& e) +{ + switch (e.type()) + { + case METAENTRY: + ++mc.mMetaEntryShadowElisions; + break; + case INITENTRY: + ++mc.mInitEntryShadowElisions; + break; + case LIVEENTRY: + ++mc.mLiveEntryShadowElisions; + break; + case DEADENTRY: + ++mc.mDeadEntryShadowElisions; + break; + } +} +} + +void +LiveBucket::countNewEntryType(MergeCounters& mc, BucketEntry const& e) +{ + switch (e.type()) + { + case METAENTRY: + ++mc.mNewMetaEntries; + break; + case INITENTRY: + ++mc.mNewInitEntries; + break; + case LIVEENTRY: + ++mc.mNewLiveEntries; + break; + case DEADENTRY: + ++mc.mNewDeadEntries; + break; + } +} +void +LiveBucket::countOldEntryType(MergeCounters& mc, BucketEntry const& e) +{ + switch (e.type()) + { + case METAENTRY: + ++mc.mOldMetaEntries; + break; + case INITENTRY: + ++mc.mOldInitEntries; + break; + case LIVEENTRY: + ++mc.mOldLiveEntries; + break; + case DEADENTRY: + ++mc.mOldDeadEntries; + break; + } +} + +void +LiveBucket::maybePut(LiveBucketOutputIterator& out, BucketEntry const& entry, + std::vector& shadowIterators, + bool keepShadowedLifecycleEntries, MergeCounters& mc) +{ + // In ledgers before protocol 11, keepShadowedLifecycleEntries will be + // `false` and we will drop all shadowed entries here. + // + // In ledgers at-or-after protocol 11, it will be `true`, which means that we + // only elide 'put'ing an entry if it is in LIVEENTRY state; we keep entries + // in DEADENTRY and INITENTRY states, for two reasons: + // + // - DEADENTRY is preserved to ensure that old live-or-init entries that + // were killed remain dead and are not brought back to life accidentally by + // having a newer shadow eliding their later DEADENTRY (tombstone). This + // is possible because newer shadowing entries may both refer to the + // same key as an older dead entry, and may occur as an INIT/DEAD pair + // that subsequently annihilate one another.
+ // + // IOW we want to prevent the following scenario: + // + // lev1:DEAD, lev2:INIT, lev3:DEAD, lev4:INIT + // + // from turning into the following by shadowing: + // + // lev1:DEAD, lev2:INIT, -elided-, lev4:INIT + // + // and then the following by pairwise annihilation: + // + // -annihilated-, -elided-, lev4:INIT + // + // - INITENTRY is preserved to ensure that a DEADENTRY preserved by the + // previous rule does not itself shadow-out its own INITENTRY, but + // rather eventually ages and encounters (and is annihilated-by) that + // INITENTRY in an older level. Thus preventing the accumulation of + // redundant tombstones. + // + // Note that this decision only controls whether to elide dead entries due + // to _shadows_. There is a secondary elision of dead entries at the _oldest + // level_ of the bucketlist that is accomplished through filtering at the + // LiveBucketOutputIterator level, and happens independent of ledger + // protocol version. + + if (keepShadowedLifecycleEntries && + (entry.type() == INITENTRY || entry.type() == DEADENTRY)) + { + // Never shadow-out entries in this case; no point scanning shadows. + out.put(entry); + return; + } + + BucketEntryIdCmp cmp; + for (auto& si : shadowIterators) + { + // Advance the shadowIterator while it's less than the candidate + while (si && cmp(*si, entry)) + { + ++mc.mShadowScanSteps; + ++si; + } + // We have stepped si forward to the point that either si is exhausted, + // or else *si >= entry; we now check the opposite direction to see if + // we have equality. + if (si && !cmp(entry, *si)) + { + // If so, then entry is shadowed in at least one level. + countShadowedEntryType(mc, entry); + return; + } + } + // Nothing shadowed. + out.put(entry); +} + +void +LiveBucket::mergeCasesWithEqualKeys( + MergeCounters& mc, LiveBucketInputIterator& oi, LiveBucketInputIterator& ni, + LiveBucketOutputIterator& out, + std::vector& shadowIterators, + uint32_t protocolVersion, bool keepShadowedLifecycleEntries) +{ + // Old and new are for the same key and neither is INIT, take the new + // key. If either key is INIT, we have to make some adjustments: + // + // old | new | result + // ---------+---------+----------- + // INIT | INIT | error + // LIVE | INIT | error + // DEAD | INIT=x | LIVE=x + // INIT=x | LIVE=y | INIT=y + // INIT | DEAD | empty + // + // + // What does this mean / why is it correct? + // + // Performing a merge between two same-key entries is about maintaining two + // invariants: + // + // 1. From the perspective of a reader (eg. the database) the pre-merge + // pair of entries and post-merge single entry are indistinguishable, + // at least in terms that the reader/database cares about (liveness & + // value). This is the most important invariant since it's what makes + // the database have the right values! + // + // 2. From the perspective of chronological _sequences_ of lifecycle + // transitions, if an entry is in INIT state then its (chronological) + // predecessor state is DEAD either by the next-oldest state being an + // _explicit_ DEAD tombstone, or by the INIT being the oldest state in + // the bucket list. This invariant allows us to assume that INIT + // followed by DEAD can be safely merged to empty (eliding the record) + // without revealing and reviving the key in some older non-DEAD state + // preceding the INIT. 
+ // + // When merging a pair of non-INIT entries and taking the 'new' value, + // invariant #1 is easy to see as preserved (an LSM tree is defined as + // returning the newest value for an entry, so preserving the newest of any + // pair is correct), and by assumption neither entry is INIT-state so + // invariant #2 isn't relevant / is unaffected. + // + // When merging a pair with an INIT, we can go case-by-case through the + // table above and see that both invariants are preserved: + // + // - INIT,INIT and LIVE,INIT violate invariant #2, so by assumption should + // never be occurring. + // + // - DEAD,INIT=x are indistinguishable from LIVE=x from the perspective of + // the reader, satisfying invariant #1. And since LIVE=x is not + // INIT-state anymore invariant #2 is trivially preserved (does not + // apply). + // + // - INIT=x,LIVE=y is indistinguishable from INIT=y from the perspective + // of the reader, satisfying invariant #1. And assuming invariant #2 + // holds for INIT=x,LIVE=y, then it holds for INIT=y. + // + // - INIT,DEAD is indistinguishable from absence-of-an-entry from the + // perspective of a reader, maintaining invariant #1, _if_ invariant #2 + // also holds (the predecessor state _before_ INIT was + // absent-or-DEAD). And invariant #2 holds trivially _locally_ for this + // merge because there is no resulting state (i.e. it's not in + // INIT-state); and it holds slightly-less-trivially non-locally, + // because even if there is a subsequent (newer) INIT entry, the + // invariant is maintained for that newer entry too (it is still + // preceded by a DEAD state). + + BucketEntry const& oldEntry = *oi; + BucketEntry const& newEntry = *ni; + LiveBucket::checkProtocolLegality(oldEntry, protocolVersion); + LiveBucket::checkProtocolLegality(newEntry, protocolVersion); + countOldEntryType(mc, oldEntry); + countNewEntryType(mc, newEntry); + + if (newEntry.type() == INITENTRY) + { + // The only legal new-is-INIT case is merging a delete+create to an + // update. + if (oldEntry.type() != DEADENTRY) + { + throw std::runtime_error( + "Malformed bucket: old non-DEAD + new INIT."); + } + BucketEntry newLive; + newLive.type(LIVEENTRY); + newLive.liveEntry() = newEntry.liveEntry(); + ++mc.mNewInitEntriesMergedWithOldDead; + maybePut(out, newLive, shadowIterators, keepShadowedLifecycleEntries, + mc); + } + else if (oldEntry.type() == INITENTRY) + { + // If we get here, new is not INIT; may be LIVE or DEAD. + if (newEntry.type() == LIVEENTRY) + { + // Merge a create+update to a fresher create. + BucketEntry newInit; + newInit.type(INITENTRY); + newInit.liveEntry() = newEntry.liveEntry(); + ++mc.mOldInitEntriesMergedWithNewLive; + maybePut(out, newInit, shadowIterators, + keepShadowedLifecycleEntries, mc); + } + else + { + // Merge a create+delete to nothingness. + ++mc.mOldInitEntriesMergedWithNewDead; + } + } + else + { + // Neither is in INIT state, take the newer one. 
+ ++mc.mNewEntriesMergedWithOldNeitherInit; + maybePut(out, newEntry, shadowIterators, keepShadowedLifecycleEntries, + mc); + } + ++oi; + ++ni; +} + +bool +LiveBucket::containsBucketIdentity(BucketEntry const& id) const +{ + BucketEntryIdCmp cmp; + LiveBucketInputIterator iter(shared_from_this()); + while (iter) + { + if (!(cmp(*iter, id) || cmp(id, *iter))) + { + return true; + } + ++iter; + } + return false; +} + +#ifdef BUILD_TESTS +void +LiveBucket::apply(Application& app) const +{ + ZoneScoped; + + auto filter = [&](LedgerEntryType t) { + if (app.getConfig().isUsingBucketListDB()) + { + return t == OFFER; + } + + return true; + }; + + std::unordered_set emptySet; + BucketApplicator applicator( + app, app.getConfig().LEDGER_PROTOCOL_VERSION, + 0 /*set to 0 so we always load from the parent to check state*/, + 0 /*set to a level that's not the bottom so we don't treat live entries + as init*/ + , + shared_from_this(), filter, emptySet); + BucketApplicator::Counters counters(app.getClock().now()); + while (applicator) + { + applicator.advance(counters); + } + counters.logInfo("direct", 0, app.getClock().now()); +} +#endif // BUILD_TESTS + +std::vector +LiveBucket::convertToBucketEntry(bool useInit, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) +{ + std::vector bucket; + for (auto const& e : initEntries) + { + BucketEntry ce; + ce.type(useInit ? INITENTRY : LIVEENTRY); + ce.liveEntry() = e; + bucket.push_back(ce); + } + for (auto const& e : liveEntries) + { + BucketEntry ce; + ce.type(LIVEENTRY); + ce.liveEntry() = e; + bucket.push_back(ce); + } + for (auto const& e : deadEntries) + { + BucketEntry ce; + ce.type(DEADENTRY); + ce.deadEntry() = e; + bucket.push_back(ce); + } + + BucketEntryIdCmp cmp; + std::sort(bucket.begin(), bucket.end(), cmp); + releaseAssert(std::adjacent_find( + bucket.begin(), bucket.end(), + [&cmp](BucketEntry const& lhs, BucketEntry const& rhs) { + return !cmp(lhs, rhs); + }) == bucket.end()); + return bucket; +} + +std::shared_ptr +LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries, + bool countMergeEvents, asio::io_context& ctx, bool doFsync) +{ + ZoneScoped; + // When building fresh buckets after protocol version 10 (i.e. version + // 11-or-after) we differentiate INITENTRY from LIVEENTRY. In older + // protocols, for compatibility sake, we mark both cases as LIVEENTRY. 
+ bool useInit = protocolVersionStartsFrom( + protocolVersion, FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY); + + BucketMetadata meta; + meta.ledgerVersion = protocolVersion; + + if (protocolVersionStartsFrom( + protocolVersion, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::LIVE; + } + + auto entries = + convertToBucketEntry(useInit, initEntries, liveEntries, deadEntries); + + MergeCounters mc; + LiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx, + doFsync); + for (auto const& e : entries) + { + out.put(e); + } + + if (countMergeEvents) + { + bucketManager.incrMergeCounters(mc); + } + + return out.getBucket(bucketManager, + bucketManager.getConfig().isUsingBucketListDB()); +} + +void +LiveBucket::checkProtocolLegality(BucketEntry const& entry, + uint32_t protocolVersion) +{ + if (protocolVersionIsBefore( + protocolVersion, + FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) && + (entry.type() == INITENTRY || entry.type() == METAENTRY)) + { + throw std::runtime_error(fmt::format( + FMT_STRING("unsupported entry type {} in protocol {:d} bucket"), + (entry.type() == INITENTRY ? "INIT" : "META"), protocolVersion)); + } +} + +Loop +LiveBucket::scanForEvictionLegacy( + AbstractLedgerTxn& ltx, EvictionIterator& iter, uint32_t& bytesToScan, + uint32_t& remainingEntriesToEvict, uint32_t ledgerSeq, + medida::Counter& entriesEvictedCounter, + medida::Counter& bytesScannedForEvictionCounter, + std::shared_ptr stats) const +{ + ZoneScoped; + releaseAssert(stats); + + if (isEmpty() || + protocolVersionIsBefore(getBucketVersion(), SOROBAN_PROTOCOL_VERSION)) + { + // EOF, need to continue reading next bucket + return Loop::INCOMPLETE; + } + + if (remainingEntriesToEvict == 0 || bytesToScan == 0) + { + // Reached end of scan region + return Loop::COMPLETE; + } + + XDRInputFileStream stream{}; + stream.open(mFilename.string()); + stream.seek(iter.bucketFileOffset); + + BucketEntry be; + while (stream.readOne(be)) + { + if (be.type() == INITENTRY || be.type() == LIVEENTRY) + { + auto const& le = be.liveEntry(); + if (isTemporaryEntry(le.data)) + { + ZoneNamedN(maybeEvict, "maybe evict entry", true); + + auto ttlKey = getTTLKey(le); + uint32_t liveUntilLedger = 0; + auto shouldEvict = [&] { + auto ttlLtxe = ltx.loadWithoutRecord(ttlKey); + if (!ttlLtxe) + { + // Entry was already deleted either manually or by an + // earlier eviction scan, do nothing + return false; + } + + releaseAssert(ttlLtxe); + liveUntilLedger = + ttlLtxe.current().data.ttl().liveUntilLedgerSeq; + return !isLive(ttlLtxe.current(), ledgerSeq); + }; + + if (shouldEvict()) + { + ZoneNamedN(evict, "evict entry", true); + auto age = ledgerSeq - liveUntilLedger; + stats->recordEvictedEntry(age); + + ltx.erase(ttlKey); + ltx.erase(LedgerEntryKey(le)); + entriesEvictedCounter.inc(); + --remainingEntriesToEvict; + } + } + } + + auto newPos = stream.pos(); + auto bytesRead = newPos - iter.bucketFileOffset; + iter.bucketFileOffset = newPos; + bytesScannedForEvictionCounter.inc(bytesRead); + if (bytesRead >= bytesToScan) + { + // Reached end of scan region + bytesToScan = 0; + return Loop::COMPLETE; + } + else if (remainingEntriesToEvict == 0) + { + return Loop::COMPLETE; + } + + bytesToScan -= bytesRead; + } + + // Hit eof + return Loop::INCOMPLETE; +} + +LiveBucket::LiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index) + : BucketBase(filename, hash, std::move(index)) +{ +} + +LiveBucket::LiveBucket() : 
BucketBase() +{ +} + +uint32_t +LiveBucket::getBucketVersion() const +{ + LiveBucketInputIterator it(shared_from_this()); + return it.getMetadata().ledgerVersion; +} + +BucketEntryCounters const& +LiveBucket::getBucketEntryCounters() const +{ + releaseAssert(mIndex); + return mIndex->getBucketEntryCounters(); +} + +bool +LiveBucket::isTombstoneEntry(BucketEntry const& e) +{ + return e.type() == DEADENTRY; +} + +std::shared_ptr +LiveBucket::bucketEntryToLoadResult(std::shared_ptr const& be) +{ + return isTombstoneEntry(*be) + ? nullptr + : std::make_shared(be->liveEntry()); +} + +BucketEntryCounters& +BucketEntryCounters::operator+=(BucketEntryCounters const& other) +{ + for (auto [type, count] : other.entryTypeCounts) + { + this->entryTypeCounts[type] += count; + } + for (auto [type, size] : other.entryTypeSizes) + { + this->entryTypeSizes[type] += size; + } + return *this; +} + +bool +BucketEntryCounters::operator==(BucketEntryCounters const& other) const +{ + return this->entryTypeCounts == other.entryTypeCounts && + this->entryTypeSizes == other.entryTypeSizes; +} + +bool +BucketEntryCounters::operator!=(BucketEntryCounters const& other) const +{ + return !(*this == other); +} + +} \ No newline at end of file diff --git a/src/bucket/LiveBucket.h b/src/bucket/LiveBucket.h new file mode 100644 index 0000000000..a34a3d8393 --- /dev/null +++ b/src/bucket/LiveBucket.h @@ -0,0 +1,147 @@ +#pragma once + +// Copyright 2024 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "bucket/BucketBase.h" +#include "bucket/BucketUtils.h" +#include "ledger/LedgerTypeUtils.h" + +namespace medida +{ +class Counter; +} + +namespace stellar +{ +class AbstractLedgerTxn; +class Application; +class EvictionStatistics; +class LiveBucket; +template class BucketOutputIterator; +template class BucketInputIterator; + +typedef BucketOutputIterator LiveBucketOutputIterator; +typedef BucketInputIterator LiveBucketInputIterator; +struct BucketEntryCounters +{ + std::map entryTypeCounts; + std::map entryTypeSizes; + + BucketEntryCounters& operator+=(BucketEntryCounters const& other); + bool operator==(BucketEntryCounters const& other) const; + bool operator!=(BucketEntryCounters const& other) const; + + template + void + serialize(Archive& ar) + { + ar(entryTypeCounts, entryTypeSizes); + } +}; + +/* + * Live Buckets are used by the LiveBucketList to store the current canonical + * state of the ledger. They contain entries of type BucketEntry. + */ +class LiveBucket : public BucketBase, + public std::enable_shared_from_this +{ + public: + // Entry type that this bucket stores + using EntryT = BucketEntry; + + // Entry type returned by loadKeys + using LoadT = LedgerEntry; + + LiveBucket(); + virtual ~LiveBucket() + { + } + LiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index); + + // Returns true if a BucketEntry that is key-wise identical to the given + // BucketEntry exists in the bucket. For testing. + bool containsBucketIdentity(BucketEntry const& id) const; + + // At version 11, we added support for INITENTRY and METAENTRY. Before this + // we were only supporting LIVEENTRY and DEADENTRY.
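(Aside, not part of the patch: the old/new decision table that LiveBucket::mergeCasesWithEqualKeys implements earlier in this patch condenses to a small pure function. A standalone sketch, where State is an invented stand-in for the BucketEntry type tags and an empty optional marks the create-plus-delete annihilation case:)

    #include <iostream>
    #include <optional>
    #include <stdexcept>

    // Illustrative stand-in for the BucketEntry lifecycle tags.
    enum class State
    {
        Init,
        Live,
        Dead
    };

    // Mirrors the table: DEAD + INIT=x -> LIVE=x, INIT=x + LIVE=y -> INIT=y,
    // INIT + DEAD -> nothing; any other pairing keeps the newer entry.
    std::optional<State>
    mergeEqualKeys(State oldE, State newE)
    {
        if (newE == State::Init)
        {
            if (oldE != State::Dead)
            {
                throw std::runtime_error("malformed: old non-DEAD + new INIT");
            }
            return State::Live; // delete+create collapses to an update
        }
        if (oldE == State::Init)
        {
            if (newE == State::Live)
            {
                return State::Init; // create+update becomes a fresher create
            }
            return std::nullopt; // create+delete annihilates the entry
        }
        return newE; // neither side is INIT: newest state wins
    }

    int
    main()
    {
        std::cout << (mergeEqualKeys(State::Dead, State::Init) == State::Live)
                  << "\n"; // 1
        std::cout << !mergeEqualKeys(State::Init, State::Dead).has_value()
                  << "\n"; // 1
    }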
+    static constexpr ProtocolVersion
+        FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY =
+            ProtocolVersion::V_11;
+    static constexpr ProtocolVersion FIRST_PROTOCOL_SHADOWS_REMOVED =
+        ProtocolVersion::V_12;
+
+    static void checkProtocolLegality(BucketEntry const& entry,
+                                      uint32_t protocolVersion);
+
+    static std::vector<BucketEntry>
+    convertToBucketEntry(bool useInit,
+                         std::vector<LedgerEntry> const& initEntries,
+                         std::vector<LedgerEntry> const& liveEntries,
+                         std::vector<LedgerKey> const& deadEntries);
+
+    static void mergeCasesWithEqualKeys(
+        MergeCounters& mc, LiveBucketInputIterator& oi,
+        LiveBucketInputIterator& ni, LiveBucketOutputIterator& out,
+        std::vector<LiveBucketInputIterator>& shadowIterators,
+        uint32_t protocolVersion, bool keepShadowedLifecycleEntries);
+
+#ifdef BUILD_TESTS
+    // "Applies" the bucket to the database. For each entry in the bucket,
+    // if the entry is init or live, creates or updates the corresponding
+    // entry in the database (respectively); if the entry is dead (a
+    // tombstone), deletes the corresponding entry in the database.
+    void apply(Application& app) const;
+#endif
+
+    // Returns Loop::INCOMPLETE if eof reached, Loop::COMPLETE otherwise.
+    // Modifies iter as the bucket is scanned. Also modifies bytesToScan and
+    // maxEntriesToEvict such that after this function returns:
+    //     bytesToScan -= amount_bytes_scanned
+    //     maxEntriesToEvict -= entries_evicted
+    Loop scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter,
+                               uint32_t& bytesToScan,
+                               uint32_t& remainingEntriesToEvict,
+                               uint32_t ledgerSeq,
+                               medida::Counter& entriesEvictedCounter,
+                               medida::Counter& bytesScannedForEvictionCounter,
+                               std::shared_ptr<EvictionStatistics> stats) const;
+
+    // Create a fresh bucket from given vectors of init (created) and live
+    // (updated) LedgerEntries, and dead LedgerEntryKeys. The bucket will
+    // be sorted, hashed, and adopted in the provided BucketManager.
+    static std::shared_ptr<LiveBucket>
+    fresh(BucketManager& bucketManager, uint32_t protocolVersion,
+          std::vector<LedgerEntry> const& initEntries,
+          std::vector<LedgerEntry> const& liveEntries,
+          std::vector<LedgerKey> const& deadEntries, bool countMergeEvents,
+          asio::io_context& ctx, bool doFsync);
+
+    // Returns true if the given BucketEntry should be dropped in the bottom
+    // level bucket (i.e. DEADENTRY)
+    static bool isTombstoneEntry(BucketEntry const& e);
+
+    static std::shared_ptr<LedgerEntry>
+    bucketEntryToLoadResult(std::shared_ptr<BucketEntry> const& be);
+
+    // Whenever a given BucketEntry is "eligible" to be written as the merge
+    // result in the output bucket, this function writes the entry to the
+    // output iterator if the entry is not shadowed.
+    static void maybePut(LiveBucketOutputIterator& out,
+                         BucketEntry const& entry,
+                         std::vector<LiveBucketInputIterator>& shadowIterators,
+                         bool keepShadowedLifecycleEntries, MergeCounters& mc);
+
+    static void countOldEntryType(MergeCounters& mc, BucketEntry const& e);
+    static void countNewEntryType(MergeCounters& mc, BucketEntry const& e);
+
+    uint32_t getBucketVersion() const override;
+
+    BucketEntryCounters const& getBucketEntryCounters() const;
+
+    friend class LiveBucketSnapshot;
+};
+}
\ No newline at end of file
diff --git a/src/bucket/LiveBucketList.cpp b/src/bucket/LiveBucketList.cpp
new file mode 100644
index 0000000000..240f0d5876
--- /dev/null
+++ b/src/bucket/LiveBucketList.cpp
@@ -0,0 +1,201 @@
+// Copyright 2024 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "bucket/LiveBucketList.h"
+#include "bucket/BucketListBase.h"
+#include "ledger/LedgerManager.h"
+
+#include
+
+namespace stellar
+{
+
+void
+LiveBucketList::addBatch(Application& app, uint32_t currLedger,
+                         uint32_t currLedgerProtocol,
+                         std::vector<LedgerEntry> const& initEntries,
+                         std::vector<LedgerEntry> const& liveEntries,
+                         std::vector<LedgerKey> const& deadEntries)
+{
+    ZoneScoped;
+    addBatchInternal(app, currLedger, currLedgerProtocol, initEntries,
+                     liveEntries, deadEntries);
+}
+
+BucketEntryCounters
+LiveBucketList::sumBucketEntryCounters() const
+{
+    BucketEntryCounters counters;
+    for (auto const& lev : mLevels)
+    {
+        for (auto const& b : {lev.getCurr(), lev.getSnap()})
+        {
+            if (b->isIndexed())
+            {
+                auto c = b->getBucketEntryCounters();
+                counters += c;
+            }
+        }
+    }
+    return counters;
+}
+
+void
+LiveBucketList::updateStartingEvictionIterator(EvictionIterator& iter,
+                                               uint32_t firstScanLevel,
+                                               uint32_t ledgerSeq)
+{
+    // Check if an upgrade has changed the starting scan level to below the
+    // current iterator level
+    if (iter.bucketListLevel < firstScanLevel)
+    {
+        // Reset iterator to the new minimum level
+        iter.bucketFileOffset = 0;
+        iter.isCurrBucket = true;
+        iter.bucketListLevel = firstScanLevel;
+    }
+
+    // Whenever a Bucket changes (spills or receives an incoming spill), the
+    // iterator offset in that bucket is invalidated. After scanning, we
+    // must write the iterator to the BucketList then close the ledger.
+    // Bucket spills occur on ledger close after we've already written the
+    // iterator, so the iterator may be invalidated. Because of this, we
+    // must check if the Bucket the iterator currently points to changed on
+    // the previous ledger, indicating the current iterator is invalid.
+    if (iter.isCurrBucket)
+    {
+        // Check if bucket received an incoming spill
+        releaseAssert(iter.bucketListLevel != 0);
+        if (BucketListBase::levelShouldSpill(ledgerSeq - 1,
+                                             iter.bucketListLevel - 1))
+        {
+            // If Bucket changed, reset to start of bucket
+            iter.bucketFileOffset = 0;
+        }
+    }
+    else
+    {
+        if (BucketListBase::levelShouldSpill(ledgerSeq - 1,
+                                             iter.bucketListLevel))
+        {
+            // If Bucket changed, reset to start of bucket
+            iter.bucketFileOffset = 0;
+        }
+    }
+}
+
+bool
+LiveBucketList::updateEvictionIterAndRecordStats(
+    EvictionIterator& iter, EvictionIterator startIter,
+    uint32_t configFirstScanLevel, uint32_t ledgerSeq,
+    std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters)
+{
+    releaseAssert(stats);
+
+    // If we reached eof in curr bucket, start scanning snap.
+    // Last level has no snap so cycle back to the initial level.
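+    // (Illustrative walk, not part of this change: assuming kNumLevels == 11
+    // and a configured first scan level of 6, the iterator advances
+    // 6.curr -> 6.snap -> 7.curr -> ... -> 10.curr, then wraps back to
+    // level 6 and a new metrics cycle begins.)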
+    if (iter.isCurrBucket && iter.bucketListLevel != kNumLevels - 1)
+    {
+        iter.isCurrBucket = false;
+        iter.bucketFileOffset = 0;
+    }
+    else
+    {
+        // If we reached eof in snap, move to next level
+        ++iter.bucketListLevel;
+        iter.isCurrBucket = true;
+        iter.bucketFileOffset = 0;
+
+        // If we have scanned the last level, cycle back to initial
+        // level
+        if (iter.bucketListLevel == kNumLevels)
+        {
+            iter.bucketListLevel = configFirstScanLevel;
+
+            // Record then reset metrics at beginning of new eviction cycle
+            stats->submitMetricsAndRestartCycle(ledgerSeq, counters);
+        }
+    }
+
+    // If we are back to the bucket we started at, break
+    if (iter.bucketListLevel == startIter.bucketListLevel &&
+        iter.isCurrBucket == startIter.isCurrBucket)
+    {
+        return true;
+    }
+
+    return false;
+}
+
+void
+LiveBucketList::checkIfEvictionScanIsStuck(
+    EvictionIterator const& evictionIter, uint32_t scanSize,
+    std::shared_ptr<LiveBucket const> b, EvictionCounters& counters)
+{
+    // Check to see if we can finish scanning the new bucket before it
+    // receives an update
+    uint64_t period = bucketUpdatePeriod(evictionIter.bucketListLevel,
+                                         evictionIter.isCurrBucket);
+    if (period * scanSize < b->getSize())
+    {
+        CLOG_WARNING(Bucket,
+                     "Bucket too large for current eviction scan size.");
+        counters.incompleteBucketScan.inc();
+    }
+}
+
+// To avoid noisy data, only count metrics that encompass a complete
+// eviction cycle. If a node joins the network mid cycle, metrics will be
+// nullopt and be initialized at the start of the next cycle.
+void
+LiveBucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
+                                      uint32_t ledgerSeq,
+                                      EvictionCounters& counters,
+                                      std::shared_ptr<EvictionStatistics> stats)
+{
+    releaseAssert(stats);
+
+    auto getBucketFromIter =
+        [&levels = mLevels](EvictionIterator const& iter) {
+            auto& level = levels.at(iter.bucketListLevel);
+            return iter.isCurrBucket ? level.getCurr() : level.getSnap();
+        };
+
+    auto const& networkConfig =
+        app.getLedgerManager().getSorobanNetworkConfig();
+    auto const firstScanLevel =
+        networkConfig.stateArchivalSettings().startingEvictionScanLevel;
+    auto evictionIter = networkConfig.evictionIterator();
+    auto scanSize = networkConfig.stateArchivalSettings().evictionScanSize;
+    auto maxEntriesToEvict =
+        networkConfig.stateArchivalSettings().maxEntriesToArchive;
+
+    updateStartingEvictionIterator(evictionIter, firstScanLevel, ledgerSeq);
+
+    auto startIter = evictionIter;
+    auto b = getBucketFromIter(evictionIter);
+
+    while (b->scanForEvictionLegacy(
+               ltx, evictionIter, scanSize, maxEntriesToEvict, ledgerSeq,
+               counters.entriesEvicted, counters.bytesScannedForEviction,
+               stats) == Loop::INCOMPLETE)
+    {
+        if (updateEvictionIterAndRecordStats(evictionIter, startIter,
+                                             firstScanLevel, ledgerSeq, stats,
+                                             counters))
+        {
+            break;
+        }
+
+        b = getBucketFromIter(evictionIter);
+        checkIfEvictionScanIsStuck(
+            evictionIter,
+            networkConfig.stateArchivalSettings().evictionScanSize, b,
+            counters);
+    }
+
+    networkConfig.updateEvictionIterator(ltx, evictionIter);
+}
+}
diff --git a/src/bucket/LiveBucketList.h b/src/bucket/LiveBucketList.h
new file mode 100644
index 0000000000..683337e096
--- /dev/null
+++ b/src/bucket/LiveBucketList.h
@@ -0,0 +1,59 @@
+#pragma once
+
+// Copyright 2024 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "bucket/BucketListBase.h"
+#include "bucket/LiveBucket.h"
+
+namespace stellar
+{
+// The LiveBucketList stores the current canonical state of the ledger. It is
+// made up of LiveBucket buckets, which in turn store individual entries of
+// type BucketEntry. When an entry is "evicted" from the ledger, it is removed
+// from the LiveBucketList. Depending on the evicted entry type, it may then
+// be added to the HotArchiveBucketList.
+class LiveBucketList : public BucketListBase<LiveBucket>
+{
+  public:
+    // Reset Eviction Iterator position if an incoming spill or upgrade has
+    // invalidated the previous position
+    static void updateStartingEvictionIterator(EvictionIterator& iter,
+                                               uint32_t firstScanLevel,
+                                               uint32_t ledgerSeq);
+
+    // Update eviction iter and record stats after scanning a region in one
+    // bucket. Returns true if the scan has looped back to startIter, false
+    // otherwise.
+    static bool updateEvictionIterAndRecordStats(
+        EvictionIterator& iter, EvictionIterator startIter,
+        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
+        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
+
+    static void checkIfEvictionScanIsStuck(
+        EvictionIterator const& evictionIter, uint32_t scanSize,
+        std::shared_ptr<LiveBucket const> b, EvictionCounters& counters);
+
+    void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
+                               uint32_t ledgerSeq, EvictionCounters& counters,
+                               std::shared_ptr<EvictionStatistics> stats);
+
+    // Add a batch of initial (created), live (updated) and dead entries to
+    // the bucketlist, representing the entries effected by closing
+    // `currLedger`. The bucketlist will incorporate these into the smallest
+    // (0th) level, as well as commit or prepare merges for any levels that
+    // should have spilled due to passing through `currLedger`. The
+    // `currLedger` and `currProtocolVersion` values should be taken from the
+    // ledger at which this batch is being added.
+    void addBatch(Application& app, uint32_t currLedger,
+                  uint32_t currLedgerProtocol,
+                  std::vector<LedgerEntry> const& initEntries,
+                  std::vector<LedgerEntry> const& liveEntries,
+                  std::vector<LedgerKey> const& deadEntries);
+
+    BucketEntryCounters sumBucketEntryCounters() const;
+};
+
+}
\ No newline at end of file
diff --git a/src/bucket/MergeKey.h b/src/bucket/MergeKey.h
index d33a73672b..46bb64bbda 100644
--- a/src/bucket/MergeKey.h
+++ b/src/bucket/MergeKey.h
@@ -3,7 +3,6 @@
 // Copyright 2019 Stellar Development Foundation and contributors. Licensed
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-#include "bucket/Bucket.h"
 #include "xdr/Stellar-types.h"
 #include
 #include
diff --git a/src/bucket/BucketListSnapshot.cpp b/src/bucket/SearchableBucketList.cpp
similarity index 52%
rename from src/bucket/BucketListSnapshot.cpp
rename to src/bucket/SearchableBucketList.cpp
index b071567b09..dd49a424c0 100644
--- a/src/bucket/BucketListSnapshot.cpp
+++ b/src/bucket/SearchableBucketList.cpp
@@ -2,90 +2,14 @@ // under the Apache License, Version 2.0.
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/BucketListSnapshot.h" -#include "bucket/Bucket.h" +#include "bucket/SearchableBucketList.h" #include "bucket/BucketInputIterator.h" -#include "bucket/BucketList.h" -#include "bucket/BucketSnapshot.h" -#include "crypto/SecretKey.h" // IWYU pragma: keep -#include "ledger/LedgerTxn.h" +#include "bucket/BucketListSnapshotBase.h" -#include "medida/timer.h" -#include "util/GlobalChecks.h" -#include -#include +#include namespace stellar { -template -BucketListSnapshot::BucketListSnapshot( - BucketListBase const& bl, LedgerHeader header) - : mHeader(std::move(header)) -{ - releaseAssert(threadIsMain()); - - for (uint32_t i = 0; i < BucketListBase::kNumLevels; ++i) - { - auto const& level = bl.getLevel(i); - mLevels.emplace_back(BucketLevelSnapshot(level)); - } -} - -template -BucketListSnapshot::BucketListSnapshot( - BucketListSnapshot const& snapshot) - : mLevels(snapshot.mLevels), mHeader(snapshot.mHeader) -{ -} - -template -std::vector> const& -BucketListSnapshot::getLevels() const -{ - return mLevels; -} - -template -uint32_t -BucketListSnapshot::getLedgerSeq() const -{ - return mHeader.ledgerSeq; -} - -template -LedgerHeader const& -SearchableBucketListSnapshotBase::getLedgerHeader() -{ - releaseAssert(mSnapshot); - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - return mSnapshot->getLedgerHeader(); -} - -template -void -SearchableBucketListSnapshotBase::loopAllBuckets( - std::function f, - BucketListSnapshot const& snapshot) const -{ - for (auto const& lev : snapshot.getLevels()) - { - // Return true if we should exit loop early - auto processBucket = [f](BucketSnapshotT const& b) { - if (b.isEmpty()) - { - return false; - } - - return f(b); - }; - - if (processBucket(lev.curr) || processBucket(lev.snap)) - { - return; - } - } -} - EvictionResult SearchableLiveBucketListSnapshot::scanForEviction( uint32_t ledgerSeq, EvictionCounters& counters, @@ -117,7 +41,7 @@ SearchableLiveBucketListSnapshot::scanForEviction( // If we scan scanSize before hitting bucket EOF, exit early if (b.scanForEviction(evictionIter, scanSize, ledgerSeq, - result.eligibleKeys, *this)) + result.eligibleKeys, *this) == Loop::COMPLETE) { break; } @@ -136,61 +60,31 @@ SearchableLiveBucketListSnapshot::scanForEviction( return result; } -std::vector -SearchableLiveBucketListSnapshot::loadKeysWithLimits( +template +std::optional> +SearchableBucketListSnapshotBase::loadKeysInternal( std::set const& inKeys, - LedgerKeyMeter* lkMeter) + LedgerKeyMeter* lkMeter, std::optional ledgerSeq) { ZoneScoped; // Make a copy of the key set, this loop is destructive auto keys = inKeys; - std::vector entries; + std::vector entries; auto loadKeysLoop = [&](auto const& b) { b.loadKeys(keys, entries, lkMeter); - return keys.empty(); + return keys.empty() ? 
Loop::COMPLETE : Loop::INCOMPLETE; }; mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - if (threadIsMain()) - { - auto timer = - mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size()) - .TimeScope(); - loopAllBuckets(loadKeysLoop, *mSnapshot); - } - else - { - // TODO: Background metrics - loopAllBuckets(loadKeysLoop, *mSnapshot); - } - return entries; -} - -std::optional> -SearchableLiveBucketListSnapshot::loadKeysFromLedger( - std::set const& inKeys, uint32_t ledgerSeq) -{ - ZoneScoped; - - // Make a copy of the key set, this loop is destructive - auto keys = inKeys; - std::vector entries; - auto loadKeysLoop = [&](auto const& b) { - b.loadKeys(keys, entries, /*lkMeter=*/nullptr); - return keys.empty(); - }; - - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - - if (ledgerSeq == mSnapshot->getLedgerSeq()) + if (!ledgerSeq || *ledgerSeq == mSnapshot->getLedgerSeq()) { loopAllBuckets(loadKeysLoop, *mSnapshot); } else { - auto iter = mHistoricalSnapshots.find(ledgerSeq); + auto iter = mHistoricalSnapshots.find(*ledgerSeq); if (iter == mHistoricalSnapshots.end()) { return std::nullopt; @@ -203,49 +97,6 @@ SearchableLiveBucketListSnapshot::loadKeysFromLedger( return entries; } -std::shared_ptr -SearchableLiveBucketListSnapshot::load(LedgerKey const& k) -{ - ZoneScoped; - - std::shared_ptr result{}; - auto sawBloomMiss = false; - - // Search function called on each Bucket in BucketList until we find the key - auto loadKeyBucketLoop = [&](auto const& b) { - auto [be, bloomMiss] = b.getBucketEntry(k); - sawBloomMiss = sawBloomMiss || bloomMiss; - - if (be) - { - result = LiveBucket::isTombstoneEntry(*be) - ? nullptr - : std::make_shared(be->liveEntry()); - - return true; - } - else - { - return false; - } - }; - - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - if (threadIsMain()) - { - mSnapshotManager.startPointLoadTimer(); - loopAllBuckets(loadKeyBucketLoop, *mSnapshot); - mSnapshotManager.endPointLoadTimer(k.type(), sawBloomMiss); - return result; - } - else - { - // TODO: Background metrics - loopAllBuckets(loadKeyBucketLoop, *mSnapshot); - return result; - } -} - // This query has two steps: // 1. For each bucket, determine what PoolIDs contain the target asset via the // assetToPoolID index @@ -275,7 +126,7 @@ SearchableLiveBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( trustlinesToLoad.emplace(trustlineKey); } - return false; // continue + return Loop::INCOMPLETE; // continue }; loopAllBuckets(trustLineLoop, *mSnapshot); @@ -288,7 +139,7 @@ SearchableLiveBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( std::vector result; auto loadKeysLoop = [&](auto const& b) { b.loadKeys(trustlinesToLoad, result, /*lkMeter=*/nullptr); - return trustlinesToLoad.empty(); + return trustlinesToLoad.empty() ? 
Loop::COMPLETE : Loop::INCOMPLETE; }; loopAllBuckets(loadKeysLoop, *mSnapshot); @@ -347,7 +198,7 @@ SearchableLiveBucketListSnapshot::loadInflationWinners(size_t maxWinners, } } - return false; + return Loop::INCOMPLETE; }; loopAllBuckets(countVotesInBucket, *mSnapshot); @@ -388,25 +239,14 @@ SearchableLiveBucketListSnapshot::loadInflationWinners(size_t maxWinners, return winners; } -template -BucketLevelSnapshot::BucketLevelSnapshot( - BucketLevel const& level) - : curr(level.getCurr()), snap(level.getSnap()) -{ -} - -template -SearchableBucketListSnapshotBase::SearchableBucketListSnapshotBase( - BucketSnapshotManager const& snapshotManager) - : mSnapshotManager(snapshotManager), mHistoricalSnapshots() -{ - - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); -} - -template -SearchableBucketListSnapshotBase::~SearchableBucketListSnapshotBase() +std::vector +SearchableLiveBucketListSnapshot::loadKeysWithLimits( + std::set const& inKeys, + LedgerKeyMeter* lkMeter) { + auto op = loadKeysInternal(inKeys, lkMeter, std::nullopt); + releaseAssertOrThrow(op); + return std::move(*op); } SearchableLiveBucketListSnapshot::SearchableLiveBucketListSnapshot( @@ -421,81 +261,12 @@ SearchableHotArchiveBucketListSnapshot::SearchableHotArchiveBucketListSnapshot( { } -std::shared_ptr -SearchableHotArchiveBucketListSnapshot::load(LedgerKey const& k) -{ - ZoneScoped; - - // Search function called on each Bucket in BucketList until we find the key - std::shared_ptr result{}; - auto loadKeyBucketLoop = [&](auto const& b) { - auto [be, _] = b.getBucketEntry(k); - - if (be) - { - result = HotArchiveBucket::isTombstoneEntry(*be) ? nullptr : be; - return true; - } - else - { - return false; - } - }; - - // TODO: Metrics - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - loopAllBuckets(loadKeyBucketLoop, *mSnapshot); - return result; -} - std::vector SearchableHotArchiveBucketListSnapshot::loadKeys( std::set const& inKeys) { - auto op = loadKeysFromLedger(inKeys, getLedgerSeq()); + auto op = loadKeysInternal(inKeys, /*lkMeter=*/nullptr, std::nullopt); releaseAssertOrThrow(op); return std::move(*op); } - -std::optional> -SearchableHotArchiveBucketListSnapshot::loadKeysFromLedger( - std::set const& inKeys, uint32_t ledgerSeq) -{ - ZoneScoped; - std::vector entries; - - // Make a copy of the key set, this loop is destructive - auto keys = inKeys; - auto loadKeysLoop = [&](auto const& b) { - b.loadKeys(keys, entries, /*lkMeter=*/nullptr); - return keys.empty(); - }; - - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - - if (ledgerSeq == mSnapshot->getLedgerSeq()) - { - loopAllBuckets(loadKeysLoop, *mSnapshot); - } - else - { - auto iter = mHistoricalSnapshots.find(ledgerSeq); - if (iter == mHistoricalSnapshots.end()) - { - return std::nullopt; - } - - releaseAssert(iter->second); - loopAllBuckets(loadKeysLoop, *iter->second); - } - - return entries; } - -template struct BucketLevelSnapshot; -template struct BucketLevelSnapshot; -template class BucketListSnapshot; -template class BucketListSnapshot; -template class SearchableBucketListSnapshotBase; -template class SearchableBucketListSnapshotBase; -} \ No newline at end of file diff --git a/src/bucket/SearchableBucketList.h b/src/bucket/SearchableBucketList.h new file mode 100644 index 0000000000..c2a4f9ae65 --- /dev/null +++ b/src/bucket/SearchableBucketList.h @@ -0,0 +1,54 @@ +#pragma once + +// Copyright 2024 Stellar Development Foundation and contributors. 
Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "bucket/BucketListSnapshotBase.h"
+#include "bucket/HotArchiveBucket.h"
+#include "bucket/LiveBucket.h"
+
+namespace stellar
+{
+class SearchableLiveBucketListSnapshot
+    : public SearchableBucketListSnapshotBase<LiveBucket>
+{
+    SearchableLiveBucketListSnapshot(
+        BucketSnapshotManager const& snapshotManager);
+
+  public:
+    std::vector<LedgerEntry>
+    loadPoolShareTrustLinesByAccountAndAsset(AccountID const& accountID,
+                                             Asset const& asset);
+
+    std::vector<InflationWinner> loadInflationWinners(size_t maxWinners,
+                                                      int64_t minBalance);
+
+    std::vector<LedgerEntry>
+    loadKeysWithLimits(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
+                       LedgerKeyMeter* lkMeter);
+
+    EvictionResult scanForEviction(uint32_t ledgerSeq,
+                                   EvictionCounters& counters,
+                                   EvictionIterator evictionIter,
+                                   std::shared_ptr<EvictionStatistics> stats,
+                                   StateArchivalSettings const& sas);
+
+    friend std::shared_ptr<SearchableLiveBucketListSnapshot>
+    BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const;
+};
+
+class SearchableHotArchiveBucketListSnapshot
+    : public SearchableBucketListSnapshotBase<HotArchiveBucket>
+{
+    SearchableHotArchiveBucketListSnapshot(
+        BucketSnapshotManager const& snapshotManager);
+
+  public:
+    std::vector<HotArchiveBucketEntry>
+    loadKeys(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys);
+
+    friend std::shared_ptr<SearchableHotArchiveBucketListSnapshot>
+    BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const;
+};
+}
\ No newline at end of file
diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp
index 584f0d8a51..42b8755b2b 100644
--- a/src/bucket/test/BucketIndexTests.cpp
+++ b/src/bucket/test/BucketIndexTests.cpp
@@ -5,10 +5,9 @@
 // This file contains tests for the BucketIndex and higher-level operations
 // concerning key-value lookup based on the BucketList.
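 //
 // (Sketch, not part of this change: the lookup path these tests exercise,
 // assuming a BucketSnapshotManager `bsm` and a LedgerKey set `keys`:
 //
 //     auto snap = bsm.copySearchableLiveBucketListSnapshot();
 //     auto entries = snap->loadKeysWithLimits(keys, /*lkMeter=*/nullptr);
 //
 // Buckets are consulted from the newest level downward until every key is
 // resolved.)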
-#include "bucket/BucketList.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" #include "bucket/BucketSnapshotManager.h" +#include "bucket/LiveBucketList.h" #include "bucket/test/BucketTestUtils.h" #include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" @@ -16,7 +15,6 @@ #include "main/Config.h" #include "test/test.h" -#include "util/ProtocolVersion.h" #include "util/UnorderedMap.h" #include "util/UnorderedSet.h" #include "util/XDRCereal.h" @@ -635,8 +633,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") auto indexFilename = test.getBM().bucketIndexFilename(bucketHash); REQUIRE(fs::exists(indexFilename)); - auto b = BucketManager::getBucketByHash(test.getBM(), - bucketHash); + auto b = test.getBM().getBucketByHash(bucketHash); REQUIRE(b->isIndexed()); auto onDiskIndex = @@ -662,8 +659,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") } // Check if in-memory index has correct params - auto b = BucketManager::getBucketByHash(test.getBM(), - bucketHash); + auto b = test.getBM().getBucketByHash(bucketHash); REQUIRE(!b->isEmpty()); REQUIRE(b->isIndexed()); @@ -808,7 +804,7 @@ TEST_CASE("hot archive bucket lookups", "[bucket][bucketindex][archive]") app->getLedgerManager().getLastClosedLedgerHeader().header; header.ledgerSeq += 1; header.ledgerVersion = static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); addHotArchiveBatchAndUpdateSnapshot(*app, header, archivedEntries, restoredEntries, deletedEntries); checkResult(); diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index d3bb8a2a9b..5bb6a71d52 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -10,12 +10,13 @@ // first to include -- so we try to include it before everything // else. #include "util/asio.h" -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" -#include "bucket/BucketList.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" #include "bucket/BucketOutputIterator.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/HotArchiveBucketList.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "bucket/test/BucketTestUtils.h" #include "crypto/Hex.h" #include "ledger/LedgerTypeUtils.h" @@ -31,8 +32,8 @@ #include "util/Timer.h" #include "util/UnorderedSet.h" #include "xdr/Stellar-ledger.h" -#include "xdrpp/autocheck.h" +#include #include #include diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index f2a500f6e2..fc5390653f 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -10,10 +10,10 @@ // first to include -- so we try to include it before everything // else. #include "util/asio.h" -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" -#include "bucket/BucketManagerImpl.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "bucket/test/BucketTestUtils.h" #include "history/HistoryArchiveManager.h" #include "history/test/HistoryTestsUtils.h" @@ -50,7 +50,7 @@ clearFutures(Application::pointer app, LiveBucketList& bl) } // Then go through all the _worker threads_ and mop up any work they - // might still be doing (that might be "dropping a shared_ptr"). + // might still be doing (that might be "dropping a shared_ptr"). 
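 // (Sketch of the idea, not part of this change: one way to drain the pool
 // is to post a no-op task per worker, e.g. via app->postOnBackgroundThread,
 // and block on a counter until all have run; any merge futures a worker
 // still holds are dropped once its queue turns over.)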
size_t n = static_cast(app->getConfig().WORKER_THREADS); @@ -101,10 +101,10 @@ TEST_CASE("skip list", "[bucket][bucketmanager]") Config const& cfg = getTestConfig(); Application::pointer app = createTestApplication(clock, cfg); - class BucketManagerTest : public BucketManagerImpl + class BucketManagerTest : public BucketManager { public: - BucketManagerTest(Application& app) : BucketManagerImpl(app) + BucketManagerTest(Application& app) : BucketManager(app) { } void diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp index d39c5384b2..ffecf52ef3 100644 --- a/src/bucket/test/BucketTestUtils.cpp +++ b/src/bucket/test/BucketTestUtils.cpp @@ -3,9 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "BucketTestUtils.h" -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "crypto/Hex.h" #include "herder/Herder.h" #include "ledger/LedgerTxn.h" diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp index 841d0ca6e6..bedb9adb69 100644 --- a/src/bucket/test/BucketTests.cpp +++ b/src/bucket/test/BucketTests.cpp @@ -11,10 +11,11 @@ // first to include -- so we try to include it before everything // else. #include "util/asio.h" -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" #include "bucket/BucketOutputIterator.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/LiveBucket.h" #include "bucket/test/BucketTestUtils.h" #include "ledger/LedgerTxn.h" #include "ledger/test/LedgerTestUtils.h" @@ -80,7 +81,7 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]") dead = LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 1000); { - b1 = Bucket::merge( + b1 = BucketBase::merge( app->getBucketManager(), app->getConfig().LEDGER_PROTOCOL_VERSION, b1, LiveBucket::fresh(app->getBucketManager(), @@ -169,11 +170,11 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true); + auto b1 = BucketBase::merge( + bm, vers, bLive, bDead, /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); CHECK(countEntries(b1) == 1); } }; @@ -208,11 +209,11 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto b1 = - Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto b1 = BucketBase::merge(bm, vers, bLive, bDead, /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); EntryCounts e(b1); CHECK(e.sum() == live.size()); CLOG_DEBUG(Bucket, "post-merge live count: {} of {}", e.nLive, @@ -262,12 +263,12 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]") app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - std::shared_ptr b3 = - Bucket::merge(app->getBucketManager(), - app->getConfig().LEDGER_PROTOCOL_VERSION, b1, b2, - /*shadows=*/{}, /*keepTombstoneEntries=*/true, - 
/*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + std::shared_ptr b3 = BucketBase::merge( + app->getBucketManager(), + app->getConfig().LEDGER_PROTOCOL_VERSION, b1, b2, + /*shadows=*/{}, /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); CHECK(countEntries(b3) == liveCount); } }); @@ -319,11 +320,11 @@ TEST_CASE_VERSIONS("merging hot archive bucket entries", "[bucket][archival]") // e2 -> ARCHIVED // e3 -> LIVE // e4 -> DELETED - auto merged = - Bucket::merge(bm, vers, b1, b2, /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto merged = BucketBase::merge(bm, vers, b1, b2, /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true); bool seen1 = false; bool seen4 = false; @@ -462,11 +463,11 @@ TEST_CASE("merges proceed old-style despite newer shadows", { // With proto 12, new bucket version solely depends on the snap version auto bucket = - Bucket::merge(bm, v12, b11first, b11second, - /*shadows=*/{b12first}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + BucketBase::merge(bm, v12, b11first, b11second, + /*shadows=*/{b12first}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); REQUIRE(bucket->getBucketVersion() == v11); } SECTION("shadow versions mixed, pick lower") @@ -474,21 +475,21 @@ TEST_CASE("merges proceed old-style despite newer shadows", // Merging older version (10) buckets, with mixed versions of shadows // (11, 12) Pick initentry (11) style merge auto bucket = - Bucket::merge(bm, v12, b10first, b10second, - /*shadows=*/{b12first, b11second}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + BucketBase::merge(bm, v12, b10first, b10second, + /*shadows=*/{b12first, b11second}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); REQUIRE(bucket->getBucketVersion() == v11); } SECTION("refuse to merge new version with shadow") { - REQUIRE_THROWS_AS(Bucket::merge(bm, v12, b12first, b12second, - /*shadows=*/{b12first}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true), + REQUIRE_THROWS_AS(BucketBase::merge(bm, v12, b12first, b12second, + /*shadows=*/{b12first}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true), std::runtime_error); } } @@ -519,12 +520,12 @@ TEST_CASE("merges refuse to exceed max protocol version", LiveBucket::fresh(bm, vers, {}, {otherLiveA}, {}, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - REQUIRE_THROWS_AS(Bucket::merge(bm, vers - 1, bnew1, bnew2, - /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, - clock.getIOContext(), - /*doFsync=*/true), + REQUIRE_THROWS_AS(BucketBase::merge(bm, vers - 1, bnew1, bnew2, + /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, + clock.getIOContext(), + /*doFsync=*/true), std::runtime_error); } @@ -595,7 +596,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto b1 = Bucket::merge( + auto b1 = BucketBase::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bDead, /*shadows=*/{}, /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, 
clock.getIOContext(), @@ -635,12 +636,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto bmerge1 = Bucket::merge( + auto bmerge1 = BucketBase::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bLive, /*shadows=*/{}, /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto b1 = Bucket::merge( + auto b1 = BucketBase::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bDead, /*shadows=*/{}, /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), @@ -705,12 +706,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry", CHECK(enew.nLive == 0); CHECK(enew.nDead == 1); - auto bmerge1 = Bucket::merge( + auto bmerge1 = BucketBase::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bold, bmed, /*shadows=*/{}, /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto bmerge2 = Bucket::merge( + auto bmerge2 = BucketBase::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bnew, /*shadows=*/{}, /*keepTombstoneEntries=*/true, /*countMergeEvents=*/true, clock.getIOContext(), @@ -792,12 +793,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true); - auto merged = - Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, b1, b2, - /*shadows=*/{shadow}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto merged = BucketBase::merge( + bm, cfg.LEDGER_PROTOCOL_VERSION, b1, b2, + /*shadows=*/{shadow}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); EntryCounts e(merged); if (initEra) { @@ -847,12 +848,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // risking shadowing-out level 3. Level 4 is a placeholder here, // just to be a thing-to-merge-level-3-with in the presence of // shadowing from 1 and 2. - auto merge43 = - Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level4, level3, - /*shadows=*/{level2, level1}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto merge43 = BucketBase::merge( + bm, cfg.LEDGER_PROTOCOL_VERSION, level4, level3, + /*shadows=*/{level2, level1}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); EntryCounts e43(merge43); if (initEra) { @@ -873,12 +874,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // Do a merge between level 2 and 1, producing potentially // an annihilation of their INIT and DEAD pair. - auto merge21 = - Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level2, level1, - /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto merge21 = BucketBase::merge( + bm, cfg.LEDGER_PROTOCOL_VERSION, level2, level1, + /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); EntryCounts e21(merge21); if (initEra) { @@ -899,13 +900,13 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // Do two more merges: one between the two merges we've // done so far, and then finally one with level 5. 
- auto merge4321 = - Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge43, merge21, - /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - auto merge54321 = Bucket::merge( + auto merge4321 = BucketBase::merge( + bm, cfg.LEDGER_PROTOCOL_VERSION, merge43, merge21, + /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + auto merge54321 = BucketBase::merge( bm, cfg.LEDGER_PROTOCOL_VERSION, level5, merge4321, /*shadows=*/{}, /*keepTombstoneEntries=*/true, @@ -956,12 +957,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // shadowing-out the init on level 3. Level 2 is a placeholder here, // just to be a thing-to-merge-level-3-with in the presence of // shadowing from 1. - auto merge32 = - Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level3, level2, - /*shadows=*/{level1}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto merge32 = BucketBase::merge( + bm, cfg.LEDGER_PROTOCOL_VERSION, level3, level2, + /*shadows=*/{level1}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); EntryCounts e32(merge32); if (initEra) { @@ -983,12 +984,12 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", // Now do a merge between that 3+2 merge and level 1, and we risk // collecting tombstones in the lower levels, which we're expressly // trying to _stop_ doing by adding INIT. - auto merge321 = - Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge32, level1, - /*shadows=*/{}, - /*keepTombstoneEntries=*/true, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); + auto merge321 = BucketBase::merge( + bm, cfg.LEDGER_PROTOCOL_VERSION, merge32, level1, + /*shadows=*/{}, + /*keepTombstoneEntries=*/true, + /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); EntryCounts e321(merge321); if (initEra) { diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index e5ff014a86..e37a82680a 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -3,10 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "catchup/ApplyBucketsWork.h" -#include "bucket/Bucket.h" #include "bucket/BucketApplicator.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "catchup/AssumeStateWork.h" #include "catchup/CatchupManager.h" #include "catchup/IndexBucketsWork.h" @@ -90,8 +90,8 @@ ApplyBucketsWork::getBucket(std::string const& hash) auto i = mBuckets.find(hash); auto b = (i != mBuckets.end()) ? 
i->second - : BucketManager::getBucketByHash( - mApp.getBucketManager(), hexToBin256(hash)); + : mApp.getBucketManager().getBucketByHash( + hexToBin256(hash)); releaseAssert(b); return b; } diff --git a/src/catchup/ApplyBufferedLedgersWork.cpp b/src/catchup/ApplyBufferedLedgersWork.cpp index 72d396c5a2..a2ca2af3b5 100644 --- a/src/catchup/ApplyBufferedLedgersWork.cpp +++ b/src/catchup/ApplyBufferedLedgersWork.cpp @@ -3,8 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "catchup/ApplyBufferedLedgersWork.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucketList.h" #include "catchup/ApplyLedgerWork.h" #include "crypto/Hex.h" #include "ledger/LedgerManager.h" diff --git a/src/catchup/ApplyCheckpointWork.cpp b/src/catchup/ApplyCheckpointWork.cpp index cd0ffd822d..4823fb5bc0 100644 --- a/src/catchup/ApplyCheckpointWork.cpp +++ b/src/catchup/ApplyCheckpointWork.cpp @@ -3,18 +3,15 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "catchup/ApplyCheckpointWork.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucketList.h" #include "catchup/ApplyLedgerWork.h" #include "history/FileTransferInfo.h" #include "history/HistoryManager.h" #include "historywork/Progress.h" -#include "invariant/InvariantDoesNotHold.h" #include "ledger/CheckpointRange.h" #include "ledger/LedgerManager.h" #include "main/Application.h" -#include "main/ErrorMessages.h" -#include "util/FileSystemException.h" #include "util/GlobalChecks.h" #include "util/XDRCereal.h" #include diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp index adc2d47adf..235d2f6385 100644 --- a/src/catchup/AssumeStateWork.cpp +++ b/src/catchup/AssumeStateWork.cpp @@ -3,8 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "AssumeStateWork.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucketList.h" #include "catchup/IndexBucketsWork.h" #include "crypto/Hex.h" #include "history/HistoryArchive.h" @@ -28,10 +28,10 @@ AssumeStateWork::AssumeStateWork(Application& app, auto& bm = mApp.getBucketManager(); for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - auto curr = BucketManager::getBucketByHash( - bm, hexToBin256(mHas.currentBuckets.at(i).curr)); - auto snap = BucketManager::getBucketByHash( - bm, hexToBin256(mHas.currentBuckets.at(i).snap)); + auto curr = bm.getBucketByHash( + hexToBin256(mHas.currentBuckets.at(i).curr)); + auto snap = bm.getBucketByHash( + hexToBin256(mHas.currentBuckets.at(i).snap)); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while " @@ -43,8 +43,8 @@ AssumeStateWork::AssumeStateWork(Application& app, auto& nextFuture = mHas.currentBuckets.at(i).next; if (nextFuture.hasOutputHash()) { - auto nextBucket = BucketManager::getBucketByHash( - bm, hexToBin256(nextFuture.getOutputHash())); + auto nextBucket = bm.getBucketByHash( + hexToBin256(nextFuture.getOutputHash())); if (!nextBucket) { throw std::runtime_error("Missing future bucket files while " diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp index bbc177f8c0..91c2cc831c 100644 --- a/src/catchup/CatchupWork.cpp +++ b/src/catchup/CatchupWork.cpp @@ -3,7 +3,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "catchup/CatchupWork.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" #include "catchup/ApplyBucketsWork.h" 
#include "catchup/ApplyBufferedLedgersWork.h" @@ -20,7 +19,6 @@ #include "historywork/DownloadVerifyTxResultsWork.h" #include "historywork/GetAndUnzipRemoteFileWork.h" #include "historywork/GetHistoryArchiveStateWork.h" -#include "historywork/VerifyBucketWork.h" #include "ledger/LedgerManager.h" #include "main/Application.h" #include "main/PersistentState.h" diff --git a/src/catchup/DownloadApplyTxsWork.cpp b/src/catchup/DownloadApplyTxsWork.cpp index a9164b5216..a9dadb2528 100644 --- a/src/catchup/DownloadApplyTxsWork.cpp +++ b/src/catchup/DownloadApplyTxsWork.cpp @@ -3,8 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "catchup/DownloadApplyTxsWork.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucketList.h" #include "catchup/ApplyCheckpointWork.h" #include "history/FileTransferInfo.h" #include "history/HistoryManager.h" diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 900e0d0386..62ea3c289c 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -3,14 +3,11 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "IndexBucketsWork.h" -#include "bucket/Bucket.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" -#include "util/HashOfHash.h" +#include "util/Fs.h" +#include "util/Logging.h" #include "util/UnorderedSet.h" -#include "util/XDRStream.h" -#include "util/types.h" -#include "work/WorkWithCallback.h" #include namespace stellar diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp index bf733a00bf..9ab0e032ba 100644 --- a/src/herder/test/UpgradesTests.cpp +++ b/src/herder/test/UpgradesTests.cpp @@ -3,9 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketInputIterator.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" -#include "bucket/BucketManagerImpl.h" +#include "bucket/LiveBucketList.h" #include "bucket/test/BucketTestUtils.h" #include "crypto/Random.h" #include "herder/Herder.h" @@ -22,7 +21,6 @@ #include "ledger/TrustLineWrapper.h" #include "lib/catch.hpp" #include "simulation/Simulation.h" -#include "simulation/Topologies.h" #include "test/TestExceptions.h" #include "test/TestMarket.h" #include "test/TestUtils.h" diff --git a/src/history/FileTransferInfo.h b/src/history/FileTransferInfo.h index d62e43cd48..348d47de89 100644 --- a/src/history/FileTransferInfo.h +++ b/src/history/FileTransferInfo.h @@ -4,11 +4,10 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" +#include "bucket/LiveBucket.h" #include "crypto/Hex.h" #include "main/Config.h" #include "util/Fs.h" -#include "util/Logging.h" #include "util/TmpDir.h" #include diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp index 235d4f6206..aa6da206a4 100644 --- a/src/history/HistoryArchive.cpp +++ b/src/history/HistoryArchive.cpp @@ -7,15 +7,14 @@ // else. 
#include "util/asio.h" #include "history/HistoryArchive.h" -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "crypto/Hex.h" #include "crypto/SHA.h" #include "history/HistoryManager.h" #include "main/Application.h" #include "main/StellarCoreVersion.h" -#include "process/ProcessManager.h" #include "util/Fs.h" #include "util/GlobalChecks.h" #include "util/Logging.h" @@ -26,9 +25,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -306,8 +303,8 @@ HistoryArchiveState::containsValidBuckets(Application& app) const // Process bucket, return version auto processBucket = [&](std::string const& bucketHash) { - auto bucket = BucketManager::getBucketByHash( - app.getBucketManager(), hexToBin256(bucketHash)); + auto bucket = app.getBucketManager().getBucketByHash( + hexToBin256(bucketHash)); releaseAssert(bucket); int32_t version = 0; if (!bucket->isEmpty()) @@ -390,8 +387,8 @@ HistoryArchiveState::prepareForPublish(Application& app) auto& level = currentBuckets[i]; auto& prev = currentBuckets[i - 1]; - auto snap = BucketManager::getBucketByHash( - app.getBucketManager(), hexToBin256(prev.snap)); + auto snap = app.getBucketManager().getBucketByHash( + hexToBin256(prev.snap)); if (!level.next.isClear() && protocolVersionStartsFrom( snap->getBucketVersion(), diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp index 9d1a99d54b..688c4f603d 100644 --- a/src/history/HistoryManagerImpl.cpp +++ b/src/history/HistoryManagerImpl.cpp @@ -7,10 +7,8 @@ // else. #include "util/asio.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" -#include "crypto/Hex.h" -#include "crypto/SHA.h" +#include "bucket/LiveBucketList.h" #include "herder/HerderImpl.h" #include "history/HistoryArchive.h" #include "history/HistoryArchiveManager.h" @@ -27,23 +25,17 @@ #include "main/Config.h" #include "medida/meter.h" #include "medida/metrics_registry.h" -#include "overlay/StellarXDR.h" -#include "process/ProcessManager.h" #include "transactions/TransactionSQL.h" #include "util/GlobalChecks.h" #include "util/Logging.h" -#include "util/Math.h" #include "util/StatusManager.h" #include "util/TmpDir.h" #include "work/ConditionalWork.h" #include "work/WorkScheduler.h" -#include "xdrpp/marshal.h" #include #include -#include #include -#include namespace stellar { diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp index 1520d9168d..0d5a3e5185 100644 --- a/src/history/StateSnapshot.cpp +++ b/src/history/StateSnapshot.cpp @@ -3,16 +3,15 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "history/StateSnapshot.h" -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "crypto/Hex.h" #include "database/Database.h" #include "herder/HerderPersistence.h" #include "history/FileTransferInfo.h" #include "history/HistoryArchive.h" #include "history/HistoryManager.h" -#include "ledger/LedgerHeaderUtils.h" #include "main/Application.h" #include "main/Config.h" #include "transactions/TransactionSQL.h" @@ -121,8 +120,8 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) for (auto const& hash : mLocalState.differingBuckets(other)) { - auto b = BucketManager::getBucketByHash( - mApp.getBucketManager(), hexToBin256(hash)); + auto b = 
mApp.getBucketManager().getBucketByHash( + hexToBin256(hash)); releaseAssert(b); addIfExists(std::make_shared(*b)); } diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp index f510be9d59..f6132ebcec 100644 --- a/src/history/test/HistoryTestsUtils.cpp +++ b/src/history/test/HistoryTestsUtils.cpp @@ -1021,10 +1021,8 @@ CatchupSimulation::validateCatchup(Application::pointer app) CHECK(wantBucketListHash == haveBucketListHash); CHECK(wantHash == haveHash); - CHECK(BucketManager::getBucketByHash(app->getBucketManager(), - wantBucket0Hash)); - CHECK(BucketManager::getBucketByHash(app->getBucketManager(), - wantBucket1Hash)); + CHECK(app->getBucketManager().getBucketByHash(wantBucket0Hash)); + CHECK(app->getBucketManager().getBucketByHash(wantBucket1Hash)); CHECK(wantBucket0Hash == haveBucket0Hash); CHECK(wantBucket1Hash == haveBucket1Hash); diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h index 7ab09e1443..88453ef6a6 100644 --- a/src/history/test/HistoryTestsUtils.h +++ b/src/history/test/HistoryTestsUtils.h @@ -4,7 +4,8 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/BucketList.h" +#include "bucket/HotArchiveBucketList.h" +#include "bucket/LiveBucketList.h" #include "catchup/VerifyLedgerChainWork.h" #include "crypto/Hex.h" #include "herder/HerderImpl.h" diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp index c31f71f75d..0f519a2b4d 100644 --- a/src/historywork/DownloadBucketsWork.cpp +++ b/src/historywork/DownloadBucketsWork.cpp @@ -95,8 +95,8 @@ DownloadBucketsWork::yieldMoreWork() if (self) { auto bucketPath = ft.localPath_nogz(); - auto b = BucketManager::adoptFileAsBucket( - app.getBucketManager(), bucketPath, hexToBin256(hash), + auto b = app.getBucketManager().adoptFileAsBucket( + bucketPath, hexToBin256(hash), /*mergeKey=*/nullptr, /*index=*/nullptr); self->mBuckets[hash] = b; diff --git a/src/historywork/DownloadBucketsWork.h b/src/historywork/DownloadBucketsWork.h index 52db6cd968..573f5d8a82 100644 --- a/src/historywork/DownloadBucketsWork.h +++ b/src/historywork/DownloadBucketsWork.h @@ -3,7 +3,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #pragma once -#include "bucket/Bucket.h" +#include "bucket/LiveBucket.h" #include "historywork/Progress.h" #include "medida/meter.h" #include "medida/metrics_registry.h" diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp index 11aa8ba0dc..9f99e44afb 100644 --- a/src/invariant/BucketListIsConsistentWithDatabase.cpp +++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp @@ -3,11 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "invariant/BucketListIsConsistentWithDatabase.h" -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" -#include "crypto/Hex.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "history/HistoryArchive.h" #include "invariant/InvariantManager.h" #include "ledger/LedgerManager.h" diff --git a/src/invariant/InvariantManagerImpl.cpp b/src/invariant/InvariantManagerImpl.cpp index b5157bfd0e..d20177f1f6 100644 --- a/src/invariant/InvariantManagerImpl.cpp +++ b/src/invariant/InvariantManagerImpl.cpp @@ -3,8 +3,8 @@ // of this distribution or 
at http://www.apache.org/licenses/LICENSE-2.0 #include "invariant/InvariantManagerImpl.h" -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" +#include "bucket/LiveBucket.h" +#include "bucket/LiveBucketList.h" #include "crypto/Hex.h" #include "invariant/Invariant.h" #include "invariant/InvariantDoesNotHold.h" diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp index 27a7ae7f89..7a2a1f6b62 100644 --- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp +++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp @@ -2,10 +2,10 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" #include "bucket/BucketOutputIterator.h" +#include "bucket/LiveBucket.h" #include "bucket/test/BucketTestUtils.h" #include "catchup/ApplyBucketsWork.h" #include "ledger/LedgerHashUtils.h" diff --git a/src/invariant/test/InvariantTests.cpp b/src/invariant/test/InvariantTests.cpp index db5b8fb477..448dedf7ca 100644 --- a/src/invariant/test/InvariantTests.cpp +++ b/src/invariant/test/InvariantTests.cpp @@ -4,7 +4,6 @@ #include "util/asio.h" -#include "bucket/Bucket.h" #include "database/Database.h" #include "herder/TxSetFrame.h" #include "invariant/Invariant.h" diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index c5436f841e..6499885f20 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -3,8 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "ledger/LedgerManagerImpl.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucketList.h" #include "catchup/AssumeStateWork.h" #include "crypto/Hex.h" #include "crypto/KeyUtils.h" @@ -19,14 +19,12 @@ #include "history/HistoryManager.h" #include "ledger/FlushAndRotateMetaDebugWork.h" #include "ledger/LedgerHeaderUtils.h" -#include "ledger/LedgerRange.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnEntry.h" #include "ledger/LedgerTxnHeader.h" #include "main/Application.h" #include "main/Config.h" #include "main/ErrorMessages.h" -#include "overlay/OverlayManager.h" #include "transactions/MutableTransactionResult.h" #include "transactions/OperationFrame.h" #include "transactions/TransactionFrameBase.h" @@ -40,7 +38,6 @@ #include "util/Logging.h" #include "util/ProtocolVersion.h" #include "util/XDRCereal.h" -#include "util/XDROperators.h" #include "util/XDRStream.h" #include "work/WorkScheduler.h" @@ -58,7 +55,6 @@ #include #include -#include #include #include #include diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp index e97864e0d1..3455d51131 100644 --- a/src/ledger/LedgerStateSnapshot.cpp +++ b/src/ledger/LedgerStateSnapshot.cpp @@ -3,7 +3,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "ledger/LedgerStateSnapshot.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" #include "bucket/BucketSnapshotManager.h" #include "ledger/LedgerTxn.h" diff --git a/src/ledger/LedgerStateSnapshot.h b/src/ledger/LedgerStateSnapshot.h index dc4f6f76f9..87833034c2 100644 --- a/src/ledger/LedgerStateSnapshot.h +++ b/src/ledger/LedgerStateSnapshot.h @@ -4,10 +4,11 @@ #pragma once -#include "bucket/BucketListSnapshot.h" #include 
"bucket/BucketSnapshotManager.h" +#include "bucket/SearchableBucketList.h" #include "ledger/LedgerTxn.h" #include "util/NonCopyable.h" +#include namespace stellar { diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index 348cb6fc68..14896d01e0 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -3,12 +3,9 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "ledger/LedgerTxn.h" -#include "bucket/BucketList.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" -#include "crypto/Hex.h" +#include "bucket/SearchableBucketList.h" #include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" #include "database/Database.h" #include "ledger/LedgerRange.h" #include "ledger/LedgerTxnEntry.h" @@ -19,16 +16,12 @@ #include "main/Application.h" #include "transactions/TransactionUtils.h" #include "util/GlobalChecks.h" -#include "util/XDROperators.h" -#include "util/XDRStream.h" #include "util/types.h" #include "xdr/Stellar-ledger-entries.h" -#include "xdrpp/marshal.h" #include #include #include -#include namespace stellar { diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index 5b6d9299e7..b536cb3777 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -4,7 +4,6 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/BucketList.h" #include "database/Database.h" #include "ledger/LedgerTxn.h" #include "util/RandomEvictionCache.h" diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp index 7edb34ff09..f22144668e 100644 --- a/src/ledger/NetworkConfig.cpp +++ b/src/ledger/NetworkConfig.cpp @@ -3,8 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "ledger/NetworkConfig.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" +#include "bucket/LiveBucketList.h" #include "bucket/test/BucketTestUtils.h" #include "main/Application.h" #include "util/ProtocolVersion.h" diff --git a/src/ledger/test/LedgerCloseMetaStreamTests.cpp b/src/ledger/test/LedgerCloseMetaStreamTests.cpp index efdff716d5..ac75139260 100644 --- a/src/ledger/test/LedgerCloseMetaStreamTests.cpp +++ b/src/ledger/test/LedgerCloseMetaStreamTests.cpp @@ -3,7 +3,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketManager.h" -#include "bucket/BucketManagerImpl.h" #include "bucket/test/BucketTestUtils.h" #include "catchup/ReplayDebugMetaWork.h" #include "crypto/Hex.h" diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index 67f7f5dfd4..6d9c32f613 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -14,8 +14,6 @@ // first to include -- so we try to include it before everything // else. 
#include "util/asio.h" -#include "bucket/Bucket.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" #include "catchup/ApplyBucketsWork.h" #include "crypto/Hex.h" @@ -25,7 +23,6 @@ #include "herder/Herder.h" #include "herder/HerderPersistence.h" #include "history/HistoryArchiveManager.h" -#include "history/HistoryArchiveReportWork.h" #include "history/HistoryManager.h" #include "invariant/AccountSubEntriesCountIsValid.h" #include "invariant/BucketListIsConsistentWithDatabase.h" @@ -55,8 +52,6 @@ #include "overlay/OverlayManager.h" #include "overlay/OverlayManagerImpl.h" #include "process/ProcessManager.h" -#include "scp/LocalNode.h" -#include "scp/QuorumSetUtils.h" #include "util/GlobalChecks.h" #include "util/LogSlowExecution.h" #include "util/Logging.h" diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 2e41fe21b3..26e5343ef6 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -3,8 +3,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "main/ApplicationUtils.h" -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" #include "catchup/ApplyBucketsWork.h" #include "catchup/CatchupConfiguration.h" @@ -21,7 +19,6 @@ #include "ledger/LedgerManager.h" #include "ledger/LedgerTypeUtils.h" #include "main/ErrorMessages.h" -#include "main/ExternalQueue.h" #include "main/Maintainer.h" #include "main/PersistentState.h" #include "main/StellarCoreVersion.h" @@ -33,7 +30,6 @@ #include "util/xdrquery/XDRQuery.h" #include "work/WorkScheduler.h" -#include #include #include #include @@ -240,8 +236,8 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, std::set> retained; for (auto const& b : has.allBuckets()) { - auto bPtr = BucketManager::getBucketByHash( - app->getBucketManager(), hexToBin256(b)); + auto bPtr = app->getBucketManager().getBucketByHash( + hexToBin256(b)); releaseAssert(bPtr); retained.insert(bPtr); } @@ -665,7 +661,7 @@ dumpStateArchivalStatistics(Config cfg) { continue; } - auto b = BucketManager::getBucketByHash(bm, hash); + auto b = bm.getBucketByHash(hash); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + diff --git a/src/main/CommandHandler.cpp b/src/main/CommandHandler.cpp index fd8d9e3034..e2e22dbe44 100644 --- a/src/main/CommandHandler.cpp +++ b/src/main/CommandHandler.cpp @@ -3,18 +3,13 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "main/CommandHandler.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" #include "bucket/BucketSnapshotManager.h" -#include "crypto/Hex.h" #include "crypto/KeyUtils.h" #include "herder/Herder.h" #include "history/HistoryArchiveManager.h" -#include "ledger/InternalLedgerEntry.h" #include "ledger/LedgerManager.h" #include "ledger/LedgerTxn.h" -#include "ledger/LedgerTxnEntry.h" -#include "ledger/LedgerTxnImpl.h" #include "ledger/NetworkConfig.h" #include "lib/http/server.hpp" #include "lib/json/json.h" @@ -25,20 +20,17 @@ #include "overlay/BanManager.h" #include "overlay/OverlayManager.h" #include "overlay/SurveyManager.h" -#include "transactions/InvokeHostFunctionOpFrame.h" #include "transactions/MutableTransactionResult.h" #include "transactions/TransactionBridge.h" #include "transactions/TransactionUtils.h" #include "util/GlobalChecks.h" #include "util/Logging.h" -#include "util/StatusManager.h" #include #include #include "medida/reporting/json_reporter.h" #include "util/Decoder.h" #include 
"util/XDRCereal.h" -#include "util/XDROperators.h" #include "util/XDRStream.h" // IWYU pragma: keep #include "xdr/Stellar-ledger-entries.h" #include "xdr/Stellar-transaction.h" @@ -51,9 +43,7 @@ #include "test/TestAccount.h" #include "test/TxTests.h" #endif -#include #include -#include using std::placeholders::_1; using std::placeholders::_2; diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index be58b992c2..406d6d3b58 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -1927,9 +1927,11 @@ runApplyLoad(CommandLineArgs const& args) for (size_t i = 0; i < 100; ++i) { - app.getBucketManager().getBucketList().resolveAllFutures(); + app.getBucketManager() + .getLiveBucketList() + .resolveAllFutures(); releaseAssert(app.getBucketManager() - .getBucketList() + .getLiveBucketList() .futuresAllResolved()); al.benchmark(); } diff --git a/src/main/Config.cpp b/src/main/Config.cpp index c20c617616..6520ae72ea 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -4,8 +4,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "main/Config.h" -#include "bucket/BucketList.h" -#include "crypto/Hex.h" #include "crypto/KeyUtils.h" #include "herder/Herder.h" #include "history/HistoryArchive.h" @@ -17,8 +15,6 @@ #include "util/Fs.h" #include "util/GlobalChecks.h" #include "util/Logging.h" -#include "util/XDROperators.h" -#include "util/types.h" #include "overlay/OverlayManager.h" #include "util/UnorderedSet.h" diff --git a/src/main/QueryServer.cpp b/src/main/QueryServer.cpp index 95f1d80a44..18bba19423 100644 --- a/src/main/QueryServer.cpp +++ b/src/main/QueryServer.cpp @@ -3,8 +3,8 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "main/QueryServer.h" -#include "bucket/BucketListSnapshot.h" #include "bucket/BucketSnapshotManager.h" +#include "bucket/SearchableBucketList.h" #include "ledger/LedgerTxnImpl.h" #include "util/Logging.h" #include "util/XDRStream.h" // IWYU pragma: keep diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index 9484d39543..b36f9bf732 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -131,8 +131,7 @@ checkState(Application& app) if (nextFuture.hasOutputHash()) { auto hash = hexToBin256(nextFuture.getOutputHash()); - checkBucket( - BucketManager::getBucketByHash(bm, hash)); + checkBucket(bm.getBucketByHash(hash)); } } } diff --git a/src/simulation/CoreTests.cpp b/src/simulation/CoreTests.cpp index 423b77e211..ab91094f72 100644 --- a/src/simulation/CoreTests.cpp +++ b/src/simulation/CoreTests.cpp @@ -2,29 +2,21 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" -#include "bucket/BucketList.h" #include "bucket/BucketManager.h" -#include "bucket/BucketManagerImpl.h" -#include "bucket/LedgerCmp.h" #include "bucket/test/BucketTestUtils.h" #include "crypto/SHA.h" #include "herder/HerderImpl.h" -#include "herder/LedgerCloseData.h" #include "ledger/LedgerManager.h" #include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "lib/util/stdrandom.h" #include "main/Application.h" #include "medida/stats/snapshot.h" -#include "overlay/StellarXDR.h" #include "simulation/Topologies.h" #include "test/test.h" #include "transactions/TransactionFrame.h" #include "util/Logging.h" #include "util/Math.h" -#include "util/types.h" -#include "xdrpp/autocheck.h" #include #include diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index b6948485c4..ca71df5870 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -870,9 +870,9 @@ TEST_CASE("apply load", "[loadgen][applyload]") cpuInsRatioExclVm.Clear(); for (size_t i = 0; i < 100; ++i) { - app->getBucketManager().getBucketList().resolveAllFutures(); + app->getBucketManager().getLiveBucketList().resolveAllFutures(); releaseAssert( - app->getBucketManager().getBucketList().futuresAllResolved()); + app->getBucketManager().getLiveBucketList().futuresAllResolved()); al.benchmark(); } diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index 3cf7cef02c..a4305b03ab 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -3,7 +3,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "TestUtils.h" -#include "bucket/BucketList.h" #include "overlay/test/LoopbackPeer.h" #include "simulation/LoadGenerator.h" #include "simulation/Simulation.h" diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h index 3588f21d1a..5bf421efbd 100644 --- a/src/test/TestUtils.h +++ b/src/test/TestUtils.h @@ -4,7 +4,8 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/BucketList.h" +#include "bucket/HotArchiveBucketList.h" +#include "bucket/LiveBucketList.h" #include "invariant/InvariantDoesNotHold.h" #include "invariant/InvariantManagerImpl.h" #include "ledger/LedgerManagerImpl.h" @@ -48,7 +49,7 @@ testBucketMetadata(uint32_t protocolVersion) meta.ledgerVersion = protocolVersion; if (protocolVersionStartsFrom( protocolVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { meta.ext.v(1); meta.ext.bucketListType() = BucketListType::LIVE; diff --git a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json index dce4c10068..943063068f 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "b38e640c116c5c20e6f29bb3263c3958838612894976459bcc3919f20a810f17", + "hash": "ca60f54d9329381ca6d530366a1db8a91fc559302c9ec64e534764a62d1adf69", "header": { "ledgerVersion": 23, - "previousLedgerHash": "5f811c592f6e64c3c1887ac5e87a800705e24973f2a421e73b195a9b536ce16e", + "previousLedgerHash": "4aefc4467fab107446aeef0ba765631ec3e229ec08e4b29a5341ac38fdf59f9a", "scpValue": { - "txSetHash": "52700cda108028294a84b828eb308a308bfa92e9c22ddd7b232e1528377182de", + "txSetHash": "8c0d57e026680029e6628fb1b7c68d9b2861fde3481215d8e42b136a466b6fbe", "closeTime": 1451692800, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "3f162636d634ea16f9c120aa7868a0dbfd5ec9159af00a2f86f086009f280b70ee53ace9710f4e72ed96b79c2431620eb4823989e1ae4d54f1e1c675f2490903" + "signature": "b53daa10435473233214d2862f9af7cd5f71ff6b162717124700add4d9a5de24f45303c46ef78feb2aa724348b2771555549689bc3e54a5ad6fbd4e02d4e6f0d" } } }, - "txSetResultHash": "665c93b57fa49e038bb55b65055f7e3e11474e925aa7a5ad22aa77d07151eace", - "bucketListHash": "1cc6467cfd1511d8dc3a5e83d5fee466693801eea4f992b779e3ec6df7a5698f", + "txSetResultHash": "5edc137152a404b1ea08e31b691098b8b8ab53a30041af25868849694535ecf5", + "bucketListHash": "d71ea87f579389c8c976fca3cf1b755de14330d7aaa35e6544411ec2bd1cf443", "ledgerSeq": 28, "totalCoins": 1000000000000000000, "feePool": 804520, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "5f811c592f6e64c3c1887ac5e87a800705e24973f2a421e73b195a9b536ce16e", + "previousLedgerHash": "4aefc4467fab107446aeef0ba765631ec3e229ec08e4b29a5341ac38fdf59f9a", "phases": [ { "v": 0, @@ -503,18 +503,18 @@ "txProcessing": [ { "result": { - "transactionHash": "bb0a6b13caea6b015555dfd332aca1099e8654896bf7d1bcce8432e833a2572a", + "transactionHash": "364ec41dce0a678476ea3ebfc5caa28165ef3bf0976071d858b1c4044f187d25", "result": { - "feeCharged": 61612, + "feeCharged": 60559, "result": { - "code": "txFAILED", + "code": "txSUCCESS", "results": [ { "code": "opINNER", "tr": { - "type": "INVOKE_HOST_FUNCTION", - "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_TRAPPED" + "type": "EXTEND_FOOTPRINT_TTL", + "extendFootprintTTLResult": { + "code": "EXTEND_FOOTPRINT_TTL_SUCCESS" } } } @@ -529,13 +529,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 11, + "lastModifiedLedgerSeq": 9, "data": { "type": "ACCOUNT", "account": { - "accountID": 
"GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 400000000, - "seqNum": 47244640256, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -559,9 +559,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640256, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -593,9 +593,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640256, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -619,9 +619,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640257, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -663,7 +663,76 @@ } } ], - "operations": [], + "operations": [ + { + "changes": [ + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 6, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", + "liveUntilLedgerSeq": 10006 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", + "liveUntilLedgerSeq": 10028 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 6, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", + "liveUntilLedgerSeq": 10006 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", + "liveUntilLedgerSeq": 10028 + } + }, + "ext": { + "v": 0 + } + } + } + ] + } + ], "txChangesAfter": [ { "type": "LEDGER_ENTRY_STATE", @@ -672,9 +741,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640257, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -722,9 +791,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399938388, - "seqNum": 47244640257, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399939441, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -782,18 +851,19 @@ }, { "result": { - "transactionHash": "62d28c373389d447341e9d75bc84e2c91437169a2a70d3606c8b3aa7d198ef5c", + "transactionHash": "e310227a8c0d8d1f78632e65ebca281cd60d8619c9afc64491bcce98e7cd7ee3", 
"result": { - "feeCharged": 42954, + "feeCharged": 106775, "result": { - "code": "txFAILED", + "code": "txSUCCESS", "results": [ { "code": "opINNER", "tr": { "type": "INVOKE_HOST_FUNCTION", "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED" + "code": "INVOKE_HOST_FUNCTION_SUCCESS", + "success": "cbbc48750debb8535093b3deaf88ac7f4cff87425576a58de2bac754acdb4616" } } } @@ -808,13 +878,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 12, + "lastModifiedLedgerSeq": 10, "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", "balance": 400000000, - "seqNum": 51539607552, + "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -838,9 +908,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607552, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -872,9 +942,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607552, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -898,9 +968,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607553, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -942,7 +1012,55 @@ } } ], - "operations": [], + "operations": [ + { + "changes": [ + { + "type": "LEDGER_ENTRY_CREATED", + "created": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "CONTRACT_DATA", + "contractData": { + "ext": { + "v": 0 + }, + "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", + "key": { + "type": "SCV_SYMBOL", + "sym": "key" + }, + "durability": "PERSISTENT", + "val": { + "type": "SCV_U64", + "u64": 42 + } + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_CREATED", + "created": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "764f4e59e20ac1a357f9f26ab0eaf46d196ab74822db44f039353a6f114864aa", + "liveUntilLedgerSeq": 47 + } + }, + "ext": { + "v": 0 + } + } + } + ] + } + ], "txChangesAfter": [ { "type": "LEDGER_ENTRY_STATE", @@ -951,9 +1069,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607553, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399873274, + "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1001,9 +1119,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 399957046, - "seqNum": 51539607553, + "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "balance": 399893225, + "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1051,8 +1169,7 @@ }, "events": 
[], "returnValue": { - "type": "SCV_BOOL", - "b": "FALSE" + "type": "SCV_VOID" }, "diagnosticEvents": [] } @@ -1377,19 +1494,18 @@ }, { "result": { - "transactionHash": "e310227a8c0d8d1f78632e65ebca281cd60d8619c9afc64491bcce98e7cd7ee3", + "transactionHash": "62d28c373389d447341e9d75bc84e2c91437169a2a70d3606c8b3aa7d198ef5c", "result": { - "feeCharged": 106775, + "feeCharged": 42954, "result": { - "code": "txSUCCESS", + "code": "txFAILED", "results": [ { "code": "opINNER", "tr": { "type": "INVOKE_HOST_FUNCTION", "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_SUCCESS", - "success": "cbbc48750debb8535093b3deaf88ac7f4cff87425576a58de2bac754acdb4616" + "code": "INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED" } } } @@ -1404,13 +1520,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 10, + "lastModifiedLedgerSeq": 12, "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", "balance": 400000000, - "seqNum": 42949672960, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1434,9 +1550,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672960, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1468,9 +1584,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672960, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1494,9 +1610,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672961, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1538,55 +1654,7 @@ } } ], - "operations": [ - { - "changes": [ - { - "type": "LEDGER_ENTRY_CREATED", - "created": { - "lastModifiedLedgerSeq": 28, - "data": { - "type": "CONTRACT_DATA", - "contractData": { - "ext": { - "v": 0 - }, - "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", - "key": { - "type": "SCV_SYMBOL", - "sym": "key" - }, - "durability": "PERSISTENT", - "val": { - "type": "SCV_U64", - "u64": 42 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_CREATED", - "created": { - "lastModifiedLedgerSeq": 28, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "764f4e59e20ac1a357f9f26ab0eaf46d196ab74822db44f039353a6f114864aa", - "liveUntilLedgerSeq": 47 - } - }, - "ext": { - "v": 0 - } - } - } - ] - } - ], + "operations": [], "txChangesAfter": [ { "type": "LEDGER_ENTRY_STATE", @@ -1595,9 +1663,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, - "seqNum": 42949672961, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1645,9 +1713,9 @@ "data": { "type": "ACCOUNT", "account": { - 
"accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399893225, - "seqNum": 42949672961, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 399957046, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1695,7 +1763,8 @@ }, "events": [], "returnValue": { - "type": "SCV_VOID" + "type": "SCV_BOOL", + "b": "FALSE" }, "diagnosticEvents": [] } @@ -1704,18 +1773,18 @@ }, { "result": { - "transactionHash": "364ec41dce0a678476ea3ebfc5caa28165ef3bf0976071d858b1c4044f187d25", + "transactionHash": "bb0a6b13caea6b015555dfd332aca1099e8654896bf7d1bcce8432e833a2572a", "result": { - "feeCharged": 60559, + "feeCharged": 61612, "result": { - "code": "txSUCCESS", + "code": "txFAILED", "results": [ { "code": "opINNER", "tr": { - "type": "EXTEND_FOOTPRINT_TTL", - "extendFootprintTTLResult": { - "code": "EXTEND_FOOTPRINT_TTL_SUCCESS" + "type": "INVOKE_HOST_FUNCTION", + "invokeHostFunctionResult": { + "code": "INVOKE_HOST_FUNCTION_TRAPPED" } } } @@ -1730,13 +1799,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 9, + "lastModifiedLedgerSeq": 11, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", "balance": 400000000, - "seqNum": 38654705664, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1760,9 +1829,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705664, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1794,9 +1863,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705664, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1820,9 +1889,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705665, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1864,76 +1933,7 @@ } } ], - "operations": [ - { - "changes": [ - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 6, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", - "liveUntilLedgerSeq": 10006 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 28, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", - "liveUntilLedgerSeq": 10028 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 6, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", - "liveUntilLedgerSeq": 10006 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 
28, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", - "liveUntilLedgerSeq": 10028 - } - }, - "ext": { - "v": 0 - } - } - } - ] - } - ], + "operations": [], "txChangesAfter": [ { "type": "LEDGER_ENTRY_STATE", @@ -1942,9 +1942,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705665, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898388, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1992,9 +1992,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399939441, - "seqNum": 38654705665, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399938388, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, diff --git a/src/testdata/ledger-close-meta-v1-protocol-23.json b/src/testdata/ledger-close-meta-v1-protocol-23.json index 164bfe461b..8b885c3a8e 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "df0caa7841395c80b0f7b3dec4f13e5058a406fd0cb0959375c6830ea311e32e", + "hash": "1f1a643fd94c3ec834009d049a7e82053f22ae0f4571f123e7ef0a80463a6c38", "header": { "ledgerVersion": 23, - "previousLedgerHash": "238186da4a6e457877adec84246cbb50dd054cc81cd913ef97cffb492ff6ac74", + "previousLedgerHash": "bd01346128a3d366530ceed22c4e6d07f299fd2723fe5f5fa7fd9217cdb14bc4", "scpValue": { - "txSetHash": "6755cddd3f4b967d42930b3eb84bbd991ccf2f0ddec05fc85f24e77dcd6746d7", + "txSetHash": "f0c030c5a4294f8a0df3e91468c7a7a3a0f7b0ebd370cf9b2a08e78146be7fc5", "closeTime": 0, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "2d46696da63265a28b3055ccc9cdb2aa32d1008f4732f130dcda8842235a2419ac5432290892243f636563e42fa9b3829f31b8daef8b1e1142de52a770c3130f" + "signature": "3fa08aaa2ad7f9d98f6298bce516627b41dd49b3436f09a48dedd45e4228c1b4ea8815353ae2dcf86740e66d2a6dc8eae9a405eb7e2cc6957c899024b96b190f" } } }, - "txSetResultHash": "249b974bacf8b5c4a8f0b5598194c1b9eca64af0b5c1506daa871c1533b6baac", - "bucketListHash": "5ba9bbd81fb831cf30cf89b221629d376e563373bc6b56e1c44e82adca5e427f", + "txSetResultHash": "f66233c106977a4cc148e019411ff6ddfaf76c337d004ed9a304a70407b161d0", + "bucketListHash": "2cad5583e8417a71e3ab5033de493900a0febc5bb540240b3ef2367c4f04339e", "ledgerSeq": 7, "totalCoins": 1000000000000000000, "feePool": 800, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "238186da4a6e457877adec84246cbb50dd054cc81cd913ef97cffb492ff6ac74", + "previousLedgerHash": "bd01346128a3d366530ceed22c4e6d07f299fd2723fe5f5fa7fd9217cdb14bc4", "phases": [ { "v": 0, @@ -185,22 +185,43 @@ "txProcessing": [ { "result": { - "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", + "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", "result": { - "feeCharged": 100, + "feeCharged": 300, "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } + "code": "txFEE_BUMP_INNER_SUCCESS", + "innerResultPair": { + "transactionHash": 
"b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", + "result": { + "feeCharged": 200, + "result": { + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + }, + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + } + ] + }, + "ext": { + "v": 0 } } - ] + } }, "ext": { "v": 0 @@ -211,13 +232,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 5, + "lastModifiedLedgerSeq": 4, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989700, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 400000000, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -225,31 +246,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -265,9 +262,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -275,31 +272,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -323,9 +296,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -333,31 +306,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -373,9 +322,61 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], + "ext": { + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 5, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836480, + "numSubEntries": 0, + 
"inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], + "ext": { + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836481, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -425,43 +426,18 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 399999900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 0, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -475,43 +451,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 400000900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -519,49 +470,28 @@ "v": 0 } } - }, + } + ] + }, + { + "changes": [ { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -575,43 +505,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999988600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - 
"thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 100, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -630,43 +535,22 @@ }, { "result": { - "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", + "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", "result": { - "feeCharged": 300, + "feeCharged": 100, "result": { - "code": "txFEE_BUMP_INNER_SUCCESS", - "innerResultPair": { - "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", - "result": { - "feeCharged": 200, - "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - }, - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - } - ] - }, - "ext": { - "v": 0 + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } } } - } + ] }, "ext": { "v": 0 @@ -677,13 +561,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 4, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 400000000, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989700, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -691,7 +575,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -707,9 +615,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -717,85 +625,57 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - } - ], - "txApplyProcessing": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "txChangesBefore": [ - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 7, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 7, - 
"data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, "ext": { - "v": 0 + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } } } - }, - "ext": { - "v": 0 } } }, + "ext": { + "v": 0 + } + } + } + ], + "txApplyProcessing": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "txChangesBefore": [ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 5, + "lastModifiedLedgerSeq": 7, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836480, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -803,7 +683,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -819,9 +723,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836481, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -871,18 +775,43 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 0, - "limit": 100, - "flags": 1, + "balance": 399999900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -896,18 +825,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "balance": 400000900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": 
{ + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -915,28 +869,49 @@ "v": 0 } } - } - ] - }, - { - "changes": [ + }, { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -950,18 +925,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 100, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999988600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, diff --git a/src/transactions/TransactionUtils.cpp b/src/transactions/TransactionUtils.cpp index bcdb02c257..48b87587db 100644 --- a/src/transactions/TransactionUtils.cpp +++ b/src/transactions/TransactionUtils.cpp @@ -3,9 +3,7 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "transactions/TransactionUtils.h" -#include "bucket/BucketListSnapshot.h" #include "crypto/SHA.h" -#include "crypto/SecretKey.h" #include "ledger/InternalLedgerEntry.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnEntry.h" @@ -15,7 +13,6 @@ #include "transactions/OfferExchange.h" #include "transactions/SponsorshipUtils.h" #include "util/ProtocolVersion.h" -#include "util/XDROperators.h" #include "util/types.h" #include "xdr/Stellar-contract.h" #include "xdr/Stellar-ledger-entries.h" diff --git a/src/util/test/XDRStreamTests.cpp b/src/util/test/XDRStreamTests.cpp index 16754b5a1b..19ed8f035e 100644 --- a/src/util/test/XDRStreamTests.cpp +++ b/src/util/test/XDRStreamTests.cpp @@ -2,7 +2,6 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/Bucket.h" #include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "test/test.h"