From 4a662f2dbde5789cc1bfa2a8fc4c60425fe0e881 Mon Sep 17 00:00:00 2001
From: Enzo Cioppettini
Date: Mon, 1 Feb 2021 12:25:48 -0300
Subject: [PATCH 1/9] add explorer multiverse wrapper to track tips

---
 jormungandr/src/explorer/mod.rs        |  1 +
 jormungandr/src/explorer/multiverse.rs | 96 ++++++++++++++++++++++++++
 2 files changed, 97 insertions(+)
 create mode 100644 jormungandr/src/explorer/multiverse.rs

diff --git a/jormungandr/src/explorer/mod.rs b/jormungandr/src/explorer/mod.rs
index f2172cc201..f7dff53f2e 100644
--- a/jormungandr/src/explorer/mod.rs
+++ b/jormungandr/src/explorer/mod.rs
@@ -1,6 +1,7 @@
 pub mod error;
 pub mod graphql;
 mod indexing;
+mod multiverse;
 mod persistent_sequence;
 
 use self::error::{Error, ErrorKind, Result};
diff --git a/jormungandr/src/explorer/multiverse.rs b/jormungandr/src/explorer/multiverse.rs
new file mode 100644
index 0000000000..accf0704d8
--- /dev/null
+++ b/jormungandr/src/explorer/multiverse.rs
@@ -0,0 +1,96 @@
+use crate::blockcfg::{ChainLength, HeaderHash, Multiverse as MultiverseData};
+use chain_impl_mockchain::multiverse;
+use std::collections::BTreeSet;
+use std::sync::Arc;
+use tokio::sync::RwLock;
+
+use super::State;
+
+pub struct Multiverse {
+    inner: Arc<RwLock<Inner>>,
+}
+
+pub(super) type Ref = multiverse::Ref<State>;
+
+struct Inner {
+    multiverse: MultiverseData<State>,
+    tips: BTreeSet<(ChainLength, HeaderHash)>,
+}
+
+impl Multiverse {
+    pub(super) fn new(
+        chain_length: ChainLength,
+        block0_id: HeaderHash,
+        initial_state: State,
+    ) -> (multiverse::Ref<State>, Self) {
+        let mut multiverse = MultiverseData::new();
+        let initial_ref = multiverse.insert(chain_length, block0_id, initial_state);
+
+        let mut tips = BTreeSet::new();
+        tips.insert((chain_length, block0_id));
+
+        (
+            initial_ref,
+            Multiverse {
+                inner: Arc::new(RwLock::new(Inner { multiverse, tips })),
+            },
+        )
+    }
+
+    pub(super) async fn insert(
+        &self,
+        chain_length: ChainLength,
+        parent: HeaderHash,
+        hash: HeaderHash,
+        value: State,
+    ) -> multiverse::Ref<State> {
+        let mut guard = self.inner.write().await;
+
+        guard
+            .tips
+            .remove(&(chain_length.nth_ancestor(1).unwrap(), parent));
+        guard.tips.insert((chain_length, hash));
+        guard.multiverse.insert(chain_length, hash, value)
+    }
+
+    pub(super) async fn get_ref(&self, hash: &HeaderHash) -> Option<multiverse::Ref<State>> {
+        let guard = self.inner.read().await;
+        guard.multiverse.get_ref(&hash)
+    }
+
+    /// run the garbage collection of the multiverse
+    ///
+    pub async fn gc(&self, depth: u32) {
+        let mut guard = self.inner.write().await;
+        guard.multiverse.gc(depth)
+    }
+
+    /// get the state at the tip of every branch currently tracked, from the longest
+    /// chain to the shortest; tips whose state was already collected are pruned
+    pub(super) async fn tips(&self) -> Vec<(HeaderHash, multiverse::Ref<State>)> {
+        let mut guard = self.inner.write().await;
+        let mut states = Vec::new();
+
+        // garbage collect old tips too
+        let mut new_tips = BTreeSet::new();
+
+        for (length, hash) in guard.tips.iter().rev() {
+            if let Some(state) = guard.multiverse.get_ref(hash) {
+                states.push((*hash, state));
+                new_tips.insert((*length, *hash));
+            }
+        }
+
+        guard.tips = new_tips;
+
+        states
+    }
+}
+
+impl Clone for Multiverse {
+    fn clone(&self) -> Self {
+        Multiverse {
+            inner: self.inner.clone(),
+        }
+    }
+}
From a7b7f0a20aa6e4d43dba7bc2eed1c9d4bc2ecb29 Mon Sep 17 00:00:00 2001
From: Enzo Cioppettini
Date: Mon, 1 Feb 2021 12:28:40 -0300
Subject: [PATCH 2/9] use new multiverse and move branch queries to State

---
 jormungandr/src/explorer/mod.rs | 288 +++++++++++++++-----------------
 1 file changed, 132 insertions(+), 156 deletions(-)
diff --git a/jormungandr/src/explorer/mod.rs b/jormungandr/src/explorer/mod.rs index f7dff53f2e..eee5ff0ab2 100644 --- a/jormungandr/src/explorer/mod.rs +++ b/jormungandr/src/explorer/mod.rs @@ -4,7 +4,7 @@ mod indexing; mod multiverse; mod persistent_sequence; -use self::error::{Error, ErrorKind, Result}; +use self::error::{ExplorerError as Error, Result}; pub use self::graphql::create_schema; use self::graphql::Context; use self::indexing::{ @@ -18,7 +18,7 @@ use crate::blockcfg::{ Block, ChainLength, ConfigParam, ConfigParams, ConsensusVersion, Epoch, Fragment, FragmentId, HeaderHash, }; -use crate::blockchain::{self, Blockchain, Multiverse, MAIN_BRANCH_TAG}; +use crate::blockchain::{self, Blockchain, MAIN_BRANCH_TAG}; use crate::explorer::indexing::ExplorerVote; use crate::intercom::ExplorerMsg; use crate::utils::async_msg::MessageQueue; @@ -27,8 +27,8 @@ use chain_addr::Discrimination; use chain_core::property::Block as _; use chain_impl_mockchain::certificate::{Certificate, PoolId, VotePlanId}; use chain_impl_mockchain::fee::LinearFee; -use chain_impl_mockchain::multiverse; use futures::prelude::*; +use multiverse::Multiverse; use std::convert::Infallible; use std::sync::{ atomic::{AtomicU32, Ordering}, @@ -49,7 +49,7 @@ pub struct ExplorerDB { /// Structure that keeps all the known states to allow easy branch management /// each new block is indexed by getting its previous `State` from the multiverse /// and inserted a new updated one. - multiverse: Multiverse, + multiverse: Multiverse, /// This keeps track of the longest chain seen until now. All the queries are /// performed using the state of this branch, the HeaderHash is used as key for the /// multiverse, and the ChainLength is used in the updating process. @@ -79,10 +79,9 @@ pub struct BlockchainConfig { /// A new state can be obtained to from a Block and it's previous state, getting two /// independent states but with memory sharing to minimize resource utilization #[derive(Clone)] -struct State { - parent_ref: Option>, - transactions: Transactions, - blocks: Blocks, +pub(self) struct State { + pub transactions: Transactions, + pub blocks: Blocks, addresses: Addresses, epochs: Epochs, chain_lengths: ChainLengths, @@ -196,22 +195,18 @@ impl ExplorerDB { let vote_plans = apply_block_to_vote_plans(VotePlans::new(), &blockchain_tip, &block); let initial_state = State { + transactions, blocks, epochs, chain_lengths, - transactions, addresses, stake_pool_data, stake_pool_blocks, - parent_ref: None, vote_plans, }; - let multiverse = Multiverse::::new(); let block0_id = block0.id(); - multiverse - .insert(block0.chain_length(), block0_id, initial_state) - .await; + let (_, multiverse) = Multiverse::new(block0.chain_length(), block0_id, initial_state); let block0_id = block0.id(); @@ -219,9 +214,9 @@ impl ExplorerDB { let (stream, hash) = match maybe_head { Some(head) => (blockchain.storage().stream_from_to(block0_id, head)?, head), None => { - return Err(Error::from(ErrorKind::BootstrapError( + return Err(Error::BootstrapError( "Couldn't read the HEAD tag from storage".to_owned(), - ))) + )) } }; @@ -252,10 +247,7 @@ impl ExplorerDB { break; } let block = blockchain.storage().get(hash)?.ok_or_else(|| { - Error::from(ErrorKind::BootstrapError(format!( - "couldn't get block {} from the storage", - hash - ))) + Error::BootstrapError(format!("couldn't get block {} from the storage", hash)) })?; hash = block.header.block_parent_hash(); blocks.push(block); @@ -273,7 +265,7 @@ impl ExplorerDB { /// chain length is greater than the current. 
/// This doesn't perform any validation on the given block and the previous state, it /// is assumed that the Block is valid - async fn apply_block(&self, block: Block) -> Result> { + async fn apply_block(&self, block: Block) -> Result { let previous_block = block.header.block_parent_hash(); let chain_length = block.header.chain_length(); let block_id = block.header.hash(); @@ -281,11 +273,10 @@ impl ExplorerDB { let discrimination = self.blockchain_config.discrimination; let previous_state = multiverse - .get_ref(previous_block) + .get_ref(&previous_block) .await - .ok_or_else(|| Error::from(ErrorKind::AncestorNotFound(format!("{}", block.id()))))?; + .ok_or_else(|| Error::AncestorNotFound(block.id()))?; let State { - parent_ref: _, transactions, blocks, addresses, @@ -310,9 +301,9 @@ impl ExplorerDB { let state_ref = multiverse .insert( chain_length, + block.parent_id(), block_id, State { - parent_ref: Some(previous_state), transactions: apply_block_to_transactions(transactions, &explorer_block)?, blocks: apply_block_to_blocks(blocks, &explorer_block)?, addresses: apply_block_to_addresses(addresses, &explorer_block)?, @@ -332,16 +323,14 @@ impl ExplorerDB { Ok(state_ref) } - pub async fn get_latest_block_hash(&self) -> HeaderHash { - self.longest_chain_tip.get_block_id().await - } + pub async fn get_block(&self, block_id: &HeaderHash) -> Option> { + for (_hash, state_ref) in self.multiverse.tips().await.iter() { + if let Some(b) = state_ref.state().blocks.lookup(&block_id) { + return Some(Arc::clone(b)); + } + } - pub async fn get_block(&self, block_id: &HeaderHash) -> Option { - let block_id = *block_id; - self.with_latest_state(move |state| { - state.blocks.lookup(&block_id).map(|b| b.as_ref().clone()) - }) - .await + None } pub(self) async fn set_tip(&self, hash: HeaderHash) -> bool { @@ -384,72 +373,14 @@ impl ExplorerDB { } pub async fn get_epoch(&self, epoch: Epoch) -> Option { - self.with_latest_state(move |state| state.epochs.lookup(&epoch).map(|e| e.as_ref().clone())) - .await - } - - pub async fn find_block_by_chain_length( - &self, - chain_length: ChainLength, - ) -> Option { - self.with_latest_state(move |state| { - state - .chain_lengths - .lookup(&chain_length) - .map(|b| *b.as_ref()) - }) - .await - } - - pub async fn find_block_hash_by_transaction( - &self, - transaction_id: &FragmentId, - ) -> Option { - self.with_latest_state(move |state| { - state - .transactions - .lookup(&transaction_id) - .map(|id| *id.as_ref()) - }) - .await - } - - pub async fn get_transactions_by_address( - &self, - address: &ExplorerAddress, - ) -> Option> { - let address = address.clone(); - self.with_latest_state(move |state| { - state - .addresses - .lookup(&address) - .map(|set| set.as_ref().clone()) - }) - .await - } - - // Get the hashes of all blocks in the range [from, to) - // the ChainLength is returned to for easy of use in the case where - // `to` is greater than the max - pub async fn get_block_hash_range( - &self, - from: ChainLength, - to: ChainLength, - ) -> Vec<(HeaderHash, ChainLength)> { - let from = u32::from(from); - let to = u32::from(to); - - self.with_latest_state(move |state| { - (from..to) - .filter_map(|i| { - state - .chain_lengths - .lookup(&i.into()) - .map(|b| (*b.as_ref(), i.into())) - }) - .collect() - }) - .await + let tips = self.multiverse.tips().await; + let (_, state_ref) = &tips[0]; + + state_ref + .state() + .epochs + .lookup(&epoch) + .map(|e| e.as_ref().clone()) } pub async fn is_block_confirmed(&self, block_id: &HeaderHash) -> bool { @@ -468,67 +399,54 
@@ impl ExplorerDB { pub async fn get_stake_pool_blocks( &self, pool: &PoolId, - ) -> Option> { + ) -> Option>> { let pool = pool.clone(); - self.with_latest_state(move |state| { - state - .stake_pool_blocks - .lookup(&pool) - .map(|i| i.as_ref().clone()) - }) - .await + + // this is a tricky query, one option would be to take a hash and return + // only the blocks from a particular branch, but it's not like a stake + // pool would produce inconsistent branches itself, although there may + // be a need to know the blocks that a stake pool got in the main branch + // too. + // for the time being, this query uses the maximum, because the branch + // that has more blocks from this particular stake pool has all the + // blocks produced by it + self.multiverse + .tips() + .await + .iter() + .filter_map(|(_hash, state_ref)| state_ref.state().stake_pool_blocks.lookup(&pool)) + .max_by_key(|seq| seq.len()) + .map(Arc::clone) } - pub async fn get_stake_pool_data(&self, pool: &PoolId) -> Option { + pub async fn get_stake_pool_data(&self, pool: &PoolId) -> Option> { let pool = pool.clone(); - self.with_latest_state(move |state| { - state - .stake_pool_data - .lookup(&pool) - .map(|i| i.as_ref().clone()) - }) - .await - } - pub async fn get_stake_pools(&self) -> Vec<(PoolId, Arc)> { - self.with_latest_state(move |state| { - state - .stake_pool_data - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect() - }) - .await - } + for (_hash, state_ref) in self.multiverse.tips().await.iter() { + if let Some(b) = state_ref.state().stake_pool_data.lookup(&pool) { + return Some(Arc::clone(b)); + } + } - pub async fn get_vote_plan_by_id(&self, vote_plan_id: &VotePlanId) -> Option { - self.with_latest_state(move |state| { - state - .vote_plans - .lookup(vote_plan_id) - .map(|vote_plan| vote_plan.as_ref().clone()) - }) - .await + None } - pub async fn get_vote_plans(&self) -> Vec<(VotePlanId, Arc)> { - self.with_latest_state(move |state| { - state - .vote_plans - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect() - }) - .await + pub async fn get_vote_plan_by_id( + &self, + vote_plan_id: &VotePlanId, + ) -> Option> { + for (_hash, state_ref) in self.multiverse.tips().await.iter() { + if let Some(b) = state_ref.state().vote_plans.lookup(&vote_plan_id) { + return Some(Arc::clone(b)); + } + } + + None } - /// run given function with the longest branch's state - async fn with_latest_state(&self, f: impl Fn(State) -> T) -> T { - let multiverse = self.multiverse.clone(); - let branch_id = self.get_latest_block_hash().await; - let maybe_state = multiverse.get(branch_id).await; - let state = maybe_state.expect("the longest chain to be indexed"); - f(state) + pub(self) async fn get_main_tip(&self) -> (HeaderHash, multiverse::Ref) { + let hash = self.longest_chain_tip.get_block_id().await; + (hash, self.multiverse.get_ref(&hash).await.unwrap()) } fn blockchain(&self) -> &Blockchain { @@ -546,7 +464,7 @@ fn apply_block_to_transactions( for id in ids { transactions = transactions .insert(id, Arc::new(block_id)) - .map_err(|_| ErrorKind::TransactionAlreadyExists(format!("{}", id)))?; + .map_err(|_| Error::TransactionAlreadyExists(id))?; } Ok(transactions) @@ -556,7 +474,7 @@ fn apply_block_to_blocks(blocks: Blocks, block: &ExplorerBlock) -> Result Result { @@ -620,9 +538,7 @@ fn apply_block_to_chain_lengths( .insert(new_block_chain_length, Arc::new(new_block_hash)) .map_err(|_| { // I think this shouldn't happen - Error::from(ErrorKind::ChainLengthBlockAlreadyExists(u32::from( - new_block_chain_length, - ))) + 
Error::ChainLengthBlockAlreadyExists(new_block_chain_length) }) } @@ -882,3 +798,63 @@ impl Tip { *self.0.read().await } } + +impl State { + pub fn get_vote_plans(&self) -> Vec<(VotePlanId, Arc)> { + self.vote_plans + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect() + } + + pub fn get_stake_pools(&self) -> Vec<(PoolId, Arc)> { + self.stake_pool_data + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect() + } + + pub fn transactions_by_address( + &self, + address: &ExplorerAddress, + ) -> Option> { + self.addresses + .lookup(address) + .map(|txs| PersistentSequence::clone(txs)) + } + + // Get the hashes of all blocks in the range [from, to) + // the ChainLength is returned to for easy of use in the case where + // `to` is greater than the max + pub fn get_block_hash_range( + &self, + from: ChainLength, + to: ChainLength, + ) -> Vec<(HeaderHash, ChainLength)> { + let from = u32::from(from); + let to = u32::from(to); + + (from..to) + .filter_map(|i| { + self.chain_lengths + .lookup(&i.into()) + .map(|b| (*b.as_ref(), i.into())) + }) + .collect() + } + + pub fn find_block_by_chain_length(&self, chain_length: ChainLength) -> Option { + self.chain_lengths + .lookup(&chain_length) + .map(|b| *b.as_ref()) + } + + pub fn find_block_hash_by_transaction( + &self, + transaction_id: &FragmentId, + ) -> Option { + self.transactions + .lookup(&transaction_id) + .map(|id| *id.as_ref()) + } +} From 8cde25e4ea03aee5d25633de157032609daef531 Mon Sep 17 00:00:00 2001 From: Enzo Cioppettini Date: Mon, 1 Feb 2021 16:07:37 -0300 Subject: [PATCH 3/9] fix graphql --- jormungandr/src/explorer/graphql/mod.rs | 73 ++++++++++++++++--------- 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/jormungandr/src/explorer/graphql/mod.rs b/jormungandr/src/explorer/graphql/mod.rs index 66fb847ad1..41aa1ff311 100644 --- a/jormungandr/src/explorer/graphql/mod.rs +++ b/jormungandr/src/explorer/graphql/mod.rs @@ -29,6 +29,7 @@ pub use juniper::http::GraphQLRequest; use juniper::{EmptyMutation, EmptySubscription, FieldResult, GraphQLUnion, RootNode}; use std::convert::{TryFrom, TryInto}; use std::str::FromStr; +use std::sync::Arc; #[derive(Clone)] pub struct Block { @@ -47,7 +48,7 @@ impl Block { Block { hash } } - async fn get_explorer_block(&self, db: &ExplorerDB) -> FieldResult { + async fn get_explorer_block(&self, db: &ExplorerDB) -> FieldResult> { db.get_block(&self.hash).await.ok_or_else(|| { ErrorKind::InternalError("Couldn't find block's contents in explorer".to_owned()).into() }) @@ -216,8 +217,8 @@ enum Leader { BftLeader(BftLeader), } -impl From<&ExplorerBlock> for Block { - fn from(block: &ExplorerBlock) -> Block { +impl From> for Block { + fn from(block: Arc) -> Block { Block::from_valid_hash(block.id()) } } @@ -262,8 +263,11 @@ impl Transaction { async fn from_id(id: FragmentId, context: &Context) -> FieldResult { let block_hash = context .db - .find_block_hash_by_transaction(&id) + .get_main_tip() .await + .1 + .state() + .find_block_hash_by_transaction(&id) .ok_or_else(|| ErrorKind::NotFound(format!("transaction not found: {}", &id,)))?; Ok(Transaction { @@ -289,13 +293,16 @@ impl Transaction { } } - async fn get_block(&self, context: &Context) -> FieldResult { + async fn get_block(&self, context: &Context) -> FieldResult> { let block_id = match self.block_hash { Some(block_id) => block_id, None => context .db - .find_block_hash_by_transaction(&self.id) + .get_main_tip() .await + .1 + .state() + .find_block_hash_by_transaction(&self.id) .ok_or_else(|| { 
ErrorKind::InternalError("Transaction's block was not found".to_owned()) })?, @@ -340,7 +347,7 @@ impl Transaction { /// The block this transaction is in pub async fn block(&self, context: &Context) -> FieldResult { let block = self.get_block(context).await?; - Ok(Block::from(&block)) + Ok(Block::from(block)) } pub async fn inputs(&self, context: &Context) -> FieldResult> { @@ -467,8 +474,11 @@ impl Address { ) -> FieldResult { let transactions = context .db - .get_transactions_by_address(&self.id) + .get_main_tip() .await + .1 + .state() + .transactions_by_address(&self.id) .unwrap_or_else(PersistentSequence::::new); let boundaries = if transactions.len() > 0 { @@ -564,8 +574,8 @@ impl Proposal { #[derive(Clone)] pub struct Pool { id: certificate::PoolId, - data: Option, - blocks: Option>, + data: Option>, + blocks: Option>>, } impl Pool { @@ -596,7 +606,7 @@ impl Pool { } } - fn new_with_data(id: certificate::PoolId, data: StakePoolData) -> Self { + fn new_with_data(id: certificate::PoolId, data: Arc) -> Self { Pool { id, blocks: None, @@ -669,7 +679,7 @@ impl Pool { .db .get_stake_pool_data(&self.id) .await - .map(|data| PoolRegistration::from(data.registration)) + .map(|data| PoolRegistration::from(data.registration.clone())) .ok_or_else(|| ErrorKind::NotFound("Stake pool not found".to_owned()).into()), } } @@ -682,7 +692,11 @@ impl Pool { .get_stake_pool_data(&self.id) .await .ok_or_else(|| ErrorKind::NotFound("Stake pool not found".to_owned()).into()) - .map(|data| data.retirement.map(PoolRetirement::from)), + .map(|data| { + data.retirement + .as_ref() + .map(|r| PoolRetirement::from(r.clone())) + }), } } } @@ -699,7 +713,7 @@ impl Status { } pub async fn latest_block(&self, context: &Context) -> FieldResult { - latest_block(context).await.map(|b| Block::from(&b)) + latest_block(context).await.map(|b| Block::from(b)) } pub async fn epoch_stability_depth(&self, context: &Context) -> String { @@ -873,11 +887,14 @@ impl Epoch { PaginationInterval::Empty => unreachable!("No blocks found (not even genesis)"), PaginationInterval::Inclusive(range) => context .db + .get_main_tip() + .await + .1 + .state() .get_block_hash_range( (range.lower_bound + epoch_lower_bound).into(), (range.upper_bound + epoch_lower_bound + 1).into(), ) - .await .iter() .map(|(hash, index)| (*hash, u32::from(*index) - epoch_lower_bound)) .collect(), @@ -1053,7 +1070,7 @@ impl VotePlanStatus { .into()) } - pub fn vote_plan_from_data(vote_plan: super::indexing::ExplorerVotePlan) -> Self { + pub fn vote_plan_from_data(vote_plan: Arc) -> Self { let super::indexing::ExplorerVotePlan { id, vote_start, @@ -1061,7 +1078,7 @@ impl VotePlanStatus { committee_end, payload_type, proposals, - } = vote_plan; + } = (*vote_plan).clone(); VotePlanStatus { id: VotePlanId::from(id), @@ -1249,8 +1266,11 @@ impl Query { ) -> FieldResult> { Ok(context .db - .find_block_by_chain_length(length.try_into()?) + .get_main_tip() .await + .1 + .state() + .find_block_by_chain_length(length.try_into()?) 
.map(Block::from_valid_hash)) } @@ -1288,8 +1308,11 @@ impl Query { let b = range.upper_bound.checked_add(1).unwrap().into(); context .db - .get_block_hash_range(a, b) + .get_main_tip() .await + .1 + .state() + .get_block_hash_range(a, b) .iter_mut() .map(|(hash, chain_length)| (*hash, u32::from(*chain_length))) .collect() @@ -1325,7 +1348,7 @@ impl Query { after: Option, context: &Context, ) -> FieldResult { - let mut stake_pools = context.db.get_stake_pools().await; + let mut stake_pools = context.db.get_main_tip().await.1.state().get_stake_pools(); // Although it's probably not a big performance concern // There are a few alternatives to not have to sort this @@ -1368,7 +1391,7 @@ impl Query { ( Pool::new_with_data( certificate::PoolId::clone(pool_id), - StakePoolData::clone(stake_pool_data), + Arc::clone(stake_pool_data), ), i, ) @@ -1394,7 +1417,7 @@ impl Query { after: Option, context: &Context, ) -> FieldResult { - let mut vote_plans = context.db.get_vote_plans().await; + let mut vote_plans = context.db.get_main_tip().await.1.state().get_vote_plans(); vote_plans.sort_unstable_by_key(|(id, _data)| id.clone()); @@ -1430,7 +1453,7 @@ impl Query { .map(|i: u32| { let (_pool_id, vote_plan_data) = &vote_plans[usize::try_from(i).unwrap()]; ( - VotePlanStatus::vote_plan_from_data(vote_plan_data.as_ref().clone()), + VotePlanStatus::vote_plan_from_data(Arc::clone(vote_plan_data)), i, ) }) @@ -1453,9 +1476,9 @@ pub fn create_schema() -> Schema { Schema::new(Query {}, EmptyMutation::new(), EmptySubscription::new()) } -async fn latest_block(context: &Context) -> FieldResult { +async fn latest_block(context: &Context) -> FieldResult> { async { - let hash = context.db.get_latest_block_hash().await; + let hash = context.db.get_main_tip().await.0; context.db.get_block(&hash).await } .await From 9941a41fde9cc87d0e5c2746afdab85c74ec2af2 Mon Sep 17 00:00:00 2001 From: Enzo Cioppettini Date: Mon, 1 Feb 2021 16:08:31 -0300 Subject: [PATCH 4/9] migrate explorer error to thiserror --- jormungandr/src/explorer/error.rs | 54 +++++++++++++------------------ jormungandr/src/start_up/error.rs | 2 +- 2 files changed, 23 insertions(+), 33 deletions(-) diff --git a/jormungandr/src/explorer/error.rs b/jormungandr/src/explorer/error.rs index 5be027030b..646fcb8990 100644 --- a/jormungandr/src/explorer/error.rs +++ b/jormungandr/src/explorer/error.rs @@ -1,35 +1,25 @@ +use crate::blockcfg::HeaderHash; use crate::{blockchain::StorageError, intercom}; +use thiserror::Error; -error_chain! 
{ - foreign_links { - StorageError(StorageError); - // FIXME: fold into StorageError with more generic work in intercom streaming - StreamingError(intercom::Error); - } - errors { - BlockNotFound(hash: String) { - description("block not found"), - display("block '{}' cannot be found in the explorer", hash) - } - AncestorNotFound(hash: String) { - description("ancestor of block is not in explorer"), - display("ancestor of block '{}' cannot be found in the explorer", hash) - } - TransactionAlreadyExists(id: String) { - description("tried to index already existing transaction") - display("transaction '{}' is already indexed", id) - } - BlockAlreadyExists(id: String) { - description("tried to index already indexed block") - display("block '{}' is already indexed", id) - } - ChainLengthBlockAlreadyExists(chain_length: u32) { - description("tried to index already indexed chainlength in the given branch") - display("chain length: {} is already indexed", chain_length) - } - BootstrapError(msg: String) { - description("failed to initialize explorer's database from storage") - display("the explorer's database couldn't be initialized: {}", msg) - } - } +#[derive(Debug, Error)] +pub enum ExplorerError { + #[error("block {0} not found in explorer")] + BlockNotFound(HeaderHash), + #[error("ancestor of block '{0}' not found in explorer")] + AncestorNotFound(HeaderHash), + #[error("transaction '{0}' is already indexed")] + TransactionAlreadyExists(crate::blockcfg::FragmentId), + #[error("tried to index block '{0}' twice")] + BlockAlreadyExists(HeaderHash), + #[error("block with {0} chain length already exists in explorer branch")] + ChainLengthBlockAlreadyExists(crate::blockcfg::ChainLength), + #[error("the explorer's database couldn't be initialized: {0}")] + BootstrapError(String), + #[error("storage error")] + StorageError(#[from] StorageError), + #[error("streaming error")] + StreamingError(#[from] intercom::Error), } + +pub type Result = std::result::Result; diff --git a/jormungandr/src/start_up/error.rs b/jormungandr/src/start_up/error.rs index f738b3c333..8818ba7f99 100644 --- a/jormungandr/src/start_up/error.rs +++ b/jormungandr/src/start_up/error.rs @@ -54,7 +54,7 @@ pub enum Error { #[error("Block 0 is set to start in the future")] Block0InFuture, #[error("Error while loading the explorer from storage")] - ExplorerBootstrapError(#[from] explorer::error::Error), + ExplorerBootstrapError(#[from] explorer::error::ExplorerError), #[error("A service has terminated with an error")] ServiceTerminatedWithError(#[from] crate::utils::task::ServiceError), #[error("Unable to get system limits: {0}")] From 151800600a3c8f8e28326b0909dc66b07b165b8c Mon Sep 17 00:00:00 2001 From: Enzo Cioppettini Date: Thu, 4 Feb 2021 10:30:05 -0300 Subject: [PATCH 5/9] allow warning temporarily --- jormungandr/src/explorer/multiverse.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/jormungandr/src/explorer/multiverse.rs b/jormungandr/src/explorer/multiverse.rs index accf0704d8..428f9e6ace 100644 --- a/jormungandr/src/explorer/multiverse.rs +++ b/jormungandr/src/explorer/multiverse.rs @@ -60,7 +60,8 @@ impl Multiverse { /// run the garbage collection of the multiverse /// - pub async fn gc(&self, depth: u32) { + #[allow(dead_code)] + pub(super) async fn gc(&self, depth: u32) { let mut guard = self.inner.write().await; guard.multiverse.gc(depth) } From 9232964bf00930fd146130661565aa19d693136e Mon Sep 17 00:00:00 2001 From: Enzo Cioppettini Date: Thu, 4 Feb 2021 10:53:04 -0300 Subject: [PATCH 6/9] fix latest 
block redundant closure
---
 jormungandr/src/explorer/graphql/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jormungandr/src/explorer/graphql/mod.rs b/jormungandr/src/explorer/graphql/mod.rs
index 41aa1ff311..b607e5c524 100644
--- a/jormungandr/src/explorer/graphql/mod.rs
+++ b/jormungandr/src/explorer/graphql/mod.rs
@@ -713,7 +713,7 @@ impl Status {
     }
 
     pub async fn latest_block(&self, context: &Context) -> FieldResult<Block> {
-        latest_block(context).await.map(|b| Block::from(b))
+        latest_block(context).await.map(Block::from)
     }
 
     pub async fn epoch_stability_depth(&self, context: &Context) -> String {
From 0f97466f17bacd3bde440dc40477749dea89426e Mon Sep 17 00:00:00 2001
From: Enzo Cioppettini
Date: Thu, 4 Feb 2021 12:11:06 -0300
Subject: [PATCH 7/9] pass hash by ref to new multiverse

---
 jormungandr/src/explorer/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jormungandr/src/explorer/mod.rs b/jormungandr/src/explorer/mod.rs
index eee5ff0ab2..de7c0868d5 100644
--- a/jormungandr/src/explorer/mod.rs
+++ b/jormungandr/src/explorer/mod.rs
@@ -337,7 +337,7 @@ impl ExplorerDB {
         // the tip changes which means now a block is confirmed (at least after
         // the initial epoch_stability_depth blocks).
 
-        let block = if let Some(state_ref) = self.multiverse.get_ref(hash).await {
+        let block = if let Some(state_ref) = self.multiverse.get_ref(&hash).await {
             let state = state_ref.state();
             Arc::clone(state.blocks.lookup(&hash).unwrap())
         } else {
From f985ae4e49104cebabcf9ef475803e5977ec46fa Mon Sep 17 00:00:00 2001
From: Enzo Cioppettini
Date: Thu, 4 Feb 2021 12:17:12 -0300
Subject: [PATCH 8/9] allow dead_code is not needed anymore

---
 jormungandr/src/explorer/multiverse.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/jormungandr/src/explorer/multiverse.rs b/jormungandr/src/explorer/multiverse.rs
index 428f9e6ace..54c6089d97 100644
--- a/jormungandr/src/explorer/multiverse.rs
+++ b/jormungandr/src/explorer/multiverse.rs
@@ -60,7 +60,6 @@ impl Multiverse {
     /// run the garbage collection of the multiverse
     ///
-    #[allow(dead_code)]
     pub(super) async fn gc(&self, depth: u32) {
         let mut guard = self.inner.write().await;
         guard.multiverse.gc(depth)
     }
From fead9eb3a49c114eb951a9c54fa8b7d04372ede7 Mon Sep 17 00:00:00 2001
From: Enzo Cioppettini
Date: Mon, 8 Feb 2021 10:47:31 -0300
Subject: [PATCH 9/9] use current branch for is_block_confirmed

although unlikely, it may report false positives otherwise

also, it's probably a bit more efficient
---
 jormungandr/src/explorer/mod.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/jormungandr/src/explorer/mod.rs b/jormungandr/src/explorer/mod.rs
index de7c0868d5..62ee1c67b8 100644
--- a/jormungandr/src/explorer/mod.rs
+++ b/jormungandr/src/explorer/mod.rs
@@ -384,7 +384,13 @@ impl ExplorerDB {
     }
 
     pub async fn is_block_confirmed(&self, block_id: &HeaderHash) -> bool {
-        if let Some(block) = self.get_block(block_id).await {
+        let current_branch = self
+            .multiverse
+            .get_ref(&self.longest_chain_tip.get_block_id().await)
+            .await
+            .unwrap();
+
+        if let Some(block) = current_branch.state().blocks.lookup(&block_id) {
             let confirmed_block_chain_length: ChainLength = self
                 .stable_store
                 .confirmed_block_chain_length
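
The recurring pattern in this series is to keep a set of (chain length, hash) tips and answer a query by walking the tip states from the longest branch to the shortest until one of them knows the answer. A standalone sketch of that lookup strategy follows; it uses hypothetical stand-in types (string hashes, a BranchState holding a single block map) rather than the real ExplorerDB and State types, so it only illustrates the lookup order, not the actual API of the patches.

use std::collections::{BTreeSet, HashMap};
use std::sync::Arc;

// Stand-ins for the real types; the actual explorer uses HeaderHash and a
// State struct with many more per-branch indexes.
type ChainLength = u32;
type HeaderHash = &'static str;

struct BranchState {
    blocks: HashMap<HeaderHash, Arc<String>>,
}

struct Multiverse {
    states: HashMap<HeaderHash, BranchState>,
    tips: BTreeSet<(ChainLength, HeaderHash)>,
}

impl Multiverse {
    // Walk the tips from the longest chain down (the BTreeSet keeps them
    // ordered by chain length) and return the first branch state that knows
    // about `hash`.
    fn find_block(&self, hash: HeaderHash) -> Option<Arc<String>> {
        self.tips
            .iter()
            .rev()
            .filter_map(|(_, tip)| self.states.get(tip))
            .find_map(|state| state.blocks.get(hash).map(Arc::clone))
    }
}

fn main() {
    let mut blocks = HashMap::new();
    blocks.insert("b1", Arc::new("block one".to_string()));

    let multiverse = Multiverse {
        states: HashMap::from([("tip-a", BranchState { blocks })]),
        tips: BTreeSet::from([(1, "tip-a")]),
    };

    assert!(multiverse.find_block("b1").is_some());
    assert!(multiverse.find_block("missing").is_none());
}

In the patches this same walk appears in ExplorerDB::get_block, get_stake_pool_data and get_vote_plan_by_id, while get_stake_pool_blocks instead picks the tip whose branch has the most blocks for the pool, and the single-branch queries (get_epoch, get_main_tip) read only the longest chain's state.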