diff --git a/Cargo.toml b/Cargo.toml index fa99397f7..73e672a56 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ members = [ "cardano-legacy-address", "sparse-array", "typed-bytes", + "chain-explorer", ] [profile.bench] diff --git a/chain-explorer/Cargo.toml b/chain-explorer/Cargo.toml new file mode 100644 index 000000000..4a290c718 --- /dev/null +++ b/chain-explorer/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "chain-explorer" +version = "0.1.0" +authors = ["dev@iohk.io"] +edition = "2018" +license = "MIT OR Apache-2.0" +repository = "https://github.com/input-output-hk/chain-libs" + +[dependencies] +thiserror = "1.0.20" +tracing = "0.1" +rand = "0.8.3" +rand_chacha = "0.3.1" +sanakirja = "1.2.5" +zerocopy = "0.5.0" +byteorder = "1.4.3" +hex = "0.4.3" + +chain-core = { path = "../chain-core" } +chain-addr = { path = "../chain-addr" } +chain-crypto = { path = "../chain-crypto" } +chain-impl-mockchain = { path = "../chain-impl-mockchain" } diff --git a/chain-explorer/src/chain_storable/certificate.rs b/chain-explorer/src/chain_storable/certificate.rs new file mode 100644 index 000000000..31faaaea8 --- /dev/null +++ b/chain-explorer/src/chain_storable/certificate.rs @@ -0,0 +1,139 @@ +use crate::chain_storable::{Choice, VotePlanId}; +use sanakirja::{direct_repr, Storable, UnsizedStorable}; +use std::mem; +use zerocopy::AsBytes; + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[repr(C)] +pub struct TransactionCertificate { + tag: CertificateTag, + cert: SerializedCertificate, +} + +impl TransactionCertificate { + pub fn from_vote_plan_id(id: VotePlanId) -> Self { + TransactionCertificate { + tag: CertificateTag::VotePlan, + cert: SerializedCertificate { vote_plan: id }, + } + } + + pub fn from_public_vote_cast(vote: PublicVoteCast) -> Self { + TransactionCertificate { + tag: CertificateTag::PublicVoteCast, + cert: SerializedCertificate { + public_vote_cast: vote, + }, + } + } + + pub fn from_private_vote_cast(vote: PrivateVoteCast) -> Self { + TransactionCertificate { + tag: CertificateTag::PrivateVoteCast, + cert: SerializedCertificate { + private_vote_cast: vote, + }, + } + } + + pub fn as_vote_plan(&self) -> Option<&VotePlanId> { + unsafe { + match self { + Self { + tag: CertificateTag::VotePlan, + cert: SerializedCertificate { vote_plan }, + } => Some(vote_plan), + _ => None, + } + } + } + + pub fn as_public_vote_cast(&self) -> Option<&PublicVoteCast> { + unsafe { + match self { + Self { + tag: CertificateTag::PublicVoteCast, + cert: SerializedCertificate { public_vote_cast }, + } => Some(public_vote_cast), + _ => None, + } + } + } + + pub fn as_private_vote_cast(&self) -> Option<&PrivateVoteCast> { + unsafe { + match self { + Self { + tag: CertificateTag::PrivateVoteCast, + cert: SerializedCertificate { private_vote_cast }, + } => Some(private_vote_cast), + _ => None, + } + } + } +} + +direct_repr!(TransactionCertificate); + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, AsBytes)] +#[repr(u8)] +pub(crate) enum CertificateTag { + VotePlan = 0, + PublicVoteCast = 1, + PrivateVoteCast = 2, +} + +#[repr(C)] +#[derive(Clone, Copy)] +union SerializedCertificate { + vote_plan: VotePlanId, + public_vote_cast: PublicVoteCast, + private_vote_cast: PrivateVoteCast, +} + +impl SerializedCertificate { + fn as_bytes(&self) -> &[u8; mem::size_of::()] { + unsafe { std::mem::transmute(self) } + } +} + +impl PartialEq for SerializedCertificate { + fn eq(&self, other: &Self) -> bool { + self.as_bytes().eq(other.as_bytes()) + } +} + +impl Eq for SerializedCertificate {} + 
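The tag field plus the untagged union keeps the stored certificate a fixed-size, byte-comparable value, while the `as_*` accessors give checked access back out. A minimal round-trip sketch (a hypothetical test, not part of the patch):

#[cfg(test)]
mod usage_sketch {
    use super::{PublicVoteCast, TransactionCertificate};
    use crate::chain_storable::VotePlanId;

    #[test]
    fn public_vote_cast_round_trip() {
        let cert = TransactionCertificate::from_public_vote_cast(PublicVoteCast {
            vote_plan_id: VotePlanId::new([0u8; 32]),
            proposal_index: 3,
            choice: 1,
        });

        // only the accessor matching the stored tag yields a value
        assert!(cert.as_vote_plan().is_none());
        assert!(cert.as_private_vote_cast().is_none());

        let vote = cert.as_public_vote_cast().expect("tag is PublicVoteCast");
        assert_eq!(vote.proposal_index, 3);
        assert_eq!(vote.choice, 1);
    }
}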
+impl PartialOrd for SerializedCertificate { + fn partial_cmp(&self, other: &Self) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +impl Ord for SerializedCertificate { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.as_bytes().cmp(other.as_bytes()) + } +} + +impl std::fmt::Debug for SerializedCertificate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&hex::encode(self.as_bytes())) + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, AsBytes)] +#[repr(C)] +pub struct PublicVoteCast { + pub vote_plan_id: VotePlanId, + pub proposal_index: u8, + pub choice: Choice, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, AsBytes)] +#[repr(C)] +pub struct PrivateVoteCast { + pub vote_plan_id: VotePlanId, + pub proposal_index: u8, +} diff --git a/chain-explorer/src/chain_storable/mod.rs b/chain-explorer/src/chain_storable/mod.rs new file mode 100644 index 000000000..23ad91522 --- /dev/null +++ b/chain-explorer/src/chain_storable/mod.rs @@ -0,0 +1,377 @@ +//! Types that can be stored in sanakirja, that map directly to chain types +//! +mod certificate; +use super::endian::{B32, L64}; +pub use certificate::*; +use chain_core::property::Serialize as _; +use chain_impl_mockchain::{header::HeaderId, transaction, value::Value}; +use sanakirja::{direct_repr, Storable, UnsizedStorable}; +use std::convert::TryInto; +use zerocopy::{AsBytes, FromBytes}; + +#[derive(PartialEq, Eq, PartialOrd, Ord)] +#[repr(C)] +pub struct AccountId(pub [u8; chain_impl_mockchain::transaction::INPUT_PTR_SIZE]); +direct_repr!(AccountId); + +impl std::fmt::Debug for AccountId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&hex::encode(self.0)) + } +} + +pub type ProposalIndex = u8; + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct ProposalId { + pub vote_plan: VotePlanId, + pub index: ProposalIndex, +} +direct_repr!(ProposalId); + +pub type BlockId = StorableHash; + +pub type FragmentId = StorableHash; +pub type VotePlanId = StorableHash; + +#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, AsBytes, FromBytes)] +#[cfg_attr(test, derive(Hash))] +#[repr(C)] +pub struct StorableHash(pub [u8; 32]); + +impl StorableHash { + pub const fn new(bytes: [u8; 32]) -> Self { + Self(bytes) + } +} + +impl std::fmt::Display for StorableHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex::encode(self.0)) + } +} + +direct_repr!(StorableHash); + +impl StorableHash { + pub const MIN: Self = StorableHash([0x00; 32]); + pub const MAX: Self = StorableHash([0xff; 32]); +} + +impl From for StorableHash { + fn from(id: chain_impl_mockchain::key::Hash) -> Self { + let bytes: [u8; 32] = id.into(); + + Self(bytes) + } +} + +impl From for chain_impl_mockchain::key::Hash { + fn from(val: StorableHash) -> Self { + HeaderId::from(val.0) + } +} + +impl From for StorableHash { + fn from(id: chain_impl_mockchain::certificate::VotePlanId) -> Self { + let bytes: [u8; 32] = id.into(); + + Self(bytes) + } +} + +impl From<[u8; 32]> for StorableHash { + fn from(bytes: [u8; 32]) -> Self { + Self(bytes) + } +} + +impl From for [u8; 32] { + fn from(wrapper: StorableHash) -> Self { + wrapper.0 + } +} + +impl std::fmt::Debug for StorableHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&hex::encode(self.0)) + } +} + +pub type SlotId = B32; +pub type EpochNumber = B32; + +#[derive(Debug, Clone, Copy, AsBytes, FromBytes, 
PartialEq, Eq, PartialOrd, Ord)] +#[repr(C)] +pub struct ChainLength(pub(super) B32); + +impl ChainLength { + pub const MAX: ChainLength = ChainLength(B32(zerocopy::U32::::MAX_VALUE)); + pub const MIN: ChainLength = ChainLength(B32(zerocopy::U32::::ZERO)); + + pub fn new(n: u32) -> Self { + Self(B32::new(n)) + } + + pub fn get(&self) -> u32 { + self.0.get() + } +} + +direct_repr!(ChainLength); + +impl From for ChainLength { + fn from(c: chain_impl_mockchain::block::ChainLength) -> Self { + Self(B32::new(u32::from(c))) + } +} + +impl From for chain_impl_mockchain::block::ChainLength { + fn from(c: ChainLength) -> Self { + c.get().into() + } +} + +impl From<&ChainLength> for u32 { + fn from(n: &ChainLength) -> Self { + n.0.get() + } +} + +impl From for u32 { + fn from(n: ChainLength) -> Self { + n.0.get() + } +} + +impl From for ChainLength { + fn from(n: u32) -> Self { + ChainLength::new(n) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, AsBytes, FromBytes)] +#[repr(C)] +pub struct BlockDate { + pub epoch: EpochNumber, + pub slot_id: SlotId, +} + +impl From for BlockDate { + fn from(d: chain_impl_mockchain::block::BlockDate) -> Self { + Self { + epoch: B32::new(d.epoch), + slot_id: B32::new(d.slot_id), + } + } +} + +pub type PoolId = StorableHash; + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, AsBytes)] +#[repr(u8)] +pub enum PayloadType { + Public = 1, + Private = 2, +} + +impl From for PayloadType { + fn from(p: chain_impl_mockchain::vote::PayloadType) -> Self { + match p { + chain_impl_mockchain::vote::PayloadType::Public => PayloadType::Public, + chain_impl_mockchain::vote::PayloadType::Private => PayloadType::Private, + } + } +} + +impl From for chain_impl_mockchain::vote::PayloadType { + fn from(p: PayloadType) -> Self { + match p { + PayloadType::Public => chain_impl_mockchain::vote::PayloadType::Public, + PayloadType::Private => chain_impl_mockchain::vote::PayloadType::Private, + } + } +} + +pub type ExternalProposalId = StorableHash; +pub type Options = u8; + +#[derive(Clone, Debug, FromBytes, AsBytes, PartialEq, Eq, Ord, PartialOrd)] +#[repr(C)] +pub struct ExplorerVoteProposal { + pub proposal_id: ExternalProposalId, + pub options: Options, +} + +impl From<&chain_impl_mockchain::certificate::Proposal> for ExplorerVoteProposal { + fn from(p: &chain_impl_mockchain::certificate::Proposal) -> Self { + ExplorerVoteProposal { + proposal_id: StorableHash::from(<[u8; 32]>::from(p.external_id().clone())), + options: p.options().choice_range().end, + } + } +} + +direct_repr!(ExplorerVoteProposal); + +pub type Choice = u8; +pub type Stake = L64; + +const MAX_ADDRESS_SIZE: usize = chain_addr::ADDR_SIZE_GROUP; + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, AsBytes, FromBytes)] +#[repr(C)] +pub struct Address(pub [u8; MAX_ADDRESS_SIZE]); + +impl Address { + pub const MIN: Address = Address([0u8; MAX_ADDRESS_SIZE]); + pub const MAX: Address = Address([255u8; MAX_ADDRESS_SIZE]); +} + +direct_repr!(Address); + +impl From for Address { + fn from(addr: chain_addr::Address) -> Self { + let mut bytes = [0u8; MAX_ADDRESS_SIZE]; + addr.serialize(&mut bytes[..]).unwrap(); + Self(bytes) + } +} + +impl From<&chain_addr::Address> for Address { + fn from(addr: &chain_addr::Address) -> Self { + let mut bytes = [0u8; MAX_ADDRESS_SIZE]; + addr.serialize(&mut bytes[..]).unwrap(); + Self(bytes) + } +} + +impl TryInto for Address { + type Error = chain_addr::Error; + + fn try_into(self) -> Result { + chain_addr::Address::from_bytes(&self.0[0..33]) + .or_else(|_| 
chain_addr::Address::from_bytes(&self.0[0..MAX_ADDRESS_SIZE])) + } +} + +impl std::fmt::Debug for Address { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&hex::encode(self.0)) + } +} + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, AsBytes, FromBytes)] +#[repr(C)] +pub struct TransactionInput { + pub input_ptr: [u8; 32], + pub value: L64, + pub utxo_or_account: u8, +} + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +#[repr(u8)] +pub enum InputType { + Utxo = 0x00, + // Notes: + // the original (on chain) type has only two discriminant values. + // the witness type is used to decide how to interpret the bytes in `input_ptr`, because the + // explorer doesn't store the witnesses, we need to save that metadata somewhere, that's the + // reason for the extra variant. It could be stored externally, but it would take more space + // for all inputs (unless is stored in a separate btree, but that uses a lot of space too). + AccountSingle = 0xfe, + AccountMulti = 0xff, +} + +// TODO: TryFrom? +impl From for InputType { + fn from(value: u8) -> Self { + match value { + 0x00 => InputType::Utxo, + 0xfe => InputType::AccountSingle, + 0xff => InputType::AccountMulti, + _ => unreachable!("invalid enum value"), + } + } +} + +impl TransactionInput { + pub fn input_type(&self) -> InputType { + self.utxo_or_account.into() + } + + pub(crate) fn from_original_with_witness( + input: &transaction::Input, + witness: &transaction::Witness, + ) -> Self { + TransactionInput { + input_ptr: input.bytes()[9..].try_into().unwrap(), + utxo_or_account: match (input.get_type(), witness) { + (transaction::InputType::Utxo, _) => InputType::Utxo as u8, + (transaction::InputType::Account, transaction::Witness::Account(_)) => { + InputType::AccountSingle as u8 + } + (transaction::InputType::Account, transaction::Witness::Multisig(_)) => { + InputType::AccountMulti as u8 + } + (transaction::InputType::Account, transaction::Witness::Utxo(_)) => unreachable!(), + (transaction::InputType::Account, transaction::Witness::OldUtxo(_, _, _)) => { + unreachable!() + } + }, + value: L64::new(input.value().0), + } + } +} + +impl From<&TransactionInput> for transaction::Input { + fn from(input: &TransactionInput) -> Self { + let utxo_or_account = match input.utxo_or_account.into() { + InputType::Utxo => 0x00, + InputType::AccountSingle => 0xff, + InputType::AccountMulti => 0xff, + }; + + transaction::Input::new(utxo_or_account, Value(input.value.get()), input.input_ptr) + } +} + +direct_repr!(TransactionInput); + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, AsBytes, FromBytes)] +#[repr(C)] +pub struct TransactionOutput { + pub address: Address, + pub value: L64, +} + +impl TransactionOutput { + pub fn from_original(output: &transaction::Output) -> Self { + TransactionOutput { + address: Address::from(output.address.clone()), + value: L64::new(output.value.0), + } + } +} + +impl From<&TransactionOutput> for transaction::Output { + fn from(output: &TransactionOutput) -> Self { + transaction::Output { + address: output.address.clone().try_into().unwrap(), + value: Value(output.value.get()), + } + } +} + +direct_repr!(TransactionOutput); + +#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, AsBytes)] +#[repr(C)] +pub struct VotePlanMeta { + pub vote_start: BlockDate, + pub vote_end: BlockDate, + pub committee_end: BlockDate, + pub payload_type: PayloadType, +} + +direct_repr!(VotePlanMeta); diff --git a/chain-explorer/src/endian.rs b/chain-explorer/src/endian.rs new file mode 100644 
index 000000000..bd01f187a --- /dev/null +++ b/chain-explorer/src/endian.rs @@ -0,0 +1,118 @@ +use byteorder::{BigEndian, LittleEndian}; +use sanakirja::{direct_repr, Storable, UnsizedStorable}; +use zerocopy::{AsBytes, FromBytes, U32, U64}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, AsBytes, FromBytes)] +#[repr(transparent)] +pub struct B32(pub U32); + +#[derive(Debug, Clone, PartialEq, Eq, AsBytes, FromBytes)] +#[repr(transparent)] +pub struct L32(pub U32); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, AsBytes, FromBytes)] +#[repr(transparent)] +pub struct B64(pub U64); + +#[derive(Debug, Clone, PartialEq, Eq, AsBytes, FromBytes)] +#[repr(transparent)] +pub struct L64(U64); + +impl L64 { + pub fn new(n: u64) -> Self { + Self(U64::::new(n)) + } + + pub fn get(&self) -> u64 { + self.0.get() + } +} + +impl B64 { + pub fn new(n: u64) -> Self { + Self(U64::::new(n)) + } + + pub fn get(&self) -> u64 { + self.0.get() + } +} + +impl B32 { + pub fn new(n: u32) -> Self { + Self(U32::::new(n)) + } + + pub fn get(&self) -> u32 { + self.0.get() + } +} + +impl L32 { + pub fn new(n: u32) -> Self { + Self(U32::::new(n)) + } + + pub fn get(&self) -> u32 { + self.0.get() + } +} + +impl AsRef> for L64 { + fn as_ref(&self) -> &U64 { + &self.0 + } +} + +impl Ord for B64 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + +impl PartialOrd for B64 { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.as_bytes().partial_cmp(other.0.as_bytes()) + } +} + +impl Ord for B32 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + +impl PartialOrd for B32 { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.as_bytes().partial_cmp(other.0.as_bytes()) + } +} + +impl Ord for L64 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.get().cmp(&other.0.get()) + } +} + +impl PartialOrd for L64 { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.get().partial_cmp(&other.0.get()) + } +} + +impl Ord for L32 { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.get().cmp(&other.0.get()) + } +} + +impl PartialOrd for L32 { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.get().partial_cmp(&other.0.get()) + } +} + +direct_repr!(B32); +direct_repr!(L32); +direct_repr!(B64); +direct_repr!(L64); diff --git a/chain-explorer/src/error.rs b/chain-explorer/src/error.rs new file mode 100644 index 000000000..079af433b --- /dev/null +++ b/chain-explorer/src/error.rs @@ -0,0 +1,18 @@ +use chain_impl_mockchain::block::HeaderId as HeaderHash; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum DbError { + #[error("ancestor of block '{0}' ('{1}') not found in explorer")] + AncestorNotFound(HeaderHash, HeaderHash), + #[error("tried to index block '{0}' twice")] + BlockAlreadyExists(HeaderHash), + #[error(transparent)] + SanakirjaError(#[from] ::sanakirja::Error), + #[error("the database was not initialized or was corrupted")] + UnitializedDatabase, + #[error("block is missing from the explorer")] + MissingBlock, + #[error("invalid block0")] + InvalidBlock0, +} diff --git a/chain-explorer/src/helpers.rs b/chain-explorer/src/helpers.rs new file mode 100644 index 000000000..6d7aca2f4 --- /dev/null +++ b/chain-explorer/src/helpers.rs @@ -0,0 +1,57 @@ +use super::{error::DbError, Db}; +use sanakirja::{ + btree::{self, BTreeMutPage, BTreePage}, + RootDb, Storable, +}; +use std::sync::Arc; + +pub(super) fn open_or_create_db< + K: Storable, + V: Storable, + P: BTreePage + 
BTreeMutPage, +>( + txn: &mut sanakirja::MutTxn, ()>, + root: super::schema::Root, +) -> Result, DbError> { + Ok(if let Some(db) = txn.root_db(root as usize) { + db + } else { + btree::create_db_(txn)? + }) +} + +pub(super) fn find_last_record_by( + txn: &T, + tree: &Db, + key: &K, + max_possible_value: &V, +) -> Option +where + K: Storable + PartialEq, + V: Storable + Clone + PartialEq, + T: ::sanakirja::LoadPage, +{ + let mut cursor = btree::Cursor::new(txn, tree).unwrap(); + + cursor.set(txn, key, Some(max_possible_value)).unwrap(); + + if let Some((k, _)) = cursor.prev(txn).unwrap() { + if k == key { + cursor.next(txn).unwrap(); + } + } + + assert!( + cursor + .current(txn) + .unwrap() + .map(|(_, v)| v != max_possible_value) + .unwrap_or(true), + "ran out of sequence numbers" + ); + + cursor + .current(txn) + .unwrap() + .and_then(|(k, v)| if k == key { Some(v.clone()) } else { None }) +} diff --git a/chain-explorer/src/lib.rs b/chain-explorer/src/lib.rs new file mode 100644 index 000000000..bebabedaf --- /dev/null +++ b/chain-explorer/src/lib.rs @@ -0,0 +1,181 @@ +pub mod chain_storable; +mod endian; +pub mod error; +mod helpers; +pub mod pagination; +mod pair; +pub mod schema; +mod seq; +mod state_ref; + +use self::error::DbError; +use chain_core::property::Block as _; +use chain_impl_mockchain::block::Block; +use chain_impl_mockchain::block::HeaderId as HeaderHash; +use sanakirja::btree; +use std::path::Path; +use std::sync::Arc; + +pub use seq::SeqNum; + +pub(crate) type P = btree::page::Page; +type Db = btree::Db; + +type SanakirjaMutTx = ::sanakirja::MutTxn, ()>; +type SanakirjaTx = ::sanakirja::Txn>; + +#[derive(Clone)] +pub struct ExplorerDb { + pub env: Arc<::sanakirja::Env>, +} + +pub enum OpenDb { + Initialized { + db: ExplorerDb, + last_stable_block: HeaderHash, + branches: Vec, + }, + NeedsBootstrap(NeedsBootstrap), +} + +pub struct NeedsBootstrap(ExplorerDb); + +pub struct Batch { + txn: schema::MutTxn<()>, +} + +impl Batch { + /// Try to add a new block to the indexes, this can fail if the parent of the block is not + /// processed. This doesn't perform any validation on the given block and the previous state, + /// it is assumed that the Block is valid + pub fn apply_block(&mut self, block: Block) -> Result<(), DbError> { + self.txn.add_block( + &block.parent_id().into(), + &block.id().into(), + block.chain_length().into(), + block.header().block_date().into(), + block.fragments(), + )?; + + Ok(()) + } + + pub fn commit(self) -> Result<(), DbError> { + self.txn.commit() + } +} + +impl NeedsBootstrap { + pub fn add_block0(self, block0: Block) -> Result { + if u32::from(block0.chain_length()) != 0 { + return Err(DbError::InvalidBlock0); + } + + let db = self.0; + let mut mut_tx = db.mut_txn_begin()?; + + let parent_id = block0.parent_id(); + let block_id = block0.id(); + + mut_tx.add_block0( + &parent_id.into(), + &block_id.into(), + block0.contents().iter(), + )?; + + mut_tx.commit()?; + + Ok(db) + } +} + +impl ExplorerDb { + pub fn open>(storage: Option

) -> Result { + let db = match storage { + Some(path) => ExplorerDb::new(path), + None => ExplorerDb::new_anon(), + }?; + + let txn = db.txn_begin(); + + match txn { + Ok(txn) => { + let chain_length = txn.get_stable_chain_length(); + let block = txn + .get_blocks_by_chain_length(&chain_length)? + .next() + .transpose()? + .ok_or(DbError::MissingBlock)?; + + let branches = txn + .get_branches()? + .map(|b| b.map(|id| HeaderHash::from(*id))) + .collect::>()?; + + Ok(OpenDb::Initialized { + last_stable_block: HeaderHash::from(*block), + branches, + db, + }) + } + Err(DbError::UnitializedDatabase) => Ok(OpenDb::NeedsBootstrap(NeedsBootstrap(db))), + Err(e) => Err(e), + } + } + + /// Try to add a new block to the indexes, this can fail if the parent of the block is not + /// processed. This doesn't perform any validation on the given block and the previous state, + /// it is assumed that the Block is valid + pub fn apply_block(&self, block: Block) -> Result<(), DbError> { + let db = self.clone(); + let mut_tx = db.mut_txn_begin()?; + + let mut batch = Batch { txn: mut_tx }; + + batch.apply_block(block)?; + + batch.commit()?; + + Ok(()) + } + + pub fn start_batch(&self) -> Result { + let mut_tx = self.mut_txn_begin()?; + + Ok(Batch { txn: mut_tx }) + } + + pub fn set_tip(&self, hash: HeaderHash) -> Result { + let mut mut_tx = self.mut_txn_begin()?; + + let status = mut_tx.set_tip(&hash.into())?; + + if status { + mut_tx.commit()?; + } + + Ok(status) + } + + fn new>(name: P) -> Result { + Self::new_with_size(name, 1 << 20) + } + + fn new_with_size>(name: P, size: u64) -> Result { + let env = ::sanakirja::Env::new(name, size, 2); + match env { + Ok(env) => Ok(Self { env: Arc::new(env) }), + Err(e) => Err(DbError::SanakirjaError(e)), + } + } + + fn new_anon() -> Result { + Self::new_anon_with_size(1 << 20) + } + + fn new_anon_with_size(size: u64) -> Result { + Ok(Self { + env: Arc::new(::sanakirja::Env::new_anon(size, 2)?), + }) + } +} diff --git a/chain-explorer/src/pagination.rs b/chain-explorer/src/pagination.rs new file mode 100644 index 000000000..159a76c49 --- /dev/null +++ b/chain-explorer/src/pagination.rs @@ -0,0 +1,356 @@ +//! Pagination abstraction compatible with graphql connections. +//! +//! The implementation has a high-level of abstraction so it's somewhat independent of the db +//! layout. This allows to paginate over trees where the cursor is either part of the key, or part +//! of the value, or can be extracted somehow. +//! 
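As a concrete analogy for the abstraction described above (illustrative only, written against std::collections::BTreeMap rather than the sanakirja trees this module actually works with): entries belonging to one fragment are keyed by a (fragment id, index) pair, the index doubles as the pagination cursor, and fetching a page after a cursor reduces to a bounded range scan.

use std::collections::BTreeMap;
use std::ops::Bound;

// key: (fragment id, output index); the index part is the cursor
type Key = ([u8; 32], u8);

fn page_after(
    map: &BTreeMap<Key, u64>,
    fragment: [u8; 32],
    after: Option<u8>,
    limit: usize,
) -> Vec<(u8, u64)> {
    // start just past the cursor (or at the fragment's smallest possible entry)
    let start = match after {
        Some(cursor) => Bound::Excluded((fragment, cursor)),
        None => Bound::Included((fragment, u8::MIN)),
    };
    // never run past the last possible entry for this fragment
    let end = Bound::Included((fragment, u8::MAX));

    map.range((start, end))
        .take(limit)
        .map(|(&(_, idx), &value)| (idx, value))
        .collect()
}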
+use super::{ + chain_storable::{ + BlockId, ChainLength, ExplorerVoteProposal, FragmentId, TransactionInput, + TransactionOutput, VotePlanId, + }, + error::DbError, + pair::Pair, + seq::SeqNum, + Db, SanakirjaTx, P, +}; +use sanakirja::{btree, Storable}; + +pub trait PaginationCursor: PartialOrd + Ord + PartialEq + Eq + Clone + Copy { + const MIN: Self; + const MAX: Self; +} + +impl PaginationCursor for u8 { + const MIN: Self = u8::MIN; + const MAX: Self = u8::MAX; +} + +impl PaginationCursor for SeqNum { + const MIN: SeqNum = SeqNum::MIN; + const MAX: SeqNum = SeqNum::MAX; +} + +impl PaginationCursor for ChainLength { + const MIN: ChainLength = ChainLength::MIN; + const MAX: ChainLength = ChainLength::MAX; +} + +pub trait MapEntry<'a, K, V, C> { + type Output; + + fn map_entry(&self, _: &'a K, _: &'a V) -> Option<(C, Self::Output)>; + fn map_cursor(&self, _: C) -> (K, Option); +} + +pub struct SanakirjaCursorIter<'a, K, V, C, F> +where + K: Storable + 'a, + V: Storable + 'a, + F: MapEntry<'a, K, V, C>, +{ + txn: &'a SanakirjaTx, + map_entry: F, + cursor: btree::Cursor>, + cursor_bounds: Option<(C, C)>, +} + +pub type CursorAndEntry<'a, K, V, C, F> = (C, >::Output); + +// TODO: implement ExactSizeIterator? +impl<'a, K, V, C, F> Iterator for SanakirjaCursorIter<'a, K, V, C, F> +where + K: Storable + PartialEq + 'a, + V: Storable + 'a, + F: MapEntry<'a, K, V, C>, +{ + type Item = Result, DbError>; + + fn next(&mut self) -> Option { + self.cursor + .next(self.txn) + .map(|item| item.and_then(|(k, v)| self.map_entry.map_entry(k, v))) + .map_err(DbError::from) + .transpose() + } +} + +impl<'a, K, V, C, F> DoubleEndedIterator for SanakirjaCursorIter<'a, K, V, C, F> +where + K: Storable + PartialEq + 'a, + V: Storable + 'a, + F: MapEntry<'a, K, V, C>, +{ + fn next_back(&mut self) -> Option { + self.cursor + .prev(self.txn) + .map(|item| item.and_then(|(k, v)| self.map_entry.map_entry(k, v))) + .map_err(DbError::from) + .transpose() + } +} + +impl<'a, K, V, C, F> SanakirjaCursorIter<'a, K, V, C, F> +where + K: Storable + PartialEq + 'a, + V: Storable + 'a, + C: PaginationCursor, + F: MapEntry<'a, K, V, C>, +{ + /// initialize a new iterator that can be used for cursor based pagination. + /// `entry_from_cursor` should return the smallest possible entry for the given cursor element, + /// this is because the internal sanakirja cursor is set at the first position greater than or + /// equal than what's returned by this function. + pub fn new(txn: &'a SanakirjaTx, map_entry: F, db: &Db) -> Result { + let mut cursor = btree::Cursor::new(txn, db)?; + let min_entry = map_entry.map_cursor(C::MIN); + let max_entry = map_entry.map_cursor(C::MAX); + + cursor.set(txn, &min_entry.0, min_entry.1.as_ref())?; + + // TODO: computing the last cursor could be done lazily on demand I guess, but I hope it's + // not expensive enough to matter, after all is just a single extra lookup. It could also + // be cached globally, which may be even better, because if we follow relay's graphql + // specification for connections then I think we always need it. + let cursor_bounds = cursor + .current(txn)? + .and_then(|(k, v)| map_entry.map_entry(k, v)) + .map(|first| -> Result<(C, C), DbError> { + let (max_key, max_value) = max_entry; + let mut cursor = btree::Cursor::new(txn, db)?; + + cursor.set(txn, &max_key, max_value.as_ref())?; + + if let Some(last) = cursor.prev(txn)? 
{ + if let Some(last) = map_entry.map_entry(last.0, last.1) { + Ok((first.0, last.0)) + } else { + // we can unwrap here because we know there is at least one entry before, + // because the entry after this was not of this key (otherwise we would be + // in the if branch) and we are in the `map` function, so we know there is + // at least one entry for the given key. + let last = cursor.current(txn)?.unwrap(); + Ok((first.0, map_entry.map_entry(last.0, last.1).unwrap().0)) + } + } else { + Ok((first.0, first.0)) + } + }) + .transpose()?; + + Ok(Self { + txn, + map_entry, + cursor, + cursor_bounds, + }) + } + + /// this returns None only when the iterator is empty + pub fn first_cursor(&self) -> Option<&C> { + self.cursor_bounds.as_ref().map(|(first, _last)| first) + } + + /// this returns None only when the iterator is empty + pub fn last_cursor(&self) -> Option<&C> { + self.cursor_bounds.as_ref().map(|(_first, last)| last) + } + + /// put the initial iterator position to `cursor`. This has no effect if the cursor is outside + /// the bounds, and will return Ok(false) if that's the case. + pub fn seek(&mut self, cursor: C) -> Result { + match self.cursor_bounds { + Some((a, b)) if cursor >= a && cursor <= b => { + let (key, value) = self.map_entry.map_cursor(cursor); + self.cursor.set(self.txn, &key, value.as_ref())?; + + Ok(true) + } + _ => Ok(false), + } + } + + pub fn seek_end(&mut self) -> Result<(), DbError> { + if let Some((_, last)) = self.cursor_bounds { + assert!(self.seek(last)?); + } + + Ok(()) + } +} + +pub type TxsByAddress<'a> = + SanakirjaCursorIter<'a, SeqNum, Pair, SeqNum, AddressId>; +pub type BlocksInBranch<'a> = SanakirjaCursorIter<'a, ChainLength, BlockId, ChainLength, ()>; +pub type FragmentInputIter<'a> = SanakirjaCursorIter< + 'a, + Pair, + TransactionInput, + u8, + FragmentContentId, +>; +pub type FragmentOutputIter<'a> = SanakirjaCursorIter< + 'a, + Pair, + TransactionOutput, + u8, + FragmentContentId, +>; +pub type BlockFragmentsIter<'a> = + SanakirjaCursorIter<'a, BlockId, Pair, u8, BlockContentsId>; +pub type VotePlanProposalsIter<'a> = + SanakirjaCursorIter<'a, Pair, ExplorerVoteProposal, u8, VotePlanProposalsId>; + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct AddressId(SeqNum); + +impl From<&SeqNum> for AddressId { + fn from(i: &SeqNum) -> Self { + Self(*i) + } +} + +impl<'a> MapEntry<'a, SeqNum, Pair, SeqNum> for AddressId { + type Output = &'a FragmentId; + + fn map_entry( + &self, + k: &'a SeqNum, + v: &'a Pair, + ) -> Option<(SeqNum, Self::Output)> { + if k == &self.0 { + Some((v.a, &v.b)) + } else { + None + } + } + + fn map_cursor(&self, cursor: SeqNum) -> (SeqNum, Option>) { + ( + self.0, + Some(Pair { + a: cursor, + b: FragmentId::MIN, + }), + ) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct FragmentContentId(FragmentId, std::marker::PhantomData); + +impl From<&FragmentId> for FragmentContentId { + fn from(i: &FragmentId) -> Self { + Self(*i, std::marker::PhantomData) + } +} + +impl AsRef for FragmentContentId { + fn as_ref(&self) -> &FragmentId { + &self.0 + } +} + +impl<'a, V: 'a> MapEntry<'a, Pair, V, u8> for FragmentContentId { + type Output = &'a V; + + fn map_entry(&self, k: &'a Pair, v: &'a V) -> Option<(u8, Self::Output)> { + if &k.a == self.as_ref() { + Some((k.b, v)) + } else { + None + } + } + + fn map_cursor(&self, cursor: u8) -> (Pair, Option) { + ( + Pair { + a: *self.as_ref(), + b: cursor, + }, + None, + ) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct 
BlockContentsId(BlockId); + +impl From<&BlockId> for BlockContentsId { + fn from(i: &BlockId) -> Self { + Self(*i) + } +} + +impl<'a> MapEntry<'a, BlockId, Pair, u8> for BlockContentsId { + type Output = &'a FragmentId; + + fn map_entry(&self, k: &'a BlockId, v: &'a Pair) -> Option<(u8, Self::Output)> { + if k == &self.0 { + Some((v.a, &v.b)) + } else { + None + } + } + + fn map_cursor(&self, cursor: u8) -> (BlockId, Option>) { + ( + self.0, + Some(Pair { + a: cursor, + b: FragmentId::MIN, + }), + ) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct VotePlanProposalsId(VotePlanId); + +impl From<&VotePlanId> for VotePlanProposalsId { + fn from(i: &VotePlanId) -> Self { + Self(*i) + } +} + +impl<'a> MapEntry<'a, Pair, ExplorerVoteProposal, u8> for VotePlanProposalsId { + type Output = &'a ExplorerVoteProposal; + + fn map_entry( + &self, + k: &'a Pair, + v: &'a ExplorerVoteProposal, + ) -> Option<(u8, Self::Output)> { + if k.a == self.0 { + Some((k.b, v)) + } else { + None + } + } + + fn map_cursor(&self, cursor: u8) -> (Pair, Option) { + ( + Pair { + a: self.0, + b: cursor, + }, + None, + ) + } +} + +impl<'a, K, V> MapEntry<'a, K, V, K> for () +where + V: 'a, + K: 'a + Clone, +{ + type Output = &'a V; + + fn map_entry(&self, k: &'a K, v: &'a V) -> Option<(K, Self::Output)> { + Some((k.clone(), v)) + } + + fn map_cursor(&self, k: K) -> (K, Option) { + (k, None) + } +} diff --git a/chain-explorer/src/pair.rs b/chain-explorer/src/pair.rs new file mode 100644 index 000000000..76b2d705b --- /dev/null +++ b/chain-explorer/src/pair.rs @@ -0,0 +1,26 @@ +use sanakirja::Storable; +use std::fmt; +use zerocopy::{AsBytes, FromBytes}; + +#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq)] +#[repr(C)] +pub struct Pair { + pub a: A, + pub b: B, +} + +impl Storable + for Pair +where + A: PartialOrd + Ord, + B: PartialOrd + Ord, +{ + type PageReferences = core::iter::Empty; + fn page_references(&self) -> Self::PageReferences { + core::iter::empty() + } + + fn compare(&self, _t: &T, b: &Self) -> core::cmp::Ordering { + (&self.a, &self.b).cmp(&(&b.a, &b.b)) + } +} diff --git a/chain-explorer/src/schema.rs b/chain-explorer/src/schema.rs new file mode 100644 index 000000000..5a44ea42a --- /dev/null +++ b/chain-explorer/src/schema.rs @@ -0,0 +1,1364 @@ +use super::{ + chain_storable::{ + Address, BlockDate, BlockId, ChainLength, ExplorerVoteProposal, FragmentId, + PrivateVoteCast, ProposalId, PublicVoteCast, StorableHash, TransactionCertificate, + TransactionInput, TransactionOutput, VotePlanId, VotePlanMeta, + }, + endian::{B32, L32, L64}, + error::DbError, + helpers::open_or_create_db, + pagination::{ + BlockFragmentsIter, BlocksInBranch, FragmentContentId, FragmentInputIter, + FragmentOutputIter, SanakirjaCursorIter, TxsByAddress, VotePlanProposalsIter, + }, + pair::Pair, + state_ref::SerializedStateRef, + Db, ExplorerDb, SanakirjaTx, P, +}; +use crate::state_ref::StateRef; +use chain_core::property::Fragment as _; +use chain_impl_mockchain::{ + config::ConfigParam, + fragment::Fragment, + transaction::{self, InputEnum, Witness}, + value::Value, +}; +use sanakirja::{btree, direct_repr, Commit, RootDb, Storable, UnsizedStorable}; +use std::convert::TryFrom; +use std::{convert::TryInto, sync::Arc}; +use tracing::{debug, span, Level}; +use zerocopy::{AsBytes, FromBytes}; + +pub type Txn = GenericTxn<::sanakirja::Txn>>; +pub type MutTxn = GenericTxn<::sanakirja::MutTxn, T>>; + +/// indices (or offsets) of data in the root page of the db +#[derive(Debug, PartialEq, Clone, Copy)] 
+#[repr(usize)] +pub(crate) enum Root { + /// not a btree root, but just data packed as a u64 + Stability, + /// not a btree root, but just data packed as a u64 + BooleanStaticSettings, + Blocks, + BlockTransactions, + VotePlans, + VotePlanProposals, + TransactionInputs, + TransactionOutputs, + TransactionCertificates, + TransactionBlocks, + ChainLenghts, + Tips, + States, +} + +// 'global' indices +type TransactionsInputs = Db, TransactionInput>; +type TransactionsOutputs = Db, TransactionOutput>; +type TransactionsCertificate = Db; +type TransactionsBlocks = Db; +type Blocks = Db; +type BlockTransactions = Db>; +type ChainLengths = Db; +type ChainLengthsCursor = btree::Cursor>; +type VotePlans = Db; +type VotePlanProposals = Db, ExplorerVoteProposal>; +/// Tips actually holds both the tip and the rest of the branches, depending on BranchTag. The tip +/// is then, duplicated, as is stored both with the Tip tag and with the Branch tag. +/// BranchTag::Tip is 0 so the first entry always gives you the tip even if there is another branch +/// with greater chain length. +/// This is not the cleanest way of doing it, but it simplifies some things. +type Tips = Db; + +// multiverse +type States = Db; + +#[derive(Debug, Clone, AsBytes, FromBytes, PartialEq, Eq)] +#[repr(C)] +struct BranchHead { + chain_length: B32, + id: BlockId, +} + +#[derive(Debug, Clone, Copy, AsBytes, PartialEq, Eq, PartialOrd, Ord)] +#[repr(u8)] +enum BranchTag { + Tip = 0, + Branch = 1, +} + +direct_repr!(BranchTag); +direct_repr!(BranchHead); + +/// Minimal needed metadata to be able to know which blocks are considered stable, packed in 64 +/// bits to be able to store them easily on the root page for quick access +/// They could be put in a separate btree if it becomes too cumbersome to manage. +#[derive(Debug, AsBytes, FromBytes)] +#[repr(C)] +struct Stability { + epoch_stability_depth: L32, + last_stable_block: ChainLength, +} + +/// Blockchain settings that can't change, packed as a u64 so they can be easily stored in the root +/// page for quick access. +/// They could be put in a separate btree if it becomes too cumbersome to manage, or even an extra +/// file (as they don't need to be mutated, just read once at the beginning). 
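/// Like `Stability`, this struct is two 32-bit fields, i.e. exactly the 8 bytes of the u64 root-page slot it is transmuted to and from.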
+#[derive(Debug, AsBytes, FromBytes)] +#[repr(C)] +pub struct StaticSettings { + discrimination: L32, + consensus: L32, +} + +impl ExplorerDb { + pub fn txn_begin(&self) -> Result { + let txn = ::sanakirja::Env::txn_begin(self.env.clone())?; + fn begin(txn: ::sanakirja::Txn>) -> Option { + Some(Txn { + states: txn.root_db(Root::States as usize)?, + tips: txn.root_db(Root::Tips as usize)?, + chain_lengths: txn.root_db(Root::ChainLenghts as usize)?, + transaction_inputs: txn.root_db(Root::TransactionInputs as usize)?, + transaction_outputs: txn.root_db(Root::TransactionOutputs as usize)?, + transaction_certificates: txn.root_db(Root::TransactionCertificates as usize)?, + transaction_blocks: txn.root_db(Root::TransactionBlocks as usize)?, + blocks: txn.root_db(Root::Blocks as usize)?, + block_transactions: txn.root_db(Root::BlockTransactions as usize)?, + vote_plans: txn.root_db(Root::VotePlans as usize)?, + vote_plan_proposals: txn.root_db(Root::VotePlanProposals as usize)?, + txn, + }) + } + if let Some(txn) = begin(txn) { + Ok(txn) + } else { + Err(DbError::UnitializedDatabase) + } + } + + pub(crate) fn mut_txn_begin(&self) -> Result, DbError> { + let mut txn = ::sanakirja::Env::mut_txn_begin(self.env.clone()).unwrap(); + Ok(MutTxn { + states: open_or_create_db(&mut txn, Root::States)?, + tips: open_or_create_db(&mut txn, Root::Tips)?, + chain_lengths: open_or_create_db(&mut txn, Root::ChainLenghts)?, + transaction_inputs: open_or_create_db(&mut txn, Root::TransactionInputs)?, + transaction_outputs: open_or_create_db(&mut txn, Root::TransactionOutputs)?, + transaction_certificates: open_or_create_db(&mut txn, Root::TransactionCertificates)?, + transaction_blocks: open_or_create_db(&mut txn, Root::TransactionBlocks)?, + blocks: open_or_create_db(&mut txn, Root::Blocks)?, + block_transactions: open_or_create_db(&mut txn, Root::BlockTransactions)?, + vote_plans: open_or_create_db(&mut txn, Root::VotePlans)?, + vote_plan_proposals: open_or_create_db(&mut txn, Root::VotePlanProposals)?, + txn, + }) + } +} + +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)] +#[repr(C)] +pub struct StakePoolMeta { + pub registration: FragmentId, + pub retirement: Option, +} + +direct_repr!(StakePoolMeta); + +#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] +#[repr(C)] +pub struct BlockMeta { + pub chain_length: ChainLength, + pub date: BlockDate, + pub parent_hash: BlockId, +} + +direct_repr!(BlockMeta); + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +#[repr(C)] +pub struct BlockProducer { + bytes: [u8; 32], +} + +direct_repr!(BlockProducer); + +pub struct GenericTxn + ::sanakirja::RootPage> +{ + txn: T, + states: States, + tips: Tips, + chain_lengths: ChainLengths, + transaction_inputs: TransactionsInputs, + transaction_outputs: TransactionsOutputs, + transaction_certificates: TransactionsCertificate, + transaction_blocks: TransactionsBlocks, + blocks: Blocks, + block_transactions: BlockTransactions, + vote_plans: VotePlans, + vote_plan_proposals: VotePlanProposals, +} + +impl + ::sanakirja::RootPage> GenericTxn {} + +impl MutTxn<()> { + pub fn add_block0<'a>( + &mut self, + parent_id: &BlockId, + block0_id: &BlockId, + fragments: impl Iterator, + ) -> Result<(), DbError> { + let span = span!(Level::DEBUG, "add_block0"); + let _enter = span.enter(); + debug!(?parent_id, ?block0_id); + + let state_ref = StateRef::new_empty(&mut self.txn)?; + + unsafe { + self.txn.set_root( + Root::Stability as usize, + std::mem::transmute(Stability::default()), + ) + }; + + let tip = BranchHead { + 
chain_length: B32::new(0), + id: *block0_id, + }; + + assert!(btree::put( + &mut self.txn, + &mut self.tips, + &BranchTag::Branch, + &tip + )?); + + // the tip is also the only branch + let head = BranchHead { + chain_length: B32::new(0), + id: *block0_id, + }; + + assert!(btree::put( + &mut self.txn, + &mut self.tips, + &BranchTag::Tip, + &head + )?); + + self.update_state( + fragments, + state_ref, + ChainLength::new(0), + block0_id, + BlockDate { + epoch: B32::new(0), + slot_id: B32::new(0), + }, + parent_id, + )?; + + Ok(()) + } + + pub fn add_block<'a>( + &mut self, + parent_id: &BlockId, + block_id: &BlockId, + chain_length: ChainLength, + block_date: BlockDate, + fragments: impl IntoIterator, + ) -> Result<(), DbError> { + let _span = span!( + Level::DEBUG, + "add_block", + ?parent_id, + ?block_id, + ?chain_length + ) + .entered(); + + // Early return if the block is already in the store. + // Ideally, operations down the line should be idempotent, but it is probably not the case + // now, and even then it's probably simpler to just check it here at once. + if btree::get(&self.txn, &self.blocks, block_id, None)? + .filter(|(id, _)| id == &block_id) + .is_some() + { + return Ok(()); + } + + let states = btree::get(&self.txn, &self.states, parent_id, None)? + .filter(|(branch_id, _states)| *branch_id == parent_id) + .map(|(_branch_id, states)| states) + .cloned() + .ok_or_else(|| DbError::AncestorNotFound((*block_id).into(), (*parent_id).into()))?; + + let state_ref = states.fork(&mut self.txn)?; + + self.update_state( + fragments.into_iter(), + state_ref, + chain_length, + block_id, + block_date, + parent_id, + )?; + + self.update_tips(parent_id, chain_length, block_id)?; + + Ok(()) + } + + /// this sets `BlockId` as the tip, overriding the current one, BUT add_block will still + /// change the tip anyway if the chain_length increases, this is mostly to simplify garbage + /// collection during bootstrap. + pub fn set_tip(&mut self, id: &BlockId) -> Result { + let _span = span!(Level::DEBUG, "set_tip", ?id).entered(); + + let current_tip = btree::get(&self.txn, &self.tips, &BranchTag::Tip, None)? + .expect("no tip in database") + .1 + .clone(); + + if ¤t_tip.id == id { + return Ok(true); + } + + let block_meta = btree::get(&self.txn, &self.blocks, id, None)?.filter(|(k, _)| *k == id); + + if let Some(block_meta) = block_meta.map(|(_, meta)| meta.clone()) { + assert!(btree::del( + &mut self.txn, + &mut self.tips, + &BranchTag::Tip, + None + )?); + + let new_tip = BranchHead { + id: *id, + chain_length: block_meta.chain_length.0, + }; + + assert!(btree::put( + &mut self.txn, + &mut self.tips, + &BranchTag::Tip, + &new_tip + )?); + + debug!("changed explorer tip"); + + if new_tip.chain_length.get() > current_tip.chain_length.get() { + self.gc_stable_states(block_meta.chain_length.get())?; + } + + Ok(true) + } else { + Ok(false) + } + } + + /// this drops old states at: tip_chain_length - (epoch_stability_depth + 1) so we keep the + /// amount of forks bounded, because we don't need to fork those states anymore. 
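/// For example, with an epoch_stability_depth of 10 and the tip at chain length 100, the state kept for chain length 89 (100 - (10 + 1)) is dropped here, while chain lengths 90..=100 stay forkable (illustrative numbers).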
+ fn gc_stable_states(&mut self, tip_chain_length: u32) -> Result<(), DbError> { + let _span = span!(Level::DEBUG, "gc_stable_states", ?tip_chain_length).entered(); + + let mut stability: Stability = + unsafe { std::mem::transmute(self.txn.root(Root::Stability as usize).unwrap()) }; + + if let Some(new_stable_chain_length) = + tip_chain_length.checked_sub(stability.get_epoch_stability_depth()) + { + stability.last_stable_block = ChainLength::new(new_stable_chain_length); + } + + if let Some(collect_at) = stability.last_stable_block.get().checked_sub(1) { + let mut states_to_gc = vec![]; + + let iter = btree::iter( + &self.txn, + &self.chain_lengths, + Some((&ChainLength::new(collect_at), None)), + )?; + + for block in iter { + let (chain_length, block_id) = block?; + + if chain_length.get() > collect_at { + break; + } + + let state = btree::get(&self.txn, &self.states, block_id, None)? + .filter(|(branch_id, _states)| *branch_id == block_id) + .map(|(_branch_id, states)| StateRef::from(states.clone())); + + if let Some(state) = state { + states_to_gc.push((*block_id, state)); + } + } + + let state = self.state_at_tip()?; + + let confirmed_block_id = *btree::get( + &self.txn, + &state.blocks, + &ChainLength::new(collect_at), + None, + )? + .unwrap() + .1; + + for (block_id, state) in states_to_gc.drain(..) { + debug!("dropping state {:?}", &block_id); + // this is safe because after dropping we are inmediately deleting the state from + // `self.states`. + unsafe { + state.drop(&mut self.txn)?; + assert!(btree::del( + &mut self.txn, + &mut self.states, + &block_id, + None + )?); + } + + // this means the block was part of a fork that didn't survive + if block_id != confirmed_block_id { + debug!("removing block from global indices {}", block_id); + btree::del(&mut self.txn, &mut self.blocks, &block_id, None)?; + + btree::del( + &mut self.txn, + &mut self.chain_lengths, + &ChainLength::new(collect_at), + Some(&block_id), + )?; + + let mut fragment_cursor = + btree::Cursor::new(&self.txn, &self.block_transactions)?; + + fragment_cursor.set(&self.txn, &block_id, None)?; + + // using *loop* instead of the iterator to avoid holding a borrow to the txn + // (so we can delete things inside the loop) + loop { + let fragment_id = match fragment_cursor.next(&self.txn)? { + Some((block, fragment_entry)) if block == &block_id => fragment_entry.b, + _ => break, + }; + + btree::del( + &mut self.txn, + &mut self.transaction_blocks, + &fragment_id, + Some(&block_id), + )?; + + self.gc_fragment(fragment_id)?; + } + } + } + } + unsafe { + self.txn + .set_root(Root::Stability as usize, std::mem::transmute(stability)) + }; + Ok(()) + } + + fn state_at_tip(&mut self) -> Result { + let tip = btree::get(&self.txn, &self.tips, &BranchTag::Tip, None)?.unwrap(); + let state = btree::get(&self.txn, &self.states, &tip.1.id, None)?; + let state = match state { + Some((s, state)) if &tip.1.id == s => StateRef::from(state.clone()), + _ => panic!("tip is not in the states"), + }; + Ok(state) + } + + /// Important: this won't remove the fragment if there are still blocks referencing it in the + /// index. 
+ fn gc_fragment(&mut self, fragment_id: StorableHash) -> Result<(), DbError> { + // make sure that no alive block is referencing this fragment + let should_delete = + btree::get(&self.txn, &self.transaction_blocks, &fragment_id, None)?.is_none(); + + if should_delete { + let mut input_counter = 0u8; + + while btree::del( + &mut self.txn, + &mut self.transaction_inputs, + &Pair { + a: fragment_id, + b: input_counter, + }, + None, + )? { + input_counter += 1 + } + + let mut output_counter = 0u8; + + while btree::del( + &mut self.txn, + &mut self.transaction_outputs, + &Pair { + a: fragment_id, + b: output_counter, + }, + None, + )? { + output_counter += 1 + } + + btree::del( + &mut self.txn, + &mut self.transaction_certificates, + &fragment_id, + None, + )?; + + // TODO: technically we need to collect VotePlans too... the problem with that is that + // with the current indexes there is no way of knowing if they are included in other + // fragment, so it is not safe to remove them. + } + Ok(()) + } + + fn update_state<'a>( + &mut self, + fragments: impl Iterator, + mut state_ref: StateRef, + chain_length: ChainLength, + block_id: &StorableHash, + block_date: BlockDate, + parent_id: &StorableHash, + ) -> Result<(), DbError> { + let _span = span!(Level::DEBUG, "update_state").entered(); + + self.apply_fragments(block_id, fragments, &mut state_ref)?; + state_ref.add_block_to_blocks(&mut self.txn, &chain_length, block_id)?; + + let new_state = state_ref.finish(&mut self.txn)?; + + if !btree::put(&mut self.txn, &mut self.states, block_id, &new_state)? { + return Err(DbError::BlockAlreadyExists((*block_id).into())); + } + + self.update_chain_lengths(chain_length, block_id)?; + + self.add_block_meta( + block_id, + BlockMeta { + chain_length, + date: block_date, + parent_hash: *parent_id, + }, + )?; + + Ok(()) + } + + fn apply_fragments<'a>( + &mut self, + block_id: &BlockId, + fragments: impl Iterator, + state_ref: &mut StateRef, + ) -> Result<(), DbError> { + for (idx, fragment) in fragments.enumerate() { + let fragment_id = StorableHash::from(fragment.id()); + + btree::put( + &mut self.txn, + &mut self.block_transactions, + block_id, + &Pair { + a: u8::try_from(idx).expect("found more than 255 fragments in a block"), + b: fragment_id, + }, + )?; + + btree::put( + &mut self.txn, + &mut self.transaction_blocks, + &fragment_id, + block_id, + )?; + + match &fragment { + Fragment::Initial(config_params) => { + let mut settings = StaticSettings::new(); + let mut stability: Stability = unsafe { + std::mem::transmute(self.txn.root(Root::Stability as usize).unwrap()) + }; + + for config_param in config_params.iter() { + match config_param { + ConfigParam::Discrimination(d) => { + settings.set_discrimination(*d); + } + ConfigParam::Block0Date(_) => {} + ConfigParam::ConsensusVersion(c) => { + settings.set_consensus(*c); + } + ConfigParam::SlotsPerEpoch(_) => {} + ConfigParam::SlotDuration(_) => {} + ConfigParam::EpochStabilityDepth(c) => { + stability.set_epoch_stability_depth(*c); + } + ConfigParam::ConsensusGenesisPraosActiveSlotsCoeff(_) => {} + ConfigParam::BlockContentMaxSize(_) => {} + ConfigParam::AddBftLeader(_) => {} + ConfigParam::RemoveBftLeader(_) => {} + ConfigParam::LinearFee(_) => {} + ConfigParam::ProposalExpiration(_) => {} + ConfigParam::KesUpdateSpeed(_) => {} + ConfigParam::TreasuryAdd(_) => {} + ConfigParam::TreasuryParams(_) => {} + ConfigParam::RewardPot(_) => {} + ConfigParam::RewardParams(_) => {} + ConfigParam::PerCertificateFees(_) => {} + ConfigParam::FeesInTreasury(_) => {} + 
ConfigParam::RewardLimitNone => {} + ConfigParam::RewardLimitByAbsoluteStake(_) => {} + ConfigParam::PoolRewardParticipationCapping(_) => {} + ConfigParam::AddCommitteeId(_) => {} + ConfigParam::RemoveCommitteeId(_) => {} + ConfigParam::PerVoteCertificateFees(_) => {} + ConfigParam::TransactionMaxExpiryEpochs(_) => {} + } + } + + unsafe { + self.txn + .set_root(Root::Stability as usize, std::mem::transmute(stability)); + self.txn.set_root( + Root::BooleanStaticSettings as usize, + std::mem::transmute(settings), + ); + } + } + Fragment::OldUtxoDeclaration(_) => {} + Fragment::Transaction(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::OwnerStakeDelegation(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::StakeDelegation(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::PoolRegistration(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::PoolRetirement(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::PoolUpdate(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::UpdateProposal(_) => {} + Fragment::UpdateVote(_) => {} + Fragment::VotePlan(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + self.add_vote_plan(&fragment_id, tx)?; + } + Fragment::VoteCast(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + + let vote_cast = tx.as_slice().payload().into_payload(); + let vote_plan_id = + StorableHash::from(<[u8; 32]>::from(vote_cast.vote_plan().clone())); + let proposal_index = vote_cast.proposal_index(); + + let proposal_id = ProposalId { + vote_plan: vote_plan_id, + index: proposal_index, + }; + + state_ref.apply_vote(&mut self.txn, &fragment_id, &proposal_id)?; + + let certificate = match vote_cast.payload() { + chain_impl_mockchain::vote::Payload::Public { choice } => { + TransactionCertificate::from_public_vote_cast(PublicVoteCast { + vote_plan_id, + proposal_index, + choice: choice.as_byte(), + }) + } + + chain_impl_mockchain::vote::Payload::Private { + encrypted_vote: _, + proof: _, + } => TransactionCertificate::from_private_vote_cast(PrivateVoteCast { + vote_plan_id, + proposal_index, + }), + }; + + btree::put( + &mut self.txn, + &mut self.transaction_certificates, + &fragment_id, + &certificate, + )?; + } + Fragment::VoteTally(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + Fragment::EncryptedVoteTally(tx) => { + self.apply_transaction(fragment_id, tx, state_ref)?; + } + } + } + + Ok(()) + } + + fn get_settings(&self) -> StaticSettings { + let raw = self.txn.root(Root::BooleanStaticSettings as usize).unwrap(); + + unsafe { std::mem::transmute(raw) } + } + + fn add_vote_plan( + &mut self, + fragment_id: &FragmentId, + tx: &transaction::Transaction, + ) -> Result<(), DbError> { + let vote_plan = tx.as_slice().payload().into_payload(); + let vote_plan_id = StorableHash::from(<[u8; 32]>::from(vote_plan.to_id())); + let vote_plan_meta = VotePlanMeta { + vote_start: vote_plan.vote_start().into(), + vote_end: vote_plan.vote_end().into(), + committee_end: vote_plan.committee_end().into(), + payload_type: vote_plan.payload_type().into(), + }; + + for (idx, proposal) in vote_plan.proposals().iter().enumerate() { + btree::put( + &mut self.txn, + &mut self.vote_plan_proposals, + &Pair { + a: vote_plan_id, + b: idx as u8, + }, + &proposal.into(), + )?; + } + + btree::put( + &mut self.txn, + &mut self.vote_plans, + &vote_plan_id, + &vote_plan_meta, + )?; + + btree::put( + &mut 
self.txn, + &mut self.transaction_certificates, + fragment_id, + &TransactionCertificate::from_vote_plan_id(vote_plan_id), + )?; + + Ok(()) + } + + fn apply_transaction

( + &mut self, + fragment_id: FragmentId, + tx: &transaction::Transaction

, + state: &mut StateRef, + ) -> Result<(), DbError> { + let etx = tx.as_slice(); + + // is important that we put outputs first, because utxo's can refer to inputs in the same + // transaction, so those need to be already indexed. Although it would be technically + // faster to just look for them in the serialized transaction, that increases complexity + // for something that is not really that likely. Besides, the pages should be in the system + // cache because we recently inserted them. + for (idx, output) in etx.outputs().iter().enumerate() { + self.put_transaction_output(fragment_id, idx as u8, &output)?; + state.apply_output_to_stake_control(&mut self.txn, &output)?; + state.add_transaction_to_address( + &mut self.txn, + &fragment_id, + &output.address.into(), + )?; + } + + for (index, (input, witness)) in etx.inputs_and_witnesses().iter().enumerate() { + self.put_transaction_input(fragment_id, index as u8, &input, &witness)?; + + let resolved_utxo = match input.to_enum() { + InputEnum::AccountInput(_, _) => None, + InputEnum::UtxoInput(input) => { + Some(self.resolve_utxo(&self.transaction_outputs, input)?.clone()) + } + }; + + self.apply_input_to_stake_control(&input, &witness, resolved_utxo.as_ref(), state)?; + + self.apply_input_to_transactions_by_address( + &fragment_id, + &input, + &witness, + resolved_utxo.as_ref(), + state, + )?; + } + + Ok(()) + } + + fn update_tips( + &mut self, + parent_id: &BlockId, + chain_length: ChainLength, + block_id: &BlockId, + ) -> Result<(), DbError> { + let parent_key = BranchHead { + chain_length: B32::new( + chain_length + .0 + .get() + .checked_sub(1) + .expect("update tips called with block0"), + ), + id: *parent_id, + }; + + let _ = btree::del( + &mut self.txn, + &mut self.tips, + &BranchTag::Branch, + Some(&parent_key), + )?; + + let key = BranchHead { + chain_length: chain_length.0, + id: *block_id, + }; + + btree::put(&mut self.txn, &mut self.tips, &BranchTag::Branch, &key)?; + + let current_tip = { + let mut cursor = btree::Cursor::new(&self.txn, &self.tips)?; + + cursor.next(&self.txn)?.unwrap().1.chain_length.get() + }; + + if chain_length.get() > current_tip { + self.set_tip(block_id)?; + } + + Ok(()) + } + + fn update_chain_lengths( + &mut self, + chain_length: ChainLength, + block_id: &BlockId, + ) -> Result<(), DbError> { + btree::put( + &mut self.txn, + &mut self.chain_lengths, + &chain_length, + block_id, + )?; + + Ok(()) + } + + fn put_transaction_input( + &mut self, + fragment_id: FragmentId, + index: u8, + input: &transaction::Input, + witness: &transaction::Witness, + ) -> Result<(), DbError> { + btree::put( + &mut self.txn, + &mut self.transaction_inputs, + &Pair { + a: fragment_id, + b: index, + }, + &TransactionInput::from_original_with_witness(input, witness), + )?; + + Ok(()) + } + + fn put_transaction_output( + &mut self, + fragment_id: FragmentId, + index: u8, + output: &transaction::Output, + ) -> Result<(), DbError> { + btree::put( + &mut self.txn, + &mut self.transaction_outputs, + &Pair { + a: fragment_id, + b: index, + }, + &TransactionOutput::from_original(output), + )?; + + Ok(()) + } + + fn add_block_meta(&mut self, block_id: &BlockId, block: BlockMeta) -> Result<(), DbError> { + btree::put(&mut self.txn, &mut self.blocks, block_id, &block)?; + + Ok(()) + } + + fn apply_input_to_stake_control( + &mut self, + input: &transaction::Input, + witness: &transaction::Witness, + resolved_utxo: Option<&TransactionOutput>, + state: &mut StateRef, + ) -> Result<(), DbError> { + match (input.to_enum(), witness) { + 
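// Account inputs name the spending account directly, so its stake can be reduced as-is;
// utxo inputs only carry a pointer, so the previously indexed output is looked up and,
// when it pays to a Group address, the account embedded in that address is the one debited.
// Multisig accounts and legacy utxos are left untracked here.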
(InputEnum::AccountInput(account, value), Witness::Account(_)) => { + state.substract_stake_from_account( + &mut self.txn, + account.to_single_account().unwrap().as_ref(), + value, + )?; + } + (InputEnum::AccountInput(_, _), Witness::Multisig(_)) => {} + (InputEnum::UtxoInput(_), Witness::Utxo(_)) => { + // TODO: this is not the cleanest way of doing this... + let output = resolved_utxo.expect("missing utxo pointer resolution"); + + let address: chain_addr::Address = output.address.clone().try_into().unwrap(); + + if let chain_addr::Kind::Group(_, account) = address.kind() { + let value = &output.value; + state.substract_stake_from_account( + &mut self.txn, + account, + Value(value.get()), + )?; + } + } + (InputEnum::UtxoInput(_), Witness::OldUtxo(_, _, _)) => {} + _ => unreachable!("invalid combination of input and witness"), + } + Ok(()) + } + + fn apply_input_to_transactions_by_address( + &mut self, + fragment_id: &super::chain_storable::FragmentId, + input: &transaction::Input, + witness: &transaction::Witness, + resolved_utxo: Option<&TransactionOutput>, + state: &mut StateRef, + ) -> Result<(), DbError> { + match (input.to_enum(), witness) { + (InputEnum::AccountInput(account_id, _value), Witness::Account(_)) => { + let kind = chain_addr::Kind::Account( + account_id + .to_single_account() + .expect("the input to be validated") + .into(), + ); + + let discrimination = self.get_settings().get_discrimination().unwrap(); + let address = chain_addr::Address(discrimination, kind).into(); + + state.add_transaction_to_address(&mut self.txn, fragment_id, &address)?; + } + (InputEnum::AccountInput(_, _), Witness::Multisig(_)) => {} + (InputEnum::UtxoInput(_), Witness::Utxo(_)) => { + // TODO: this is not the cleanest way of doing this... + let output = resolved_utxo.expect("missing utxo pointer resolution"); + + state.add_transaction_to_address( + &mut self.txn, + fragment_id, + &output.address.clone(), + )?; + } + (InputEnum::UtxoInput(_), Witness::OldUtxo(_, _, _)) => {} + _ => unreachable!("invalid combination of input and witness"), + } + + Ok(()) + } + + // mostly used to retrieve the address of a utxo input (because it's embedded in the output) + // we need this mostly to know the addresses involved in a tx. + // but it is also used for stake/funds tracking, as we need to know how much to substract. 
+ fn resolve_utxo( + &self, + transactions: &TransactionsOutputs, + utxo_pointer: transaction::UtxoPointer, + ) -> Result<&TransactionOutput, DbError> { + let txid = utxo_pointer.transaction_id; + let idx = utxo_pointer.output_index; + + let mut cursor = btree::Cursor::new(&self.txn, transactions)?; + + let key = Pair { + a: StorableHash::from(txid), + b: idx, + }; + + cursor.set( + &self.txn, + &key, + Some(&TransactionOutput { + // address: Address::MAX, + address: Address::MIN, + value: L64::new(u64::MIN), + }), + )?; + + if let Some((_, output)) = cursor.current(&self.txn)?.filter(|(k, _)| *k == &key) { + Ok(output) + } else { + panic!("missing utxo {:?}", txid) + } + } + + pub fn commit(self) -> Result<(), DbError> { + // destructure things so we get some sort of exhaustiveness-check + let Self { + mut txn, + states, + tips, + chain_lengths, + transaction_inputs, + transaction_outputs, + transaction_certificates, + transaction_blocks, + blocks, + block_transactions, + vote_plans, + vote_plan_proposals, + } = self; + + txn.set_root(Root::States as usize, states.db); + txn.set_root(Root::Tips as usize, tips.db); + txn.set_root(Root::ChainLenghts as usize, chain_lengths.db); + txn.set_root(Root::TransactionInputs as usize, transaction_inputs.db); + txn.set_root(Root::TransactionOutputs as usize, transaction_outputs.db); + txn.set_root( + Root::TransactionCertificates as usize, + transaction_certificates.db, + ); + txn.set_root(Root::TransactionBlocks as usize, transaction_blocks.db); + txn.set_root(Root::Blocks as usize, blocks.db); + txn.set_root(Root::BlockTransactions as usize, block_transactions.db); + txn.set_root(Root::VotePlans as usize, vote_plans.db); + txn.set_root(Root::VotePlanProposals as usize, vote_plan_proposals.db); + + txn.commit()?; + + Ok(()) + } +} + +impl Txn { + pub fn get_transactions_by_address<'a>( + &'a self, + state_id: &StorableHash, + address: &Address, + ) -> Result>, DbError> { + let state = btree::get(&self.txn, &self.states, state_id, None)?; + + let state = match state { + Some((s, state)) if state_id == s => StateRef::from(state.clone()), + _ => return Ok(None), + }; + + let address_id = match btree::get(&self.txn, &state.address_id, address, None)? 
{ + Some((a, id)) if a == address => id, + _ => return Ok(None), + }; + + Ok(Some(SanakirjaCursorIter::new( + &self.txn, + address_id.into(), + &state.address_transactions, + )?)) + } + + pub fn get_blocks<'a>( + &'a self, + state_id: &StorableHash, + ) -> Result, DbError> { + let state = btree::get(&self.txn, &self.states, state_id, None)?; + + let state = match state { + Some((s, state)) if state_id == s => StateRef::from(state.clone()), + _ => return Ok(None), + }; + + Ok(Some(SanakirjaCursorIter::new( + &self.txn, + (), + &state.blocks, + )?)) + } + + pub fn get_branches(&self) -> Result { + let mut cursor = btree::Cursor::new(&self.txn, &self.tips)?; + + // skip the tip tag + cursor.next(&self.txn)?; + + Ok(BranchIter { + txn: &self.txn, + cursor, + }) + } + + pub fn get_tip(&self) -> Result { + let mut cursor = btree::Cursor::new(&self.txn, &self.tips)?; + + Ok(cursor.next(&self.txn)?.unwrap().1.id) + } + + pub fn get_block_fragments<'a, 'b: 'a>( + &'a self, + block_id: &'b BlockId, + ) -> Result { + SanakirjaCursorIter::new(&self.txn, block_id.into(), &self.block_transactions) + } + + pub fn get_fragment_inputs<'a, 'b: 'a>( + &'a self, + fragment_id: &'b FragmentId, + ) -> Result, DbError> { + SanakirjaCursorIter::new( + &self.txn, + FragmentContentId::from(fragment_id), + &self.transaction_inputs, + ) + } + + pub fn get_fragment_outputs<'a, 'b: 'a>( + &'a self, + fragment_id: &'b FragmentId, + ) -> Result, DbError> { + SanakirjaCursorIter::new( + &self.txn, + FragmentContentId::from(fragment_id), + &self.transaction_outputs, + ) + } + + pub fn get_fragment_certificate( + &self, + fragment_id: &FragmentId, + ) -> Result, DbError> { + let key = *fragment_id; + + let certificate = btree::get(&self.txn, &self.transaction_certificates, &key, None)?; + + Ok(certificate.and_then(|(k, v)| if k == &key { Some(v) } else { None })) + } + + pub fn get_blocks_by_chain_length<'a, 'b: 'a>( + &'a self, + chain_length: &'b ChainLength, + ) -> Result>, DbError> { + let mut cursor = btree::Cursor::new(&self.txn, &self.chain_lengths)?; + + cursor.set(&self.txn, chain_length, None)?; + + Ok(BlocksByChainLenght { + txn: &self.txn, + cursor, + chain_length: *chain_length, + }) + } + + pub fn get_block_meta(&self, block_id: &BlockId) -> Result, DbError> { + let block_meta = btree::get(&self.txn, &self.blocks, block_id, None)?; + + Ok(block_meta.and_then(|(k, v)| if k == block_id { Some(v) } else { None })) + } + + pub fn get_vote_plan_meta( + &self, + vote_plan_id: &VotePlanId, + ) -> Result, DbError> { + let certificate = btree::get(&self.txn, &self.vote_plans, vote_plan_id, None)?; + + Ok(certificate.and_then(|(k, v)| if k == vote_plan_id { Some(v) } else { None })) + } + + pub fn get_vote_plan_proposals<'a, 'b: 'a>( + &'a self, + vote_plan_id: &'b VotePlanId, + ) -> Result { + SanakirjaCursorIter::new(&self.txn, vote_plan_id.into(), &self.vote_plan_proposals) + } + + // TODO: this is duplicated in the MutTxn, it would be nice to find a way to re-use it, but I'm + // not sure if there is any trait combination that allows it easily (for T). 
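+ // Settings and stability live in fixed sanakirja root slots rather than in a btree, so reading them below is just a reinterpretation of the stored root value.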
+ pub fn get_settings(&self) -> StaticSettings { + let raw = self.txn.root(Root::BooleanStaticSettings as usize); + + unsafe { std::mem::transmute(raw) } + } + + pub fn get_stable_chain_length(&self) -> ChainLength { + let stability: Stability = + unsafe { std::mem::transmute(self.txn.root(Root::Stability as usize)) }; + + stability.last_stable_block + } + + // paginating this seems a bit overkill + pub fn transaction_blocks(&self, tx: &FragmentId) -> Result, DbError> { + let iter = btree::iter(&self.txn, &self.transaction_blocks, Some((tx, None)))?; + + let mut ids = vec![]; + + for block in iter { + let (k, block) = block?; + + if tx != k { + break; + } + + ids.push(*block); + } + + Ok(ids) + } +} + +pub struct BranchIter<'a> { + txn: &'a SanakirjaTx, + cursor: btree::Cursor>, +} + +impl<'a> Iterator for BranchIter<'a> { + type Item = Result<&'a FragmentId, DbError>; + + fn next(&mut self) -> Option { + self.cursor + .next(self.txn) + .transpose() + .map(|item| item.map(|(_, v)| &v.id).map_err(DbError::from)) + } +} + +pub struct BlocksByChainLenght<'a> { + txn: &'a SanakirjaTx, + cursor: ChainLengthsCursor, + chain_length: ChainLength, +} + +impl<'a> Iterator for BlocksByChainLenght<'a> { + type Item = Result<&'a BlockId, DbError>; + + fn next(&mut self) -> Option { + self.cursor + .next(self.txn) + .map(|item| { + item.and_then(|(k, v)| { + if k == &self.chain_length { + Some(v) + } else { + None + } + }) + }) + .map_err(DbError::from) + .transpose() + } +} + +impl Default for Stability { + fn default() -> Self { + Self { + epoch_stability_depth: L32::new(u32::MAX), + last_stable_block: ChainLength::new(0), + } + } +} + +impl Stability { + fn set_epoch_stability_depth(&mut self, e: u32) { + self.epoch_stability_depth = L32::new(e); + } + + fn get_epoch_stability_depth(&self) -> u32 { + self.epoch_stability_depth.get() + } +} + +impl StaticSettings { + pub fn new() -> Self { + Self { + discrimination: L32::new(0), + consensus: L32::new(0), + } + } + + pub fn set_discrimination(&mut self, d: chain_addr::Discrimination) { + match d { + chain_addr::Discrimination::Production => self.discrimination = L32::new(1), + chain_addr::Discrimination::Test => self.discrimination = L32::new(2), + } + } + + pub fn get_discrimination(&self) -> Option { + match self.discrimination.get() { + 0 => None, + 1 => Some(chain_addr::Discrimination::Production), + 2 => Some(chain_addr::Discrimination::Test), + _ => unreachable!("invalid discrimination tag"), + } + } + pub fn set_consensus(&mut self, c: chain_impl_mockchain::chaintypes::ConsensusType) { + match c { + chain_impl_mockchain::chaintypes::ConsensusType::Bft => self.consensus = L32::new(1), + chain_impl_mockchain::chaintypes::ConsensusType::GenesisPraos => { + self.consensus = L32::new(2) + } + } + } + + pub fn get_consensus(&self) -> Option { + match self.consensus.get() { + 0 => None, + 1 => Some(chain_impl_mockchain::chaintypes::ConsensusType::Bft), + 2 => Some(chain_impl_mockchain::chaintypes::ConsensusType::GenesisPraos), + _ => unreachable!("invalid discrimination tag"), + } + } +} + +impl Default for StaticSettings { + fn default() -> Self { + Self::new() + } +} + +impl PartialOrd for BranchHead { + fn partial_cmp(&self, other: &Self) -> Option { + // NOTE: the order is reversed, so branches are stored in descending order + other.chain_length.partial_cmp(&self.chain_length) + } +} + +impl Ord for BranchHead { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + (other.chain_length, &other.id).cmp(&(self.chain_length, &self.id)) + } +} diff 
--git a/chain-explorer/src/seq.rs b/chain-explorer/src/seq.rs new file mode 100644 index 000000000..78ff7576e --- /dev/null +++ b/chain-explorer/src/seq.rs @@ -0,0 +1,34 @@ +use super::endian::B64; +use sanakirja::{direct_repr, Storable, UnsizedStorable}; +use zerocopy::{AsBytes, FromBytes}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, FromBytes, AsBytes)] +#[repr(C)] +pub struct SeqNum(B64); + +direct_repr!(SeqNum); + +impl SeqNum { + pub const MAX: SeqNum = SeqNum(B64(zerocopy::U64::::MAX_VALUE)); + pub const MIN: SeqNum = SeqNum(B64(zerocopy::U64::::ZERO)); + + pub fn new(n: u64) -> Self { + Self(B64::new(n)) + } + + pub fn next(self) -> SeqNum { + Self::new(self.0.get() + 1) + } +} + +impl From for u64 { + fn from(n: SeqNum) -> Self { + n.0.get() + } +} + +impl From for SeqNum { + fn from(n: u64) -> Self { + SeqNum::new(n) + } +} diff --git a/chain-explorer/src/state_ref.rs b/chain-explorer/src/state_ref.rs new file mode 100644 index 000000000..714e69ebd --- /dev/null +++ b/chain-explorer/src/state_ref.rs @@ -0,0 +1,350 @@ +use super::{ + chain_storable::{AccountId, Address, BlockId, FragmentId, PoolId, Stake}, + endian::L64, + helpers::find_last_record_by, + pair::Pair, + seq::SeqNum, + SanakirjaMutTx, +}; +use super::{ + chain_storable::{ChainLength, ProposalId}, + error::DbError, +}; +use chain_impl_mockchain::{transaction, value::Value}; +use sanakirja::{ + btree::{self, Db}, + direct_repr, Storable, UnsizedStorable, +}; +use std::convert::TryInto; + +pub type StakeControl = Db; +pub type BlocksInBranch = Db; + +pub type AddressId = SeqNum; +pub type AddressIds = Db; +pub type AddressTransactions = Db>; +pub type Votes = Db>; + +// a typed (and in-memory) version of SerializedStateRef +pub struct StateRef { + pub stake_control: StakeControl, + pub blocks: BlocksInBranch, + pub address_id: AddressIds, + pub address_transactions: AddressTransactions, + pub votes: Votes, + // cached field, this gets written back by `finish` + next_address_id: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[repr(C)] +pub struct SerializedStateRef { + pub stake_control: L64, + pub blocks: L64, + pub address_id: L64, + pub addresses: L64, + pub votes: L64, +} + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct PoolIdEntry { + pool: PoolId, + seq: SeqNum, +} + +direct_repr!(PoolIdEntry); + +impl From for StateRef { + fn from(ser: SerializedStateRef) -> Self { + StateRef { + stake_control: Db::from_page(ser.stake_control.get()), + blocks: Db::from_page(ser.blocks.get()), + address_id: Db::from_page(ser.address_id.get()), + address_transactions: Db::from_page(ser.addresses.get()), + votes: Db::from_page(ser.votes.get()), + next_address_id: None, + } + } +} + +impl StateRef { + pub fn new_empty(txn: &mut T) -> Result + where + T: ::sanakirja::AllocPage + + ::sanakirja::LoadPage + + ::sanakirja::RootPage, + { + let mut empty = Self { + stake_control: btree::create_db_(txn).unwrap(), + blocks: btree::create_db_(txn).unwrap(), + address_id: btree::create_db_(txn).unwrap(), + address_transactions: btree::create_db_(txn).unwrap(), + votes: btree::create_db_(txn).unwrap(), + + next_address_id: None, + }; + + // TODO: extract [0u8; 65] to an Address::sigil function + btree::put( + txn, + &mut empty.address_id, + &Address([0u8; 65]), + &SeqNum::new(0), + )?; + + Ok(empty) + } + + pub fn finish(mut self, txn: &mut SanakirjaMutTx) -> Result { + // if the sequence counter for addresses was incremented previously, rewrite it + if let Some(next_seq) = 
self.next_address_id { + btree::del(txn, &mut self.address_id, &Address([0u8; 65]), None)?; + + debug_assert!(btree::put( + txn, + &mut self.address_id, + &Address([0u8; 65]), + &next_seq.next(), + )?); + } + + Ok(SerializedStateRef { + stake_control: L64::new(self.stake_control.db), + blocks: L64::new(self.blocks.db), + address_id: L64::new(self.address_id.db), + addresses: L64::new(self.address_transactions.db), + votes: L64::new(self.votes.db), + }) + } + + pub fn apply_vote( + &mut self, + txn: &mut SanakirjaMutTx, + fragment_id: &FragmentId, + proposal_id: &ProposalId, + ) -> Result<(), DbError> { + let max_possible_value = Pair { + a: SeqNum::MAX, + b: FragmentId::MAX, + }; + + let seq = find_last_record_by(txn, &self.votes, proposal_id, &max_possible_value) + .map(|last| last.a.next()) + .unwrap_or(SeqNum::MIN); + + btree::put( + txn, + &mut self.votes, + proposal_id, + &Pair { + a: seq, + b: *fragment_id, + }, + )?; + + Ok(()) + } + + /// Add the given transaction to address at the end of the list + /// This function *only* checks the last fragment to avoid repetition when a transaction has more + /// than one (input|output) with the same address (eg: utxo input and change). + pub fn add_transaction_to_address( + &mut self, + txn: &mut SanakirjaMutTx, + fragment_id: &FragmentId, + address: &Address, + ) -> Result<(), DbError> { + let address_id = self.get_or_insert_address_id(txn, address)?; + + let max_possible_value = Pair { + a: SeqNum::MAX, + b: FragmentId::MAX, + }; + + let seq = match find_last_record_by( + &*txn, + &self.address_transactions, + &address_id, + &max_possible_value, + ) { + Some(v) => { + if &v.b == fragment_id { + return Ok(()); + } else { + v.a.next() + } + } + None => SeqNum::MIN, + }; + + debug_assert!(btree::put( + txn, + &mut self.address_transactions, + &address_id, + &Pair { + a: seq, + b: *fragment_id, + }, + )?); + + Ok(()) + } + + pub fn add_block_to_blocks( + &mut self, + txn: &mut SanakirjaMutTx, + chain_length: &ChainLength, + block_id: &BlockId, + ) -> Result<(), DbError> { + btree::put(txn, &mut self.blocks, chain_length, block_id).unwrap(); + Ok(()) + } + + pub(crate) fn get_or_insert_address_id( + &mut self, + txn: &mut SanakirjaMutTx, + address: &Address, + ) -> Result { + let address_exists = btree::get(txn, &self.address_id, address, None)? + .filter(|(id, _)| id == &address) + .map(|(_, v)| v) + .cloned(); + + let address_id = if let Some(v) = address_exists { + v + } else { + let next_seq = if let Some(next_seq) = self.next_address_id { + next_seq + } else { + *btree::get(txn, &self.address_id, &Address([0u8; 65]), None)? 
+ .unwrap() + .1 + }; + + self.next_address_id = Some(next_seq.next()); + + btree::put(txn, &mut self.address_id, address, &next_seq)?; + + next_seq + }; + + Ok(address_id) + } + + pub fn apply_output_to_stake_control( + &mut self, + txn: &mut SanakirjaMutTx, + output: &transaction::Output, + ) -> Result<(), DbError> { + match output.address.kind() { + chain_addr::Kind::Group(_, account) => { + self.add_stake_to_account(txn, account, output.value)?; + } + chain_addr::Kind::Account(account) => { + self.add_stake_to_account(txn, account, output.value)?; + } + chain_addr::Kind::Single(_account) => {} + chain_addr::Kind::Multisig(_) => {} + chain_addr::Kind::Script(_) => {} + } + Ok(()) + } + + pub fn add_stake_to_account( + &mut self, + txn: &mut SanakirjaMutTx, + account: &chain_crypto::PublicKey, + value: Value, + ) -> Result<(), DbError> { + let op = + |current_stake: u64, value: u64| -> u64 { current_stake.checked_add(value).unwrap() }; + + self.update_stake_for_account(txn, account, op, value) + } + + pub fn substract_stake_from_account( + &mut self, + txn: &mut SanakirjaMutTx, + account: &chain_crypto::PublicKey, + value: Value, + ) -> Result<(), DbError> { + let op = + |current_stake: u64, value: u64| -> u64 { current_stake.checked_sub(value).unwrap() }; + + self.update_stake_for_account(txn, account, op, value) + } + + fn update_stake_for_account( + &mut self, + txn: &mut SanakirjaMutTx, + account: &chain_crypto::PublicKey, + op: impl Fn(u64, u64) -> u64, + value: Value, + ) -> Result<(), DbError> { + let account_id = AccountId(account.as_ref().try_into().unwrap()); + + let current_stake = btree::get(txn, &self.stake_control, &account_id, None) + .unwrap() + .and_then(|(k, stake)| { + if k == &account_id { + Some(stake.get()) + } else { + None + } + }) + .unwrap_or(0); + + let new_stake = op(current_stake, value.0); + + btree::del(txn, &mut self.stake_control, &account_id, None).unwrap(); + btree::put( + txn, + &mut self.stake_control, + &account_id, + &L64::new(new_stake), + )?; + + Ok(()) + } + + /// gc this fork so the allocated pages can be re-used + /// + /// # Safety + /// + /// It's important that any references to this particular state are not used anymore. For the + /// current use-case, callers need to ensure that this snapshot is not referenced anymore in + /// the `States` btree. + pub unsafe fn drop(self, txn: &mut SanakirjaMutTx) -> Result<(), DbError> { + let StateRef { + stake_control, + blocks, + address_transactions, + address_id, + votes, + next_address_id: _, + } = self; + + btree::drop(txn, stake_control)?; + btree::drop(txn, blocks)?; + btree::drop(txn, address_id)?; + btree::drop(txn, address_transactions)?; + btree::drop(txn, votes)?; + + Ok(()) + } +} + +impl SerializedStateRef { + pub fn fork(&self, txn: &mut SanakirjaMutTx) -> Result { + Ok(StateRef { + stake_control: btree::fork_db(txn, &Db::from_page(self.stake_control.get()))?, + blocks: btree::fork_db(txn, &Db::from_page(self.blocks.get()))?, + address_id: btree::fork_db(txn, &Db::from_page(self.address_id.get()))?, + address_transactions: btree::fork_db(txn, &Db::from_page(self.addresses.get()))?, + votes: btree::fork_db(txn, &Db::from_page(self.votes.get()))?, + next_address_id: None, + }) + } +} + +direct_repr!(SerializedStateRef);
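
Editor's note: a minimal usage sketch of the read-only `Txn` API above, not part of the patch. The `ExplorerDb` handle and its `txn_begin` constructor are assumptions made for illustration (this diff does not show how a `Txn` is opened), and the item type yielded by the address-transactions iterator is likewise assumed to be a fallible cursor read.

fn count_address_transactions(db: &ExplorerDb, address: &Address) -> Result<u64, DbError> {
    // Hypothetical constructor for the read-only transaction type shown in this patch.
    let txn = db.txn_begin()?;

    // `tips` is kept in descending chain-length order, so `get_tip` returns the
    // branch head with the greatest chain length.
    let tip = txn.get_tip()?;

    let mut count = 0u64;

    // `None` means the tip's block id is not a key of the `states` btree.
    if let Some(fragments) = txn.get_transactions_by_address(&tip, address)? {
        for fragment in fragments {
            // Each item is a cursor read and is assumed to surface a `DbError` on failure.
            fragment?;
            count += 1;
        }
    }

    Ok(count)
}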