From 736b05ce8e3827fa4612d3a49f285a888d0ee892 Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Wed, 4 Dec 2024 18:55:45 +0530 Subject: [PATCH] roles_logic doc nits --- .../src/channel_logic/channel_factory.rs | 18 +- .../src/channel_logic/proxy_group_channel.rs | 21 +- .../roles-logic-sv2/src/common_properties.rs | 14 +- protocols/v2/roles-logic-sv2/src/errors.rs | 17 +- .../src/handlers/job_declaration.rs | 2 + .../v2/roles-logic-sv2/src/handlers/mining.rs | 1 + .../src/handlers/template_distribution.rs | 1 + .../v2/roles-logic-sv2/src/job_creator.rs | 64 +++--- .../v2/roles-logic-sv2/src/job_dispatcher.rs | 4 +- protocols/v2/roles-logic-sv2/src/lib.rs | 4 +- .../v2/roles-logic-sv2/src/routing_logic.rs | 132 ++++++------ protocols/v2/roles-logic-sv2/src/selectors.rs | 198 +++++++++--------- protocols/v2/roles-logic-sv2/src/utils.rs | 14 +- 13 files changed, 249 insertions(+), 241 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs index 1f7e4c037..f15be4289 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs @@ -56,7 +56,7 @@ pub enum OnNewShare { /// Used when the received is malformed, is for an inexistent channel or do not meet downstream /// target. SendErrorDownstream(SubmitSharesError<'static>), - /// Used when an exteded channel in a proxy receive a share, and the share meet upstream + /// Used when an extended channel in a proxy receive a share, and the share meet upstream /// target, in this case a new share must be sent upstream. Also an optional template id is /// returned, when a job declarator want to send a valid share upstream could use the /// template for get the up job id. @@ -71,7 +71,7 @@ pub enum OnNewShare { /// (share, template id, coinbase,complete extranonce) ShareMeetBitcoinTarget((Share, Option, Vec, Vec)), /// Indicate that the share meet downstream target, in the case we could send a success - /// response dowmstream. + /// response downstream. ShareMeetDownstreamTarget, } @@ -256,7 +256,7 @@ impl ChannelFactory { let max_extranonce_size = self.extranonces.get_range2_len() as u16; if min_extranonce_size <= max_extranonce_size { // SECURITY is very unlikely to finish the ids btw this unwrap could be used by an - // attaccher that want to dirsrupt the service maybe we should have a method + // attacker that want to disrupt the service maybe we should have a method // to reuse ids that are no longer connected? let channel_id = self .ids @@ -394,7 +394,7 @@ impl ChannelFactory { } /// This function is called when downstream have a group channel - /// Shouldnt all standard channel's be non HOM?? + /// should not all standard channel's be non HOM?? 
fn new_standard_channel_for_non_hom_downstream( &mut self, request_id: u32, @@ -462,7 +462,7 @@ impl ChannelFactory { .get(&channel_id) .unwrap(); // OPTIMIZATION this could be memoized somewhere cause is very likely that we will receive a - // lot od OpenStandardMiningChannel requests consequtevely + // lot od OpenStandardMiningChannel requests consecutively let job_id = self.job_ids.next(); let future_jobs: Option>> = self .future_jobs @@ -558,11 +558,11 @@ impl ChannelFactory { } // When a new non HOM downstream opens a channel, we use this function to prepare all the - // extended jobs (future and non) and the prev hash that we need to send dowmstream + // extended jobs (future and non) and the prev hash that we need to send downstream fn prepare_jobs_and_p_hash(&mut self, result: &mut Vec, complete_id: u64) { // If group is 0 it means that we are preparing jobs and p hash for a non HOM downstream // that want to open a new extended channel in that case we want to use the channel id - // TODO verify that this is true also for the case where the channle factory is in a proxy + // TODO verify that this is true also for the case where the channel factory is in a proxy // and not in a pool. let group_id = match GroupId::into_group_id(complete_id) { 0 => GroupId::into_channel_id(complete_id), @@ -1167,7 +1167,7 @@ impl PoolChannelFactory { let target = self.job_creator.last_target(); // When downstream set a custom mining job we add the job to the negotiated job // hashmap, with the extended channel id as a key. Whenever the pool receive a share must - // first check if the channel have a negotiated job if so we can not retreive the template + // first check if the channel have a negotiated job if so we can not retrieve the template // via the job creator but we create a new one from the set custom job. if self.negotiated_jobs.contains_key(&m.channel_id) { let referenced_job = self.negotiated_jobs.get(&m.channel_id).unwrap(); @@ -1305,7 +1305,7 @@ impl PoolChannelFactory { } } -/// Used by proxies that want to open extended channls with upstream. If the proxy has job +/// Used by proxies that want to open extended channels with upstream. If the proxy has job /// declaration capabilities, we set the job creator and the coinbase outs. #[derive(Debug)] pub struct ProxyExtendedChannelFactory { diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index dc88e646d..e75eb057d 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -96,9 +96,9 @@ impl GroupChannel { last_received_job: None, } } - /// Called when a channel is successfully opened for header only mining on standard channels. - /// Here we store the new channel, and update state for jobs and return relevant SV2 messages - /// (NewMiningJob and SNPH) + // Called when a channel is successfully opened for header only mining(HOM) on standard + // channels. 
Here, we store the new channel, and update state for jobs and return relevant + // SV2 messages (NewMiningJob and SNPH) fn on_channel_success_for_hom_downtream( &mut self, m: OpenStandardMiningChannelSuccess, @@ -148,9 +148,10 @@ impl GroupChannel { Ok(res) } - /// If a matching job is already in the future job queue, - /// we set a new valid job, otherwise we clear the future jobs - /// queue and stage a prev hash to be used when the job arrives + + // If a matching job is already in the future job queue, + // we set a new valid job, otherwise we clear the future jobs + // queue and stage a prev hash to be used when the job arrives fn update_new_prev_hash(&mut self, m: &SetNewPrevHash) { while let Some(job) = self.future_jobs.pop() { if job.job_id == m.job_id { @@ -168,8 +169,9 @@ impl GroupChannel { }; self.last_prev_hash = Some(cloned.clone()); } - /// Pushes new job to future_job queue if it is future, - /// otherwise we set it as the valid job + + // Pushes new job to future_job queue if it is future, + // otherwise we set it as the valid job fn on_new_extended_mining_job(&mut self, m: NewExtendedMiningJob<'static>) { self.last_received_job = Some(m.clone()); if m.is_future() { @@ -178,7 +180,8 @@ impl GroupChannel { self.last_valid_job = Some(m) } } - /// Returns most recent job + + // Returns most recent job fn last_received_job_to_standard_job( &mut self, channel_id: u32, diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index 61d4dd2f5..805820ea9 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -29,7 +29,7 @@ pub struct PairSettings { pub trait IsUpstream + ?Sized> { /// Used to bitcoin protocol version for the channel. fn get_version(&self) -> u16; - // Used to get flags for the defined sv2 message protocol + /// Used to get flags for the defined sv2 message protocol fn get_flags(&self) -> u32; /// Used to check if the upstream supports the protocol that the downstream wants to use fn get_supported_protocols(&self) -> Vec; @@ -55,7 +55,7 @@ pub trait IsUpstream + ?Sized> /// Channel to be opened with the upstream nodes. #[derive(Debug, Clone, Copy)] pub enum UpstreamChannel { - // nominal hash rate + /// nominal hash rate Standard(f32), Group, Extended, @@ -102,7 +102,7 @@ pub trait IsMiningDownstream: IsDownstream { } } -/// Implemented for the NullDownstreamMiningSelector +// Implemented for the NullDownstreamMiningSelector impl IsUpstream for () { fn get_version(&self) -> u16 { unreachable!("Null upstream do not have a version"); @@ -128,7 +128,7 @@ impl IsUpstream for } } -/// Implemented for the NullDownstreamMiningSelector +// Implemented for the NullDownstreamMiningSelector impl IsDownstream for () { fn get_downstream_mining_data(&self) -> CommonDownstreamData { unreachable!("Null downstream do not have mining data"); @@ -154,11 +154,11 @@ impl IsMiningUpstream downstream ids + // Mapping of upstream id -> downstream ids request_ids_map: HashMap>, next_id: u32, } @@ -182,7 +182,7 @@ impl RequestIdMapper { new_id } - /// Removes a upstream/downstream mapping from the `RequsetIdMapper`. + /// Removes a upstream/downstream mapping from the `RequestIdMapper`. 
pub fn remove(&mut self, upstream_id: u32) -> Option { self.request_ids_map.remove(&upstream_id) } diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs index cc05fede7..47369a483 100644 --- a/protocols/v2/roles-logic-sv2/src/errors.rs +++ b/protocols/v2/roles-logic-sv2/src/errors.rs @@ -7,6 +7,7 @@ use crate::{ use binary_sv2::Error as BinarySv2Error; use std::fmt::{self, Display, Formatter}; +/// Error enum #[derive(Debug)] pub enum Error { /// Payload size is too big to fit into a frame @@ -29,7 +30,7 @@ pub enum Error { NoCompatibleUpstream(CommonDownstreamData), /// Error if the hashmap `future_jobs` field in the `GroupChannelJobDispatcher` is empty. NoFutureJobs, - /// No Downstreams connected + /// No downstreams connected NoDownstreamsConnected, /// PrevHash requires non-existent Job Id PrevHashRequireNonExistentJobId(u32), @@ -61,7 +62,7 @@ pub enum Error { GroupIdNotFound, /// A share has been received but no job for it exist ShareDoNotMatchAnyJob, - /// A share has been recived but no channel for it exist + /// A share has been received but no channel for it exist ShareDoNotMatchAnyChannel, /// Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase InvalidCoinbase, @@ -124,7 +125,7 @@ impl Display for Error { BadPayloadSize => write!(f, "Payload is too big to fit into the frame"), BinarySv2Error(v) => write!( f, - "BinarySv2Error: error in serializing/deserilizing binary format {:?}", + "BinarySv2Error: error in serializing/deserializing binary format {:?}", v ), DownstreamDown => { @@ -174,12 +175,12 @@ impl Display for Error { }, NoMoreExtranonces => write!(f, "No more extranonces"), JobIsNotFutureButPrevHashNotPresent => write!(f, "A non future job always expect a previous new prev hash"), - ChannelIsNeitherExtendedNeitherInAPool => write!(f, "If a channel is neither extended neither is part of a pool the only thing to do when a OpenStandardChannle is received is to relay it upstream with and updated request id"), - ExtranonceSpaceEnded => write!(f, "No more avaible extranonces for downstream"), + ChannelIsNeitherExtendedNeitherInAPool => write!(f, "If a channel is neither extended nor part of a pool the only thing to do when an OpenStandardChannel is received is to relay it upstream with an updated request id"), + ExtranonceSpaceEnded => write!(f, "No more available extranonces for downstream"), ImpossibleToCalculateMerkleRoot => write!(f, "Impossible to calculate merkle root"), GroupIdNotFound => write!(f, "Group id not found"), - ShareDoNotMatchAnyJob => write!(f, "A share has been recived but no job for it exist"), - ShareDoNotMatchAnyChannel => write!(f, "A share has been recived but no channel for it exist"), + ShareDoNotMatchAnyJob => write!(f, "A share has been received but no job for it exist"), + ShareDoNotMatchAnyChannel => write!(f, "A share has been received but no channel for it exist"), InvalidCoinbase => write!(f, "Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase"), ValueRemainingNotUpdated => write!(f, "Value remaining in coinbase output was not correctly updated (it's equal to 0)"), UnknownOutputScriptType => write!(f, "Unknown script type in config"), @@ -189,7 +190,7 @@ impl Display for Error { TxVersionTooBig => write!(f, "Tx version can not be greater than i32::MAX"), TxVersionTooLow => write!(f, "Tx version can not be lower than 1"), TxDecodingError(e) => write!(f, "Impossible to decode tx: {:?}", e), - NotFoundChannelId => write!(f, "No downstream has been
registred for this channel id"), + NotFoundChannelId => write!(f, "No downstream has been registered for this channel id"), NoValidJob => write!(f, "Impossible to create a standard job for channelA cause no valid job has been received from upstream yet"), NoValidTranslatorJob => write!(f, "Impossible to create a extended job for channel cause no valid job has been received from upstream yet"), NoTemplateForId => write!(f, "Impossible to retrieve a template for the required job id"), diff --git a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs index f0453fd8d..a2ec2aea1 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs @@ -41,6 +41,8 @@ use crate::{parsers::JobDeclaration, utils::Mutex}; use std::sync::Arc; + +/// see [`SendTo_`] pub type SendTo = SendTo_, ()>; use super::SendTo_; use crate::errors::Error; diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs index 918a32405..8edb43976 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs @@ -60,6 +60,7 @@ use const_sv2::*; use std::{fmt::Debug as D, sync::Arc}; use tracing::{debug, error, info, trace}; +/// see [`SendTo_`] pub type SendTo = SendTo_, Remote>; /// Represents supported channel types in a mining connection. diff --git a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs index a0a5b94ec..445a83922 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs @@ -42,6 +42,7 @@ use template_distribution_sv2::{ RequestTransactionDataSuccess, SetNewPrevHash, SubmitSolution, }; +/// see [`SendTo_`] pub type SendTo = SendTo_, ()>; use const_sv2::*; use core::convert::TryInto; diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs index 1ed653762..31ef5f224 100644 --- a/protocols/v2/roles-logic-sv2/src/job_creator.rs +++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs @@ -165,15 +165,15 @@ pub fn extended_job_from_custom_job( ) } -/// returns an extended job given the provided template from the Template Provider and other -/// Pool role related fields. -/// -/// Pool related arguments: -/// -/// * `coinbase_outputs`: coinbase output transactions specified by the pool. -/// * `job_id`: incremented job identifier specified by the pool. -/// * `version_rolling_allowed`: boolean specified by the channel. -/// * `extranonce_len`: extranonce length specified by the channel. +// returns an extended job given the provided template from the Template Provider and other +// Pool role related fields. +// +// Pool related arguments: +// +// * `coinbase_outputs`: coinbase output transactions specified by the pool. +// * `job_id`: incremented job identifier specified by the pool. +// * `version_rolling_allowed`: boolean specified by the channel. +// * `extranonce_len`: extranonce length specified by the channel. 
fn new_extended_job( new_template: &mut NewTemplate, coinbase_outputs: &mut [TxOut], @@ -234,8 +234,8 @@ fn new_extended_job( Ok(new_extended_mining_job) } -/// used to extract the coinbase transaction prefix for extended jobs -/// so the extranonce search space can be introduced +// used to extract the coinbase transaction prefix for extended jobs +// so the extranonce search space can be introduced fn coinbase_tx_prefix( coinbase: &Transaction, script_prefix_len: usize, @@ -258,15 +258,15 @@ fn coinbase_tx_prefix( r.try_into().map_err(Error::BinarySv2Error) } -/// used to extract the coinbase transaction suffix for extended jobs -/// so the extranonce search space can be introduced +// used to extract the coinbase transaction suffix for extended jobs +// so the extranonce search space can be introduced fn coinbase_tx_suffix( coinbase: &Transaction, extranonce_len: u8, script_prefix_len: usize, ) -> Result, Error> { let encoded = coinbase.serialize(); - // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0 + // If script_prefix_len is not 0 we are not in a test environment and the coinbase have the 0 // witness let segwit_bytes = match script_prefix_len { 0 => 0, @@ -319,8 +319,8 @@ fn get_bip_34_bytes(new_template: &NewTemplate, tx_version: i32) -> Result, version: i32, @@ -330,7 +330,7 @@ fn coinbase( pool_signature: String, extranonce_len: u8, ) -> Transaction { - // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0 + // If script_prefix_len is not 0 we are not in a test environment and the coinbase have the 0 // witness let witness = match bip34_bytes.len() { 0 => Witness::from_vec(vec![]), @@ -378,8 +378,8 @@ pub fn extended_job_to_non_segwit( coinbase_tx_suffix: stripped_tx.into_coinbase_tx_suffix()?, }) } -/// Helper type to strip a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix -/// to ensure miners are hashing with the correct coinbase +// Helper type to strip a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix +// to ensure miners are hashing with the correct coinbase struct StrippedCoinbaseTx { version: u32, inputs: Vec>, @@ -390,7 +390,7 @@ struct StrippedCoinbaseTx { } impl StrippedCoinbaseTx { - /// create + // create fn from_coinbase(tx: Transaction, full_extranonce_len: usize) -> Result { let bip141_bytes_len = tx .input @@ -420,13 +420,13 @@ impl StrippedCoinbaseTx { }) } - /// the coinbase tx prefix is the LE bytes concatenation of the tx version and all - /// of the tx inputs minus the 32 bytes after the bip34 bytes in the script - /// and the last input's sequence (used as the first entry in the coinbase tx suffix). - /// The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce - /// space for the miner. We remove the bip141 marker and flag since it is only used for - /// computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root - // clippy allow because we dont want to consume self + // the coinbase tx prefix is the LE bytes concatenation of the tx version and all + // of the tx inputs minus the 32 bytes after the bip34 bytes in the script + // and the last input's sequence (used as the first entry in the coinbase tx suffix). + // The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce + // space for the miner. 
We remove the bip141 marker and flag since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self #[allow(clippy::wrong_self_convention)] fn into_coinbase_tx_prefix(&self) -> Result, errors::Error> { let mut inputs = self.inputs.clone(); @@ -445,11 +445,11 @@ impl StrippedCoinbaseTx { prefix.try_into().map_err(Error::BinarySv2Error) } - /// This coinbase tx suffix is the sequence of the last tx input plus - /// the serialized tx outputs and the lock time. Note we do not use the witnesses - /// (placed between txouts and lock time) since it is only used for - /// computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root - // clippy allow because we dont want to consume self + // This coinbase tx suffix is the sequence of the last tx input plus + // the serialized tx outputs and the lock time. Note we do not use the witnesses + // (placed between txouts and lock time) since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self #[allow(clippy::wrong_self_convention)] fn into_coinbase_tx_suffix(&self) -> Result, errors::Error> { let mut suffix: Vec = vec![]; diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs index 6d231edaa..262aaabae 100644 --- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs +++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs @@ -54,7 +54,7 @@ struct BlockHeader<'a> { } impl<'a> BlockHeader<'a> { - /// calculates the sha256 blockhash of the header + // calculates the sha256 blockhash of the header #[allow(dead_code)] pub fn hash(&self) -> Target { let mut engine = sha256d::Hash::engine(); @@ -98,7 +98,7 @@ pub struct GroupChannelJobDispatcher { /// Used to signal if submitted shares correlate to valid jobs pub enum SendSharesResponse { - //ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), + /// ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), Valid(SubmitSharesStandard), Invalid(SubmitSharesError<'static>), } diff --git a/protocols/v2/roles-logic-sv2/src/lib.rs b/protocols/v2/roles-logic-sv2/src/lib.rs index a38e0ad97..c6ffe67bc 100644 --- a/protocols/v2/roles-logic-sv2/src/lib.rs +++ b/protocols/v2/roles-logic-sv2/src/lib.rs @@ -22,13 +22,13 @@ //! handlers::common::ParseDownstreamCommonMessages + //! handlers::mining::ParseDownstreamMiningMessages + //! -//! ProxyDownstreamConnetion: +//! ProxyDownstreamConnection: //! common_properties::IsDownstream + //! common_properties::IsMiningDownstream + //! handlers::common::ParseDownstreamCommonMessages + //! handlers::mining::ParseDownstreamMiningMessages + //! -//! ProxyUpstreamConnetion: +//! ProxyUpstreamConnection: //! common_properties::IsUpstream + //! common_properties::IsMiningUpstream + //! handlers::common::ParseUpstreamCommonMessages + diff --git a/protocols/v2/roles-logic-sv2/src/routing_logic.rs b/protocols/v2/roles-logic-sv2/src/routing_logic.rs index a23e62e4b..37ddbb7fa 100644 --- a/protocols/v2/roles-logic-sv2/src/routing_logic.rs +++ b/protocols/v2/roles-logic-sv2/src/routing_logic.rs @@ -274,20 +274,20 @@ impl< Sel: DownstreamMiningSelector + D, > MiningRouter for MiningProxyRoutingLogic { - /// Handles the `OpenStandardMiningChannel` message. - /// - /// This method processes the request to open a standard mining channel. 
It selects a suitable - /// upstream, updates the request ID to ensure uniqueness, and then delegates to - /// `on_open_standard_channel_request_header_only` to finalize the process. - /// - /// # Arguments - /// - `downstream`: The downstream requesting the channel opening. - /// - `request`: A mutable reference to the `OpenStandardMiningChannel` message. - /// - `downstream_mining_data`: Common data about the downstream mining setup. - /// - /// # Returns - /// - `Result>, Error>`: Returns the selected upstream for the downstream or an - /// error. + // Handles the `OpenStandardMiningChannel` message. + // + // This method processes the request to open a standard mining channel. It selects a suitable + // upstream, updates the request ID to ensure uniqueness, and then delegates to + // `on_open_standard_channel_request_header_only` to finalize the process. + // + // # Arguments + // - `downstream`: The downstream requesting the channel opening. + // - `request`: A mutable reference to the `OpenStandardMiningChannel` message. + // - `downstream_mining_data`: Common data about the downstream mining setup. + // + // # Returns + // - `Result>, Error>`: Returns the selected upstream for the downstream or an + // error. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -310,19 +310,19 @@ impl< self.on_open_standard_channel_request_header_only(downstream, request) } - /// Handles the `OpenStandardMiningChannelSuccess` message. - /// - /// This method processes the success message received from an upstream when a standard mining - /// channel is opened. It maps the request ID back to the original ID from the downstream and - /// updates the associated group and channel IDs in the upstream. - /// - /// # Arguments - /// - `upstream`: The upstream involved in the channel opening. - /// - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. - /// - /// # Returns - /// - `Result>, Error>`: Returns the downstream corresponding to the request or - /// an error. + // Handles the `OpenStandardMiningChannelSuccess` message. + // + // This method processes the success message received from an upstream when a standard mining + // channel is opened. It maps the request ID back to the original ID from the downstream and + // updates the associated group and channel IDs in the upstream. + // + // # Arguments + // - `upstream`: The upstream involved in the channel opening. + // - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. + // + // # Returns + // - `Result>, Error>`: Returns the downstream corresponding to the request or + // an error. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -352,17 +352,17 @@ impl< } } -/// Selects the upstream with the lowest total hash rate. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Arc>`: The upstream entity with the lowest total hash rate. -/// -/// # Panics -/// This function panics if the slice is empty, as it is internally guaranteed that this function -/// will only be called with non-empty vectors. +// Selects the upstream with the lowest total hash rate. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Arc>`: The upstream entity with the lowest total hash rate. +// +// # Panics +// This function panics if the slice is empty, as it is internally guaranteed that this function +// will only be called with non-empty vectors. 
fn minor_total_hr_upstream(ups: &mut [Arc>]) -> Arc> where Down: IsMiningDownstream + D, @@ -384,13 +384,13 @@ where .clone() // Unwrap is safe because the function only operates on non-empty vectors. } -/// Filters upstream entities that are not configured for header-only mining. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Vec>>`: A vector of upstream entities that are not header-only. +// Filters upstream entities that are not configured for header-only mining. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Vec>>`: A vector of upstream entities that are not header-only. fn filter_header_only(ups: &mut [Arc>]) -> Vec>> where Down: IsMiningDownstream + D, @@ -408,18 +408,18 @@ where .collect() } -/// Selects the most appropriate upstream entity based on specific criteria. -/// -/// # Criteria -/// - If only one upstream is available, it is selected. -/// - If multiple upstreams exist, preference is given to those not configured as header-only. -/// - Among the remaining upstreams, the one with the lowest total hash rate is selected. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Option>>`: The selected upstream entity, or `None` if none are available. +// Selects the most appropriate upstream entity based on specific criteria. +// +// # Criteria +// - If only one upstream is available, it is selected. +// - If multiple upstreams exist, preference is given to those not configured as header-only. +// - Among the remaining upstreams, the one with the lowest total hash rate is selected. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstream(ups: &mut [Arc>]) -> Option>> where Down: IsMiningDownstream + D, @@ -443,18 +443,18 @@ impl< Sel: DownstreamMiningSelector + D, > MiningProxyRoutingLogic { - /// Selects an upstream entity from a list of available upstreams. - /// - /// # Arguments - /// - `ups`: A mutable slice of upstream mining entities. - /// - /// # Returns - /// - `Option>>`: The selected upstream entity, or `None` if none are available. + // Selects an upstream entity from a list of available upstreams. + // + // # Arguments + // - `ups`: A mutable slice of upstream mining entities. + // + // # Returns + // - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstreams(ups: &mut [Arc>]) -> Option>> { select_upstream(ups) } - /// Handles the `SetupConnection` process for header-only mining downstreams. + /// Handles the `SetupConnection` process for header-only mining (HOM) downstreams. /// /// This method selects compatible upstreams, assigns connection flags, and maps the /// downstream to the selected upstreams. /// /// # Arguments @@ -487,7 +487,7 @@ impl< Ok((downstream_data, message)) } - /// Handles a standard channel opening request for header-only mining downstreams. + /// Handles a standard channel opening request for header-only mining (HOM) downstreams. /// /// # Arguments /// - `downstream`: The downstream mining entity.
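The comments above describe the proxy's upstream-selection policy: a lone upstream is taken as-is, otherwise upstreams not configured for header-only mining are preferred, and among those the one with the lowest total hash rate wins. The minimal, self-contained Rust sketch below illustrates that policy; the `Upstream` struct and the fallback to header-only candidates are illustrative assumptions, not the crate's `IsMiningUpstream` types or guaranteed behaviour.

// Minimal sketch of the documented selection policy; `Upstream` is a stand-in
// for the crate's mining-upstream trait objects, not a real roles-logic-sv2 type.
#[derive(Debug, Clone)]
struct Upstream {
    id: u32,
    total_hash_rate: u64,
    header_only: bool,
}

// A single candidate wins outright; otherwise non-header-only upstreams are
// preferred and the least-loaded one (lowest total hash rate) is chosen.
// Falling back to header-only candidates when no others exist is an assumption.
fn select_upstream(ups: &[Upstream]) -> Option<Upstream> {
    match ups {
        [] => None,
        [only] => Some(only.clone()),
        _ => {
            let non_hom: Vec<&Upstream> = ups.iter().filter(|u| !u.header_only).collect();
            let candidates = if non_hom.is_empty() {
                ups.iter().collect::<Vec<_>>()
            } else {
                non_hom
            };
            candidates.into_iter().min_by_key(|u| u.total_hash_rate).cloned()
        }
    }
}

fn main() {
    let ups = vec![
        Upstream { id: 1, total_hash_rate: 900, header_only: true },
        Upstream { id: 2, total_hash_rate: 700, header_only: false },
        Upstream { id: 3, total_hash_rate: 400, header_only: false },
    ];
    // Upstream 3 is selected: it is not header-only and has the lowest total hash rate.
    assert_eq!(select_upstream(&ups).map(|u| u.id), Some(3));
}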
diff --git a/protocols/v2/roles-logic-sv2/src/selectors.rs b/protocols/v2/roles-logic-sv2/src/selectors.rs index fcfcffeb5..ceb4dfed3 100644 --- a/protocols/v2/roles-logic-sv2/src/selectors.rs +++ b/protocols/v2/roles-logic-sv2/src/selectors.rs @@ -48,18 +48,18 @@ use std::{collections::HashMap, fmt::Debug as D, sync::Arc}; /// to facilitate efficient message routing. #[derive(Debug, Clone, Default)] pub struct ProxyDownstreamMiningSelector { - /// Maps request IDs to their corresponding downstream nodes. + // Maps request IDs to their corresponding downstream nodes. request_id_to_remotes: HashMap>, BuildNoHashHasher>, - /// Maps group channel IDs to a list of downstream nodes. + // Maps group channel IDs to a list of downstream nodes. channel_id_to_downstreams: HashMap>>, BuildNoHashHasher>, - /// Maps standard channel IDs to a single downstream node. + // Maps standard channel IDs to a single downstream node. channel_id_to_downstream: HashMap>, BuildNoHashHasher>, } impl ProxyDownstreamMiningSelector { /// Creates a new `ProxyDownstreamMiningSelector`. /// - /// This initializes the internal mappings with `nohash` hashers for performance. + /// This initializes the internal mappings with `nohash` hasher for performance. pub fn new() -> Self { Self { request_id_to_remotes: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -80,10 +80,10 @@ impl ProxyDownstreamMiningSelector { } impl ProxyDownstreamMiningSelector { - /// Removes a downstream node from all mappings. - /// - /// # Arguments - /// - `d`: The downstream node to be removed. + // Removes a downstream node from all mappings. + // + // # Arguments + // - `d`: The downstream node to be removed. fn _remove_downstream(&mut self, d: &Arc>) { self.request_id_to_remotes.retain(|_, v| !Arc::ptr_eq(v, d)); self.channel_id_to_downstream @@ -94,25 +94,25 @@ impl ProxyDownstreamMiningSelector { impl DownstreamMiningSelector for ProxyDownstreamMiningSelector { - /// Registers a request ID and its associated downstream node. - /// - /// # Arguments - /// - `request_id`: The unique request ID. - /// - `downstream`: The downstream node associated with the request. + // Registers a request ID and its associated downstream node. + // + // # Arguments + // - `request_id`: The unique request ID. + // - `downstream`: The downstream node associated with the request. fn on_open_standard_channel_request(&mut self, request_id: u32, downstream: Arc>) { self.request_id_to_remotes.insert(request_id, downstream); } - /// Finalizes the mapping of a standard channel to its downstream node. - /// - /// # Arguments - /// - `request_id`: The request ID used during the channel opening. - /// - `g_channel_id`: The group channel ID. - /// - `channel_id`: The specific standard channel ID. - /// - /// # Returns - /// - `Ok`: The downstream node associated with the request. - /// - `Err`: If the request ID is unknown. + // Finalizes the mapping of a standard channel to its downstream node. + // + // # Arguments + // - `request_id`: The request ID used during the channel opening. + // - `g_channel_id`: The group channel ID. + // - `channel_id`: The specific standard channel ID. + // + // # Returns + // - `Ok`: The downstream node associated with the request. + // - `Err`: If the request ID is unknown. fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -135,25 +135,25 @@ impl DownstreamMiningSelector Ok(downstream) } - /// Retrieves all downstream nodes associated with a standard/group channel ID. 
- /// - /// # Arguments - /// - `channel_id`: The standard/group channel ID. - /// - /// # Returns - /// - `Some`: A reference to the vector of downstream nodes. - /// - `None`: If no nodes are associated with the channel. + // Retrieves all downstream nodes associated with a standard/group channel ID. + // + // # Arguments + // - `channel_id`: The standard/group channel ID. + // + // # Returns + // - `Some`: A reference to the vector of downstream nodes. + // - `None`: If no nodes are associated with the channel. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>> { self.channel_id_to_downstreams.get(&channel_id) } - /// Removes all downstream nodes associated with a standard/group channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard/group channel ID. - /// - /// # Returns - /// A vector of the removed downstream nodes. + // Removes all downstream nodes associated with a standard/group channel ID. + // + // # Arguments + // - `channel_id`: The standard/group channel ID. + // + // # Returns + // A vector of the removed downstream nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>> { let downs = self .channel_id_to_downstreams @@ -165,10 +165,10 @@ impl DownstreamMiningSelector downs } - /// Removes a specific downstream node from all mappings. - /// - /// # Arguments - /// - `d`: The downstream node to be removed. + // Removes a specific downstream node from all mappings. + // + // # Arguments + // - `d`: The downstream node to be removed. fn remove_downstream(&mut self, d: &Arc>) { for dws in self.channel_id_to_downstreams.values_mut() { dws.retain(|node| !Arc::ptr_eq(node, d)); @@ -177,22 +177,22 @@ impl DownstreamMiningSelector self._remove_downstream(d); } - /// Retrieves the downstream node associated with a specific standard channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard channel ID. - /// - /// # Returns - /// - `Some`: The downstream node. - /// - `None`: If no node is associated with the channel. + // Retrieves the downstream node associated with a specific standard channel ID. + // + // # Arguments + // - `channel_id`: The standard channel ID. + // + // # Returns + // - `Some`: The downstream node. + // - `None`: If no node is associated with the channel. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>> { self.channel_id_to_downstream.get(&channel_id).cloned() } - /// Retrieves all downstream nodes currently managed by this selector. - /// - /// # Returns - /// A vector of downstream nodes. + // Retrieves all downstream nodes currently managed by this selector. + // + // # Returns + // A vector of downstream nodes. fn get_all_downstreams(&self) -> Vec>> { self.channel_id_to_downstream.values().cloned().collect() } @@ -233,22 +233,22 @@ pub trait DownstreamMiningSelector: channel_id: u32, ) -> Result>, Error>; - /// Retrieves all downstreams associated with a channel ID. + /// Retrieves all downstream's associated with a channel ID. /// /// # Arguments /// - `channel_id`: The channel ID to query. /// /// # Returns - /// - `Option<&Vec>>>`: The list of downstreams or `None`. + /// - `Option<&Vec>>>`: The list of downstream's or `None`. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>>; - /// Removes all downstreams associated with a channel ID. + /// Removes all downstream's associated with a channel ID. /// /// # Arguments - /// - `channel_id`: The channel ID to remove downstreams from. + /// - `channel_id`: The channel ID to remove downstream's from. 
/// /// # Returns - /// - `Vec>>`: The removed downstreams. + /// - `Vec>>`: The removed downstream nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>>; /// Removes a specific downstream. @@ -266,10 +266,10 @@ pub trait DownstreamMiningSelector: /// - `Option>>`: The downstream or `None`. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>>; - /// Retrieves all downstreams. + /// Retrieves all downstream nodes. /// /// # Returns - /// - `Vec>>`: All downstreams. + /// - `Vec>>`: All downstream nodes. fn get_all_downstreams(&self) -> Vec>>; } @@ -298,10 +298,10 @@ impl NullDownstreamMiningSelector { } impl DownstreamMiningSelector for NullDownstreamMiningSelector { - /// Called when a standard channel open request is received. - /// - /// This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op - /// implementation. + // Called when a standard channel open request is received. + // + // This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op + // implementation. fn on_open_standard_channel_request( &mut self, _request_id: u32, @@ -310,9 +310,9 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_request") } - /// Called when a standard channel open request is successful. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Called when a standard channel open request is successful. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn on_open_standard_channel_success( &mut self, _request_id: u32, @@ -322,37 +322,37 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_success") } - /// Retrieves the downstreams in a specific channel. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves the downstreams in a specific channel. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn get_downstreams_in_channel(&self, _channel_id: u32) -> Option<&Vec>>> { unreachable!("get_downstreams_in_channel") } - /// Removes downstreams in a specific channel. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Removes downstreams in a specific channel. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstreams_in_channel(&mut self, _channel_id: u32) -> Vec>> { unreachable!("remove_downstreams_in_channel") } - /// Removes a specific downstream node. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Removes a specific downstream node. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstream(&mut self, _d: &Arc>) { unreachable!("remove_downstream") } - /// Retrieves the downstream associated with a specific channel ID. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves the downstream associated with a specific channel ID. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn downstream_from_channel_id(&self, _channel_id: u32) -> Option>> { unreachable!("downstream_from_channel_id") } - /// Retrieves all downstream nodes managed by this selector. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves all downstream nodes managed by this selector. + // + // This method is unreachable in `NullDownstreamMiningSelector`.
fn get_all_downstreams(&self) -> Vec>> { unreachable!("get_all_downstreams") } @@ -459,14 +459,14 @@ impl< Up: IsMiningUpstream, > UpstreamMiningSelctor for GeneralMiningSelector { - /// Handles the `SetupConnection` process and determines the pairable upstream nodes. - /// - /// # Arguments - /// - `pair_settings`: The settings for pairing downstream and upstream nodes. - /// - /// # Returns - /// - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. - /// - `Err`: If no upstreams are pairable. + // Handles the `SetupConnection` process and determines the pairable upstream nodes. + // + // # Arguments + // - `pair_settings`: The settings for pairing downstream and upstream nodes. + // + // # Returns + // - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. + // - `Err`: If no upstreams are pairable. fn on_setup_connection( &mut self, pair_settings: &PairSettings, @@ -489,14 +489,14 @@ impl< Err(Error::NoPairableUpstream((2, 2, 0))) } - /// Retrieves an upstream node by its ID. - /// - /// # Arguments - /// - `upstream_id`: The unique ID of the upstream node. - /// - /// # Returns - /// - `Some`: The upstream node. - /// - `None`: If no upstream is found. + // Retrieves an upstream node by its ID. + // + // # Arguments + // - `upstream_id`: The unique ID of the upstream node. + // + // # Returns + // - `Some`: The upstream node. + // - `None`: If no upstream is found. fn get_upstream(&self, upstream_id: u32) -> Option>> { self.id_to_upstream.get(&upstream_id).cloned() } diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index 654276125..a6f2a8ec5 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -277,17 +277,17 @@ pub enum InputError { /// how do we set the adequate target? /// /// According to \[1] and \[2], it is possible to model the probability of finding a block with -/// a random variable X whose distribution is negtive hypergeometric \[3]. +/// a random variable X whose distribution is negative hypergeometric \[3]. /// Such a variable is characterized as follows. Say that there are n (2^256) elements (possible /// hash values), of which t (values <= target) are defined as success and the remaining as -/// failures. The variable X has codomain the positive integers, and X=k is the event where element +/// failures. The variable X has co-domain the positive integers, and X=k is the event where element /// are drawn one after the other, without replacement, and only the k-th element is successful. /// The expected value of this variable is (n-t)/(t+1). /// So, on average, a miner has to perform (2^256-t)/(t+1) hashes before finding hash whose value /// is below the target t. If the pool wants, on average, a share every s seconds, then, on /// average, the miner has to perform h*s hashes before finding one that is smaller than the /// target, where h is the miner's hashrate. Therefore, s*h= (2^256-t)/(t+1). If we consider h the -/// global bitcoin's hashrate, s = 600 seconds and t the bicoin global target, then, for all the +/// global bitcoin's hashrate, s = 600 seconds and t the bitcoin global target, then, for all the /// blocks we tried, the two members of the equations have the same order of magnitude and, most /// of the cases, they coincide with the first two digits. We take this as evidence of the /// correctness of our calculations. 
Thus, if the pool wants on average a share every s @@ -367,7 +367,7 @@ pub fn hash_rate_from_target(target: U256<'static>, share_per_min: f64) -> Resul let max_target = Uint256::from_be_bytes(max_target); let numerator = max_target - (target - Uint256::one()); - // now we calcualte the denominator s(t+1) + // now we calculate the denominator s(t+1) // *100 here to move the fractional bit up so we can make this an int later let shares_occurrency_frequence = 60_f64 / (share_per_min) * 100.0; // note that t+1 cannot be zero because t unsigned. Therefore the denominator is zero if and @@ -410,7 +410,7 @@ pub struct GroupId { } impl GroupId { - /// New GroupId it starts with groups 0, since 0 is reserved for hom downstreams + /// New GroupId. It starts with group 0, since 0 is reserved for hom downstreams pub fn new() -> Self { Self { group_ids: Id::new(), @@ -423,8 +423,8 @@ impl GroupId { self.group_ids.next() } - /// Create a channel for a paricular group and return the channel id - /// _group_id is left for a future use of this API where we have an hirearchy of ids so that we + /// Create a channel for a particular group and return the channel id + /// _group_id is left for a future use of this API where we have a hierarchy of ids so that we /// don't break old versions pub fn new_channel_id(&mut self, _group_id: u32) -> u32 { self.channel_ids.next()
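As a closing note on the target/hashrate relation documented in `utils.rs` above (s * h = (2^256 - t) / (t + 1)), the small sketch below solves that same equation in both directions with plain f64 arithmetic. It is only a back-of-the-envelope approximation of what `hash_rate_from_target` and its target-deriving counterpart do with 256-bit `Uint256` values; the helper names here are illustrative, not the crate's API.

// f64 approximation of s * h = (2^256 - t) / (t + 1) from the utils.rs doc
// comment above. The real code uses Uint256; these helpers are illustrative
// only and lose precision for realistic targets.
fn approx_target(hashrate_hs: f64, share_per_min: f64) -> f64 {
    let s = 60.0 / share_per_min; // seconds between shares the pool wants
    let hashes_per_share = s * hashrate_hs; // expected hashes per share, s * h
    // Solve s * h = (2^256 - t) / (t + 1) for t.
    (2f64.powi(256) - hashes_per_share) / (hashes_per_share + 1.0)
}

fn approx_hashrate(target: f64, share_per_min: f64) -> f64 {
    let s = 60.0 / share_per_min;
    // Rearranged for h: h = (2^256 - t) / (s * (t + 1)).
    (2f64.powi(256) - target) / (s * (target + 1.0))
}

fn main() {
    // A 10 TH/s miner that should find roughly one share per minute.
    let t = approx_target(10e12, 1.0);
    let h = approx_hashrate(t, 1.0);
    // Round-tripping recovers the hashrate up to floating-point error.
    println!("target ~ {:e}, recovered hashrate ~ {:e} H/s", t, h);
}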