From afb2c1c1cff8460bf67d372581e7cd60e9893f47 Mon Sep 17 00:00:00 2001 From: plebhash Date: Mon, 18 Nov 2024 09:33:51 -0300 Subject: [PATCH 01/12] docs for channel_logic modules --- .../src/channel_logic/channel_factory.rs | 77 ++++++++++++++++--- .../roles-logic-sv2/src/channel_logic/mod.rs | 6 ++ .../src/channel_logic/proxy_group_channel.rs | 5 ++ 3 files changed, 76 insertions(+), 12 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs index 943349ebd..cb74c4f76 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs @@ -1,3 +1,5 @@ +//! Contains logic for creating channels. + use super::extended_to_standard_job; use crate::{ common_properties::StandardChannel, @@ -48,7 +50,7 @@ pub struct PartialSetCustomMiningJob { pub future_job: bool, } -/// Represent the action that needs to be done when a new share is received. +/// Represents the action that needs to be done when a new share is received. 
#[derive(Debug, Clone)] pub enum OnNewShare { /// Used when the received is malformed, is for an inexistent channel or do not meet downstream @@ -74,7 +76,7 @@ pub enum OnNewShare { } impl OnNewShare { - /// convert standard share into extended share + /// Convert standard share into extended share pub fn into_extended(&mut self, extranonce: Vec, up_id: u32) { match self { OnNewShare::SendErrorDownstream(_) => (), @@ -119,7 +121,7 @@ impl OnNewShare { } } -/// A share can be both extended or standard +/// A share can be either extended or standard #[derive(Clone, Debug)] pub enum Share { Extended(SubmitSharesExtended<'static>), @@ -127,7 +129,7 @@ pub enum Share { Standard((SubmitSharesStandard, u32)), } -/// helper type used before a `SetNewPrevHash` has a channel_id +/// Helper type used before a `SetNewPrevHash` has a channel_id #[derive(Clone, Debug)] pub struct StagedPhash { job_id: u32, @@ -137,6 +139,7 @@ pub struct StagedPhash { } impl StagedPhash { + /// converts a Staged PrevHash into a SetNewPrevHash message pub fn into_set_p_hash( &self, channel_id: u32, @@ -153,24 +156,31 @@ impl StagedPhash { } impl Share { + /// get share sequence number pub fn get_sequence_number(&self) -> u32 { match self { Share::Extended(s) => s.sequence_number, Share::Standard(s) => s.0.sequence_number, } } + + /// get share channel id pub fn get_channel_id(&self) -> u32 { match self { Share::Extended(s) => s.channel_id, Share::Standard(s) => s.0.channel_id, } } + + /// get share timestamp pub fn get_n_time(&self) -> u32 { match self { Share::Extended(s) => s.ntime, Share::Standard(s) => s.0.ntime, } } + + /// get share nonce pub fn get_nonce(&self) -> u32 { match self { Share::Extended(s) => s.nonce, @@ -178,6 +188,7 @@ impl Share { } } + /// get share job id pub fn get_job_id(&self) -> u32 { match self { Share::Extended(s) => s.job_id, @@ -185,6 +196,7 @@ impl Share { } } + /// get share version pub fn get_version(&self) -> u32 { match self { Share::Extended(s) => s.version, 
@@ -194,7 +206,7 @@ impl Share { } #[derive(Debug)] -/// Basic logic shared between all the channel factory. +/// Basic logic shared between all the channel factories struct ChannelFactory { ids: Arc>, standard_channels_for_non_hom_downstreams: @@ -236,13 +248,14 @@ impl ChannelFactory { ), } } + /// Called when a `OpenExtendedMiningChannel` message is received. /// Here we save the downstream's target (based on hashrate) and the /// channel's extranonce details before returning the relevant SV2 mining messages /// to be sent downstream. For the mining messages, we will first return an /// `OpenExtendedMiningChannelSuccess` if the channel is successfully opened. Then we add /// the `NewExtendedMiningJob` and `SetNewPrevHash` messages if the relevant data is - /// available. If the channel opening fails, we return `OpenExtenedMiningChannelError`. + /// available. If the channel opening fails, we return `OpenExtendedMiningChannelError`. pub fn new_extended_channel( &mut self, request_id: u32, @@ -313,8 +326,9 @@ impl ChannelFactory { )]) } } + /// Called when we want to replicate a channel already opened by another actor. - /// is used only in the jd client from the template provider module to mock a pool. + /// It is used only in the jd client from the template provider module to mock a pool. /// Anything else should open channel with the new_extended_channel function pub fn replicate_upstream_extended_channel_only_jd( &mut self, @@ -672,6 +686,7 @@ impl ChannelFactory { self.last_prev_hash = Some((m, ids)); Ok(()) } + /// Called when a `NewExtendedMiningJob` arrives. If the job is future, we add it to the future /// queue. 
If the job is not future, we pair it with a the most recent prev hash fn on_new_extended_mining_job( @@ -907,6 +922,7 @@ impl ChannelFactory { Ok(OnNewShare::SendErrorDownstream(error)) } } + /// Returns the downstream target and extranonce for the channel fn get_channel_specific_mining_info(&self, m: &Share) -> Option<(mining_sv2::Target, Vec)> { match m { @@ -968,7 +984,7 @@ impl ChannelFactory { } } -/// Used by a pool to in order to manage all downstream channel. It add job creation capabilities +/// Used by a pool to in order to manage all downstream channel. It adds job creation capabilities /// to ChannelFactory. #[derive(Debug)] pub struct PoolChannelFactory { @@ -976,11 +992,12 @@ pub struct PoolChannelFactory { job_creator: JobsCreators, pool_coinbase_outputs: Vec, pool_signature: String, - // extedned_channel_id -> SetCustomMiningJob + // extended_channel_id -> SetCustomMiningJob negotiated_jobs: HashMap, BuildNoHashHasher>, } impl PoolChannelFactory { + /// constructor pub fn new( ids: Arc>, extranonces: ExtendedExtranonce, @@ -1019,6 +1036,7 @@ impl PoolChannelFactory { negotiated_jobs: HashMap::with_hasher(BuildNoHashHasher::default()), } } + /// Calls [`ChannelFactory::add_standard_channel`] pub fn add_standard_channel( &mut self, @@ -1030,6 +1048,7 @@ impl PoolChannelFactory { self.inner .add_standard_channel(request_id, downstream_hash_rate, is_header_only, id) } + /// Calls [`ChannelFactory::new_extended_channel`] pub fn new_extended_channel( &mut self, @@ -1040,6 +1059,7 @@ impl PoolChannelFactory { self.inner .new_extended_channel(request_id, hash_rate, min_extranonce_size) } + /// Called when we want to replicate a channel already opened by another actor. /// is used only in the jd client from the template provider module to mock a pool. 
/// Anything else should open channel with the new_extended_channel function @@ -1057,6 +1077,7 @@ impl PoolChannelFactory { extranonce_size, ) } + /// Called only when a new prev hash is received by a Template Provider. It matches the /// message with a `job_id` and calls [`ChannelFactory::on_new_prev_hash`] /// it return the job_id @@ -1074,6 +1095,7 @@ impl PoolChannelFactory { self.inner.on_new_prev_hash(new_prev_hash)?; Ok(job_id) } + /// Called only when a new template is received by a Template Provider pub fn on_new_template( &mut self, @@ -1087,6 +1109,7 @@ impl PoolChannelFactory { )?; self.inner.on_new_extended_mining_job(new_job) } + /// Called when a `SubmitSharesStandard` message is received from the downstream. We check the /// shares against the channel's respective target and return `OnNewShare` to let us know if /// and where the shares should be relayed @@ -1213,11 +1236,13 @@ impl PoolChannelFactory { ) } } + /// Utility function to return a new group id pub fn new_group_id(&mut self) -> u32 { let new_id = self.inner.ids.safe_lock(|ids| ids.new_group_id()).unwrap(); new_id } + /// Utility function to return a new standard channel id pub fn new_standard_id_for_hom(&mut self) -> u32 { let hom_group_id = 0; @@ -1228,6 +1253,7 @@ impl PoolChannelFactory { .unwrap(); new_id } + /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce /// space) pub fn extranonce_from_downstream_extranonce( @@ -1238,6 +1264,7 @@ impl PoolChannelFactory { .extranonces .extranonce_from_downstream_extranonce(ext) } + /// Called when a new custom mining job arrives pub fn on_new_set_custom_mining_job( &mut self, @@ -1265,16 +1292,18 @@ impl PoolChannelFactory { true } + /// get extended channel ids pub fn get_extended_channels_ids(&self) -> Vec { self.inner.extended_channels.keys().copied().collect() } + /// update coinbase outputs pub fn update_pool_outputs(&mut self, outs: Vec) { self.pool_coinbase_outputs = outs; } - /// calls 
[`ChannelFactory::update_target_for_channel`] - /// Set a partucular downstream channel target. + /// Calls [`ChannelFactory::update_target_for_channel`] + /// Set a particular downstream channel target. pub fn update_target_for_channel( &mut self, channel_id: u32, @@ -1282,7 +1311,8 @@ impl PoolChannelFactory { ) -> Option { self.inner.update_target_for_channel(channel_id, new_target) } - // Set the target for this channel. This is the upstream target. + + /// Set the target for this channel. This is the upstream target. pub fn set_target(&mut self, new_target: &mut Target) { self.inner.kind.set_target(new_target); } @@ -1301,6 +1331,7 @@ pub struct ProxyExtendedChannelFactory { } impl ProxyExtendedChannelFactory { + /// constructor #[allow(clippy::too_many_arguments)] pub fn new( ids: Arc>, @@ -1353,6 +1384,7 @@ impl ProxyExtendedChannelFactory { extended_channel_id, } } + /// Calls [`ChannelFactory::add_standard_channel`] pub fn add_standard_channel( &mut self, @@ -1364,6 +1396,7 @@ impl ProxyExtendedChannelFactory { self.inner .add_standard_channel(request_id, downstream_hash_rate, id_header_only, id) } + /// Calls [`ChannelFactory::new_extended_channel`] pub fn new_extended_channel( &mut self, @@ -1374,6 +1407,7 @@ impl ProxyExtendedChannelFactory { self.inner .new_extended_channel(request_id, hash_rate, min_extranonce_size) } + /// Called only when a new prev hash is received by a Template Provider when job declaration is /// used. It matches the message with a `job_id`, creates a new custom job, and calls /// [`ChannelFactory::on_new_prev_hash`] @@ -1417,6 +1451,7 @@ impl ProxyExtendedChannelFactory { panic!("A channel factory without job creator do not have declaration capabilities") } } + /// Called only when a new template is received by a Template Provider when job declaration is /// used. 
It creates a new custom job and calls /// [`ChannelFactory::on_new_extended_mining_job`] @@ -1687,12 +1722,17 @@ impl ProxyExtendedChannelFactory { ) -> Result, BuildNoHashHasher>, Error> { self.inner.on_new_extended_mining_job(m) } + + /// set new target pub fn set_target(&mut self, new_target: &mut Target) { self.inner.kind.set_target(new_target); } + + /// get last valid job version pub fn last_valid_job_version(&self) -> Option { self.inner.last_valid_job.as_ref().map(|j| j.0.version) } + /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce /// space) pub fn extranonce_from_downstream_extranonce( @@ -1703,6 +1743,7 @@ impl ProxyExtendedChannelFactory { .extranonces .extranonce_from_downstream_extranonce(ext) } + /// Returns the most recent prev hash pub fn last_prev_hash(&self) -> Option> { self.inner @@ -1710,27 +1751,38 @@ impl ProxyExtendedChannelFactory { .as_ref() .map(|f| f.0.prev_hash.clone()) } + + /// last min ntime pub fn last_min_ntime(&self) -> Option { self.inner.last_prev_hash.as_ref().map(|f| f.0.min_ntime) } + + /// last nbits pub fn last_nbits(&self) -> Option { self.inner.last_prev_hash.as_ref().map(|f| f.0.nbits) } + + /// extranonce_size pub fn extranonce_size(&self) -> usize { self.inner.extranonces.get_len() } + + /// extranonce_2 size pub fn channel_extranonce2_size(&self) -> usize { self.inner.extranonces.get_len() - self.inner.extranonces.get_range0_len() } // Only used when the proxy is using Job Declaration + /// update pool outputs pub fn update_pool_outputs(&mut self, outs: Vec) { self.pool_coinbase_outputs = Some(outs); } + /// get this channel id pub fn get_this_channel_id(&self) -> u32 { self.extended_channel_id } + /// returns the extranonce1 len of the upstream. 
For a proxy, this would /// be the extranonce_prefix len pub fn get_upstream_extranonce1_len(&self) -> usize { @@ -1755,6 +1807,7 @@ pub enum ExtendedChannelKind { Pool, } impl ExtendedChannelKind { + /// set target pub fn set_target(&mut self, new_target: &mut Target) { match self { ExtendedChannelKind::Proxy { upstream_target } diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs index 7b5f0feed..05dbb3d12 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs @@ -1,3 +1,9 @@ +//! A module for managing channels on applications. +//! +//! Divided in two submodules: +//! - [`channel_factory`] +//! - [`proxy_group_channel`] + pub mod channel_factory; pub mod proxy_group_channel; diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index 65f4cdb3d..452c39fbb 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -1,3 +1,5 @@ +//! Contains logic for managing Standard Channels via Group Channels. 
+ use crate::{common_properties::StandardChannel, parsers::Mining, Error}; use mining_sv2::{ @@ -14,6 +16,7 @@ pub struct GroupChannels { channels: HashMap, } impl GroupChannels { + /// constructor pub fn new() -> Self { Self { channels: HashMap::new(), @@ -70,6 +73,8 @@ impl GroupChannels { None => Err(Error::GroupIdNotFound), } } + + /// get group channel ids pub fn ids(&self) -> Vec { self.channels.keys().copied().collect() } From eba3979fd967204646d21b0d3f5e89eade7cc75f Mon Sep 17 00:00:00 2001 From: plebhash Date: Tue, 19 Nov 2024 10:35:44 -0300 Subject: [PATCH 02/12] docs for job_dispatcher module --- .../v2/roles-logic-sv2/src/job_dispatcher.rs | 49 ++++++------------- 1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs index 019f9f274..4eac84734 100644 --- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs +++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs @@ -41,6 +41,8 @@ pub fn extended_to_standard_job_for_group_channel<'a>( merkle_root: merkle_root?.try_into().ok()?, }) } + +// helper struct to easily calculate block hashes from headers #[allow(dead_code)] struct BlockHeader<'a> { version: u32, @@ -67,36 +69,13 @@ impl<'a> BlockHeader<'a> { } } -#[allow(dead_code)] -fn target_from_shares( - job: &DownstreamJob, - prev_hash: &[u8], - nbits: u32, - share: &SubmitSharesStandard, -) -> Target { - let header = BlockHeader { - version: share.version, - prev_hash, - merkle_root: &job.merkle_root, - timestamp: share.ntime, - nbits, - nonce: share.nonce, - }; - header.hash() -} - +// helper struct to identify Standard Jobs being managed for downstream #[derive(Debug)] struct DownstreamJob { merkle_root: Vec, extended_job_id: u32, } -#[derive(Debug)] -struct ExtendedJobs { - #[allow(dead_code)] - upstream_target: Vec, -} - /// Used by proxies to keep track of standard jobs in the group channel /// created with the sv2 server #[derive(Debug)] 
@@ -117,6 +96,7 @@ pub struct GroupChannelJobDispatcher { nbits: u32, } +/// Used to signal if submitted shares correlate to valid jobs pub enum SendSharesResponse { //ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), Valid(SubmitSharesStandard), @@ -124,6 +104,7 @@ pub enum SendSharesResponse { } impl GroupChannelJobDispatcher { + /// constructor pub fn new(ids: Arc>) -> Self { Self { target: [0_u8; 32].into(), @@ -497,21 +478,21 @@ mod tests { job_id: u32, ) { let shares = SubmitSharesStandard { - /// Channel identification. + // Channel identification. channel_id: standard_channel_id, - /// Unique sequential identifier of the submit within the channel. + // Unique sequential identifier of the submit within the channel. sequence_number: 0, - /// Identifier of the job as provided by *NewMiningJob* or - /// *NewExtendedMiningJob* message. + // Identifier of the job as provided by *NewMiningJob* or + // *NewExtendedMiningJob* message. job_id, - /// Nonce leading to the hash being submitted. + // Nonce leading to the hash being submitted. nonce: 1, - /// The nTime field in the block header. This MUST be greater than or equal - /// to the header_timestamp field in the latest SetNewPrevHash message - /// and lower than or equal to that value plus the number of seconds since - /// the receipt of that message. + // The nTime field in the block header. This MUST be greater than or equal + // to the header_timestamp field in the latest SetNewPrevHash message + // and lower than or equal to that value plus the number of seconds since + // the receipt of that message. ntime: 1, - /// Full nVersion field. + // Full nVersion field. 
version: 1, }; let mut faulty_shares = shares.clone(); From 64cf326644ab83025192224343ebfac429997dfd Mon Sep 17 00:00:00 2001 From: plebhash Date: Wed, 20 Nov 2024 17:57:42 -0300 Subject: [PATCH 03/12] docs for parsers module --- protocols/v2/roles-logic-sv2/src/parsers.rs | 25 +++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/parsers.rs b/protocols/v2/roles-logic-sv2/src/parsers.rs index d30ca6ece..bdf9e8640 100644 --- a/protocols/v2/roles-logic-sv2/src/parsers.rs +++ b/protocols/v2/roles-logic-sv2/src/parsers.rs @@ -1,5 +1,7 @@ -//! The parsers modules provides logic to convert raw SV2 message data into rust types -//! as well as logic to handle conversions among SV2 rust types +//! The parsers module provides logic to convert raw Sv2 message data into rust types, +//! as well as logic to handle conversions among Sv2 rust types +//! +//! Most of the logic on this module is tightly coupled with the binary_sv2 crate. use crate::Error; @@ -86,8 +88,12 @@ use mining_sv2::{ use core::convert::{TryFrom, TryInto}; use tracing::error; +// todo: fix this, PoolMessages shouldn't be a generic parser +/// An alias to a generic parser pub type AnyMessage<'a> = PoolMessages<'a>; +/// A parser of messages that are common to all Sv2 subprotocols, to be used for parsing raw +/// messages #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum CommonMessages<'a> { @@ -99,6 +105,7 @@ pub enum CommonMessages<'a> { SetupConnectionSuccess(SetupConnectionSuccess), } +/// A parser of messages of Template Distribution subprotocol, to be used for parsing raw messages #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum TemplateDistribution<'a> { @@ -116,6 +123,7 @@ pub enum TemplateDistribution<'a> { SubmitSolution(SubmitSolution<'a>), } +/// A parser of messages of Job Declaration subprotocol, to be used for parsing raw 
messages #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum JobDeclaration<'a> { @@ -140,6 +148,7 @@ pub enum JobDeclaration<'a> { SubmitSolution(SubmitSolutionJd<'a>), } +/// A parser of messages of Mining subprotocol, to be used for parsing raw messages #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum Mining<'a> { @@ -187,6 +196,7 @@ pub enum Mining<'a> { } impl<'a> Mining<'a> { + /// converter into static lifetime pub fn into_static(self) -> Mining<'static> { match self { Mining::CloseChannel(m) => Mining::CloseChannel(m.into_static()), @@ -225,8 +235,12 @@ impl<'a> Mining<'a> { } } +/// A trait that every Sv2 message parser must implement. +/// It helps parsing from Rust types to raw messages. pub trait IsSv2Message { + /// get message type fn message_type(&self) -> u8; + /// get channel bit fn channel_bit(&self) -> bool; } @@ -584,6 +598,7 @@ impl<'decoder> Deserialize<'decoder> for MiningDeviceMessages<'decoder> { } } +/// A list of 8-bit message type variants that are common to all Sv2 subprotocols #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -634,6 +649,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for CommonMessages<'a> { } } +/// A list of 8-bit message type variants under Template Distribution subprotocol #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -710,6 +726,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for TemplateDistribution<'a> { } } +/// A list of 8-bit message type variants under Job Declaration subprotocol #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -808,6 +825,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for JobDeclaration<'a> { } } +/// A list of 8-bit message type variants under Mining subprotocol #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -976,6 +994,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for 
Mining<'a> { } } +/// A parser of messages that a Mining Device could send #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum MiningDeviceMessages<'a> { @@ -1017,6 +1036,8 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for MiningDeviceMessages<'a> { } } +// todo: fix this, PoolMessages should only contain Mining and Common +/// A parser of all messages a Pool could send #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum PoolMessages<'a> { From 9bb23a0c3f4eaa0faaa30106958a20fd8d5e2671 Mon Sep 17 00:00:00 2001 From: plebhash Date: Thu, 21 Nov 2024 17:59:16 -0300 Subject: [PATCH 04/12] docs for roles_logic_sv2::errors --- protocols/v2/roles-logic-sv2/src/errors.rs | 56 ++++++++++++++++++++-- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs index 20c4bcd55..cc05fede7 100644 --- a/protocols/v2/roles-logic-sv2/src/errors.rs +++ b/protocols/v2/roles-logic-sv2/src/errors.rs @@ -8,58 +8,106 @@ use binary_sv2::Error as BinarySv2Error; use std::fmt::{self, Display, Formatter}; #[derive(Debug)] -/// No NoPairableUpstreamT(min_v, max_v, all falgs supported)) pub enum Error { - /// Errors if payload size is too big to fit into a frame. + /// Payload size is too big to fit into a frame BadPayloadSize, + /// Expected Length of 32, but received different length ExpectedLen32(usize), + /// Error serializing/deserializing binary format BinarySv2Error(BinarySv2Error), + /// Downstream is not connected anymore DownstreamDown, + /// A channel was attempted to be added to an Upstream, but no groups are specified NoGroupsFound, + /// Unexpected message received. UnexpectedMessage(u8), + /// Extended channels do not have group IDs NoGroupIdOnExtendedChannel, - /// (`min_v`, `max_v`, all flags supported) + /// No pairable upstream. 
Parameters are: (`min_v`, `max_v`, all flags supported) NoPairableUpstream((u16, u16, u32)), + /// No compatible upstream NoCompatibleUpstream(CommonDownstreamData), /// Error if the hashmap `future_jobs` field in the `GroupChannelJobDispatcher` is empty. NoFutureJobs, + /// No Downstreams connected NoDownstreamsConnected, + /// PrevHash requires non-existent Job Id PrevHashRequireNonExistentJobId(u32), + /// Request Id not mapped RequestIdNotMapped(u32), + /// There are no upstreams connected NoUpstreamsConnected, + /// Protocol has not been implemented, but should be UnimplementedProtocol, + /// Unexpected `PoolMessage` type UnexpectedPoolMessage, + /// Upstream is answering with a wrong request ID {} or + /// `DownstreamMiningSelector::on_open_standard_channel_request` has not been called + /// before relaying open channel request to upstream UnknownRequestId(u32), + /// No more extranonces NoMoreExtranonces, + /// A non-future job always expects a previous new prev hash JobIsNotFutureButPrevHashNotPresent, + /// If a channel is neither extended nor part of a pool, + /// the only thing to do when an OpenStandardChannel is received + /// is to relay it upstream with an updated request id ChannelIsNeitherExtendedNeitherInAPool, + /// No more available extranonces for downstream ExtranonceSpaceEnded, + /// Impossible to calculate merkle root ImpossibleToCalculateMerkleRoot, + /// Group Id not found GroupIdNotFound, + /// A share has been received but no job for it exists ShareDoNotMatchAnyJob, + /// A share has been received but no channel for it exists ShareDoNotMatchAnyChannel, + /// Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase InvalidCoinbase, + /// Value remaining in coinbase output was not correctly updated (it's equal to 0) ValueRemainingNotUpdated, + /// Unknown script type in config UnknownOutputScriptType, + /// Invalid `output_script_value` for script type. 
It must be a valid public key/script InvalidOutputScript, + /// Empty coinbase outputs in config EmptyCoinbaseOutputs, + /// Block header version cannot be bigger than `i32::MAX` VersionTooBig, + /// Tx version cannot be bigger than `i32::MAX` TxVersionTooBig, + /// Tx version cannot be lower than 1 TxVersionTooLow, + /// Impossible to decode tx TxDecodingError(String), + /// No downstream has been registered for this channel id NotFoundChannelId, + /// Impossible to create a standard job for channel + /// because no valid job has been received from upstream yet NoValidJob, + /// Impossible to create an extended job for channel + /// because no valid job has been received from upstream yet NoValidTranslatorJob, + /// Impossible to retrieve a template for the required job id NoTemplateForId, + /// Impossible to retrieve a template for the required template id NoValidTemplate(String), + /// Invalid extranonce size. Params: (required min, requested) InvalidExtranonceSize(u16, u16), + /// Poison Lock PoisonLock(String), + /// Invalid BIP34 bytes InvalidBip34Bytes(Vec), - // (downstream_job_id, upstream_job_id) + /// Channel Factory did not update job. 
Params: (downstream_job_id, upstream_job_id) JobNotUpdated(u32, u32), + /// Impossible to get Target TargetError(InputError), + /// Impossible to get Hashrate HashrateError(InputError), + /// Message is well formatted but can not be handled LogicErrorMessage(std::boxed::Box>), + /// JD server cannot propagate the block due to missing transactions JDSMissingTransactions, } From 4c460bf4887231818906bf87d38089b60f655040 Mon Sep 17 00:00:00 2001 From: plebhash Date: Thu, 21 Nov 2024 18:31:31 -0300 Subject: [PATCH 05/12] docs for roles_logic_sv2::common_properties --- .../v2/roles-logic-sv2/src/common_properties.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index 5c25624b1..ba7f57076 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -1,4 +1,5 @@ -//! Traits that implements very basic properties that every implementation should use +//! This module defines traits for properties that every SRI-based application should implement + use crate::selectors::{ DownstreamMiningSelector, DownstreamSelector, NullDownstreamMiningSelector, }; @@ -24,7 +25,7 @@ pub struct PairSettings { pub flags: u32, } -/// A trait that defines the basic properties of an upstream node. +/// General properties that every Sv2 compatible upstream node must implement. pub trait IsUpstream + ?Sized> { /// Used to bitcoin protocol version for the channel. fn get_version(&self) -> u16; @@ -75,26 +76,33 @@ pub struct StandardChannel { pub extranonce: Extranonce, } -/// General properties that every Sv2 compatible mining upstream nodes must implement. +/// General properties that every Sv2 compatible mining upstream node must implement. 
pub trait IsMiningUpstream + ?Sized>: IsUpstream { /// should return total hash rate local to the node fn total_hash_rate(&self) -> u64; + /// add hashrate to the node fn add_hash_rate(&mut self, to_add: u64); + /// get open channels on the node fn get_opened_channels(&mut self) -> &mut Vec; + /// update channels fn update_channels(&mut self, c: UpstreamChannel); + /// check if node is limited to hom fn is_header_only(&self) -> bool { has_requires_std_job(self.get_flags()) } } -/// General properties that every Sv2 compatible mining downstream nodes must implement. +/// General properties that every Sv2 compatible downstream node must implement. pub trait IsDownstream { + /// get downstream mining data fn get_downstream_mining_data(&self) -> CommonDownstreamData; } +/// General properties that every Sv2 compatible mining downstream node must implement. pub trait IsMiningDownstream: IsDownstream { + /// check if node is doing hom fn is_header_only(&self) -> bool { self.get_downstream_mining_data().header_only } From 2ddd13d4e00d9064417c445d3d80edd26561f815 Mon Sep 17 00:00:00 2001 From: plebhash Date: Thu, 21 Nov 2024 19:16:27 -0300 Subject: [PATCH 06/12] docs for roles_logic_sv2::utils --- protocols/v2/roles-logic-sv2/src/utils.rs | 73 +++++++++++++++-------- 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index d4b6f8944..f818eba1f 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -1,3 +1,5 @@ +//! 
A collection of helper primitives + use std::{ convert::{TryFrom, TryInto}, ops::{Div, Mul}, @@ -6,10 +8,9 @@ use std::{ }; use binary_sv2::{Seq064K, ShortTxId, U256}; +use bitcoin::Block; use job_declaration_sv2::{DeclareMiningJob, SubmitSolutionJd}; use siphasher::sip::SipHasher24; -//compact_target_from_u256 -use bitcoin::Block; use stratum_common::{ bitcoin, bitcoin::{ @@ -29,13 +30,15 @@ use tracing::error; use crate::errors::Error; -/// Generator of unique ids +/// Generator of unique ids. +/// It keeps an internal counter, which is incremented every time a new unique id is requested. #[derive(Debug, PartialEq, Eq, Clone)] pub struct Id { state: u32, } impl Id { + /// constructor pub fn new() -> Self { Self { state: 0 } } @@ -53,7 +56,21 @@ impl Default for Id { } } -/// Safer Mutex wrapper +/// A custom `Mutex` implementation that provides enhanced safety and ergonomics over +/// the standard `std::sync::Mutex`. +/// +/// This `Mutex` offers the following features: +/// - **Closure-Based Locking:** The `safe_lock` method encapsulates the locking process, ensuring +/// the lock is automatically released after the closure completes. +/// - **Error Handling:** `safe_lock` enforces explicit handling of potential `PoisonError` +/// conditions, reducing the risk of panics caused by poisoned locks. +/// - **Panic-Safe Option:** The `super_safe_lock` method provides an alternative that unwraps the +/// result of `safe_lock`, with optional runtime safeguards against panics. +/// - **Extensibility:** Includes feature-gated functionality to customize behavior, such as +/// stricter runtime checks using external tools like `no-panic`. +/// +/// This design minimizes the risk of common concurrency pitfalls and promotes safer +/// handling of shared mutable state. 
#[derive(Debug)] pub struct Mutex(Mutex_); @@ -79,6 +96,7 @@ impl Mutex { Ok(return_value) } + /// Mutex super safe lock pub fn super_safe_lock(&self, thunk: F) -> Ret where F: FnOnce(&mut T) -> Ret, @@ -111,10 +129,12 @@ impl Mutex { //} } + /// Mutex constructor pub fn new(v: T) -> Self { Mutex(Mutex_::new(v)) } + /// remove from Mutex pub fn to_remove(&self) -> Result, PoisonError>> { self.0.lock() } @@ -163,14 +183,13 @@ pub fn merkle_root_from_path>( Some(merkle_root_from_path_(coinbase_id, path).to_vec()) } -// TODO remove when we have https://github.com/rust-bitcoin/rust-bitcoin/issues/1319 +/// calculate merkle root from path pub fn merkle_root_from_path_>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { match path.len() { 0 => coinbase_id, _ => reduce_path(coinbase_id, path), } } -// TODO remove when we have https://github.com/rust-bitcoin/rust-bitcoin/issues/1319 fn reduce_path>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { let mut root = coinbase_id; for node in path { @@ -183,9 +202,7 @@ fn reduce_path>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { root } -// -// Coinbase output construction utils -// +/// Coinbase output construction utils #[derive(Debug, Clone)] pub struct CoinbaseOutput { pub output_script_type: String, @@ -252,23 +269,20 @@ impl TryFrom for Script { } } +/// A list of potential errors during conversion between hashrate and target #[derive(Debug)] pub enum InputError { NegativeInput, DivisionByZero, } -/// The pool set a target for each miner. Each target is calibrated on the hashrate of the miner. -/// The following function takes as input a miner hashrate and the shares per minute requested by -/// the pool. The output t is the target (in big endian) for the miner with that hashrate. The -/// miner that mines with target t produces the requested number of shares per minute. +/// Calculates the target (in big endian) given some hashrate and share frequency (per minute). 
/// -/// -/// If we want a speficic number of shares per minute from a miner of known hashrate, +/// If we want a specific number of shares per minute from a miner of known hashrate, /// how do we set the adequate target? /// -/// According to [1] and [2], it is possible to model the probability of finding a block with -/// a random variable X whose distribution is negtive hypergeometric [3]. +/// According to \[1] and \[2], it is possible to model the probability of finding a block with +/// a random variable X whose distribution is negative hypergeometric \[3]. /// Such a variable is characterized as follows. Say that there are n (2^256) elements (possible /// hash values), of which t (values <= target) are defined as success and the remaining as /// failures. The variable X has codomain the positive integers, and X=k is the event where element @@ -284,11 +298,11 @@ pub enum InputError { /// correctness of our calculations. Thus, if the pool wants on average a share every s /// seconds from a miner with hashrate h, then the target t for the miner is t = (2^256-sh)/(sh+1).
/// -/// [1] https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742 -/// [2] https://www.zora.uzh.ch/id/eprint/173483/1/SSRN-id3399742-2.pdf -/// [3] https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution -/// bdiff: 0x00000000ffff0000000000000000000000000000000000000000000000000000 -/// https://en.bitcoin.it/wiki/Difficulty#How_soon_might_I_expect_to_generate_a_block.3F +/// \[1] [https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742) +/// +/// \[2] [https://www.zora.uzh.ch/id/eprint/173483/1/SSRN-id3399742-2.pdf](https://www.zora.uzh.ch/id/eprint/173483/1/SSRN-id3399742-2.pdf) +/// +/// \[3] [https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution](https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution) pub fn hash_rate_to_target( hashrate: f64, share_per_min: f64, @@ -384,6 +398,7 @@ fn from_uint128_to_u128(input: Uint128) -> u128 { u128::from_be_bytes(input) } +/// helper converter u128 to uint256 pub fn from_u128_to_uint256(input: u128) -> Uint256 { let input: [u8; 16] = input.to_be_bytes(); let mut be_bytes = [0_u8; 32]; @@ -576,6 +591,7 @@ fn test_merkle_root_from_path() { ); } +/// Converts a u256 to a BlockHash type pub fn u256_to_block_hash(v: U256<'static>) -> BlockHash { let hash: [u8; 32] = v.to_vec().try_into().unwrap(); let hash = Hash::from_inner(hash); @@ -659,6 +675,7 @@ pub fn target_from_hash_rate(hash_per_second: f32, share_per_min: f32) -> U256<' target.into() } +/// todo: remove this, not used anywhere #[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))] pub fn get_target( nonce: u32, @@ -698,6 +715,8 @@ pub fn get_target( hash.reverse(); hash } + +/// Returns a tuple with a list of transaction short hashes and the nonce used to generate them pub fn hash_lists_tuple( tx_data: Vec, tx_short_hash_nonce: u64, @@ -715,6 +734,7 @@ pub fn hash_lists_tuple( (tx_short_hash_list, tx_hash_list_hash) } +/// Computes SipHash 24 
of some transaction id (short hash) pub fn get_short_hash(txid: bitcoin::Txid, tx_short_hash_nonce: u64) -> ShortTxId<'static> { // hash the short hash nonce let nonce_hash = sha256::Hash::hash(&tx_short_hash_nonce.to_le_bytes()); @@ -741,12 +761,15 @@ fn tx_hash_list_hash_builder(txid_list: Vec) -> U256<'static> { hash.to_vec().try_into().unwrap() } +/// Creates a block from a solution submission pub struct BlockCreator<'a> { last_declare: DeclareMiningJob<'a>, tx_list: Vec, message: SubmitSolutionJd<'a>, } + impl<'a> BlockCreator<'a> { + /// Constructor pub fn new( last_declare: DeclareMiningJob<'a>, tx_list: Vec, @@ -760,9 +783,9 @@ impl<'a> BlockCreator<'a> { } } -/// TODO write a test for this function that takes an already mined block, and test if the new -/// block created with the hash of the new block created with the block creator coincides with the -/// hash of the mined block +// TODO write a test for this function that takes an already mined block, and test if the new +// block created with the hash of the new block created with the block creator coincides with the +// hash of the mined block impl<'a> From> for bitcoin::Block { fn from(block_creator: BlockCreator<'a>) -> bitcoin::Block { let last_declare = block_creator.last_declare; From c998005ee5aa98c6124b53334945c84144b154f0 Mon Sep 17 00:00:00 2001 From: GitGab19 Date: Fri, 15 Nov 2024 16:56:04 +0100 Subject: [PATCH 07/12] handlers docs --- .../v2/roles-logic-sv2/src/handlers/common.rs | 88 ++++- .../src/handlers/job_declaration.rs | 192 ++++++++++- .../v2/roles-logic-sv2/src/handlers/mining.rs | 301 +++++++++++++++++- .../v2/roles-logic-sv2/src/handlers/mod.rs | 89 +++--- .../src/handlers/template_distribution.rs | 170 ++++++++++ 5 files changed, 759 insertions(+), 81 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/handlers/common.rs b/protocols/v2/roles-logic-sv2/src/handlers/common.rs index c2fb4ceb6..a520b8ca7 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/common.rs +++ 
b/protocols/v2/roles-logic-sv2/src/handlers/common.rs @@ -1,3 +1,36 @@ +//! # Common Handlers +//! +//! This module defines traits and implementations for handling common Stratum V2 messages exchanged +//! between upstream and downstream nodes. +//! +//! ## Core Traits +//! +//! - `ParseUpstreamCommonMessages`: Implemented by downstream nodes to handle common messages +//! received from upstream nodes, such as setup connection results or channel endpoint changes. +//! - `ParseDownstreamCommonMessages`: Implemented by upstream nodes to process setup connection +//! messages received from downstream nodes. +//! +//! ## Message Handling +//! +//! Handlers in this module are responsible for: +//! - Parsing and deserializing common messages. +//! - Dispatching deserialized messages to appropriate handler functions based on message type, such +//! as `SetupConnection` or `ChannelEndpointChanged`. +//! - Ensuring robust error handling for unexpected or malformed messages. +//! +//! ## Return Type +//! +//! Functions return `Result`, where `SendTo` specifies the next action for the +//! message: whether to forward it, respond to it, or ignore it. +//! +//! ## Structure +//! +//! This module includes: +//! - Traits for upstream and downstream message parsing and handling. +//! - Functions to process common message types while maintaining clear separation of concerns. +//! - Error handling mechanisms to address edge cases and ensure reliable communication within +//! Stratum V2 networks. + use super::SendTo_; use crate::{ common_properties::CommonDownstreamData, @@ -41,6 +74,7 @@ where routing_logic, ) } + /// Takes a message and it calls the appropriate handler function /// /// Arguments: @@ -86,19 +120,43 @@ where } } - /// Called by `Self::handle_message_common` when the `SetupConnectionSuccess` message is - /// received from the upstream node. + /// Handles a `SetupConnectionSuccess` message. 
+ /// + /// This method processes a `SetupConnectionSuccess` message and handles it + /// by delegating to the appropriate handler. + /// + /// # Arguments + /// - `message`: The `SetupConnectionSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_setup_connection_success( &mut self, m: SetupConnectionSuccess, ) -> Result; - /// Called by `Self::handle_message_common` when the `SetupConnectionError` message is received - /// from the upstream node. + /// Handles a `SetupConnectionError` message. + /// + /// This method processes a `SetupConnectionError` message and handles it + /// by delegating to the appropriate handler. + /// + /// # Arguments + /// - `message`: The `SetupConnectionError` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_setup_connection_error(&mut self, m: SetupConnectionError) -> Result; - /// Called by `Self::handle_message_common` when the `ChannelEndpointChanged` message is - /// received from the upstream node. + /// Handles a `ChannelEndpointChanged` message. + /// + /// This method processes a `ChannelEndpointChanged` message and handles it + /// by delegating to the appropriate handler. + /// + /// # Arguments + /// - `message`: The `ChannelEndpointChanged` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. 
fn handle_channel_endpoint_changed( &mut self, m: ChannelEndpointChanged, @@ -129,6 +187,7 @@ where Err(e) => Err(e), } } + /// It takes a message type and a payload, and if the message is a serialized setup connection /// message, it calls the `on_setup_connection` function on the routing logic, and then calls /// the `handle_setup_connection` function on the router @@ -150,8 +209,8 @@ where } /// It takes a message do setup connection message, it calls - /// the `on_setup_connection` function on the routing logic, and then calls the - /// `handle_setup_connection` function on the router + /// the `on_setup_connection` function on the routing logic, and then calls + /// the `handle_setup_connection` function on the router fn handle_message_common_deserilized( self_: Arc>, message: Result, Error>, @@ -192,8 +251,17 @@ where } } - /// Called by `Self::handle_message_common` when a setup connection message is received from the - /// downstream node. + /// Handles a `SetupConnection` message. + /// + /// This method processes a `SetupConnection` message and handles it + /// by delegating to the appropriate handler in the routing logic. + /// + /// # Arguments + /// - `message`: The `SetupConnection` message. + /// - `result`: The result of the `on_setup_connection` call, if available. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_setup_connection( &mut self, m: SetupConnection, diff --git a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs index 61044fa15..f0453fd8d 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs @@ -1,3 +1,44 @@ +//! # Job Declaration Handlers +//! +//! This module defines traits and functions for handling job declaration messages within the +//! Stratum V2 protocol. The job declaration process is integral to managing mining tasks and +//! 
transactions between server and client components. +//! +//! ## Core Traits +//! +//! - `ParseServerJobDeclarationMessages`: This trait is implemented by downstream nodes to process +//! job declaration messages received from upstream nodes. The trait includes methods for handling +//! job-related events such as mining job token allocation, job declaration successes or errors, +//! and transaction identification or provisioning. +//! - `ParseClientJobDeclarationMessages`: This trait is implemented by upstream nodes to manage job +//! declaration messages received from downstream nodes. It facilitates the handling of job +//! declarations, mining job token allocation, and transaction solutions submitted by downstream +//! nodes. +//! +//! ## Message Handling +//! +//! The handlers are responsible for the following tasks: +//! - Parsing and deserializing job declaration messages into appropriate types. +//! - Dispatching the deserialized messages to specific handler functions based on their type, such +//! as handling job token allocation, job declaration success or error responses, and transaction +//! data management. +//! +//! ## Return Type +//! +//! The functions return a `Result`. The `SendTo` type determines the next action for +//! the message: whether the message should be relayed, responded to, or ignored. If an error occurs +//! during processing, the `Error` type is returned. +//! +//! ## Structure +//! +//! This module contains: +//! - Traits for processing job declaration messages, covering both server-side and client-side +//! handling. +//! - Functions designed to parse, deserialize, and process messages related to job declarations, +//! with robust error handling. +//! - Error handling mechanisms to address unexpected messages and ensure safe processing, +//! particularly in the context of shared state. 
+ use crate::{parsers::JobDeclaration, utils::Mutex}; use std::sync::Arc; pub type SendTo = SendTo_, ()>; @@ -7,12 +48,25 @@ use core::convert::TryInto; use job_declaration_sv2::*; use tracing::{debug, error, info, trace}; -/// A trait implemented by a downstream to handle SV2 job declaration messages. +/// A trait for parsing and handling SV2 job declaration messages sent by a server. +/// +/// This trait is designed to be implemented by downstream components that need to handle +/// various job declaration messages from an upstream SV2 server, such as job allocation, +/// declaration success, and error messages. pub trait ParseServerJobDeclarationMessages where Self: Sized, { - /// Used to parse job declaration message and route to the message's respected handler function + /// Routes an incoming job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message_type`: The type identifier of the incoming message. + /// - `payload`: A mutable slice containing the message payload. + /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration( self_: Arc>, message_type: u8, @@ -21,6 +75,16 @@ where Self::handle_message_job_declaration_deserialized(self_, (message_type, payload).try_into()) } + /// Routes a deserialized job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message`: A `Result` containing either the parsed message or an + /// error. + /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. 
fn handle_message_job_declaration_deserialized( self_: Arc>, message: Result, Error>, @@ -81,44 +145,95 @@ where Err(e) => Err(e), } } - /// When upstream send AllocateMiningJobTokenSuccess self should use the received token to - /// negotiate the next job + + /// Handles an `AllocateMiningJobTokenSuccess` message. /// - /// "[`job_declaration_sv2::AllocateMiningJobToken`]" + /// This method processes a message indicating a successful job token allocation. + /// + /// # Arguments + /// - `message`: The `AllocateMiningJobTokenSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_allocate_mining_job_token_success( &mut self, message: AllocateMiningJobTokenSuccess, ) -> Result; - // When upstream send DeclareMiningJobSuccess if the token is different from the one negotiated - // self must use the new token to refer to the committed job + /// Handles a `DeclareMiningJobSuccess` message. + /// + /// This method processes a message indicating a successful mining job declaration. + /// + /// # Arguments + /// - `message`: The `DeclareMiningJobSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_declare_mining_job_success( &mut self, message: DeclareMiningJobSuccess, ) -> Result; - // TODO: comment + /// Handles a `DeclareMiningJobError` message. + /// + /// This method processes a message indicating an error in the mining job declaration process. + /// + /// # Arguments + /// - `message`: The `DeclareMiningJobError` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_declare_mining_job_error( &mut self, message: DeclareMiningJobError, ) -> Result; - // TODO: comment + /// Handles an `IdentifyTransactions` message. + /// + /// This method processes a message that provides transaction identification data. + /// + /// # Arguments + /// - `message`: The `IdentifyTransactions` message. 
+ /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_identify_transactions( &mut self, message: IdentifyTransactions, ) -> Result; - // TODO: comment + /// Handles a `ProvideMissingTransactions` message. + /// + /// This method processes a message that supplies missing transaction data. + /// + /// # Arguments + /// - `message`: The `ProvideMissingTransactions` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_provide_missing_transactions( &mut self, message: ProvideMissingTransactions, ) -> Result; } + +/// The `ParseClientJobDeclarationMessages` trait is responsible for handling job declaration +/// messages sent by clients to upstream nodes. The methods process messages like job declarations, +/// solutions, and transaction success indicators, ensuring proper routing and handling. pub trait ParseClientJobDeclarationMessages where Self: Sized, { + /// Routes an incoming job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message_type`: The type identifier of the incoming message. + /// - `payload`: A mutable slice containing the message payload. + /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration( self_: Arc>, message_type: u8, @@ -127,6 +242,16 @@ where Self::handle_message_job_declaration_deserialized(self_, (message_type, payload).try_into()) } + /// Routes a deserialized job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message`: A `Result` containing either the parsed message or an + /// error. 
+ /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration_deserialized( self_: Arc>, message: Result, Error>, @@ -176,27 +301,72 @@ where .safe_lock(|x| x.handle_submit_solution(message)) .map_err(|e| crate::Error::PoisonLock(e.to_string()))? } - Ok(_) => todo!(), Err(e) => Err(e), } } + /// Handles an `AllocateMiningJobToken` message. + /// + /// This method processes a message that allocates a mining job token. + /// + /// # Arguments + /// - `message`: The `AllocateMiningJobToken` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_allocate_mining_job_token( &mut self, message: AllocateMiningJobToken, ) -> Result; + /// Handles a `DeclareMiningJob` message. + /// + /// This method processes a message that declares a new mining job. + /// + /// # Arguments + /// - `message`: The `DeclareMiningJob` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_declare_mining_job(&mut self, message: DeclareMiningJob) -> Result; + /// Handles an `IdentifyTransactionsSuccess` message. + /// + /// This method processes a message that confirms the identification of transactions. + /// + /// # Arguments + /// - `message`: The `IdentifyTransactionsSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_identify_transactions_success( &mut self, message: IdentifyTransactionsSuccess, ) -> Result; + /// Handles a `ProvideMissingTransactionsSuccess` message. + /// + /// This method processes a message that confirms the receipt of missing transactions. + /// + /// # Arguments + /// - `message`: The `ProvideMissingTransactionsSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. 
fn handle_provide_missing_transactions_success( &mut self, message: ProvideMissingTransactionsSuccess, ) -> Result; + + /// Handles a `SubmitSolution` message. + /// + /// This method processes a message that submits a solution for the mining job. + /// + /// # Arguments + /// - `message`: The `SubmitSolutionJd` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_submit_solution(&mut self, message: SubmitSolutionJd) -> Result; } diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs index b5ec45e16..918a32405 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs @@ -1,3 +1,41 @@ +//! # Mining Handlers +//! +//! This module defines traits and functions for handling mining-related messages within the Stratum +//! V2 protocol. +//! +//! ## Core Traits +//! +//! - `ParseUpstreamMiningMessages`: Implemented by downstream nodes to process mining messages +//! received from upstream nodes. This trait provides methods for handling mining events like new +//! mining jobs, share submissions, extranonce prefix updates, and channel status updates. +//! - `ParseDownstreamMiningMessages`: Implemented by upstream nodes to manage mining messages +//! received from downstream nodes. This trait includes methods for managing tasks such as +//! submitting shares, opening mining channels, and handling mining job responses. +//! +//! ## Message Handling +//! +//! Handlers in this module are responsible for: +//! - Parsing and deserializing mining-related messages into the appropriate types. +//! - Dispatching the deserialized messages to specific handler functions based on message type, +//! such as handling new mining jobs, share submissions, and extranonce updates. +//! - Ensuring the integrity and validity of received messages, while interacting with downstream +//! 
mining systems to ensure proper communication and task execution. +//! +//! ## Return Type +//! +//! Functions return `Result, Error>`, where `SendTo` specifies the next action +//! for the message: whether it should be sent to the downstream node, an error response should be +//! generated, or the message should be ignored. +//! +//! ## Structure +//! +//! This module includes: +//! - Traits for processing mining-related messages for both upstream and downstream communication. +//! - Functions to parse, deserialize, and process messages related to mining, ensuring robust error +//! handling for unexpected conditions. +//! - Support for managing mining channels, extranonce prefixes, and share submissions, while +//! handling edge cases and ensuring the correctness of the mining process. + use crate::{common_properties::RequestIdMapper, errors::Error, parsers::Mining}; use core::convert::TryInto; use mining_sv2::{ @@ -24,16 +62,20 @@ use tracing::{debug, error, info, trace}; pub type SendTo = SendTo_, Remote>; +/// Represents supported channel types in a mining connection. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum SupportedChannelTypes { Standard, Extended, Group, - // Non header only connection can support both group and extended channels. + /// Represents a connection that supports both group and extended channels. GroupAndExtended, } -/// Connection-wide downtream's messages parser implemented by an upstream. +/// Trait for parsing downstream mining messages in a Stratum V2 connection. +/// +/// This trait defines methods for parsing and routing downstream messages +/// related to mining operations. pub trait ParseDownstreamMiningMessages< Up: IsMiningUpstream + D, Selector: DownstreamMiningSelector + D, @@ -41,10 +83,19 @@ pub trait ParseDownstreamMiningMessages< > where Self: IsMiningDownstream + Sized + D, { + /// Returns the type of channel supported by the downstream connection. 
fn get_channel_type(&self) -> SupportedChannelTypes; - /// Used to parse and route SV2 mining messages from the downstream based on `message_type` and - /// `payload` + /// Handles a mining message from the downstream, given its type and payload. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message_type`: The type of the mining message. + /// - `payload`: The raw payload of the message. + /// - `routing_logic`: The logic for routing the message to the appropriate upstream entity. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_message_mining( self_mutex: Arc>, message_type: u8, @@ -64,7 +115,15 @@ pub trait ParseDownstreamMiningMessages< } } - /// Used to route SV2 mining messages from the downstream + /// Deserializes and processes a mining message from the downstream. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message`: The mining message to be processed. + /// - `routing_logic`: The logic for routing the message to the appropriate upstream entity. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_message_mining_deserialized( self_mutex: Arc>, message: Result, Error>, @@ -265,9 +324,20 @@ pub trait ParseDownstreamMiningMessages< } } + /// Checks if work selection is enabled for the downstream connection. + /// + /// # Returns + /// - `bool`: `true` if work selection is enabled, `false` otherwise. fn is_work_selection_enabled(&self) -> bool; - /// returns None if the user is authorized and Open + /// Checks if the downstream user is authorized. + /// + /// # Arguments + /// - `_self_mutex`: The `Arc>` representing the downstream entity. + /// - `_user_identity`: The user's identity to be checked. + /// + /// # Returns + /// - `Result`: `true` if the user is authorized, `false` otherwise. 
fn is_downstream_authorized( _self_mutex: Arc>, _user_identity: &binary_sv2::Str0255, @@ -275,32 +345,94 @@ pub trait ParseDownstreamMiningMessages< Ok(true) } + /// Handles an `OpenStandardMiningChannel` message. + /// + /// This method processes an `OpenStandardMiningChannel` message and initiates the + /// appropriate response. + /// + /// # Arguments + /// - `m`: The `OpenStandardMiningChannel` message. + /// - `up`: An optional upstream entity to which the message is forwarded. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_standard_mining_channel( &mut self, m: OpenStandardMiningChannel, up: Option>>, ) -> Result, Error>; + /// Handles an `OpenExtendedMiningChannel` message. + /// + /// This method processes an `OpenExtendedMiningChannel` message and initiates the + /// appropriate response. + /// + /// # Arguments + /// - `m`: The `OpenExtendedMiningChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_extended_mining_channel( &mut self, m: OpenExtendedMiningChannel, ) -> Result, Error>; + /// Handles an `UpdateChannel` message. + /// + /// This method processes an `UpdateChannel` message and updates the channel settings. + /// + /// # Arguments + /// - `m`: The `UpdateChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_update_channel(&mut self, m: UpdateChannel) -> Result, Error>; + /// Handles a `SubmitSharesStandard` message. + /// + /// This method processes a `SubmitSharesStandard` message and validates the submitted shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesStandard` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_standard( &mut self, m: SubmitSharesStandard, ) -> Result, Error>; + /// Handles a `SubmitSharesExtended` message. 
+ /// + /// This method processes a `SubmitSharesExtended` message and validates the submitted shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesExtended` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_extended( &mut self, m: SubmitSharesExtended, ) -> Result, Error>; + /// Handles a `SetCustomMiningJob` message. + /// + /// This method processes a `SetCustomMiningJob` message and applies the custom mining job + /// settings. + /// + /// # Arguments + /// - `m`: The `SetCustomMiningJob` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_custom_mining_job(&mut self, m: SetCustomMiningJob) -> Result, Error>; } -/// Connection-wide upstream's messages parser implemented by a downstream. + +/// A trait defining the parser for upstream mining messages used by a downstream. +/// +/// This trait provides the functionality to handle and route various types of mining messages +/// from the upstream based on the message type and payload. pub trait ParseUpstreamMiningMessages< Down: IsMiningDownstream + D, Selector: DownstreamMiningSelector + D, @@ -308,16 +440,35 @@ pub trait ParseUpstreamMiningMessages< > where Self: IsMiningUpstream + Sized + D, { + /// Retrieves the type of the channel supported by this upstream parser. + /// + /// # Returns + /// - `SupportedChannelTypes`: The supported channel type for this upstream. fn get_channel_type(&self) -> SupportedChannelTypes; + /// Retrieves an optional RequestIdMapper, used to manage request IDs across connections. + /// + /// # Returns + /// - `Option>>`: An optional RequestIdMapper for request ID + /// modification. 
fn get_request_id_mapper(&mut self) -> Option>> { None } - /// Used to parse and route SV2 mining messages from the upstream based on `message_type` and - /// `payload` The implementor of DownstreamMining needs to pass a RequestIdMapper if needing - /// to change the req id. Proxies likely would want to update a downstream req id to a new - /// one as req id must be connection-wide unique + /// Parses and routes SV2 mining messages from the upstream based on the message type and + /// payload. The implementor of DownstreamMining needs to pass a RequestIdMapper if changing + /// the request ID. Proxies typically need this to ensure the request ID is unique across + /// the connection. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message_type`: The type of the incoming message. + /// - `payload`: The payload containing the message data. + /// - `routing_logic`: The logic to handle the routing of the message based on the type. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message, either sending a + /// response or an error. fn handle_message_mining( self_mutex: Arc>, message_type: u8, @@ -334,6 +485,17 @@ pub trait ParseUpstreamMiningMessages< } } + /// Handles the deserialized mining message from the upstream, processing it according to the + /// routing logic. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message`: The deserialized mining message, wrapped in a Result for error handling. + /// - `routing_logic`: The logic used to route the message based on the type. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message, either sending a + /// response or an error. fn handle_message_mining_deserialized( self_mutex: Arc>, message: Result, @@ -673,64 +835,181 @@ pub trait ParseUpstreamMiningMessages< } } + /// Determines whether work selection is enabled for this upstream. 
+ /// + /// # Returns + /// - `bool`: A boolean indicating if work selection is enabled. fn is_work_selection_enabled(&self) -> bool; + /// Handles a successful response for opening a standard mining channel. + /// + /// # Arguments + /// - `m`: The `OpenStandardMiningChannelSuccess` message. + /// - `remote`: An optional reference to the downstream, wrapped in an `Arc`. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_standard_mining_channel_success( &mut self, m: OpenStandardMiningChannelSuccess, remote: Option>>, ) -> Result, Error>; + /// Handles a successful response for opening an extended mining channel. + /// + /// # Arguments + /// - `m`: The `OpenExtendedMiningChannelSuccess` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_extended_mining_channel_success( &mut self, m: OpenExtendedMiningChannelSuccess, ) -> Result, Error>; + /// Handles an error when opening a mining channel. + /// + /// # Arguments + /// - `m`: The `OpenMiningChannelError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_open_mining_channel_error( &mut self, m: OpenMiningChannelError, ) -> Result, Error>; + /// Handles an error when updating a mining channel. + /// + /// # Arguments + /// - `m`: The `UpdateChannelError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_update_channel_error(&mut self, m: UpdateChannelError) -> Result, Error>; + /// Handles a request to close a mining channel. + /// + /// # Arguments + /// - `m`: The `CloseChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_close_channel(&mut self, m: CloseChannel) -> Result, Error>; + /// Handles a request to set the extranonce prefix for mining. + /// + /// # Arguments + /// - `m`: The `SetExtranoncePrefix` message. 
+ /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_extranonce_prefix( &mut self, m: SetExtranoncePrefix, ) -> Result, Error>; + /// Handles a successful submission of shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesSuccess` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_success( &mut self, m: SubmitSharesSuccess, ) -> Result, Error>; + /// Handles an error when submitting shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_submit_shares_error(&mut self, m: SubmitSharesError) -> Result, Error>; + /// Handles a new mining job. + /// + /// # Arguments + /// - `m`: The `NewMiningJob` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_new_mining_job(&mut self, m: NewMiningJob) -> Result, Error>; + /// Handles a new extended mining job. + /// + /// # Arguments + /// - `m`: The `NewExtendedMiningJob` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_new_extended_mining_job( &mut self, m: NewExtendedMiningJob, ) -> Result, Error>; + /// Handles a request to set the new previous hash. + /// + /// # Arguments + /// - `m`: The `SetNewPrevHash` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result, Error>; + /// Handles a successful response for setting a custom mining job. + /// + /// # Arguments + /// - `m`: The `SetCustomMiningJobSuccess` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_custom_mining_job_success( &mut self, m: SetCustomMiningJobSuccess, ) -> Result, Error>; + /// Handles an error when setting a custom mining job. 
+ /// + /// # Arguments + /// - `m`: The `SetCustomMiningJobError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_set_custom_mining_job_error( &mut self, m: SetCustomMiningJobError, ) -> Result, Error>; + /// Handles a request to set the target for mining. + /// + /// # Arguments + /// - `m`: The `SetTarget` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_target(&mut self, m: SetTarget) -> Result, Error>; + /// Handles a request to reconnect the mining connection. + /// + /// # Arguments + /// - `m`: The `Reconnect` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_reconnect(&mut self, m: Reconnect) -> Result, Error>; + /// Handles a request to set the group channel for mining. + /// + /// # Arguments + /// - `_m`: The `SetGroupChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_group_channel(&mut self, _m: SetGroupChannel) -> Result, Error> { Ok(SendTo::None(None)) } diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mod.rs b/protocols/v2/roles-logic-sv2/src/handlers/mod.rs index b7d39ab4e..b717eb34a 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mod.rs @@ -1,76 +1,65 @@ -//! Handlers are divided per (sub)protocol and per Downstream/Upstream. -//! Each (sup)protocol defines a handler for both the Upstream node and the Downstream node -//! Handlers are a trait called `Parse[Downstream/Upstream][(sub)protocol]` -//! (eg. `ParseDownstreamCommonMessages`). +//! # Handlers Overview //! -//! When implemented, the handler makes the `handle_message_[(sub)protoco](..)` (e.g. -//! `handle_message_common(..)`) function available. +//! This module centralizes the logic for processing and routing Stratum V2 protocol messages, +//! 
defining traits and utilities to handle messages for both Downstream and Upstream roles. //! -//! The trait requires the implementer to define one function for each message type that a role -//! defined by the (sub)protocol and the Upstream/Downstream state could receive. +//! ## Purpose //! -//! This function will always take a mutable ref to `self`, a message payload + message type, and -//! the routing logic. -//! Using `parsers` in `crate::parser`, the payload and message type are parsed in an actual SV2 -//! message. -//! Routing logic is used in order to select the correct Downstream/Upstream to which the message -//! must be relayed/sent. -//! Routing logic is used to update the request id when needed. -//! After that, the specific function for the message type (implemented by the implementer) is -//! called with the SV2 message and the remote that must receive the message. +//! - Standardize the handling of protocol-specific messages. +//! - Enable efficient routing, transformation, and relaying of messages between nodes. +//! - Support modularity and scalability across Stratum V2 subprotocols. //! -//! A `Result` is returned and it is the duty of the implementer to send the -//! message. +//! ## Structure +//! +//! The module is organized by subprotocol and role, with handler traits for: +//! - `ParseDownstream[Protocol]`: Handles messages from Downstream nodes. +//! - `ParseUpstream[Protocol]`: Handles messages from Upstream nodes. +//! +//! Supported subprotocols include: +//! - `common`: Shared messages across protocols. +//! - `job_declaration`: Job-related messages. +//! - `mining`: Mining-specific messages. +//! - `template_distribution`: Template distribution messages. +//! +//! ## Return Values +//! +//! Handlers return `Result<SendTo_, Error>`, where: +//! - `SendTo_` specifies the action (relay, respond, or no action). +//! - `Error` indicates processing issues. 
pub mod common; pub mod job_declaration; pub mod mining; pub mod template_distribution; + use crate::utils::Mutex; use std::sync::Arc; #[derive(Debug)] -/// Message is a serializable entity that rapresent the meanings of communication between Remote(s) -/// SendTo_ is used to add context to Message, it say what we need to do with that Message. +/// Represents a serializable entity used for communication between Remotes. +/// The `SendTo_` enum adds context to the message, specifying the intended action. pub enum SendTo_ { - /// Used by proxies when Message must be relayed downstream or upstream and we want to specify - /// to which particular downstream or upstream we want to relay the message. - /// - /// When the message that we need to relay is the same message that we received should be used - /// RelaySameMessageToRemote in order to save an allocation. + /// Relay a new message to a specific remote. RelayNewMessageToRemote(Arc>, Message), - /// Used by proxies when Message must be relayed downstream or upstream and we want to specify - /// to which particular downstream or upstream we want to relay the message. - /// - /// Is used when we need to relay the same message the we received in order to save an - /// allocation. + /// Relay the same received message to a specific remote to avoid extra allocations. RelaySameMessageToRemote(Arc>), - /// Used by proxies when Message must be relayed downstream or upstream and we do not want to - /// specify specify to which particular downstream or upstream we want to relay the - /// message. + /// Relay a new message without specifying a specific remote. /// - /// This is used in proxies that do and Sv1 to Sv2 translation. The upstream is connected via - /// an extended channel that means that + /// This is common in proxies that translate between SV1 and SV2 protocols, where messages are + /// often broadcast via extended channels. 
RelayNewMessage(Message), - /// Used proxies clients and servers to directly respond to a received message. + /// Directly respond to a received message. Respond(Message), + /// Relay multiple messages to various destinations. Multiple(Vec>), - /// Used by proxies, clients, and servers, when Message do not have to be used in any of the - /// above way. If Message is still needed to be used in a non conventional way we use - /// SendTo::None(Some(message)) If we just want to discard it we can use SendTo::None(None) + /// Indicates that no immediate action is required for the message. /// - /// SendTo::None(Some(m)) could be used for example when we do not need to send the message, - /// but we still need it for successive handling/transformation. - /// One of these cases are proxies that are connected to upstream via an extended channel (like - /// the Sv1 <-> Sv2 translator). This because extended channel messages are always general - /// for all the downstream, where standard channel message can be specific for a particular - /// downstream. Another case is when 2 roles are implemented in the same software, like a - /// pool that is both TP client and a Mining server, messages received by the TP client - /// must be sent to the Mining Server than transformed in Mining messages and sent to the - /// downstream. + /// This variant allows for cases where the message is still needed for later processing + /// (e.g., transformations or when two roles are implemented in the same software). None(Option), } impl SendTo_ { + /// Extracts the message, if available. pub fn into_message(self) -> Option { match self { Self::RelayNewMessageToRemote(_, m) => Some(m), @@ -81,6 +70,8 @@ impl SendTo_ { Self::None(m) => m, } } + + /// Extracts the remote, if available. 
pub fn into_remote(self) -> Option>> { match self { Self::RelayNewMessageToRemote(r, _) => Some(r), diff --git a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs index fd321d0f7..a0a5b94ec 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs @@ -1,3 +1,40 @@ +//! # Template Distribution Handlers +//! +//! This module defines traits and functions for handling template distribution messages within the +//! Stratum V2 protocol. +//! +//! ## Core Traits +//! +//! - `ParseServerTemplateDistributionMessages`: Implemented by downstream nodes to process template +//! distribution messages received from upstream nodes. This trait includes methods for handling +//! template-related events like new templates, previous hash updates, and transaction data +//! requests. +//! - `ParseClientTemplateDistributionMessages`: Implemented by upstream nodes to manage template +//! distribution messages received from downstream nodes. This trait handles coinbase output size, +//! transaction data requests, and solution submissions. +//! +//! ## Message Handling +//! +//! Handlers are responsible for: +//! - Parsing and deserializing template distribution messages into appropriate types. +//! - Dispatching the deserialized messages to specific handler functions based on message type, +//! such as handling new templates, transaction data requests, and coinbase output data. +//! +//! ## Return Type +//! +//! Functions return `Result`, where `SendTo` determines the next action for the +//! message: whether it should be relayed, responded to, or ignored. +//! +//! ## Structure +//! +//! This module includes: +//! - Traits for processing template distribution messages, including server-side and client-side +//! handling. +//! 
- Functions to parse, deserialize, and process messages related to template distribution, +//! ensuring robust error handling. +//! - Error handling mechanisms to address unexpected messages and ensure safe processing, +//! especially in the context of shared state. + use super::SendTo_; use crate::{errors::Error, parsers::TemplateDistribution, utils::Mutex}; use template_distribution_sv2::{ @@ -11,10 +48,28 @@ use core::convert::TryInto; use std::sync::Arc; use tracing::{debug, error, info, trace}; +/// Trait for handling template distribution messages received from upstream nodes (server side). +/// Includes functions to handle messages such as new templates, previous hash updates, and +/// transaction data requests. pub trait ParseServerTemplateDistributionMessages where Self: Sized, { + /// Handles incoming template distribution messages. + /// + /// This function is responsible for parsing and dispatching the appropriate handler based on + /// the message type. It first deserializes the payload and then routes it to the + /// corresponding handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. + /// - `message_type`: The type of the incoming message. + /// - `payload`: The raw payload data of the message. + /// + /// # Returns + /// - `Result`: The result of processing the message, where `SendTo` indicates + /// the next step in message handling. fn handle_message_template_distribution( self_: Arc>, message_type: u8, @@ -25,6 +80,19 @@ where (message_type, payload).try_into(), ) } + + /// Handles deserialized template distribution messages. + /// + /// This function takes the deserialized message and processes it according to the specific + /// message type, invoking the appropriate handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. 
+ /// - `message`: The deserialized `TemplateDistribution` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_message_template_distribution_desrialized( self_: Arc>, message: Result, Error>, @@ -80,22 +148,82 @@ where Err(e) => Err(e), } } + + /// Handles a `NewTemplate` message. + /// + /// This method processes the `NewTemplate` message, which contains information about a newly + /// generated template. + /// + /// # Arguments + /// - `m`: The `NewTemplate` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_new_template(&mut self, m: NewTemplate) -> Result; + + /// Handles a `SetNewPrevHash` message. + /// + /// This method processes the `SetNewPrevHash` message, which updates the previous hash for a + /// template. + /// + /// # Arguments + /// - `m`: The `SetNewPrevHash` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result; + + /// Handles a `RequestTransactionDataSuccess` message. + /// + /// This method processes the success response for a requested transaction data message. + /// + /// # Arguments + /// - `m`: The `RequestTransactionDataSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_tx_data_success( &mut self, m: RequestTransactionDataSuccess, ) -> Result; + + /// Handles a `RequestTransactionDataError` message. + /// + /// This method processes an error response for a requested transaction data message. + /// + /// # Arguments + /// - `m`: The `RequestTransactionDataError` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_tx_data_error( &mut self, m: RequestTransactionDataError, ) -> Result; } +/// Trait for handling template distribution messages received from downstream nodes (client side). 
+/// Includes functions to handle messages such as coinbase output data size, transaction data +/// requests, and solution submissions. pub trait ParseClientTemplateDistributionMessages where Self: Sized, { + /// Handles incoming template distribution messages. + /// + /// This function is responsible for parsing and dispatching the appropriate handler based on + /// the message type. It first deserializes the payload and then routes it to the + /// corresponding handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. + /// - `message_type`: The type of the incoming message. + /// - `payload`: The raw payload data of the message. + /// + /// # Returns + /// - `Result`: The result of processing the message, where `SendTo` indicates + /// the next step in message handling. fn handle_message_template_distribution( self_: Arc>, message_type: u8, @@ -107,6 +235,18 @@ where ) } + /// Handles deserialized template distribution messages. + /// + /// This function takes the deserialized message and processes it according to the specific + /// message type, invoking the appropriate handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. + /// - `message`: The deserialized `TemplateDistribution` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_message_template_distribution_desrialized( self_: Arc>, message: Result, Error>, @@ -137,8 +277,38 @@ where Err(e) => Err(e), } } + + /// Handles a `CoinbaseOutputDataSize` message. + /// + /// This method processes a message that includes the coinbase output data size. + /// + /// # Arguments + /// - `m`: The `CoinbaseOutputDataSize` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. 
fn handle_coinbase_out_data_size(&mut self, m: CoinbaseOutputDataSize) -> Result; + + /// Handles a `RequestTransactionData` message. + /// + /// This method processes a message requesting transaction data. + /// + /// # Arguments + /// - `m`: The `RequestTransactionData` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_tx_data(&mut self, m: RequestTransactionData) -> Result; + + /// Handles a `SubmitSolution` message. + /// + /// This method processes a solution submission message. + /// + /// # Arguments + /// - `m`: The `SubmitSolution` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_submit_solution(&mut self, m: SubmitSolution) -> Result; } From db3496e89ebed4f06c811c10d9cf700441f7ca4f Mon Sep 17 00:00:00 2001 From: Gabriele Vernetti Date: Fri, 22 Nov 2024 20:50:07 +0100 Subject: [PATCH 08/12] routing_logic docs --- .../v2/roles-logic-sv2/src/routing_logic.rs | 322 ++++++++++++------ 1 file changed, 214 insertions(+), 108 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/routing_logic.rs b/protocols/v2/roles-logic-sv2/src/routing_logic.rs index f6e4f3e7b..a23e62e4b 100644 --- a/protocols/v2/roles-logic-sv2/src/routing_logic.rs +++ b/protocols/v2/roles-logic-sv2/src/routing_logic.rs @@ -1,24 +1,50 @@ -//! The routing logic code is used by the handler to determine where a message should be relayed or -//! responded to +//! This module contains the routing logic used by handlers to determine where a message should be +//! relayed or responded to. //! -//! TODO It seems like a good idea to hide all the traits to the user and export marker traits -//! check if possible +//! ## Overview //! -//! - CommonRouter -> implemented by routers used by the common (sub)protocol +//! The routing logic defines a set of traits and structures to manage message routing in Stratum +//! V2. The following components are included: //! -//! 
- MiningRouter -> implemented by routers used by the mining (sub)protocol +//! - **`CommonRouter`**: Trait implemented by routers for the common (sub)protocol. +//! - **`MiningRouter`**: Trait implemented by routers for the mining (sub)protocol. +//! - **`CommonRoutingLogic`**: Enum defining the various routing logic for the common protocol +//! (e.g., Proxy, None). +//! - **`MiningRoutingLogic`**: Enum defining the routing logic for the mining protocol (e.g., +//! Proxy, None). +//! - **`NoRouting`**: Marker router that implements both `CommonRouter` and `MiningRouter` for +//! cases where no routing logic is required. +//! - **`MiningProxyRoutingLogic`**: Routing logic valid for a standard Sv2 mining proxy, +//! implementing both `CommonRouter` and `MiningRouter`. //! -//! - CommonRoutingLogic -> enum that define the enum the various routing logic for the common -//! (sub)protocol (eg Proxy None ...). +//! ## Details //! -//! - MiningProxyRoutingLogic -> enum that define the enum the various routing logic for the common -//! (sub)protocol (eg Proxy None ...). +//! ### Traits //! -//! - NoRouting -> implement both CommonRouter and MiningRouter used when the routing logic needed -//! is only None +//! - **`CommonRouter`**: Defines routing behavior for common protocol messages. +//! - **`MiningRouter`**: Defines routing behavior for mining protocol messages. Requires +//! `DownstreamMiningSelector` for downstream selection. //! -//! - MiningProxyRoutingLogic -> routing logic valid for a standard Sv2 mining proxy it is both a -//! CommonRouter and a MiningRouter +//! ### Enums +//! +//! - **`CommonRoutingLogic`**: Represents possible routing logics for the common protocol, such as +//! proxy-based routing or no routing. +//! - **`MiningRoutingLogic`**: Represents possible routing logics for the mining protocol, +//! supporting additional parameters such as selectors and marker traits. +//! +//! ### Structures +//! +//! 
- **`NoRouting`**: A minimal implementation of `CommonRouter` and `MiningRouter` that panics +//! when used. Its primary purpose is to serve as a placeholder for cases where no routing logic +//! is applied. +//! - **`MiningProxyRoutingLogic`**: Implements routing logic for a standard Sv2 proxy, including +//! upstream selection and message transformation. +//! +//! ## Future Work +//! +//! - Consider hiding all traits from the public API and exporting only marker traits. +//! - Improve upstream selection logic to be configurable by the caller. + use crate::{ common_properties::{CommonDownstreamData, IsMiningDownstream, IsMiningUpstream, PairSettings}, selectors::{ @@ -34,26 +60,45 @@ use common_messages_sv2::{ use mining_sv2::{OpenStandardMiningChannel, OpenStandardMiningChannelSuccess}; use std::{collections::HashMap, fmt::Debug as D, marker::PhantomData, sync::Arc}; -/// The CommonRouter trait defines a router needed by -/// [`crate::handlers::common::ParseUpstreamCommonMessages`] and -/// [`crate::handlers::common::ParseDownstreamCommonMessages`] +/// Defines routing logic for common protocol messages. +/// +/// Implemented by handlers (such as [`crate::handlers::common::ParseUpstreamCommonMessages`] and +/// [`crate::handlers::common::ParseDownstreamCommonMessages`]) to determine the behavior for common +/// protocol routing. pub trait CommonRouter: std::fmt::Debug { + /// Handles a `SetupConnection` message for the common protocol. + /// + /// # Arguments + /// - `message`: The `SetupConnection` message received. + /// + /// # Returns + /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: The routing result. 
fn on_setup_connection( &mut self, message: &SetupConnection, ) -> Result<(CommonDownstreamData, SetupConnectionSuccess), Error>; } -/// The MiningRouter trait defines a router needed by -/// [`crate::handlers::mining::ParseDownstreamMiningMessages`] and -/// [`crate::handlers::mining::ParseUpstreamMiningMessages`] +/// Defines routing logic for mining protocol messages. +/// +/// Implemented by handlers (such as [`crate::handlers::mining::ParseUpstreamMiningMessages`] and +/// [`crate::handlers::mining::ParseDownstreamMiningMessages`]) to determine the behavior for mining +/// protocol routing. This trait extends [`CommonRouter`] to handle mining-specific routing logic. pub trait MiningRouter< Down: IsMiningDownstream, Up: IsMiningUpstream, Sel: DownstreamMiningSelector, >: CommonRouter { - #[allow(clippy::result_unit_err)] + /// Handles an `OpenStandardMiningChannel` message from a downstream. + /// + /// # Arguments + /// - `downstream`: The downstream mining entity. + /// - `request`: The mining channel request message. + /// - `downstream_mining_data`: Associated downstream mining data. + /// + /// # Returns + /// - `Result>, Error>`: The upstream mining entity. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -61,7 +106,14 @@ pub trait MiningRouter< downstream_mining_data: &CommonDownstreamData, ) -> Result>, Error>; - #[allow(clippy::result_unit_err)] + /// Handles an `OpenStandardMiningChannelSuccess` message from an upstream. + /// + /// # Arguments + /// - `upstream`: The upstream mining entity. + /// - `request`: The successful channel opening message. + /// + /// # Returns + /// - `Result>, Error>`: The downstream mining entity. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -69,10 +121,9 @@ pub trait MiningRouter< ) -> Result>, Error>; } -/// NoRouting Router used when `RoutingLogic::None` and `MiningRoutingLogic::None` are needed. 
-/// It implements both `CommonRouter` and `MiningRouter`, and panics if used as an actual router. -/// The only purpose of `NoRouting` is a marker trait for when `RoutingLogic::None` and -/// `MiningRoutingLogic::None` +/// A no-operation router for scenarios where no routing logic is needed. +/// +/// Implements both `CommonRouter` and `MiningRouter` but panics if invoked. #[derive(Debug)] pub struct NoRouting(); @@ -84,6 +135,7 @@ impl CommonRouter for NoRouting { unreachable!() } } + impl< Down: IsMiningDownstream + D, Up: IsMiningUpstream + D, @@ -107,16 +159,16 @@ impl< } } -/// Enum that contains the possible routing logic is usually contructed before calling -/// handle_message_..() +/// Routing logic options for the common protocol. #[derive(Debug)] pub enum CommonRoutingLogic { + /// Proxy routing logic for the common protocol. Proxy(&'static Mutex), + /// No routing logic. None, } -/// Enum that contains the possibles routing logic is usually contructed before calling -/// handle_message_..() +/// Routing logic options for the mining protocol. #[derive(Debug)] pub enum MiningRoutingLogic< Down: IsMiningDownstream + D, @@ -124,8 +176,11 @@ pub enum MiningRoutingLogic< Sel: DownstreamMiningSelector + D, Router: 'static + MiningRouter, > { + /// Proxy routing logic for the mining protocol. Proxy(&'static Mutex), + /// No routing logic. None, + /// Marker for the generic parameters. _P(PhantomData<(Down, Up, Sel)>), } @@ -155,12 +210,39 @@ impl< } } +/// Routing logic for a standard Sv2 mining proxy. +#[derive(Debug)] +pub struct MiningProxyRoutingLogic< + Down: IsMiningDownstream + D, + Up: IsMiningUpstream + D, + Sel: DownstreamMiningSelector + D, +> { + /// Selector for upstream entities. + pub upstream_selector: GeneralMiningSelector, + /// ID generator for downstream entities. + pub downstream_id_generator: Id, + /// Mapping from downstream to upstream entities. 
+ pub downstream_to_upstream_map: HashMap>>>, +} + impl< Down: IsMiningDownstream + D, Up: IsMiningUpstream + D, Sel: DownstreamMiningSelector + D, > CommonRouter for MiningProxyRoutingLogic { + /// Handles the `SetupConnection` message. + /// + /// This method initializes the connection between a downstream and an upstream by determining + /// the appropriate upstream based on the provided protocol, versions, and flags. + /// + /// # Arguments + /// - `message`: A reference to the `SetupConnection` message containing the connection details. + /// + /// # Returns + /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: On success, returns the + /// downstream connection data and the corresponding setup success message. Returns an error + /// otherwise. fn on_setup_connection( &mut self, message: &SetupConnection, @@ -180,7 +262,7 @@ impl< (Protocol::MiningProtocol, true) => { self.on_setup_connection_mining_header_only(&pair_settings) } - // TODO add handler for other protocols + // TODO: Add handler for other protocols. _ => Err(Error::UnimplementedProtocol), } } @@ -192,12 +274,55 @@ impl< Sel: DownstreamMiningSelector + D, > MiningRouter for MiningProxyRoutingLogic { - /// On open standard channel success: - /// 1. the downstream that requested the opening of the channel must be selected an put in the - /// right group channel - /// 2. request_id from upsteram is replaced with the original request id from downstream + /// Handles the `OpenStandardMiningChannel` message. + /// + /// This method processes the request to open a standard mining channel. It selects a suitable + /// upstream, updates the request ID to ensure uniqueness, and then delegates to + /// `on_open_standard_channel_request_header_only` to finalize the process. + /// + /// # Arguments + /// - `downstream`: The downstream requesting the channel opening. + /// - `request`: A mutable reference to the `OpenStandardMiningChannel` message. 
+ /// - `downstream_mining_data`: Common data about the downstream mining setup. + /// + /// # Returns + /// - `Result>, Error>`: Returns the selected upstream for the downstream or an + /// error. + fn on_open_standard_channel( + &mut self, + downstream: Arc>, + request: &mut OpenStandardMiningChannel, + downstream_mining_data: &CommonDownstreamData, + ) -> Result>, Error> { + let upstreams = self + .downstream_to_upstream_map + .get(downstream_mining_data) + .ok_or(Error::NoCompatibleUpstream(*downstream_mining_data))?; + // If we are here, a list of possible upstreams has already been selected. + // TODO: The upstream selection logic should be specified by the caller. + let upstream = + Self::select_upstreams(&mut upstreams.to_vec()).ok_or(Error::NoUpstreamsConnected)?; + let old_id = request.get_request_id_as_u32(); + let new_req_id = upstream + .safe_lock(|u| u.get_mapper().unwrap().on_open_channel(old_id)) + .map_err(|e| Error::PoisonLock(e.to_string()))?; + request.update_id(new_req_id); + self.on_open_standard_channel_request_header_only(downstream, request) + } + + /// Handles the `OpenStandardMiningChannelSuccess` message. /// - /// The selected downstream is returned + /// This method processes the success message received from an upstream when a standard mining + /// channel is opened. It maps the request ID back to the original ID from the downstream and + /// updates the associated group and channel IDs in the upstream. + /// + /// # Arguments + /// - `upstream`: The upstream involved in the channel opening. + /// - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. + /// + /// # Returns + /// - `Result>, Error>`: Returns the downstream corresponding to the request or + /// an error. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -225,48 +350,19 @@ impl< }) .map_err(|e| Error::PoisonLock(e.to_string()))? 
} - - /// At this point the Sv2 connection with downstream is initialized that means that - /// routing_logic has already preselected a set of upstreams pairable with downstream. - /// - /// Updates the request id from downstream to a connection-wide unique request id for - /// downstream. - fn on_open_standard_channel( - &mut self, - downstream: Arc>, - request: &mut OpenStandardMiningChannel, - downstream_mining_data: &CommonDownstreamData, - ) -> Result>, Error> { - let upstreams = self - .downstream_to_upstream_map - .get(downstream_mining_data) - .ok_or(Error::NoCompatibleUpstream(*downstream_mining_data))?; - // If we are here a list of possible upstreams has been already selected - // TODO the upstream selection logic should be specified by the caller - let upstream = - Self::select_upstreams(&mut upstreams.to_vec()).ok_or(Error::NoUpstreamsConnected)?; - let old_id = request.get_request_id_as_u32(); - let new_req_id = upstream - .safe_lock(|u| u.get_mapper().unwrap().on_open_channel(old_id)) - .map_err(|e| Error::PoisonLock(e.to_string()))?; - request.update_id(new_req_id); - self.on_open_standard_channel_request_header_only(downstream, request) - } -} - -/// Routing logic valid for a standard Sv2 proxy -#[derive(Debug)] -pub struct MiningProxyRoutingLogic< - Down: IsMiningDownstream + D, - Up: IsMiningUpstream + D, - Sel: DownstreamMiningSelector + D, -> { - pub upstream_selector: GeneralMiningSelector, - pub downstream_id_generator: Id, - pub downstream_to_upstream_map: HashMap>>>, - //pub upstream_startegy: MiningUpstreamSelectionStrategy, } +/// Selects the upstream with the lowest total hash rate. +/// +/// # Arguments +/// - `ups`: A mutable slice of upstream mining entities. +/// +/// # Returns +/// - `Arc>`: The upstream entity with the lowest total hash rate. +/// +/// # Panics +/// This function panics if the slice is empty, as it is internally guaranteed that this function +/// will only be called with non-empty vectors. 
fn minor_total_hr_upstream(ups: &mut [Arc>]) -> Arc> where Down: IsMiningDownstream + D, @@ -275,7 +371,7 @@ where { ups.iter_mut() .reduce(|acc, item| { - // Is fine to unwrap a safe_lock result + // Safely locks and compares the total hash rate of each upstream. if acc.safe_lock(|x| x.total_hash_rate()).unwrap() < item.safe_lock(|x| x.total_hash_rate()).unwrap() { @@ -284,12 +380,17 @@ where item } }) - // Internal private function we only call thi function with non void vectors so is safe to - // unwrap here .unwrap() - .clone() + .clone() // Unwrap is safe because the function only operates on non-empty vectors. } +/// Filters upstream entities that are not configured for header-only mining. +/// +/// # Arguments +/// - `ups`: A mutable slice of upstream mining entities. +/// +/// # Returns +/// - `Vec>>`: A vector of upstream entities that are not header-only. fn filter_header_only(ups: &mut [Arc>]) -> Vec>> where Down: IsMiningDownstream + D, @@ -307,9 +408,18 @@ where .collect() } -/// If only one upstream is avaiable return it. -/// Try to return an upstream that is not header only. -/// Return the upstream that has less hash rate from downstreams. +/// Selects the most appropriate upstream entity based on specific criteria. +/// +/// # Criteria +/// - If only one upstream is available, it is selected. +/// - If multiple upstreams exist, preference is given to those not configured as header-only. +/// - Among the remaining upstreams, the one with the lowest total hash rate is selected. +/// +/// # Arguments +/// - `ups`: A mutable slice of upstream mining entities. +/// +/// # Returns +/// - `Option>>`: The selected upstream entity, or `None` if none are available. 
fn select_upstream(ups: &mut [Arc>]) -> Option>> where Down: IsMiningDownstream + D, @@ -333,28 +443,32 @@ impl< Sel: DownstreamMiningSelector + D, > MiningProxyRoutingLogic { - /// TODO this should stay in a enum UpstreamSelectionLogic that get passed from the caller to - /// the several methods + /// Selects an upstream entity from a list of available upstreams. + /// + /// # Arguments + /// - `ups`: A mutable slice of upstream mining entities. + /// + /// # Returns + /// - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstreams(ups: &mut [Arc>]) -> Option>> { select_upstream(ups) } - /// On setup connection the proxy finds all the upstreams that support the downstream - /// connection, creates a downstream message parser that points to all the possible - /// upstreams, and then responds with suppported flags. + /// Handles the `SetupConnection` process for header-only mining downstreams. /// - /// The upstream with min total_hash_rate is selected (TODO a method to let the caller which - /// upstream select from the possible ones should be added - /// on_setup_connection_mining_header_only_2 that return a Vec of possibe upstreams) + /// This method selects compatible upstreams, assigns connection flags, and maps the + /// downstream to the selected upstreams. /// - /// This function returns a downstream id that the new created downstream must return via the - /// trait function get_id and the flags of the paired upstream + /// # Arguments + /// - `pair_settings`: The pairing settings for the connection. + /// + /// # Returns + /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: The connection result. 
pub fn on_setup_connection_mining_header_only( &mut self, pair_settings: &PairSettings, ) -> Result<(CommonDownstreamData, SetupConnectionSuccess), Error> { let mut upstreams = self.upstream_selector.on_setup_connection(pair_settings)?; - // TODO the upstream selection logic should be specified by the caller let upstream = Self::select_upstreams(&mut upstreams.0).ok_or(Error::NoUpstreamsConnected)?; let downstream_data = CommonDownstreamData { @@ -373,21 +487,14 @@ impl< Ok((downstream_data, message)) } - /// On open standard channel request: - /// 1. an upstream must be selected between the possible upstreams for this downstream. If the - /// downstream* is header only, just one upstream will be there, so the choice is easy, if - /// not (TODO on_open_standard_channel_request_no_standard_job must be used) - /// 2. request_id from downstream is updated to a connection-wide unique request-id for - /// upstreams - /// - /// The selected upstream is returned + /// Handles a standard channel opening request for header-only mining downstreams. /// + /// # Arguments + /// - `downstream`: The downstream mining entity. + /// - `request`: The standard mining channel request message. /// - /// * The downstream that wants to open a channel already connected with the proxy so a - /// valid upstream has already been selected (otherwise downstream can not be connected). - /// If the downstream is header only, only one valid upstream has been selected (cause a - /// header only mining device can be connected only with one pool) - #[allow(clippy::result_unit_err)] + /// # Returns + /// - `Result>, Error>`: The selected upstream mining entity. 
pub fn on_open_standard_channel_request_header_only( &mut self, downstream: Arc>, @@ -396,7 +503,6 @@ impl< let downstream_mining_data = downstream .safe_lock(|d| d.get_downstream_mining_data()) .map_err(|e| crate::Error::PoisonLock(e.to_string()))?; - // header only downstream must map to only one upstream let upstream = self .downstream_to_upstream_map .get(&downstream_mining_data) From f093d2cee807704c30bb3b8d1b894752a66bce48 Mon Sep 17 00:00:00 2001 From: Gabriele Vernetti Date: Fri, 22 Nov 2024 20:50:16 +0100 Subject: [PATCH 09/12] selectors docs --- protocols/v2/roles-logic-sv2/src/selectors.rs | 256 ++++++++++++++++-- 1 file changed, 233 insertions(+), 23 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/selectors.rs b/protocols/v2/roles-logic-sv2/src/selectors.rs index b3750bdb2..fcfcffeb5 100644 --- a/protocols/v2/roles-logic-sv2/src/selectors.rs +++ b/protocols/v2/roles-logic-sv2/src/selectors.rs @@ -1,5 +1,39 @@ -//! Selectors are used from the routing logic in order to chose to which remote or set of remotes -//! a message should be ralyied, or to which remote or set of remotes a message should be sent. +//! This module provides selectors and routing logic for managing downstream and upstream nodes +//! in a mining proxy environment. Selectors help determine the appropriate remote(s) to relay or +//! send messages to. +//! +//! ## Components +//! +//! - **`ProxyDownstreamMiningSelector`**: A selector for managing downstream nodes in a mining +//! proxy, mapping requests and channel IDs to specific downstream nodes or groups. +//! - **`NullDownstreamMiningSelector`**: A no-op selector for cases where routing logic is +//! unnecessary, commonly used in test scenarios. +//! - **`GeneralMiningSelector`**: A flexible upstream selector that matches downstream nodes with +//! compatible upstream nodes based on pairing settings and flags. +//! +//! ## Traits +//! +//! - **`DownstreamSelector`**: Base trait for all downstream selectors. +//! 
- **`DownstreamMiningSelector`**: Specialized trait for selectors managing mining-specific +//! downstream nodes. +//! - **`UpstreamSelector`**: Base trait for upstream node selectors. +//! - **`UpstreamMiningSelctor`**: Specialized trait for selectors managing upstream mining nodes. +//! +//! ## Details +//! +//! ### ProxyDownstreamMiningSelector +//! - Manages mappings for request IDs, channel IDs, and downstream groups. +//! - Provides methods to handle standard channel operations, such as opening channels and +//! retrieving or removing downstream nodes associated with a channel. +//! +//! ### NullDownstreamMiningSelector +//! - Implements all required traits but panics if called. +//! - Useful for minimal setups or as a placeholder in tests. +//! +//! ### GeneralMiningSelector +//! - Matches downstream nodes to upstream nodes based on pairing compatibility. +//! - Tracks upstream nodes and their IDs for efficient lookups. + use crate::{ common_properties::{IsDownstream, IsMiningDownstream, IsMiningUpstream, PairSettings}, utils::Mutex, @@ -8,15 +42,24 @@ use crate::{ use nohash_hasher::BuildNoHashHasher; use std::{collections::HashMap, fmt::Debug as D, sync::Arc}; -/// A DownstreamMiningSelector useful for routing messages in a mining proxy +/// A selector used for routing messages to specific downstream mining nodes. +/// +/// This structure maintains mappings for request IDs, channel IDs, and downstream nodes +/// to facilitate efficient message routing. #[derive(Debug, Clone, Default)] pub struct ProxyDownstreamMiningSelector { + /// Maps request IDs to their corresponding downstream nodes. request_id_to_remotes: HashMap>, BuildNoHashHasher>, + /// Maps group channel IDs to a list of downstream nodes. channel_id_to_downstreams: HashMap>>, BuildNoHashHasher>, + /// Maps standard channel IDs to a single downstream node. 
channel_id_to_downstream: HashMap>, BuildNoHashHasher>, } impl ProxyDownstreamMiningSelector { + /// Creates a new `ProxyDownstreamMiningSelector`. + /// + /// This initializes the internal mappings with `nohash` hashers for performance. pub fn new() -> Self { Self { request_id_to_remotes: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -24,6 +67,10 @@ impl ProxyDownstreamMiningSelector { channel_id_to_downstream: HashMap::with_hasher(BuildNoHashHasher::default()), } } + + /// Creates a new `ProxyDownstreamMiningSelector` wrapped in a mutex and an `Arc`. + /// + /// This is useful for concurrent environments where shared ownership is needed. pub fn new_as_mutex() -> Arc> where Self: Sized, @@ -33,6 +80,10 @@ impl ProxyDownstreamMiningSelector { } impl ProxyDownstreamMiningSelector { + /// Removes a downstream node from all mappings. + /// + /// # Arguments + /// - `d`: The downstream node to be removed. fn _remove_downstream(&mut self, d: &Arc>) { self.request_id_to_remotes.retain(|_, v| !Arc::ptr_eq(v, d)); self.channel_id_to_downstream @@ -43,10 +94,25 @@ impl ProxyDownstreamMiningSelector { impl DownstreamMiningSelector for ProxyDownstreamMiningSelector { + /// Registers a request ID and its associated downstream node. + /// + /// # Arguments + /// - `request_id`: The unique request ID. + /// - `downstream`: The downstream node associated with the request. fn on_open_standard_channel_request(&mut self, request_id: u32, downstream: Arc>) { self.request_id_to_remotes.insert(request_id, downstream); } + /// Finalizes the mapping of a standard channel to its downstream node. + /// + /// # Arguments + /// - `request_id`: The request ID used during the channel opening. + /// - `g_channel_id`: The group channel ID. + /// - `channel_id`: The specific standard channel ID. + /// + /// # Returns + /// - `Ok`: The downstream node associated with the request. + /// - `Err`: If the request ID is unknown. 
fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -69,10 +135,25 @@ impl DownstreamMiningSelector Ok(downstream) } + /// Retrieves all downstream nodes associated with a standard/group channel ID. + /// + /// # Arguments + /// - `channel_id`: The standard/group channel ID. + /// + /// # Returns + /// - `Some`: A reference to the vector of downstream nodes. + /// - `None`: If no nodes are associated with the channel. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>> { self.channel_id_to_downstreams.get(&channel_id) } + /// Removes all downstream nodes associated with a standard/group channel ID. + /// + /// # Arguments + /// - `channel_id`: The standard/group channel ID. + /// + /// # Returns + /// A vector of the removed downstream nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>> { let downs = self .channel_id_to_downstreams @@ -84,17 +165,34 @@ impl DownstreamMiningSelector downs } + /// Removes a specific downstream node from all mappings. + /// + /// # Arguments + /// - `d`: The downstream node to be removed. fn remove_downstream(&mut self, d: &Arc>) { for dws in self.channel_id_to_downstreams.values_mut() { - dws.retain(|d| !Arc::ptr_eq(d, d)); + dws.retain(|node| !Arc::ptr_eq(node, d)); } self._remove_downstream(d); } + /// Retrieves the downstream node associated with a specific standard channel ID. + /// + /// # Arguments + /// - `channel_id`: The standard channel ID. + /// + /// # Returns + /// - `Some`: The downstream node. + /// - `None`: If no node is associated with the channel. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>> { self.channel_id_to_downstream.get(&channel_id).cloned() } + + /// Retrieves all downstream nodes currently managed by this selector. + /// + /// # Returns + /// A vector of downstream nodes. 
fn get_all_downstreams(&self) -> Vec>> {
    self.channel_id_to_downstream.values().cloned().collect()
}
@@ -103,16 +201,31 @@ impl DownstreamMiningSelector
 impl DownstreamSelector for ProxyDownstreamMiningSelector {}

 /// Implemented by a selector used by an upstream mining node or and upstream mining node
-/// abstraction in order to find the right downstream to which a message should be sent or relayied
+/// abstraction in order to find the right downstream to which a message should be sent or relayed.
 pub trait DownstreamMiningSelector: DownstreamSelector {
+    /// Handles a request to open a standard channel.
+    ///
+    /// # Arguments
+    /// - `request_id`: The ID of the request.
+    /// - `downstream`: A reference to the downstream requesting the channel.
     fn on_open_standard_channel_request(
         &mut self,
         request_id: u32,
         downstream: Arc>,
     );

+    /// Handles a successful response to opening a standard channel.
+    ///
+    /// # Arguments
+    /// - `request_id`: The ID of the request.
+    /// - `g_channel_id`: The group channel ID.
+    /// - `channel_id`: The local channel ID.
+    ///
+    /// # Returns
+    /// - `Result>, Error>`: The downstream associated with the channel or an
+    ///   error.
     fn on_open_standard_channel_success(
         &mut self,
         request_id: u32,
@@ -120,32 +233,62 @@ pub trait DownstreamMiningSelector:
         channel_id: u32,
     ) -> Result>, Error>;

-    // group / standard naming is terrible channel_id in this case can be either the channel_id
-    // or the group_channel_id
+    /// Retrieves all downstreams associated with a channel ID.
+    ///
+    /// # Arguments
+    /// - `channel_id`: The channel ID to query.
+    ///
+    /// # Returns
+    /// - `Option<&Vec>>>`: The list of downstreams or `None`.
     fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>>;

+    /// Removes all downstreams associated with a channel ID.
+    ///
+    /// # Arguments
+    /// - `channel_id`: The channel ID to remove downstreams from.
+    ///
+    /// # Returns
+    /// - `Vec>>`: The removed downstreams.
fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>>; + /// Removes a specific downstream. + /// + /// # Arguments + /// - `d`: A reference to the downstream to remove. fn remove_downstream(&mut self, d: &Arc>); - // only for standard + /// Retrieves a downstream by channel ID (only for standard channels). + /// + /// # Arguments + /// - `channel_id`: The channel ID to query. + /// + /// # Returns + /// - `Option>>`: The downstream or `None`. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>>; + /// Retrieves all downstreams. + /// + /// # Returns + /// - `Vec>>`: All downstreams. fn get_all_downstreams(&self) -> Vec>>; } +/// A generic downstream selector. pub trait DownstreamSelector {} -/// A DownstreamMiningSelector that do nothing. Useful when ParseDownstreamCommonMessages or -/// ParseUpstreamCommonMessages must be implemented in very simple application (eg for test -/// puorposes) +/// A no-op implementation of `DownstreamMiningSelector`. +/// +/// This selector is primarily used for testing or minimal setups where routing logic is not needed. #[derive(Debug, Clone, Copy, Default)] pub struct NullDownstreamMiningSelector(); impl NullDownstreamMiningSelector { + /// Creates a new `NullDownstreamMiningSelector`. pub fn new() -> Self { NullDownstreamMiningSelector() } + + /// Creates a new `NullDownstreamMiningSelector` wrapped in a mutex and an `Arc`. pub fn new_as_mutex() -> Arc> where Self: Sized, @@ -155,6 +298,10 @@ impl NullDownstreamMiningSelector { } impl DownstreamMiningSelector for NullDownstreamMiningSelector { + /// Called when a standard channel open request is received. + /// + /// This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op + /// implementation. 
fn on_open_standard_channel_request( &mut self, _request_id: u32, @@ -163,6 +310,9 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_request") } + /// Called when a standard channel open request is successful. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn on_open_standard_channel_success( &mut self, _request_id: u32, @@ -172,52 +322,92 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_success") } + /// Retrieves the downstreams in a specific channel. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn get_downstreams_in_channel(&self, _channel_id: u32) -> Option<&Vec>>> { unreachable!("get_downstreams_in_channel") } + + /// Removes downstreams in a specific channel. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstreams_in_channel(&mut self, _channel_id: u32) -> Vec>> { unreachable!("remove_downstreams_in_channel") } + /// Removes a specific downstream node. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. + fn remove_downstream(&mut self, _d: &Arc>) { + unreachable!("remove_downstream") + } + + /// Retrieves the downstream associated with a specific channel ID. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn downstream_from_channel_id(&self, _channel_id: u32) -> Option>> { unreachable!("downstream_from_channel_id") } + + /// Retrieves all downstream nodes managed by this selector. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn get_all_downstreams(&self) -> Vec>> { unreachable!("get_all_downstreams") } - - fn remove_downstream(&mut self, _d: &Arc>) { - unreachable!("remove_downstream") - } } impl DownstreamSelector for NullDownstreamMiningSelector {} +/// Trait for selecting upstream nodes in a mining context. pub trait UpstreamSelector {} +/// Trait for selecting upstream mining nodes. 
+/// +/// This trait allows pairing downstream mining nodes with upstream nodes +/// based on their settings and capabilities. pub trait UpstreamMiningSelctor< Down: IsMiningDownstream, Up: IsMiningUpstream, Sel: DownstreamMiningSelector, >: UpstreamSelector { + /// Handles the `SetupConnection` process. + /// + /// # Arguments + /// - `pair_settings`: The settings for pairing downstream and upstream nodes. + /// + /// # Returns + /// - `Ok((Vec>>, u32))`: A vector of upstream nodes and their combined flags. + /// - `Err`: If no upstreams are pairable. #[allow(clippy::type_complexity)] fn on_setup_connection( &mut self, pair_settings: &PairSettings, ) -> Result<(Vec>>, u32), Error>; + + /// Retrieves an upstream node by its ID. + /// + /// # Arguments + /// - `upstream_id`: The unique ID of the upstream node. + /// + /// # Returns + /// - `Some`: The upstream node. + /// - `None`: If no upstream is found. fn get_upstream(&self, upstream_id: u32) -> Option>>; } -/// Upstream selector is used to chose between a set of known mining upstream nodes which one/ones -/// can accept messages from a specific mining downstream node +/// General implementation of an upstream mining selector. #[derive(Debug)] pub struct GeneralMiningSelector< Sel: DownstreamMiningSelector, Down: IsMiningDownstream, Up: IsMiningUpstream, > { + /// List of upstream nodes. pub upstreams: Vec>>, + /// Mapping of upstream IDs to their respective nodes. pub id_to_upstream: HashMap>, BuildNoHashHasher>, sel: std::marker::PhantomData, down: std::marker::PhantomData, @@ -229,10 +419,13 @@ impl< Down: IsMiningDownstream, > GeneralMiningSelector { + /// Creates a new `GeneralMiningSelector`. + /// + /// # Arguments + /// - `upstreams`: A vector of upstream nodes. 
pub fn new(upstreams: Vec>>) -> Self { let mut id_to_upstream = HashMap::with_hasher(BuildNoHashHasher::default()); for up in &upstreams { - // Is ok to unwrap safe_lock result id_to_upstream.insert(up.safe_lock(|u| u.get_id()).unwrap(), up.clone()); } Self { @@ -242,10 +435,16 @@ impl< down: std::marker::PhantomData, } } + + /// Updates the list of upstream nodes. + /// + /// # Arguments + /// - `upstreams`: The new list of upstream nodes. pub fn update_upstreams(&mut self, upstreams: Vec>>) { self.upstreams = upstreams; } } + impl< Sel: DownstreamMiningSelector, Down: IsMiningDownstream, @@ -260,9 +459,14 @@ impl< Up: IsMiningUpstream, > UpstreamMiningSelctor for GeneralMiningSelector { - /// Return the set of mining upstream nodes that can accept messages from a downstream with - /// the passed PairSettings and the sum of all the accepted flags - #[allow(clippy::type_complexity)] + /// Handles the `SetupConnection` process and determines the pairable upstream nodes. + /// + /// # Arguments + /// - `pair_settings`: The settings for pairing downstream and upstream nodes. + /// + /// # Returns + /// - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. + /// - `Err`: If no upstreams are pairable. fn on_setup_connection( &mut self, pair_settings: &PairSettings, @@ -272,10 +476,8 @@ impl< for node in &self.upstreams { let is_pairable = node .safe_lock(|node| node.is_pairable(pair_settings)) - // Is ok to unwrap safe_lock result .unwrap(); if is_pairable { - // Is ok to unwrap safe_lock result supported_flags |= node.safe_lock(|n| n.get_flags()).unwrap(); supported_upstreams.push(node.clone()); } @@ -287,6 +489,14 @@ impl< Err(Error::NoPairableUpstream((2, 2, 0))) } + /// Retrieves an upstream node by its ID. + /// + /// # Arguments + /// - `upstream_id`: The unique ID of the upstream node. + /// + /// # Returns + /// - `Some`: The upstream node. + /// - `None`: If no upstream is found. 
fn get_upstream(&self, upstream_id: u32) -> Option>> { self.id_to_upstream.get(&upstream_id).cloned() } From e4547daecb7b4597004a7be60210b89b120154bd Mon Sep 17 00:00:00 2001 From: GitGab19 Date: Mon, 2 Dec 2024 13:40:51 +0700 Subject: [PATCH 10/12] roles_logic_sv2 README creation --- protocols/v2/roles-logic-sv2/README.md | 34 ++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 protocols/v2/roles-logic-sv2/README.md diff --git a/protocols/v2/roles-logic-sv2/README.md b/protocols/v2/roles-logic-sv2/README.md new file mode 100644 index 000000000..0787863f9 --- /dev/null +++ b/protocols/v2/roles-logic-sv2/README.md @@ -0,0 +1,34 @@ +# `roles_logic_sv2` + +[![crates.io](https://img.shields.io/crates/v/roles_logic_sv2.svg)](https://crates.io/crates/roles_logic_sv2) +[![docs.rs](https://docs.rs/roles_logic_sv2/badge.svg)](https://docs.rs/roles_logic_sv2) +[![rustc+](https://img.shields.io/badge/rustc-1.75.0%2B-lightgrey.svg)](https://blog.rust-lang.org/2023/12/28/Rust-1.75.0.html) +[![license](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/stratum-mining/stratum/blob/main/LICENSE.md) +[![codecov](https://codecov.io/gh/stratum-mining/stratum/branch/main/graph/badge.svg?flag=roles_logic_sv2-coverage)](https://codecov.io/gh/stratum-mining/stratum) + +`roles_logic_sv2` provides the core logic and utilities for implementing roles in the Stratum V2 (Sv2) protocol, such as miners, pools, and proxies. It abstracts message handling, channel management, job creation, and routing logic, enabling efficient and secure communication across upstream and downstream connections. + +## Main Components + +- **Channel Logic**: Manages the lifecycle and settings of communication channels (standard, extended, and group ones) between roles. +- **Handlers**: Provides traits for handling logic of Sv2 protocol messages. +- **Job Management**: Facilitates the creation, validation, and dispatching of mining jobs. 
+- **Parsers**: Handles serialization and deserialization of Sv2 messages via [`binary_sv2`](https://docs.rs/binary_sv2/latest/binary_sv2/index.html).
+- **Routing Logic**: Implements message routing and downstream/upstream selector utilities. Useful for advanced proxy implementations with multiplexing of Standard Channels across different upstreams.
+- **Utilities**: Provides helpers for safe mutex locking, mining-specific calculations, and more.
+
+## Usage
+
+To include this crate in your project, run:
+
+```bash
+cargo add roles_logic_sv2
+```
+
+This crate can be built with the following feature flags:
+
+- `with_serde`: Enables serialization and deserialization support using the serde library. This feature flag also activates the with_serde feature for dependent crates such as `binary_sv2`, `common_messages_sv2`, `template_distribution_sv2`, `job_declaration_sv2`, and `mining_sv2`.
+  Note that this feature flag is only used for the Message Generator, and is deprecated
+  for any other kind of usage. It will likely be fully deprecated in the future.
+- `prop_test`: Enables property-based testing features for template distribution logic, leveraging dependencies' testing capabilities such as the `template_distribution_sv2` crate.
+- `disable_nopanic`: Disables the nopanic logic in scenarios where code coverage tools might conflict with it.
\ No newline at end of file

From bc90429d2eae4f9bff7a125c4c951f256f7155ea Mon Sep 17 00:00:00 2001
From: bit-aloo
Date: Wed, 4 Dec 2024 18:55:45 +0530
Subject: [PATCH 11/12] roles_logic doc nits

---
 .../src/channel_logic/channel_factory.rs      | 18 +-
 .../src/channel_logic/proxy_group_channel.rs  | 21 +-
 .../roles-logic-sv2/src/common_properties.rs  | 14 +-
 protocols/v2/roles-logic-sv2/src/errors.rs    | 17 +-
 .../src/handlers/job_declaration.rs           |  2 +
 .../v2/roles-logic-sv2/src/handlers/mining.rs |  1 +
 .../src/handlers/template_distribution.rs     |  1 +
 .../v2/roles-logic-sv2/src/job_creator.rs     | 64 +++---
 .../v2/roles-logic-sv2/src/job_dispatcher.rs  |  4 +-
 protocols/v2/roles-logic-sv2/src/lib.rs       |  4 +-
 .../v2/roles-logic-sv2/src/routing_logic.rs   | 132 ++++++------
 protocols/v2/roles-logic-sv2/src/selectors.rs | 198 +++++++++---------
 protocols/v2/roles-logic-sv2/src/utils.rs     | 14 +-
 13 files changed, 249 insertions(+), 241 deletions(-)

diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs
index cb74c4f76..040adb097 100644
--- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs
+++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs
@@ -56,7 +56,7 @@ pub enum OnNewShare {
     /// Used when the received is malformed, is for an inexistent channel or do not meet downstream
     /// target.
     SendErrorDownstream(SubmitSharesError<'static>),
-    /// Used when an exteded channel in a proxy receive a share, and the share meet upstream
+    /// Used when an extended channel in a proxy receives a share, and the share meets upstream
     /// target, in this case a new share must be sent upstream. Also an optional template id is
     /// returned, when a job declarator want to send a valid share upstream could use the
     /// template for get the up job id.
@@ -71,7 +71,7 @@ pub enum OnNewShare { /// (share, template id, coinbase,complete extranonce) ShareMeetBitcoinTarget((Share, Option, Vec, Vec)), /// Indicate that the share meet downstream target, in the case we could send a success - /// response dowmstream. + /// response downstream. ShareMeetDownstreamTarget, } @@ -266,7 +266,7 @@ impl ChannelFactory { let max_extranonce_size = self.extranonces.get_range2_len() as u16; if min_extranonce_size <= max_extranonce_size { // SECURITY is very unlikely to finish the ids btw this unwrap could be used by an - // attaccher that want to dirsrupt the service maybe we should have a method + // attacker that want to disrupt the service maybe we should have a method // to reuse ids that are no longer connected? let channel_id = self .ids @@ -404,7 +404,7 @@ impl ChannelFactory { } /// This function is called when downstream have a group channel - /// Shouldnt all standard channel's be non HOM?? + /// Should not all standard channels be non HOM?? fn new_standard_channel_for_non_hom_downstream( &mut self, request_id: u32, @@ -472,7 +472,7 @@ impl ChannelFactory { .get(&channel_id) .unwrap(); // OPTIMIZATION this could be memoized somewhere cause is very likely that we will receive a - // lot od OpenStandardMiningChannel requests consequtevely + // lot of OpenStandardMiningChannel requests consecutively let job_id = self.job_ids.next(); let future_jobs: Option>> = self .future_jobs @@ -568,11 +568,11 @@ impl ChannelFactory { } // When a new non HOM downstream opens a channel, we use this function to prepare all the - // extended jobs (future and non) and the prev hash that we need to send dowmstream + // extended jobs (future and non) and the prev hash that we need to send downstream fn prepare_jobs_and_p_hash(&mut self, result: &mut Vec, complete_id: u64) { // If group is 0 it means that we are preparing jobs and p hash for a non HOM downstream // that want to open a new extended channel in that case we want to use the channel 
id - // TODO verify that this is true also for the case where the channle factory is in a proxy + // TODO verify that this is true also for the case where the channel factory is in a proxy // and not in a pool. let group_id = match GroupId::into_group_id(complete_id) { 0 => GroupId::into_channel_id(complete_id), @@ -1178,7 +1178,7 @@ impl PoolChannelFactory { let target = self.job_creator.last_target(); // When downstream set a custom mining job we add the job to the negotiated job // hashmap, with the extended channel id as a key. Whenever the pool receive a share must - // first check if the channel have a negotiated job if so we can not retreive the template + // first check if the channel have a negotiated job if so we can not retrieve the template // via the job creator but we create a new one from the set custom job. if self.negotiated_jobs.contains_key(&m.channel_id) { let referenced_job = self.negotiated_jobs.get(&m.channel_id).unwrap(); @@ -1318,7 +1318,7 @@ impl PoolChannelFactory { } } -/// Used by proxies that want to open extended channls with upstream. If the proxy has job +/// Used by proxies that want to open extended channels with upstream. If the proxy has job /// declaration capabilities, we set the job creator and the coinbase outs. #[derive(Debug)] pub struct ProxyExtendedChannelFactory { diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index 452c39fbb..73068fb8e 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -99,9 +99,9 @@ impl GroupChannel { last_received_job: None, } } - /// Called when a channel is successfully opened for header only mining on standard channels. 
- /// Here we store the new channel, and update state for jobs and return relevant SV2 messages - /// (NewMiningJob and SNPH) + // Called when a channel is successfully opened for header only mining(HOM) on standard + // channels. Here, we store the new channel, and update state for jobs and return relevant + // SV2 messages (NewMiningJob and SNPH) fn on_channel_success_for_hom_downtream( &mut self, m: OpenStandardMiningChannelSuccess, @@ -151,9 +151,10 @@ impl GroupChannel { Ok(res) } - /// If a matching job is already in the future job queue, - /// we set a new valid job, otherwise we clear the future jobs - /// queue and stage a prev hash to be used when the job arrives + + // If a matching job is already in the future job queue, + // we set a new valid job, otherwise we clear the future jobs + // queue and stage a prev hash to be used when the job arrives fn update_new_prev_hash(&mut self, m: &SetNewPrevHash) { while let Some(job) = self.future_jobs.pop() { if job.job_id == m.job_id { @@ -171,8 +172,9 @@ impl GroupChannel { }; self.last_prev_hash = Some(cloned.clone()); } - /// Pushes new job to future_job queue if it is future, - /// otherwise we set it as the valid job + + // Pushes new job to future_job queue if it is future, + // otherwise we set it as the valid job fn on_new_extended_mining_job(&mut self, m: NewExtendedMiningJob<'static>) { self.last_received_job = Some(m.clone()); if m.is_future() { @@ -181,7 +183,8 @@ impl GroupChannel { self.last_valid_job = Some(m) } } - /// Returns most recent job + + // Returns most recent job fn last_received_job_to_standard_job( &mut self, channel_id: u32, diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index ba7f57076..d36c901da 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -29,7 +29,7 @@ pub struct PairSettings { pub trait IsUpstream + ?Sized> { /// Used to 
bitcoin protocol version for the channel. fn get_version(&self) -> u16; - // Used to get flags for the defined sv2 message protocol + /// Used to get flags for the defined sv2 message protocol fn get_flags(&self) -> u32; /// Used to check if the upstream supports the protocol that the downstream wants to use fn get_supported_protocols(&self) -> Vec; @@ -55,7 +55,7 @@ pub trait IsUpstream + ?Sized> /// Channel to be opened with the upstream nodes. #[derive(Debug, Clone, Copy)] pub enum UpstreamChannel { - // nominal hash rate + /// nominal hash rate Standard(f32), Group, Extended, @@ -108,7 +108,7 @@ pub trait IsMiningDownstream: IsDownstream { } } -/// Implemented for the NullDownstreamMiningSelector +// Implemented for the NullDownstreamMiningSelector impl IsUpstream for () { fn get_version(&self) -> u16 { unreachable!("Null upstream do not have a version"); @@ -134,7 +134,7 @@ impl IsUpstream for } } -/// Implemented for the NullDownstreamMiningSelector +// Implemented for the NullDownstreamMiningSelector impl IsDownstream for () { fn get_downstream_mining_data(&self) -> CommonDownstreamData { unreachable!("Null downstream do not have mining data"); @@ -160,11 +160,11 @@ impl IsMiningUpstream downstream ids + // Mapping of upstream id -> downstream ids request_ids_map: HashMap>, next_id: u32, } @@ -188,7 +188,7 @@ impl RequestIdMapper { new_id } - /// Removes a upstream/downstream mapping from the `RequsetIdMapper`. + /// Removes a upstream/downstream mapping from the `RequestIdMapper`. 
pub fn remove(&mut self, upstream_id: u32) -> Option { self.request_ids_map.remove(&upstream_id) } diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs index cc05fede7..47369a483 100644 --- a/protocols/v2/roles-logic-sv2/src/errors.rs +++ b/protocols/v2/roles-logic-sv2/src/errors.rs @@ -7,6 +7,7 @@ use crate::{ use binary_sv2::Error as BinarySv2Error; use std::fmt::{self, Display, Formatter}; +/// Error enum #[derive(Debug)] pub enum Error { /// Payload size is too big to fit into a frame @@ -29,7 +30,7 @@ pub enum Error { NoCompatibleUpstream(CommonDownstreamData), /// Error if the hashmap `future_jobs` field in the `GroupChannelJobDispatcher` is empty. NoFutureJobs, - /// No Downstreams connected + /// No Downstream's connected NoDownstreamsConnected, /// PrevHash requires non-existent Job Id PrevHashRequireNonExistentJobId(u32), @@ -61,7 +62,7 @@ pub enum Error { GroupIdNotFound, /// A share has been received but no job for it exist ShareDoNotMatchAnyJob, - /// A share has been recived but no channel for it exist + /// A share has been received but no channel for it exist ShareDoNotMatchAnyChannel, /// Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase InvalidCoinbase, @@ -124,7 +125,7 @@ impl Display for Error { BadPayloadSize => write!(f, "Payload is too big to fit into the frame"), BinarySv2Error(v) => write!( f, - "BinarySv2Error: error in serializing/deserilizing binary format {:?}", + "BinarySv2Error: error in serializing/deserializing binary format {:?}", v ), DownstreamDown => { @@ -174,12 +175,12 @@ impl Display for Error { }, NoMoreExtranonces => write!(f, "No more extranonces"), JobIsNotFutureButPrevHashNotPresent => write!(f, "A non future job always expect a previous new prev hash"), - ChannelIsNeitherExtendedNeitherInAPool => write!(f, "If a channel is neither extended neither is part of a pool the only thing to do when a OpenStandardChannle is received is to relay it upstream with 
and updated request id"), - ExtranonceSpaceEnded => write!(f, "No more avaible extranonces for downstream"), + ChannelIsNeitherExtendedNeitherInAPool => write!(f, "If a channel is neither extended neither is part of a pool the only thing to do when a OpenStandardChannel is received is to relay it upstream with an updated request id"), + ExtranonceSpaceEnded => write!(f, "No more available extranonces for downstream"), ImpossibleToCalculateMerkleRoot => write!(f, "Impossible to calculate merkle root"), GroupIdNotFound => write!(f, "Group id not found"), - ShareDoNotMatchAnyJob => write!(f, "A share has been recived but no job for it exist"), - ShareDoNotMatchAnyChannel => write!(f, "A share has been recived but no channel for it exist"), + ShareDoNotMatchAnyJob => write!(f, "A share has been received but no job for it exist"), + ShareDoNotMatchAnyChannel => write!(f, "A share has been received but no channel for it exist"), InvalidCoinbase => write!(f, "Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase"), ValueRemainingNotUpdated => write!(f, "Value remaining in coinbase output was not correctly updated (it's equal to 0)"), UnknownOutputScriptType => write!(f, "Unknown script type in config"), @@ -189,7 +190,7 @@ impl Display for Error { TxVersionTooBig => write!(f, "Tx version can not be greater than i32::MAX"), TxVersionTooLow => write!(f, "Tx version can not be lower than 1"), TxDecodingError(e) => write!(f, "Impossible to decode tx: {:?}", e), - NotFoundChannelId => write!(f, "No downstream has been registred for this channel id"), + NotFoundChannelId => write!(f, "No downstream has been registered for this channel id"), NoValidJob => write!(f, "Impossible to create a standard job for channelA cause no valid job has been received from upstream yet"), NoValidTranslatorJob => write!(f, "Impossible to create a extended job for channel cause no valid job has been received from upstream yet"), NoTemplateForId => write!(f, "Impossible to retrieve 
a template for the required job id"), diff --git a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs index f0453fd8d..a2ec2aea1 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs @@ -41,6 +41,8 @@ use crate::{parsers::JobDeclaration, utils::Mutex}; use std::sync::Arc; + +/// see [`SendTo_`] pub type SendTo = SendTo_, ()>; use super::SendTo_; use crate::errors::Error; diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs index 918a32405..8edb43976 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs @@ -60,6 +60,7 @@ use const_sv2::*; use std::{fmt::Debug as D, sync::Arc}; use tracing::{debug, error, info, trace}; +/// see [`SendTo_`] pub type SendTo = SendTo_, Remote>; /// Represents supported channel types in a mining connection. 
diff --git a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs index a0a5b94ec..445a83922 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs @@ -42,6 +42,7 @@ use template_distribution_sv2::{ RequestTransactionDataSuccess, SetNewPrevHash, SubmitSolution, }; +/// see [`SendTo_`] pub type SendTo = SendTo_, ()>; use const_sv2::*; use core::convert::TryInto; diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs index 1ed653762..31ef5f224 100644 --- a/protocols/v2/roles-logic-sv2/src/job_creator.rs +++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs @@ -165,15 +165,15 @@ pub fn extended_job_from_custom_job( ) } -/// returns an extended job given the provided template from the Template Provider and other -/// Pool role related fields. -/// -/// Pool related arguments: -/// -/// * `coinbase_outputs`: coinbase output transactions specified by the pool. -/// * `job_id`: incremented job identifier specified by the pool. -/// * `version_rolling_allowed`: boolean specified by the channel. -/// * `extranonce_len`: extranonce length specified by the channel. +// returns an extended job given the provided template from the Template Provider and other +// Pool role related fields. +// +// Pool related arguments: +// +// * `coinbase_outputs`: coinbase output transactions specified by the pool. +// * `job_id`: incremented job identifier specified by the pool. +// * `version_rolling_allowed`: boolean specified by the channel. +// * `extranonce_len`: extranonce length specified by the channel. 
fn new_extended_job( new_template: &mut NewTemplate, coinbase_outputs: &mut [TxOut], @@ -234,8 +234,8 @@ fn new_extended_job( Ok(new_extended_mining_job) } -/// used to extract the coinbase transaction prefix for extended jobs -/// so the extranonce search space can be introduced +// used to extract the coinbase transaction prefix for extended jobs +// so the extranonce search space can be introduced fn coinbase_tx_prefix( coinbase: &Transaction, script_prefix_len: usize, @@ -258,15 +258,15 @@ fn coinbase_tx_prefix( r.try_into().map_err(Error::BinarySv2Error) } -/// used to extract the coinbase transaction suffix for extended jobs -/// so the extranonce search space can be introduced +// used to extract the coinbase transaction suffix for extended jobs +// so the extranonce search space can be introduced fn coinbase_tx_suffix( coinbase: &Transaction, extranonce_len: u8, script_prefix_len: usize, ) -> Result, Error> { let encoded = coinbase.serialize(); - // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0 + // If script_prefix_len is not 0 we are not in a test environment and the coinbase have the 0 // witness let segwit_bytes = match script_prefix_len { 0 => 0, @@ -319,8 +319,8 @@ fn get_bip_34_bytes(new_template: &NewTemplate, tx_version: i32) -> Result, version: i32, @@ -330,7 +330,7 @@ fn coinbase( pool_signature: String, extranonce_len: u8, ) -> Transaction { - // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0 + // If script_prefix_len is not 0 we are not in a test environment and the coinbase have the 0 // witness let witness = match bip34_bytes.len() { 0 => Witness::from_vec(vec![]), @@ -378,8 +378,8 @@ pub fn extended_job_to_non_segwit( coinbase_tx_suffix: stripped_tx.into_coinbase_tx_suffix()?, }) } -/// Helper type to strip a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix -/// to ensure miners are hashing with the correct coinbase +// Helper type to strip 
a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix +// to ensure miners are hashing with the correct coinbase struct StrippedCoinbaseTx { version: u32, inputs: Vec>, @@ -390,7 +390,7 @@ struct StrippedCoinbaseTx { } impl StrippedCoinbaseTx { - /// create + // create fn from_coinbase(tx: Transaction, full_extranonce_len: usize) -> Result { let bip141_bytes_len = tx .input @@ -420,13 +420,13 @@ impl StrippedCoinbaseTx { }) } - /// the coinbase tx prefix is the LE bytes concatenation of the tx version and all - /// of the tx inputs minus the 32 bytes after the bip34 bytes in the script - /// and the last input's sequence (used as the first entry in the coinbase tx suffix). - /// The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce - /// space for the miner. We remove the bip141 marker and flag since it is only used for - /// computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root - // clippy allow because we dont want to consume self + // the coinbase tx prefix is the LE bytes concatenation of the tx version and all + // of the tx inputs minus the 32 bytes after the bip34 bytes in the script + // and the last input's sequence (used as the first entry in the coinbase tx suffix). + // The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce + // space for the miner. We remove the bip141 marker and flag since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self #[allow(clippy::wrong_self_convention)] fn into_coinbase_tx_prefix(&self) -> Result, errors::Error> { let mut inputs = self.inputs.clone(); @@ -445,11 +445,11 @@ impl StrippedCoinbaseTx { prefix.try_into().map_err(Error::BinarySv2Error) } - /// This coinbase tx suffix is the sequence of the last tx input plus - /// the serialized tx outputs and the lock time. 
Note we do not use the witnesses - /// (placed between txouts and lock time) since it is only used for - /// computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root - // clippy allow because we dont want to consume self + // This coinbase tx suffix is the sequence of the last tx input plus + // the serialized tx outputs and the lock time. Note we do not use the witnesses + // (placed between txouts and lock time) since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self #[allow(clippy::wrong_self_convention)] fn into_coinbase_tx_suffix(&self) -> Result, errors::Error> { let mut suffix: Vec = vec![]; diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs index 4eac84734..e0589c526 100644 --- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs +++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs @@ -54,7 +54,7 @@ struct BlockHeader<'a> { } impl<'a> BlockHeader<'a> { - /// calculates the sha256 blockhash of the header + // calculates the sha256 blockhash of the header #[allow(dead_code)] pub fn hash(&self) -> Target { let mut engine = sha256d::Hash::engine(); @@ -98,7 +98,7 @@ pub struct GroupChannelJobDispatcher { /// Used to signal if submitted shares correlate to valid jobs pub enum SendSharesResponse { - //ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), + /// ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), Valid(SubmitSharesStandard), Invalid(SubmitSharesError<'static>), } diff --git a/protocols/v2/roles-logic-sv2/src/lib.rs b/protocols/v2/roles-logic-sv2/src/lib.rs index a38e0ad97..c6ffe67bc 100644 --- a/protocols/v2/roles-logic-sv2/src/lib.rs +++ b/protocols/v2/roles-logic-sv2/src/lib.rs @@ -22,13 +22,13 @@ //! handlers::common::ParseDownstreamCommonMessages + //! 
handlers::mining::ParseDownstreamMiningMessages + //! -//! ProxyDownstreamConnetion: +//! ProxyDownstreamConnection: //! common_properties::IsDownstream + //! common_properties::IsMiningDownstream + //! handlers::common::ParseDownstreamCommonMessages + //! handlers::mining::ParseDownstreamMiningMessages + //! -//! ProxyUpstreamConnetion: +//! ProxyUpstreamConnection: //! common_properties::IsUpstream + //! common_properties::IsMiningUpstream + //! handlers::common::ParseUpstreamCommonMessages + diff --git a/protocols/v2/roles-logic-sv2/src/routing_logic.rs b/protocols/v2/roles-logic-sv2/src/routing_logic.rs index a23e62e4b..37ddbb7fa 100644 --- a/protocols/v2/roles-logic-sv2/src/routing_logic.rs +++ b/protocols/v2/roles-logic-sv2/src/routing_logic.rs @@ -274,20 +274,20 @@ impl< Sel: DownstreamMiningSelector + D, > MiningRouter for MiningProxyRoutingLogic { - /// Handles the `OpenStandardMiningChannel` message. - /// - /// This method processes the request to open a standard mining channel. It selects a suitable - /// upstream, updates the request ID to ensure uniqueness, and then delegates to - /// `on_open_standard_channel_request_header_only` to finalize the process. - /// - /// # Arguments - /// - `downstream`: The downstream requesting the channel opening. - /// - `request`: A mutable reference to the `OpenStandardMiningChannel` message. - /// - `downstream_mining_data`: Common data about the downstream mining setup. - /// - /// # Returns - /// - `Result>, Error>`: Returns the selected upstream for the downstream or an - /// error. + // Handles the `OpenStandardMiningChannel` message. + // + // This method processes the request to open a standard mining channel. It selects a suitable + // upstream, updates the request ID to ensure uniqueness, and then delegates to + // `on_open_standard_channel_request_header_only` to finalize the process. + // + // # Arguments + // - `downstream`: The downstream requesting the channel opening. 
+ // - `request`: A mutable reference to the `OpenStandardMiningChannel` message. + // - `downstream_mining_data`: Common data about the downstream mining setup. + // + // # Returns + // - `Result>, Error>`: Returns the selected upstream for the downstream or an + // error. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -310,19 +310,19 @@ impl< self.on_open_standard_channel_request_header_only(downstream, request) } - /// Handles the `OpenStandardMiningChannelSuccess` message. - /// - /// This method processes the success message received from an upstream when a standard mining - /// channel is opened. It maps the request ID back to the original ID from the downstream and - /// updates the associated group and channel IDs in the upstream. - /// - /// # Arguments - /// - `upstream`: The upstream involved in the channel opening. - /// - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. - /// - /// # Returns - /// - `Result>, Error>`: Returns the downstream corresponding to the request or - /// an error. + // Handles the `OpenStandardMiningChannelSuccess` message. + // + // This method processes the success message received from an upstream when a standard mining + // channel is opened. It maps the request ID back to the original ID from the downstream and + // updates the associated group and channel IDs in the upstream. + // + // # Arguments + // - `upstream`: The upstream involved in the channel opening. + // - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. + // + // # Returns + // - `Result>, Error>`: Returns the downstream corresponding to the request or + // an error. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -352,17 +352,17 @@ impl< } } -/// Selects the upstream with the lowest total hash rate. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. 
-/// -/// # Returns -/// - `Arc>`: The upstream entity with the lowest total hash rate. -/// -/// # Panics -/// This function panics if the slice is empty, as it is internally guaranteed that this function -/// will only be called with non-empty vectors. +// Selects the upstream with the lowest total hash rate. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Arc>`: The upstream entity with the lowest total hash rate. +// +// # Panics +// This function panics if the slice is empty, as it is internally guaranteed that this function +// will only be called with non-empty vectors. fn minor_total_hr_upstream(ups: &mut [Arc>]) -> Arc> where Down: IsMiningDownstream + D, @@ -384,13 +384,13 @@ where .clone() // Unwrap is safe because the function only operates on non-empty vectors. } -/// Filters upstream entities that are not configured for header-only mining. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Vec>>`: A vector of upstream entities that are not header-only. +// Filters upstream entities that are not configured for header-only mining. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Vec>>`: A vector of upstream entities that are not header-only. fn filter_header_only(ups: &mut [Arc>]) -> Vec>> where Down: IsMiningDownstream + D, @@ -408,18 +408,18 @@ where .collect() } -/// Selects the most appropriate upstream entity based on specific criteria. -/// -/// # Criteria -/// - If only one upstream is available, it is selected. -/// - If multiple upstreams exist, preference is given to those not configured as header-only. -/// - Among the remaining upstreams, the one with the lowest total hash rate is selected. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Option>>`: The selected upstream entity, or `None` if none are available. 
+// Selects the most appropriate upstream entity based on specific criteria. +// +// # Criteria +// - If only one upstream is available, it is selected. +// - If multiple upstreams exist, preference is given to those not configured as header-only. +// - Among the remaining upstreams, the one with the lowest total hash rate is selected. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstream(ups: &mut [Arc>]) -> Option>> where Down: IsMiningDownstream + D, @@ -443,18 +443,18 @@ impl< Sel: DownstreamMiningSelector + D, > MiningProxyRoutingLogic { - /// Selects an upstream entity from a list of available upstreams. - /// - /// # Arguments - /// - `ups`: A mutable slice of upstream mining entities. - /// - /// # Returns - /// - `Option>>`: The selected upstream entity, or `None` if none are available. + // Selects an upstream entity from a list of available upstreams. + // + // # Arguments + // - `ups`: A mutable slice of upstream mining entities. + // + // # Returns + // - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstreams(ups: &mut [Arc>]) -> Option>> { select_upstream(ups) } - /// Handles the `SetupConnection` process for header-only mining downstreams. + /// Handles the `SetupConnection` process for header-only mining downstream's. /// /// This method selects compatible upstreams, assigns connection flags, and maps the /// downstream to the selected upstreams. @@ -487,7 +487,7 @@ impl< Ok((downstream_data, message)) } - /// Handles a standard channel opening request for header-only mining downstreams. + /// Handles a standard channel opening request for header-only mining downstream's. /// /// # Arguments /// - `downstream`: The downstream mining entity. 
diff --git a/protocols/v2/roles-logic-sv2/src/selectors.rs b/protocols/v2/roles-logic-sv2/src/selectors.rs index fcfcffeb5..ceb4dfed3 100644 --- a/protocols/v2/roles-logic-sv2/src/selectors.rs +++ b/protocols/v2/roles-logic-sv2/src/selectors.rs @@ -48,18 +48,18 @@ use std::{collections::HashMap, fmt::Debug as D, sync::Arc}; /// to facilitate efficient message routing. #[derive(Debug, Clone, Default)] pub struct ProxyDownstreamMiningSelector { - /// Maps request IDs to their corresponding downstream nodes. + // Maps request IDs to their corresponding downstream nodes. request_id_to_remotes: HashMap>, BuildNoHashHasher>, - /// Maps group channel IDs to a list of downstream nodes. + // Maps group channel IDs to a list of downstream nodes. channel_id_to_downstreams: HashMap>>, BuildNoHashHasher>, - /// Maps standard channel IDs to a single downstream node. + // Maps standard channel IDs to a single downstream node. channel_id_to_downstream: HashMap>, BuildNoHashHasher>, } impl ProxyDownstreamMiningSelector { /// Creates a new `ProxyDownstreamMiningSelector`. /// - /// This initializes the internal mappings with `nohash` hashers for performance. + /// This initializes the internal mappings with `nohash` hasher for performance. pub fn new() -> Self { Self { request_id_to_remotes: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -80,10 +80,10 @@ impl ProxyDownstreamMiningSelector { } impl ProxyDownstreamMiningSelector { - /// Removes a downstream node from all mappings. - /// - /// # Arguments - /// - `d`: The downstream node to be removed. + // Removes a downstream node from all mappings. + // + // # Arguments + // - `d`: The downstream node to be removed. 
fn _remove_downstream(&mut self, d: &Arc>) { self.request_id_to_remotes.retain(|_, v| !Arc::ptr_eq(v, d)); self.channel_id_to_downstream @@ -94,25 +94,25 @@ impl ProxyDownstreamMiningSelector { impl DownstreamMiningSelector for ProxyDownstreamMiningSelector { - /// Registers a request ID and its associated downstream node. - /// - /// # Arguments - /// - `request_id`: The unique request ID. - /// - `downstream`: The downstream node associated with the request. + // Registers a request ID and its associated downstream node. + // + // # Arguments + // - `request_id`: The unique request ID. + // - `downstream`: The downstream node associated with the request. fn on_open_standard_channel_request(&mut self, request_id: u32, downstream: Arc>) { self.request_id_to_remotes.insert(request_id, downstream); } - /// Finalizes the mapping of a standard channel to its downstream node. - /// - /// # Arguments - /// - `request_id`: The request ID used during the channel opening. - /// - `g_channel_id`: The group channel ID. - /// - `channel_id`: The specific standard channel ID. - /// - /// # Returns - /// - `Ok`: The downstream node associated with the request. - /// - `Err`: If the request ID is unknown. + // Finalizes the mapping of a standard channel to its downstream node. + // + // # Arguments + // - `request_id`: The request ID used during the channel opening. + // - `g_channel_id`: The group channel ID. + // - `channel_id`: The specific standard channel ID. + // + // # Returns + // - `Ok`: The downstream node associated with the request. + // - `Err`: If the request ID is unknown. fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -135,25 +135,25 @@ impl DownstreamMiningSelector Ok(downstream) } - /// Retrieves all downstream nodes associated with a standard/group channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard/group channel ID. - /// - /// # Returns - /// - `Some`: A reference to the vector of downstream nodes. 
- /// - `None`: If no nodes are associated with the channel. + // Retrieves all downstream nodes associated with a standard/group channel ID. + // + // # Arguments + // - `channel_id`: The standard/group channel ID. + // + // # Returns + // - `Some`: A reference to the vector of downstream nodes. + // - `None`: If no nodes are associated with the channel. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>> { self.channel_id_to_downstreams.get(&channel_id) } - /// Removes all downstream nodes associated with a standard/group channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard/group channel ID. - /// - /// # Returns - /// A vector of the removed downstream nodes. + // Removes all downstream nodes associated with a standard/group channel ID. + // + // # Arguments + // - `channel_id`: The standard/group channel ID. + // + // # Returns + // A vector of the removed downstream nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>> { let downs = self .channel_id_to_downstreams @@ -165,10 +165,10 @@ impl DownstreamMiningSelector downs } - /// Removes a specific downstream node from all mappings. - /// - /// # Arguments - /// - `d`: The downstream node to be removed. + // Removes a specific downstream node from all mappings. + // + // # Arguments + // - `d`: The downstream node to be removed. fn remove_downstream(&mut self, d: &Arc>) { for dws in self.channel_id_to_downstreams.values_mut() { dws.retain(|node| !Arc::ptr_eq(node, d)); @@ -177,22 +177,22 @@ impl DownstreamMiningSelector self._remove_downstream(d); } - /// Retrieves the downstream node associated with a specific standard channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard channel ID. - /// - /// # Returns - /// - `Some`: The downstream node. - /// - `None`: If no node is associated with the channel. + // Retrieves the downstream node associated with a specific standard channel ID. 
+ // + // # Arguments + // - `channel_id`: The standard channel ID. + // + // # Returns + // - `Some`: The downstream node. + // - `None`: If no node is associated with the channel. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>> { self.channel_id_to_downstream.get(&channel_id).cloned() } - /// Retrieves all downstream nodes currently managed by this selector. - /// - /// # Returns - /// A vector of downstream nodes. + // Retrieves all downstream nodes currently managed by this selector. + // + // # Returns + // A vector of downstream nodes. fn get_all_downstreams(&self) -> Vec>> { self.channel_id_to_downstream.values().cloned().collect() } @@ -233,22 +233,22 @@ pub trait DownstreamMiningSelector: channel_id: u32, ) -> Result>, Error>; - /// Retrieves all downstreams associated with a channel ID. + /// Retrieves all downstream's associated with a channel ID. /// /// # Arguments /// - `channel_id`: The channel ID to query. /// /// # Returns - /// - `Option<&Vec>>>`: The list of downstreams or `None`. + /// - `Option<&Vec>>>`: The list of downstream's or `None`. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>>; - /// Removes all downstreams associated with a channel ID. + /// Removes all downstream's associated with a channel ID. /// /// # Arguments - /// - `channel_id`: The channel ID to remove downstreams from. + /// - `channel_id`: The channel ID to remove downstream's from. /// /// # Returns - /// - `Vec>>`: The removed downstreams. + /// - `Vec>>`: The removed downstream's. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>>; /// Removes a specific downstream. @@ -266,10 +266,10 @@ pub trait DownstreamMiningSelector: /// - `Option>>`: The downstream or `None`. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>>; - /// Retrieves all downstreams. + /// Retrieves all downstream's. /// /// # Returns - /// - `Vec>>`: All downstreams. + /// - `Vec>>`: All downstream's. 
fn get_all_downstreams(&self) -> Vec>>; } @@ -298,10 +298,10 @@ impl NullDownstreamMiningSelector { } impl DownstreamMiningSelector for NullDownstreamMiningSelector { - /// Called when a standard channel open request is received. - /// - /// This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op - /// implementation. + // Called when a standard channel open request is received. + // + // This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op + // implementation. fn on_open_standard_channel_request( &mut self, _request_id: u32, @@ -310,9 +310,9 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_request") } - /// Called when a standard channel open request is successful. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Called when a standard channel open request is successful. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn on_open_standard_channel_success( &mut self, _request_id: u32, @@ -322,37 +322,37 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_success") } - /// Retrieves the downstreams in a specific channel. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves the downstreams in a specific channel. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn get_downstreams_in_channel(&self, _channel_id: u32) -> Option<&Vec>>> { unreachable!("get_downstreams_in_channel") } - /// Removes downstreams in a specific channel. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Removes downstreams in a specific channel. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstreams_in_channel(&mut self, _channel_id: u32) -> Vec>> { unreachable!("remove_downstreams_in_channel") } - /// Removes a specific downstream node.
- /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Removes a specific downstream node. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstream(&mut self, _d: &Arc>) { unreachable!("remove_downstream") } - /// Retrieves the downstream associated with a specific channel ID. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves the downstream associated with a specific channel ID. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn downstream_from_channel_id(&self, _channel_id: u32) -> Option>> { unreachable!("downstream_from_channel_id") } - /// Retrieves all downstream nodes managed by this selector. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves all downstream nodes managed by this selector. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn get_all_downstreams(&self) -> Vec>> { unreachable!("get_all_downstreams") } @@ -459,14 +459,14 @@ impl< Up: IsMiningUpstream, > UpstreamMiningSelctor for GeneralMiningSelector { - /// Handles the `SetupConnection` process and determines the pairable upstream nodes. - /// - /// # Arguments - /// - `pair_settings`: The settings for pairing downstream and upstream nodes. - /// - /// # Returns - /// - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. - /// - `Err`: If no upstreams are pairable. + // Handles the `SetupConnection` process and determines the pairable upstream nodes. + // + // # Arguments + // - `pair_settings`: The settings for pairing downstream and upstream nodes. + // + // # Returns + // - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. + // - `Err`: If no upstreams are pairable. fn on_setup_connection( &mut self, pair_settings: &PairSettings, @@ -489,14 +489,14 @@ impl< Err(Error::NoPairableUpstream((2, 2, 0))) } - /// Retrieves an upstream node by its ID. 
- /// - /// # Arguments - /// - `upstream_id`: The unique ID of the upstream node. - /// - /// # Returns - /// - `Some`: The upstream node. - /// - `None`: If no upstream is found. + // Retrieves an upstream node by its ID. + // + // # Arguments + // - `upstream_id`: The unique ID of the upstream node. + // + // # Returns + // - `Some`: The upstream node. + // - `None`: If no upstream is found. fn get_upstream(&self, upstream_id: u32) -> Option>> { self.id_to_upstream.get(&upstream_id).cloned() } diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index f818eba1f..149958fcf 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -282,17 +282,17 @@ pub enum InputError { /// how do we set the adequate target? /// /// According to \[1] and \[2], it is possible to model the probability of finding a block with -/// a random variable X whose distribution is negtive hypergeometric \[3]. +/// a random variable X whose distribution is negative hypergeometric \[3]. /// Such a variable is characterized as follows. Say that there are n (2^256) elements (possible /// hash values), of which t (values <= target) are defined as success and the remaining as -/// failures. The variable X has codomain the positive integers, and X=k is the event where element +/// failures. The variable X has co-domain the positive integers, and X=k is the event where element /// are drawn one after the other, without replacement, and only the k-th element is successful. /// The expected value of this variable is (n-t)/(t+1). /// So, on average, a miner has to perform (2^256-t)/(t+1) hashes before finding hash whose value /// is below the target t. If the pool wants, on average, a share every s seconds, then, on /// average, the miner has to perform h*s hashes before finding one that is smaller than the /// target, where h is the miner's hashrate. Therefore, s*h= (2^256-t)/(t+1). 
If we consider h the -/// global bitcoin's hashrate, s = 600 seconds and t the bicoin global target, then, for all the +/// global bitcoin's hashrate, s = 600 seconds and t the bitcoin global target, then, for all the /// blocks we tried, the two members of the equations have the same order of magnitude and, most /// of the cases, they coincide with the first two digits. We take this as evidence of the /// correctness of our calculations. Thus, if the pool wants on average a share every s @@ -372,7 +372,7 @@ pub fn hash_rate_from_target(target: U256<'static>, share_per_min: f64) -> Resul let max_target = Uint256::from_be_bytes(max_target); let numerator = max_target - (target - Uint256::one()); - // now we calcualte the denominator s(t+1) + // now we calculate the denominator s(t+1) // *100 here to move the fractional bit up so we can make this an int later let shares_occurrency_frequence = 60_f64 / (share_per_min) * 100.0; // note that t+1 cannot be zero because t unsigned. Therefore the denominator is zero if and @@ -416,7 +416,7 @@ pub struct GroupId { } impl GroupId { - /// New GroupId it starts with groups 0, since 0 is reserved for hom downstreams + /// New GroupId it starts with groups 0, since 0 is reserved for hom downstreams pub fn new() -> Self { Self { group_ids: Id::new(), @@ -429,8 +429,8 @@ impl GroupId { self.group_ids.next() } - /// Create a channel for a paricular group and return the channel id - /// _group_id is left for a future use of this API where we have an hirearchy of ids so that we + /// Create a channel for a particular group and return the channel id + /// _group_id is left for a future use of this API where we have a hierarchy of ids so that we /// don't break old versions pub fn new_channel_id(&mut self, _group_id: u32) -> u32 { self.channel_ids.next() From 573c62623f464c84b167224bf4cb93671d4a3155 Mon Sep 17 00:00:00 2001 From: plebhash Date: Wed, 11 Dec 2024 20:08:02 +0700 Subject: [PATCH 12/12] docs for job_creator module ---
protocols/v2/roles-logic-sv2/src/job_creator.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs index 31ef5f224..0156f2119 100644 --- a/protocols/v2/roles-logic-sv2/src/job_creator.rs +++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs @@ -49,6 +49,7 @@ pub fn tx_outputs_to_costum_scripts(tx_outputs: &[u8]) -> Vec { } impl JobsCreators { + /// constructor pub fn new(extranonce_len: u8) -> Self { Self { lasts_new_template: Vec::new(), @@ -60,6 +61,7 @@ impl JobsCreators { } } + /// get template id from job pub fn get_template_id_from_job(&self, job_id: u32) -> Option { self.job_to_template_id.get(&job_id).map(|x| x - 1) } @@ -135,6 +137,7 @@ impl JobsCreators { } } +/// convert custom job into extended job pub fn extended_job_from_custom_job( referenced_job: &mining_sv2::SetCustomMiningJob, pool_signature: String,