diff --git a/.github/workflows/crypto-tests.yml b/.github/workflows/crypto-tests.yml index d9d1df08b..bf20ede3b 100644 --- a/.github/workflows/crypto-tests.yml +++ b/.github/workflows/crypto-tests.yml @@ -35,6 +35,10 @@ jobs: -p multiexp \ -p schnorr-signatures \ -p dleq \ + -p generalized-bulletproofs \ + -p generalized-bulletproofs-circuit-abstraction \ + -p ec-divisors \ + -p generalized-bulletproofs-ec-gadgets \ -p dkg \ -p modular-frost \ -p frost-schnorrkel diff --git a/Cargo.lock b/Cargo.lock index d743f1df6..ff21fe66c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1109,6 +1109,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] @@ -2195,15 +2196,27 @@ dependencies = [ name = "dkg" version = "0.5.1" dependencies = [ + "blake2", "borsh", "chacha20", "ciphersuite", "dleq", + "ec-divisors", + "embedwards25519", "flexible-transcript", + "generalized-bulletproofs", + "generalized-bulletproofs-circuit-abstraction", + "generalized-bulletproofs-ec-gadgets", + "generic-array 1.1.0", "multiexp", + "pasta_curves", + "rand", + "rand_chacha", "rand_core", "schnorr-signatures", + "secq256k1", "std-shims", + "subtle", "thiserror", "zeroize", ] @@ -2295,6 +2308,18 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +[[package]] +name = "ec-divisors" +version = "0.1.0" +dependencies = [ + "dalek-ff-group", + "group", + "hex", + "pasta_curves", + "rand_core", + "zeroize", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -2375,6 +2400,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "embedwards25519" +version = "0.1.0" +dependencies = [ + "blake2", + "ciphersuite", + "crypto-bigint", + "dalek-ff-group", + "ec-divisors", + "ff-group-tests", + "generalized-bulletproofs-ec-gadgets", + "generic-array 0.14.7", + "hex", + "hex-literal", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + [[package]] name = "enum-as-inner" version = "0.5.1" @@ -3046,6 +3091,36 @@ dependencies = [ "serde_json", ] +[[package]] +name = "generalized-bulletproofs" +version = "0.1.0" +dependencies = [ + "blake2", + "ciphersuite", + "flexible-transcript", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-circuit-abstraction" +version = "0.1.0" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-ec-gadgets" +version = "0.1.0" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs-circuit-abstraction", + "generic-array 1.1.0", +] + [[package]] name = "generator" version = "0.8.1" @@ -5789,8 +5864,7 @@ dependencies = [ [[package]] name = "pasta_curves" version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +source = "git+https://github.com/kayabaNerve/pasta_curves?rev=a46b5be95cacbff54d06aad8d3bbcba42e05d616#a46b5be95cacbff54d06aad8d3bbcba42e05d616" dependencies = [ "blake2b_simd", "ff", @@ -5799,6 +5873,7 @@ dependencies = [ "rand", "static_assertions", "subtle", + "zeroize", ] [[package]] @@ -7922,6 +7997,26 @@ dependencies = [ "cc", ] +[[package]] +name = "secq256k1" +version = "0.1.0" +dependencies = [ + "blake2", + "ciphersuite", + "crypto-bigint", + "ec-divisors", + "ff-group-tests", + "generalized-bulletproofs-ec-gadgets", + "generic-array 0.14.7", + "hex", + 
"hex-literal", + "k256", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + [[package]] name = "secrecy" version = "0.8.0" @@ -8006,6 +8101,7 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" name = "serai-abi" version = "0.1.0" dependencies = [ + "bitvec", "borsh", "frame-support", "parity-scale-codec", @@ -8030,6 +8126,7 @@ version = "0.1.0" dependencies = [ "async-lock", "bitcoin", + "bitvec", "blake2", "ciphersuite", "dockertest", @@ -8087,6 +8184,7 @@ name = "serai-coordinator" version = "0.1.0" dependencies = [ "async-trait", + "bitvec", "blake2", "borsh", "ciphersuite", @@ -8124,10 +8222,12 @@ dependencies = [ "ciphersuite", "dkg", "dockertest", + "embedwards25519", "hex", "parity-scale-codec", "rand_core", "schnorrkel", + "secq256k1", "serai-client", "serai-docker-tests", "serai-message-queue", @@ -8379,7 +8479,9 @@ dependencies = [ name = "serai-node" version = "0.1.0" dependencies = [ + "ciphersuite", "clap", + "embedwards25519", "frame-benchmarking", "futures-util", "hex", @@ -8405,6 +8507,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "schnorrkel", + "secq256k1", "serai-env", "serai-runtime", "sp-api", @@ -8426,11 +8529,13 @@ name = "serai-orchestrator" version = "0.0.1" dependencies = [ "ciphersuite", + "embedwards25519", "flexible-transcript", "hex", "home", "rand_chacha", "rand_core", + "secq256k1", "zalloc", "zeroize", ] @@ -8440,6 +8545,7 @@ name = "serai-primitives" version = "0.1.0" dependencies = [ "borsh", + "ciphersuite", "frame-support", "parity-scale-codec", "rand_core", @@ -8458,11 +8564,14 @@ version = "0.1.0" dependencies = [ "async-trait", "bitcoin-serai", + "blake2", "borsh", "ciphersuite", "const-hex", "dalek-ff-group", + "dkg", "dockertest", + "ec-divisors", "env_logger", "ethereum-serai", "flexible-transcript", @@ -8620,9 +8729,9 @@ dependencies = [ name = "serai-validator-sets-pallet" version = "0.1.0" dependencies = [ + "bitvec", "frame-support", "frame-system", - "hashbrown 0.14.5", "pallet-babe", "pallet-grandpa", "parity-scale-codec", @@ -8631,6 +8740,7 @@ dependencies = [ "serai-dex-pallet", "serai-primitives", "serai-validator-sets-primitives", + "serde", "sp-application-crypto", "sp-core", "sp-io", diff --git a/Cargo.toml b/Cargo.toml index 3416d2225..bce4ebe38 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,9 +30,16 @@ members = [ "crypto/ciphersuite", "crypto/multiexp", - "crypto/schnorr", "crypto/dleq", + + "crypto/evrf/secq256k1", + "crypto/evrf/embedwards25519", + "crypto/evrf/generalized-bulletproofs", + "crypto/evrf/circuit-abstraction", + "crypto/evrf/divisors", + "crypto/evrf/ec-gadgets", + "crypto/dkg", "crypto/frost", "crypto/schnorrkel", @@ -118,18 +125,32 @@ members = [ # to the extensive operations required for Bulletproofs [profile.dev.package] subtle = { opt-level = 3 } -curve25519-dalek = { opt-level = 3 } ff = { opt-level = 3 } group = { opt-level = 3 } crypto-bigint = { opt-level = 3 } +secp256k1 = { opt-level = 3 } +curve25519-dalek = { opt-level = 3 } dalek-ff-group = { opt-level = 3 } minimal-ed448 = { opt-level = 3 } multiexp = { opt-level = 3 } -monero-serai = { opt-level = 3 } +secq256k1 = { opt-level = 3 } +embedwards25519 = { opt-level = 3 } +generalized-bulletproofs = { opt-level = 3 } +generalized-bulletproofs-circuit-abstraction = { opt-level = 3 } +ec-divisors = { opt-level = 3 } +generalized-bulletproofs-ec-gadgets = { opt-level = 3 } + +dkg = { opt-level = 3 } + +monero-generators = { opt-level = 3 } +monero-borromean = { opt-level = 3 } 
+monero-bulletproofs = { opt-level = 3 } +monero-mlsag = { opt-level = 3 } +monero-clsag = { opt-level = 3 } [profile.release] panic = "unwind" @@ -158,6 +179,9 @@ matches = { path = "patches/matches" } option-ext = { path = "patches/option-ext" } directories-next = { path = "patches/directories-next" } +# The official pasta_curves repo doesn't support Zeroize +pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" } + # https://github.com/alloy-rs/core/issues/717 alloy-sol-type-parser = { git = "https://github.com/alloy-rs/core", rev = "446b9d2fbce12b88456152170709a3eaac929af0" } diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index ae4e2be7a..85865650b 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -20,6 +20,7 @@ workspace = true async-trait = { version = "0.1", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["std"] } +bitvec = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 58de348d2..87db0135e 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -16,7 +16,6 @@ use ciphersuite::{ Ciphersuite, Ristretto, }; use schnorr::SchnorrSignature; -use frost::Participant; use serai_db::{DbTxn, Db}; @@ -114,16 +113,17 @@ async fn add_tributary( // If we're rebooting, we'll re-fire this message // This is safe due to the message-queue deduplicating based off the intent system let set = spec.set(); - let our_i = spec - .i(&[], Ristretto::generator() * key.deref()) - .expect("adding a tributary for a set we aren't in set for"); + processors .send( set.network, processor_messages::key_gen::CoordinatorMessage::GenerateKey { - id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 }, - params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(), - shares: u16::from(our_i.end) - u16::from(our_i.start), + session: set.session, + threshold: spec.t(), + evrf_public_keys: spec.evrf_public_keys(), + // TODO + // params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(), + // shares: u16::from(our_i.end) - u16::from(our_i.start), }, ) .await; @@ -166,12 +166,9 @@ async fn handle_processor_message( // We'll only receive these if we fired GenerateKey, which we'll only do if if we're // in-set, making the Tributary relevant ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, .. } | - key_gen::ProcessorMessage::InvalidCommitments { id, .. } | - key_gen::ProcessorMessage::Shares { id, .. } | - key_gen::ProcessorMessage::InvalidShare { id, .. } | - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } | - key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session), + key_gen::ProcessorMessage::Participation { session, .. } | + key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } | + key_gen::ProcessorMessage::Blame { session, .. 
} => Some(*session), }, ProcessorMessage::Sign(inner_msg) => match inner_msg { // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing @@ -421,125 +418,33 @@ async fn handle_processor_message( let txs = match msg.msg.clone() { ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, commitments } => { - vec![Transaction::DkgCommitments { - attempt: id.attempt, - commitments, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => { - // This doesn't have guaranteed timing - // - // While the party *should* be fatally slashed and not included in future attempts, - // they'll actually be fatally slashed (assuming liveness before the Tributary retires) - // and not included in future attempts *which begin after the latency window completes* - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - faulty, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::Shares { id, mut shares } => { - // Create a MuSig-based machine to inform Substrate of this key generation - let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt); - - let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt) - .expect("participating in a DKG attempt yet we didn't track who was removed yet?"); - let our_i = spec - .i(&removed, pub_key) - .expect("processor message to DKG for an attempt we aren't a validator in"); - - // `tx_shares` needs to be done here as while it can be serialized from the HashMap - // without further context, it can't be deserialized without context - let mut tx_shares = Vec::with_capacity(shares.len()); - for shares in &mut shares { - tx_shares.push(vec![]); - for i in 1 ..= spec.n(&removed) { - let i = Participant::new(i).unwrap(); - if our_i.contains(&i) { - if shares.contains_key(&i) { - panic!("processor sent us our own shares"); - } - continue; - } - tx_shares.last_mut().unwrap().push( - shares.remove(&i).expect("processor didn't send share for another validator"), - ); - } - } - - vec![Transaction::DkgShares { - attempt: id.attempt, - shares: tx_shares, - confirmation_nonces: nonces, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => { - vec![Transaction::InvalidDkgShare { - attempt: id.attempt, - accuser, - faulty, - blame, - signed: Transaction::empty_signed(), - }] + key_gen::ProcessorMessage::Participation { session, participation } => { + assert_eq!(session, spec.set().session); + vec![Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }] } - key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { - // TODO2: Check the KeyGenId fields - - // Tell the Tributary the key pair, get back the share for the MuSig signature - let share = crate::tributary::generated_key_pair::( + key_gen::ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } => { + assert_eq!(session, spec.set().session); + crate::tributary::generated_key_pair::( &mut txn, - key, - spec, + genesis, &KeyPair(Public(substrate_key), network_key.try_into().unwrap()), - id.attempt, ); - // TODO: Move this into generated_key_pair? 
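
For orientation, the reshaped key-gen flow above collapses the old per-attempt message surface (Commitments / InvalidCommitments / Shares / InvalidShare / ...) into three session-tagged messages, so routing reduces to a single match. A minimal sketch of that shape; these are not the actual processor-messages definitions, and the field types are inferred from their usages in this diff (`u16` stands in for `frost::Participant`):

```rust
// Hedged reconstruction of the session-tagged key-gen messages used above.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Session(pub u32);

pub enum ProcessorMessage {
  Participation { session: Session, participation: Vec<u8> },
  GeneratedKeyPair { session: Session, substrate_key: [u8; 32], network_key: Vec<u8> },
  Blame { session: Session, participant: u16 },
}

impl ProcessorMessage {
  // Mirrors the routing match in handle_processor_message: every variant
  // carries its session, so no KeyGenId/attempt lookup is needed.
  pub fn session(&self) -> Session {
    match self {
      ProcessorMessage::Participation { session, .. } |
      ProcessorMessage::GeneratedKeyPair { session, .. } |
      ProcessorMessage::Blame { session, .. } => *session,
    }
  }
}
```
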
- match share { - Ok(share) => { - vec![Transaction::DkgConfirmed { - attempt: id.attempt, - confirmation_share: share, - signed: Transaction::empty_signed(), - }] - } - Err(p) => { - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - p, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - } - } - key_gen::ProcessorMessage::Blame { id, participant } => { - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - participant, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, + // Create a MuSig-based machine to inform Substrate of this key generation + let confirmation_nonces = + crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, 0); + + vec![Transaction::DkgConfirmationNonces { + attempt: 0, + confirmation_nonces, signed: Transaction::empty_signed(), }] } + key_gen::ProcessorMessage::Blame { session, participant } => { + assert_eq!(session, spec.set().session); + let participant = spec.reverse_lookup_i(participant).unwrap(); + vec![Transaction::RemoveParticipant { participant, signed: Transaction::empty_signed() }] + } }, ProcessorMessage::Sign(msg) => match msg { sign::ProcessorMessage::InvalidParticipant { .. } => { diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index fb1e3aed2..d1946b7e5 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -10,7 +10,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use serai_client::{ SeraiError, Block, Serai, TemporalSerai, - primitives::{BlockHash, NetworkId}, + primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId}, validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent}, in_instructions::InInstructionsEvent, coins::CoinsEvent, @@ -60,13 +60,46 @@ async fn handle_new_set( { log::info!("present in set {:?}", set); - let set_data = { + let validators; + let mut evrf_public_keys = vec![]; + { let serai = serai.as_of(block.hash()); let serai = serai.validator_sets(); let set_participants = serai.participants(set.network).await?.expect("NewSet for set which doesn't exist"); - set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::>() + validators = set_participants + .iter() + .map(|(k, w)| { + ( + ::read_G::<&[u8]>(&mut k.0.as_ref()) + .expect("invalid key registered as participant"), + u16::try_from(*w).unwrap(), + ) + }) + .collect::>(); + for (validator, _) in set_participants { + // This is only run for external networks which always do a DKG for Serai + let substrate = serai + .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519) + .await? 
+ .expect("Serai called NewSet on a validator without an Embedwards25519 key"); + // `embedded_elliptic_curves` is documented to have the second entry be the + // network-specific curve (if it exists and is distinct from Embedwards25519) + let network = + if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) { + serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect( + "Serai called NewSet on a validator without the embedded key required for the network", + ) + } else { + substrate.clone() + }; + evrf_public_keys.push(( + <[u8; 32]>::try_from(substrate) + .expect("validator-sets pallet accepted a key of an invalid length"), + network, + )); + } }; let time = if let Ok(time) = block.time() { @@ -90,7 +123,7 @@ async fn handle_new_set( const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120; let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY; - let spec = TributarySpec::new(block.hash(), time, set, set_data); + let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys); log::info!("creating new tributary for {:?}", spec.set()); diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index 7fc6a0647..746c611b4 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -7,12 +7,8 @@ use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng, OsRng}; use futures_util::{task::Poll, poll}; -use ciphersuite::{ - group::{ff::Field, GroupEncoding}, - Ciphersuite, Ristretto, -}; +use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto}; -use sp_application_crypto::sr25519; use borsh::BorshDeserialize; use serai_client::{ primitives::NetworkId, @@ -52,12 +48,22 @@ pub fn new_spec( let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin }; - let set_participants = keys + let validators = keys .iter() - .map(|key| (sr25519::Public((::generator() * **key).to_bytes()), 1)) + .map(|key| ((::generator() * **key), 1)) .collect::>(); - let res = TributarySpec::new(serai_block, start_time, set, set_participants); + // Generate random eVRF keys as none of these test rely on them to have any structure + let mut evrf_keys = vec![]; + for _ in 0 .. 
keys.len() { + let mut substrate = [0; 32]; + OsRng.fill_bytes(&mut substrate); + let mut network = vec![0; 64]; + OsRng.fill_bytes(&mut network); + evrf_keys.push((substrate, network)); + } + + let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys); assert_eq!( TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(), res, diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs index 04a528f90..aafa9a339 100644 --- a/coordinator/src/tests/tributary/dkg.rs +++ b/coordinator/src/tests/tributary/dkg.rs @@ -1,5 +1,4 @@ use core::time::Duration; -use std::collections::HashMap; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; @@ -9,7 +8,7 @@ use frost::Participant; use sp_runtime::traits::Verify; use serai_client::{ - primitives::{SeraiAddress, Signature}, + primitives::Signature, validator_sets::primitives::{ValidatorSet, KeyPair}, }; @@ -17,10 +16,7 @@ use tokio::time::sleep; use serai_db::{Get, DbTxn, Db, MemDb}; -use processor_messages::{ - key_gen::{self, KeyGenId}, - CoordinatorMessage, -}; +use processor_messages::{key_gen, CoordinatorMessage}; use tributary::{TransactionTrait, Tributary}; @@ -54,44 +50,41 @@ async fn dkg_test() { tokio::spawn(run_tributaries(tributaries.clone())); let mut txs = vec![]; - // Create DKG commitments for each key + // Create DKG participation for each key for key in &keys { - let attempt = 0; - let mut commitments = vec![0; 256]; - OsRng.fill_bytes(&mut commitments); + let mut participation = vec![0; 4096]; + OsRng.fill_bytes(&mut participation); - let mut tx = Transaction::DkgCommitments { - attempt, - commitments: vec![commitments], - signed: Transaction::empty_signed(), - }; + let mut tx = + Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } let block_before_tx = tributaries[0].1.tip().await; - // Publish all commitments but one - for (i, tx) in txs.iter().enumerate().skip(1) { + // Publish t-1 participations + let t = ((keys.len() * 2) / 3) + 1; + for (i, tx) in txs.iter().take(t - 1).enumerate() { assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); - } - - // Wait until these are included - for tx in txs.iter().skip(1) { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; } - let expected_commitments: HashMap<_, _> = txs + let expected_participations = txs .iter() .enumerate() .map(|(i, tx)| { - if let Transaction::DkgCommitments { commitments, .. } = tx { - (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone()) + if let Transaction::DkgParticipation { participation, .. 
} = tx { + CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation { + session: spec.set().session, + participant: Participant::new((i + 1).try_into().unwrap()).unwrap(), + participation: participation.clone(), + }) } else { - panic!("txs had non-commitments"); + panic!("txs wasn't a DkgParticipation"); } }) - .collect(); + .collect::>(); async fn new_processors( db: &mut MemDb, @@ -120,28 +113,30 @@ async fn dkg_test() { processors } - // Instantiate a scanner and verify it has nothing to report + // Instantiate a scanner and verify it has the first two participations to report (and isn't + // waiting for `t`) let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await; - assert!(processors.0.read().await.is_empty()); + assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1); - // Publish the last commitment + // Publish the rest of the participations let block_before_tx = tributaries[0].1.tip().await; - assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; - sleep(Duration::from_secs(Tributary::::block_time().into())).await; + for tx in txs.iter().skip(t - 1) { + assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true)); + wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; + } - // Verify the scanner emits a KeyGen::Commitments message + // Verify the scanner emits all KeyGen::Participations messages handle_new_blocks::<_, _, _, _, _, LocalP2p>( &mut dbs[0], &keys[0], &|_, _, _, _| async { - panic!("provided TX caused recognized_id to be called after Commitments") + panic!("provided TX caused recognized_id to be called after DkgParticipation") }, &processors, &(), &|_| async { panic!( - "test tried to publish a new Tributary TX from handle_application_tx after Commitments" + "test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation" ) }, &spec, @@ -150,17 +145,11 @@ async fn dkg_test() { .await; { let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); - let mut expected_commitments = expected_commitments.clone(); - expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap()); - assert_eq!( - msgs.pop_front().unwrap(), - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - commitments: expected_commitments - }) - ); + assert_eq!(msgs.len(), keys.len()); + for expected in &expected_participations { + assert_eq!(&msgs.pop_front().unwrap(), expected); + } assert!(msgs.is_empty()); } @@ -168,38 +157,31 @@ async fn dkg_test() { for (i, key) in keys.iter().enumerate().skip(1) { let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); - let mut expected_commitments = expected_commitments.clone(); - expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); - assert_eq!( - msgs.pop_front().unwrap(), - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - commitments: expected_commitments - }) - ); + assert_eq!(msgs.len(), keys.len()); + for expected in &expected_participations { + assert_eq!(&msgs.pop_front().unwrap(), expected); 
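
The `t` used above is the two-thirds threshold: with the eVRF DKG, `t` participations suffice, which is why the test publishes only `t - 1` first and expects the scanner to report them immediately rather than wait for all `n`. Spelled out as a runnable sketch (the test set has five validators, per the prior version of this test which published four of five shares before the last):

```rust
// dkg_test's threshold arithmetic: t = floor(2n / 3) + 1.
fn threshold(n: usize) -> usize {
  ((n * 2) / 3) + 1
}

fn main() {
  assert_eq!(threshold(5), 4); // five validators: t - 1 = 3 participations published first
  assert_eq!(threshold(4), 3);
  assert_eq!(threshold(100), 67);
}
```
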
+ } assert!(msgs.is_empty()); } - // Now do shares + let mut substrate_key = [0; 32]; + OsRng.fill_bytes(&mut substrate_key); + let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()]; + OsRng.fill_bytes(&mut network_key); + let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap()); + let mut txs = vec![]; - for (k, key) in keys.iter().enumerate() { - let attempt = 0; + for (i, key) in keys.iter().enumerate() { + let mut txn = dbs[i].txn(); - let mut shares = vec![vec![]]; - for i in 0 .. keys.len() { - if i != k { - let mut share = vec![0; 256]; - OsRng.fill_bytes(&mut share); - shares.last_mut().unwrap().push(share); - } - } + // Claim we've generated the key pair + crate::tributary::generated_key_pair::(&mut txn, spec.genesis(), &key_pair); - let mut txn = dbs[k].txn(); - let mut tx = Transaction::DkgShares { + // Publish the nonces + let attempt = 0; + let mut tx = Transaction::DkgConfirmationNonces { attempt, - shares, confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0), signed: Transaction::empty_signed(), }; @@ -207,139 +189,42 @@ async fn dkg_test() { tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } - let block_before_tx = tributaries[0].1.tip().await; - for (i, tx) in txs.iter().enumerate().skip(1) { + for (i, tx) in txs.iter().enumerate() { assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } - for tx in txs.iter().skip(1) { + for tx in &txs { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; } - // With just 4 sets of shares, nothing should happen yet - handle_new_blocks::<_, _, _, _, _, LocalP2p>( - &mut dbs[0], - &keys[0], - &|_, _, _, _| async { - panic!("provided TX caused recognized_id to be called after some shares") - }, - &processors, - &(), - &|_| async { - panic!( - "test tried to publish a new Tributary TX from handle_application_tx after some shares" - ) - }, - &spec, - &tributaries[0].1.reader(), - ) - .await; - assert_eq!(processors.0.read().await.len(), 1); - assert!(processors.0.read().await[&spec.set().network].is_empty()); - - // Publish the final set of shares - let block_before_tx = tributaries[0].1.tip().await; - assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; - sleep(Duration::from_secs(Tributary::::block_time().into())).await; - - // Each scanner should emit a distinct shares message - let shares_for = |i: usize| { - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - shares: vec![txs - .iter() - .enumerate() - .filter_map(|(l, tx)| { - if let Transaction::DkgShares { shares, .. 
} = tx { - if i == l { - None - } else { - let relative_i = i - (if i > l { 1 } else { 0 }); - Some(( - Participant::new((l + 1).try_into().unwrap()).unwrap(), - shares[0][relative_i].clone(), - )) - } - } else { - panic!("txs had non-shares"); - } - }) - .collect::>()], - }) - }; - - // Any scanner which has handled the prior blocks should only emit the new event + // This should not cause any new processor event as the processor doesn't handle DKG confirming for (i, key) in keys.iter().enumerate() { handle_new_blocks::<_, _, _, _, _, LocalP2p>( &mut dbs[i], key, - &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, + &|_, _, _, _| async { + panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces") + }, &processors, &(), - &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, + // The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces + &|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) }, &spec, &tributaries[i].1.reader(), ) .await; { - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); - assert!(msgs.is_empty()); + assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty()); } } - // Yet new scanners should emit all events - for (i, key) in keys.iter().enumerate() { - let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await; - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - let mut expected_commitments = expected_commitments.clone(); - expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); - assert_eq!( - msgs.pop_front().unwrap(), - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - commitments: expected_commitments - }) - ); - assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); - assert!(msgs.is_empty()); - } - - // Send DkgConfirmed - let mut substrate_key = [0; 32]; - OsRng.fill_bytes(&mut substrate_key); - let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()]; - OsRng.fill_bytes(&mut network_key); - let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap()); - - let mut txs = vec![]; - for (i, key) in keys.iter().enumerate() { - let attempt = 0; - let mut txn = dbs[i].txn(); - let share = - crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair, 0).unwrap(); - txn.commit(); - - let mut tx = Transaction::DkgConfirmed { - attempt, - confirmation_share: share, - signed: Transaction::empty_signed(), - }; - tx.sign(&mut OsRng, spec.genesis(), key); - txs.push(tx); - } - let block_before_tx = tributaries[0].1.tip().await; - for (i, tx) in txs.iter().enumerate() { - assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); - } - for tx in &txs { - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; - } + // Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares + // This means in the block after the next block, the keys should be set onto Serai + // Sleep twice as long as two blocks, in case there's some stability issue + sleep(Duration::from_secs( + 2 * 2 * 
u64::from(Tributary::::block_time()), + )) + .await; struct CheckPublishSetKeys { spec: TributarySpec, @@ -351,19 +236,24 @@ async fn dkg_test() { &self, _db: &(impl Sync + Get), set: ValidatorSet, - removed: Vec, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) { assert_eq!(set, self.spec.set()); - assert!(removed.is_empty()); assert_eq!(self.key_pair, key_pair); assert!(signature.verify( - &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair), + &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair), &serai_client::Public( frost::dkg::musig::musig_key::( &serai_client::validator_sets::primitives::musig_context(set), - &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::>() + &self + .spec + .validators() + .into_iter() + .zip(signature_participants) + .filter_map(|((validator, _), included)| included.then_some(validator)) + .collect::>() ) .unwrap() .to_bytes() diff --git a/coordinator/src/tests/tributary/mod.rs b/coordinator/src/tests/tributary/mod.rs index c3f983116..340809e18 100644 --- a/coordinator/src/tests/tributary/mod.rs +++ b/coordinator/src/tests/tributary/mod.rs @@ -6,7 +6,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto}; use scale::{Encode, Decode}; use serai_client::{ - primitives::{SeraiAddress, Signature}, + primitives::Signature, validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair}, }; use processor_messages::coordinator::SubstrateSignableId; @@ -32,8 +32,8 @@ impl PublishSeraiTransaction for () { &self, _db: &(impl Sync + serai_db::Get), _set: ValidatorSet, - _removed: Vec, _key_pair: KeyPair, + _signature_participants: bitvec::vec::BitVec, _signature: Signature, ) { panic!("publish_set_keys was called in test") @@ -84,23 +84,25 @@ fn tx_size_limit() { use tributary::TRANSACTION_SIZE_LIMIT; let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1; - let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients; - // Handwave the DKG Commitments size as the size of the commitments to the coefficients and - // 1024 bytes for all overhead - let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024; - assert!( - u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= - (handwaved_dkg_commitments_size * max_key_shares_per_individual) - ); - - // Encryption key, PoP (2 elements), message - let elements_per_share = 4; - let handwaved_dkg_shares_size = - (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024; - assert!( - u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= - (handwaved_dkg_shares_size * max_key_shares_per_individual) - ); + // n coefficients + // 2 ECDH values per recipient, and the encrypted share + let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET); + // Then Pedersen Vector Commitments for each DH done, and the associated overhead in the proof + // It's handwaved as one commitment per DH, where we do 2 per coefficient and 1 for the explicit + // ECDHs + let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET); + // Then we have commitments to the `t` polynomial of length 2 + 2 nc, where nc is the amount of + // commitments + let t_commitments = 2 + (2 * vector_commitments); + // The remainder of the proof should be ~30 elements + let proof_elements = 30; + + let handwaved_dkg_size = + ((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) * + MAX_KEY_LEN) 
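
Stepping back to `CheckPublishSetKeys` above: the MuSig key is rebuilt from only the validators whose bit is set in `signature_participants`, zipped in `spec.validators()` order. A self-contained sketch of that pairing, with `u64` standing in for Ristretto points and a `bool` slice for `bitvec::vec::BitVec` (stand-in types, not the real ones):

```rust
// Filter the validator list down to the MuSig signers, as the verification
// closure above does with .zip(signature_participants).filter_map(..).
fn included_validators(
  validators: &[(u64, u16)],          // (stand-in validator key, weight)
  signature_participants: &[bool],    // stand-in for bitvec::vec::BitVec
) -> Vec<u64> {
  validators
    .iter()
    .zip(signature_participants)
    .filter_map(|((validator, _weight), included)| included.then_some(*validator))
    .collect()
}
```
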
+ + 1024; + // Further scale by two in case of any errors in the above + assert!(u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (2 * handwaved_dkg_size)); } #[test] @@ -143,84 +145,34 @@ fn serialize_sign_data() { #[test] fn serialize_transaction() { - test_read_write(&Transaction::RemoveParticipantDueToDkg { + test_read_write(&Transaction::RemoveParticipant { participant: ::G::random(&mut OsRng), signed: random_signed_with_nonce(&mut OsRng, 0), }); - { - let mut commitments = vec![random_vec(&mut OsRng, 512)]; - for _ in 0 .. (OsRng.next_u64() % 100) { - let mut temp = commitments[0].clone(); - OsRng.fill_bytes(&mut temp); - commitments.push(temp); - } - test_read_write(&Transaction::DkgCommitments { - attempt: random_u32(&mut OsRng), - commitments, - signed: random_signed_with_nonce(&mut OsRng, 0), - }); - } - - { - // This supports a variable share length, and variable amount of sent shares, yet share length - // and sent shares is expected to be constant among recipients - let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap(); - let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap(); - // Create a valid vec of shares - let mut shares = vec![]; - // Create up to 150 participants - for _ in 0 ..= (OsRng.next_u64() % 150) { - // Give each sender multiple shares - let mut sender_shares = vec![]; - for _ in 0 .. amount_of_shares { - let mut share = vec![0; share_len]; - OsRng.fill_bytes(&mut share); - sender_shares.push(share); - } - shares.push(sender_shares); - } - - test_read_write(&Transaction::DkgShares { - attempt: random_u32(&mut OsRng), - shares, - confirmation_nonces: { - let mut nonces = [0; 64]; - OsRng.fill_bytes(&mut nonces); - nonces - }, - signed: random_signed_with_nonce(&mut OsRng, 1), - }); - } + test_read_write(&Transaction::DkgParticipation { + participation: random_vec(&mut OsRng, 4096), + signed: random_signed_with_nonce(&mut OsRng, 0), + }); - for i in 0 .. 
2 { - test_read_write(&Transaction::InvalidDkgShare { - attempt: random_u32(&mut OsRng), - accuser: frost::Participant::new( - u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), - ) - .unwrap(), - faulty: frost::Participant::new( - u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), - ) - .unwrap(), - blame: if i == 0 { - None - } else { - Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty()) - }, - signed: random_signed_with_nonce(&mut OsRng, 2), - }); - } + test_read_write(&Transaction::DkgConfirmationNonces { + attempt: random_u32(&mut OsRng), + confirmation_nonces: { + let mut nonces = [0; 64]; + OsRng.fill_bytes(&mut nonces); + nonces + }, + signed: random_signed_with_nonce(&mut OsRng, 0), + }); - test_read_write(&Transaction::DkgConfirmed { + test_read_write(&Transaction::DkgConfirmationShare { attempt: random_u32(&mut OsRng), confirmation_share: { let mut share = [0; 32]; OsRng.fill_bytes(&mut share); share }, - signed: random_signed_with_nonce(&mut OsRng, 2), + signed: random_signed_with_nonce(&mut OsRng, 1), }); { diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 18f60864d..a0b688392 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -29,7 +29,7 @@ async fn sync_test() { let mut keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); // Ensure this can have a node fail - assert!(spec.n(&[]) > spec.t()); + assert!(spec.n() > spec.t()); let mut tributaries = new_tributaries(&keys, &spec) .await @@ -142,7 +142,7 @@ async fn sync_test() { // Because only `t` validators are used in a commit, take n - t nodes offline // leaving only `t` nodes. Which should force it to participate in the consensus // of next blocks. 
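
For a concrete feel of the reworked `tx_size_limit` handwave earlier in this diff, here is the same arithmetic as a runnable sketch. The constants are illustrative stand-ins: the real `MAX_KEY_SHARES_PER_SET` and `MAX_KEY_LEN` live in the serai-client/tributary crates and may differ.

```rust
// Assumed example values, not the actual constants.
const MAX_KEY_SHARES_PER_SET: u32 = 600;
const MAX_KEY_LEN: u32 = 96;

fn main() {
  let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
  // n coefficients, plus 2 ECDH values and the encrypted share per recipient
  let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET);
  // One Pedersen Vector Commitment per DH: 2 per coefficient, 1 per explicit ECDH
  let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET);
  // Commitments to the `t` polynomial: 2 + 2 per vector commitment
  let t_commitments = 2 + (2 * vector_commitments);
  let proof_elements = 30;
  let handwaved_dkg_size = ((elements_outside_of_proof +
    vector_commitments +
    t_commitments +
    proof_elements) *
    MAX_KEY_LEN) +
    1024;
  // With these stand-in values: 8239 elements * 96 bytes + 1024 = 791_968 bytes,
  // so the 2x-padded bound checked against TRANSACTION_SIZE_LIMIT is ~1.6 MB.
  assert_eq!(handwaved_dkg_size, 791_968);
  println!("2 * handwaved_dkg_size = {}", 2 * handwaved_dkg_size);
}
```
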
- let spares = usize::from(spec.n(&[]) - spec.t()); + let spares = usize::from(spec.n() - spec.t()); for thread in p2p_threads.iter().take(spares) { thread.abort(); } diff --git a/coordinator/src/tests/tributary/tx.rs b/coordinator/src/tests/tributary/tx.rs index da9433b67..9b948f365 100644 --- a/coordinator/src/tests/tributary/tx.rs +++ b/coordinator/src/tests/tributary/tx.rs @@ -37,15 +37,14 @@ async fn tx_test() { usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap(); let key = keys[sender].clone(); - let attempt = 0; - let mut commitments = vec![0; 256]; - OsRng.fill_bytes(&mut commitments); - - // Create the TX with a null signature so we can get its sig hash let block_before_tx = tributaries[sender].1.tip().await; - let mut tx = Transaction::DkgCommitments { - attempt, - commitments: vec![commitments.clone()], + // Create the TX with a null signature so we can get its sig hash + let mut tx = Transaction::DkgParticipation { + participation: { + let mut participation = vec![0; 4096]; + OsRng.fill_bytes(&mut participation); + participation + }, signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, spec.genesis(), &key); diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs index fda1c47ba..095f18af1 100644 --- a/coordinator/src/tributary/db.rs +++ b/coordinator/src/tributary/db.rs @@ -18,7 +18,6 @@ use crate::tributary::{Label, Transaction}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] pub enum Topic { - Dkg, DkgConfirmation, SubstrateSign(SubstrateSignableId), Sign([u8; 32]), @@ -46,15 +45,13 @@ pub enum Accumulation { create_db!( Tributary { SeraiBlockNumber: (hash: [u8; 32]) -> u64, - SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32], + SeraiDkgCompleted: (set: ValidatorSet) -> [u8; 32], TributaryBlockNumber: (block: [u8; 32]) -> u32, LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32], // TODO: Revisit the point of this FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>, - RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>, - OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>, // TODO: Combine these two FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (), SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32, @@ -67,11 +64,9 @@ create_db!( DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16, DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec, - DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, + DkgParticipation: (genesis: [u8; 32], from: u16) -> Vec, ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, - DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair, - KeyToDkgAttempt: (key: [u8; 32]) -> u32, - DkgLocallyCompleted: (genesis: [u8; 32]) -> (), + DkgKeyPair: (genesis: [u8; 32]) -> KeyPair, PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>, @@ -123,12 +118,12 @@ impl AttemptDb { pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option { let attempt = Self::get(getter, genesis, &topic); - // Don't require explicit recognition of the Dkg topic as it starts when the chain does + // Don't require explicit recognition of the DkgConfirmation topic as it starts when the chain + // does // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it // should always happen (eventually) if attempt.is_none() && - ((topic == Topic::Dkg) || - (topic == Topic::DkgConfirmation) || + ((topic == 
Topic::DkgConfirmation) || (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport))) { return Some(0); @@ -155,16 +150,12 @@ impl ReattemptDb { // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5 // Assumes no event will take longer than 15 minutes, yet grows the time in case there are // network bandwidth issues - let mut reattempt_delay = BASE_REATTEMPT_DELAY * + let reattempt_delay = BASE_REATTEMPT_DELAY * ((AttemptDb::attempt(txn, genesis, topic) .expect("scheduling re-attempt for unknown topic") / 3) + 1) .min(3); - // Allow more time for DKGs since they have an extra round and much more data - if matches!(topic, Topic::Dkg) { - reattempt_delay *= 4; - } let upon_block = current_block_number + reattempt_delay; let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]); diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs index fbce7dd9a..c5378cc7e 100644 --- a/coordinator/src/tributary/handle.rs +++ b/coordinator/src/tributary/handle.rs @@ -13,7 +13,7 @@ use serai_client::{Signature, validator_sets::primitives::KeyPair}; use tributary::{Signed, TransactionKind, TransactionTrait}; use processor_messages::{ - key_gen::{self, KeyGenId}, + key_gen::self, coordinator::{self, SubstrateSignableId, SubstrateSignId}, sign::{self, SignId}, }; @@ -38,33 +38,20 @@ pub fn dkg_confirmation_nonces( txn: &mut impl DbTxn, attempt: u32, ) -> [u8; 64] { - DkgConfirmer::new(key, spec, txn, attempt) - .expect("getting DKG confirmation nonces for unknown attempt") - .preprocess() + DkgConfirmer::new(key, spec, txn, attempt).preprocess() } pub fn generated_key_pair( txn: &mut D::Transaction<'_>, - key: &Zeroizing<::F>, - spec: &TributarySpec, + genesis: [u8; 32], key_pair: &KeyPair, - attempt: u32, -) -> Result<[u8; 32], Participant> { - DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair); - KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt); - let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap(); - DkgConfirmer::new(key, spec, txn, attempt) - .expect("claiming to have generated a key pair for an unrecognized attempt") - .share(preprocesses, key_pair) +) { + DkgKeyPair::set(txn, genesis, key_pair); } -fn unflatten( - spec: &TributarySpec, - removed: &[::G], - data: &mut HashMap>, -) { +fn unflatten(spec: &TributarySpec, data: &mut HashMap>) { for (validator, _) in spec.validators() { - let Some(range) = spec.i(removed, validator) else { continue }; + let Some(range) = spec.i(validator) else { continue }; let Some(all_segments) = data.remove(&range.start) else { continue; }; @@ -88,7 +75,6 @@ impl< { fn accumulate( &mut self, - removed: &[::G], data_spec: &DataSpecification, signer: ::G, data: &Vec, @@ -99,10 +85,7 @@ impl< panic!("accumulating data for a participant multiple times"); } let signer_shares = { - let Some(signer_i) = self.spec.i(removed, signer) else { - log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes())); - return Accumulation::NotReady; - }; + let signer_i = self.spec.i(signer).expect("transaction signer wasn't a member of the set"); u16::from(signer_i.end) - u16::from(signer_i.start) }; @@ -115,11 +98,7 @@ impl< // If 2/3rds of the network participated in this preprocess, queue it for an automatic // re-attempt - // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg - if (data_spec.label == Label::Preprocess) && - received_range.contains(&self.spec.t()) && - (data_spec.topic != 
Topic::DkgConfirmation) - { + if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) { // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this // is an old entry // This is an assert, not part of the if check, as old data shouldn't be here in the first @@ -129,10 +108,7 @@ impl< } // If we have all the needed commitments/preprocesses/shares, tell the processor - let needs_everyone = - (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation); - let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() }; - if received_range.contains(&needed) { + if received_range.contains(&self.spec.t()) { log::debug!( "accumulation for entry {:?} attempt #{} is ready", &data_spec.topic, @@ -141,7 +117,7 @@ impl< let mut data = HashMap::new(); for validator in self.spec.validators().iter().map(|validator| validator.0) { - let Some(i) = self.spec.i(removed, validator) else { continue }; + let Some(i) = self.spec.i(validator) else { continue }; data.insert( i.start, if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) { @@ -152,10 +128,10 @@ impl< ); } - assert_eq!(data.len(), usize::from(needed)); + assert_eq!(data.len(), usize::from(self.spec.t())); // Remove our own piece of data, if we were involved - if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) { + if let Some(i) = self.spec.i(Ristretto::generator() * self.our_key.deref()) { if data.remove(&i.start).is_some() { return Accumulation::Ready(DataSet::Participating(data)); } @@ -167,7 +143,6 @@ impl< fn handle_data( &mut self, - removed: &[::G], data_spec: &DataSpecification, bytes: &Vec, signed: &Signed, @@ -213,21 +188,15 @@ impl< // TODO: If this is shares, we need to check they are part of the selected signing set // Accumulate this data - self.accumulate(removed, data_spec, signed.signer, bytes) + self.accumulate(data_spec, signed.signer, bytes) } fn check_sign_data_len( &mut self, - removed: &[::G], signer: ::G, len: usize, ) -> Result<(), ()> { - let Some(signer_i) = self.spec.i(removed, signer) else { - // TODO: Ensure processor doesn't so participate/check how it handles removals for being - // offline - self.fatal_slash(signer.to_bytes(), "signer participated despite being removed"); - Err(())? 
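
The `spec.i(signer)` calls above return a range of FROST participant indices: a validator with key-share weight `w` occupies `w` consecutive one-indexed slots, and `check_sign_data_len` compares a submission's length against that width. A sketch of the indexing model, inferred from the usages in this diff rather than the actual TributarySpec code:

```rust
use core::ops::Range;

// Assign each validator (by weight) a contiguous, one-indexed range of
// participant slots; per-validator data is then keyed by range.start.
fn participant_ranges(weights: &[u16]) -> Vec<Range<u16>> {
  let mut start = 1;
  weights
    .iter()
    .map(|weight| {
      let range = start .. (start + weight);
      start += weight;
      range
    })
    .collect()
}

fn main() {
  // Weights [2, 1, 3] yield ranges [1..3, 3..4, 4..7]; a validator's expected
  // submission length is range.end - range.start, as checked above.
  assert_eq!(participant_ranges(&[2, 1, 3]), vec![1 .. 3, 3 .. 4, 4 .. 7]);
}
```
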
- }; + let signer_i = self.spec.i(signer).expect("signer wasn't a member of the set"); if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) { self.fatal_slash( signer.to_bytes(), @@ -254,12 +223,9 @@ impl< } match tx { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { - if self.spec.i(&[], participant).is_none() { - self.fatal_slash( - participant.to_bytes(), - "RemoveParticipantDueToDkg vote for non-validator", - ); + Transaction::RemoveParticipant { participant, signed } => { + if self.spec.i(participant).is_none() { + self.fatal_slash(participant.to_bytes(), "RemoveParticipant vote for non-validator"); return; } @@ -274,268 +240,106 @@ impl< let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0); let signer_votes = - self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?"); + self.spec.i(signed.signer).expect("signer wasn't a validator for this network?"); let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start); VotesToRemove::set(self.txn, genesis, participant, &new_votes); if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) { - self.fatal_slash(participant, "RemoveParticipantDueToDkg vote") - } - } - - Transaction::DkgCommitments { attempt, commitments, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt"); - return; - }; - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else { - return; - }; - let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt }; - match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) { - Accumulation::Ready(DataSet::Participating(mut commitments)) => { - log::info!("got all DkgCommitments for {}", hex::encode(genesis)); - unflatten(self.spec, &removed, &mut commitments); - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: self.spec.set().session, attempt }, - commitments, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - assert!( - removed.contains(&(Ristretto::generator() * self.our_key.deref())), - "NotParticipating in a DkgCommitments we weren't removed for" - ); - } - Accumulation::NotReady => {} - } - } - - Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt"); - return; - }; - let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref())); - - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else { - return; - }; - - let Some(sender_i) = self.spec.i(&removed, signed.signer) else { - self.fatal_slash( - signed.signer.to_bytes(), - "DkgShares for a DKG they aren't participating in", - ); - return; - }; - let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); - for shares in &shares { - if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) { - self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares"); - return; - } - } - - // Save each share as needed for blame - for (from_offset, shares) in shares.iter().enumerate() { - let from = - Participant::new(u16::from(sender_i.start) + 
u16::try_from(from_offset).unwrap()) - .unwrap(); - - for (to_offset, share) in shares.iter().enumerate() { - // 0-indexed (the enumeration) to 1-indexed (Participant) - let mut to = u16::try_from(to_offset).unwrap() + 1; - // Adjust for the omission of the sender's own shares - if to >= u16::from(sender_i.start) { - to += u16::from(sender_i.end) - u16::from(sender_i.start); - } - let to = Participant::new(to).unwrap(); - - DkgShare::set(self.txn, genesis, from.into(), to.into(), share); - } - } - - // Filter down to only our share's bytes for handle - let our_shares = if let Some(our_i) = - self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) - { - if sender_i == our_i { - vec![] - } else { - // 1-indexed to 0-indexed - let mut our_i_pos = u16::from(our_i.start) - 1; - // Handle the omission of the sender's own data - if u16::from(our_i.start) > u16::from(sender_i.start) { - our_i_pos -= sender_is_len; - } - let our_i_pos = usize::from(our_i_pos); - shares - .iter_mut() - .map(|shares| { - shares - .drain( - our_i_pos .. - (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), - ) - .collect::>() - }) - .collect() - } - } else { - assert!( - not_participating, - "we didn't have an i while handling DkgShares we weren't removed for" - ); - // Since we're not participating, simply save vec![] for our shares - vec![] - }; - // Drop shares as it's presumably been mutated into invalidity - drop(shares); - - let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt }; - let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode(); - match self.handle_data(&removed, &data_spec, &encoded_data, &signed) { - Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => { - log::info!("got all DkgShares for {}", hex::encode(genesis)); - - let mut confirmation_nonces = HashMap::new(); - let mut shares = HashMap::new(); - for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares { - let (these_confirmation_nonces, these_shares) = - <(Vec, Vec)>::decode(&mut confirmation_nonces_and_shares.as_slice()) - .unwrap(); - confirmation_nonces.insert(participant, these_confirmation_nonces); - shares.insert(participant, these_shares); - } - ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); - - // shares is a HashMap>>>, with the values representing: - // - Each of the sender's shares - // - Each of the our shares - // - Each share - // We need a Vec>>, with the outer being each of ours - let mut expanded_shares = vec![]; - for (sender_start_i, shares) in shares { - let shares: Vec>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); - for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { - for (our_share_i, our_share) in our_shares.into_iter().enumerate() { - if expanded_shares.len() <= our_share_i { - expanded_shares.push(HashMap::new()); - } - expanded_shares[our_share_i].insert( - Participant::new( - u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(), - ) - .unwrap(), - our_share, - ); - } - } - } - - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: self.spec.set().session, attempt }, - shares: expanded_shares, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for"); - } - Accumulation::NotReady => {} + self.fatal_slash(participant, "RemoveParticipant 
vote") } } - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self - .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt"); - return; - }; - let Some(range) = self.spec.i(&removed, signed.signer) else { - self.fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare for a DKG they aren't participating in", - ); - return; - }; - if !range.contains(&accuser) { - self.fatal_slash( - signed.signer.to_bytes(), - "accused with a Participant index which wasn't theirs", - ); - return; - } - if range.contains(&faulty) { - self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare"); - return; - } - - let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else { - self.fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare had a non-existent faulty participant", - ); - return; - }; + Transaction::DkgParticipation { participation, signed } => { + // Send the participation to the processor self .processors .send( self.spec.set().network, - key_gen::CoordinatorMessage::VerifyBlame { - id: KeyGenId { session: self.spec.set().session, attempt }, - accuser, - accused: faulty, - share, - blame, + key_gen::CoordinatorMessage::Participation { + session: self.spec.set().session, + participant: self + .spec + .i(signed.signer) + .expect("signer wasn't a validator for this network?") + .start, + participation, }, ) .await; } - Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt"); - return; - }; + Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => { + let data_spec = + DataSpecification { topic: Topic::DkgConfirmation, label: Label::Preprocess, attempt }; + match self.handle_data(&data_spec, &confirmation_nonces.to_vec(), &signed) { + Accumulation::Ready(DataSet::Participating(confirmation_nonces)) => { + log::info!( + "got all DkgConfirmationNonces for {}, attempt {attempt}", + hex::encode(genesis) + ); + ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); + + // Send the expected DkgConfirmationShare + // TODO: Slight race condition here due to set, publish tx, then commit txn + let key_pair = DkgKeyPair::get(self.txn, genesis) + .expect("participating in confirming key we don't have"); + let mut tx = match DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt) + .share(confirmation_nonces, &key_pair) + { + Ok(confirmation_share) => Transaction::DkgConfirmationShare { + attempt, + confirmation_share, + signed: Transaction::empty_signed(), + }, + Err(participant) => Transaction::RemoveParticipant { + participant: self.spec.reverse_lookup_i(participant).unwrap(), + signed: Transaction::empty_signed(), + }, + }; + tx.sign(&mut OsRng, genesis, self.our_key); + self.publish_tributary_tx.publish_tributary_tx(tx).await; + } + Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {} + } + } + + Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => { let data_spec = DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt }; - match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) { + match self.handle_data(&data_spec, &confirmation_share.to_vec(), &signed) { 
Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); - - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - panic!( - "DkgConfirmed for everyone yet didn't have the removed parties for this attempt", - ); - }; + log::info!( + "got all DkgConfirmationShare for {}, attempt {attempt}", + hex::encode(genesis) + ); let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap(); + // TODO: This can technically happen under very very very specific timing as the txn - // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to - let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect( - "in DkgConfirmed handling, which happens after everyone \ - (including us) fires DkgConfirmed, yet no confirming key pair", + // put happens before DkgConfirmationShare, yet the txn isn't guaranteed to be + // committed + let key_pair = DkgKeyPair::get(self.txn, genesis).expect( + "in DkgConfirmationShare handling, which happens after everyone \ + (including us) fires DkgConfirmationShare, yet no confirming key pair", ); - let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt) - .expect("confirming DKG for unrecognized attempt"); + + // Determine the bitstring representing who participated before we move `shares` + let validators = self.spec.validators(); + let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len()); + for (participant, _) in validators { + signature_participants.push( + (participant == (::generator() * self.our_key.deref())) || + shares.contains_key(&self.spec.i(participant).unwrap().start), + ); + } + + // Produce the final signature + let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt); let sig = match confirmer.complete(preprocesses, &key_pair, shares) { Ok(sig) => sig, Err(p) => { - let mut tx = Transaction::RemoveParticipantDueToDkg { - participant: self.spec.reverse_lookup_i(&removed, p).unwrap(), + let mut tx = Transaction::RemoveParticipant { + participant: self.spec.reverse_lookup_i(p).unwrap(), signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, genesis, self.our_key); @@ -544,23 +348,18 @@ impl< } }; - DkgLocallyCompleted::set(self.txn, genesis, &()); - self .publish_serai_tx .publish_set_keys( self.db, self.spec.set(), - removed.into_iter().map(|key| key.to_bytes().into()).collect(), key_pair, + signature_participants, Signature(sig), ) .await; } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG confirmination shares") - } - Accumulation::NotReady => {} + Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {} } } @@ -618,19 +417,8 @@ impl< } Transaction::SubstrateSign(data) => { - // Provided transactions ensure synchrony on any signing protocol, and we won't start - // signing with threshold keys before we've confirmed them on-chain - let Some(removed) = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - else { - self.fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ); - return; - }; let signer = data.signed.signer; - let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else { + let Ok(()) = self.check_sign_data_len(signer, data.data.len()) else { return; }; let expected_len = match data.label { @@ -653,11 +441,11 @@ impl< attempt: data.attempt, }; let 
Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) + self.handle_data(&data_spec, &data.data.encode(), &data.signed) else { return; }; - unflatten(self.spec, &removed, &mut results); + unflatten(self.spec, &mut results); let id = SubstrateSignId { session: self.spec.set().session, @@ -678,16 +466,7 @@ impl< } Transaction::Sign(data) => { - let Some(removed) = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - else { - self.fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ); - return; - }; - let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else { + let Ok(()) = self.check_sign_data_len(data.signed.signer, data.data.len()) else { return; }; @@ -697,9 +476,9 @@ impl< attempt: data.attempt, }; if let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) + self.handle_data(&data_spec, &data.data.encode(), &data.signed) { - unflatten(self.spec, &removed, &mut results); + unflatten(self.spec, &mut results); let id = SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt }; self @@ -740,8 +519,7 @@ impl< } Transaction::SlashReport(points, signed) => { - // Uses &[] as we only need the length which is independent to who else was removed - let signer_range = self.spec.i(&[], signed.signer).unwrap(); + let signer_range = self.spec.i(signed.signer).unwrap(); let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start); if points.len() != (self.spec.validators().len() - 1) { self.fatal_slash( diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs index cc9bdb1ea..6e2f26610 100644 --- a/coordinator/src/tributary/mod.rs +++ b/coordinator/src/tributary/mod.rs @@ -1,7 +1,3 @@ -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; - -use serai_client::validator_sets::primitives::ValidatorSet; - use tributary::{ ReadWrite, transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait}, @@ -24,39 +20,6 @@ pub use handle::*; pub mod scanner; -pub fn removed_as_of_dkg_attempt( - getter: &impl Get, - genesis: [u8; 32], - attempt: u32, -) -> Option::G>> { - if attempt == 0 { - Some(vec![]) - } else { - RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| { - keys.iter().map(|key| ::G::from_bytes(key).unwrap()).collect() - }) - } -} - -pub fn removed_as_of_set_keys( - getter: &impl Get, - set: ValidatorSet, - genesis: [u8; 32], -) -> Option::G>> { - // SeraiDkgCompleted has the key placed on-chain. - // This key can be uniquely mapped to an attempt so long as one participant was honest, which we - // assume as a presumably honest participant. - // Resolve from generated key to attempt to fatally slashed as of attempt. - - // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet - // we haven't locally synced and handled the Tributary - // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced - // making the panic with context more desirable than the None - let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?) 
- .expect("key completed on-chain didn't have an attempt related"); - removed_as_of_dkg_attempt(getter, genesis, attempt) -} - pub async fn publish_signed_transaction( txn: &mut D::Transaction<'_>, tributary: &Tributary, diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index 9b56e0a0f..c0b906ed8 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -1,15 +1,17 @@ -use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration}; -use std::{sync::Arc, collections::HashSet}; +use core::{marker::PhantomData, future::Future, time::Duration}; +use std::sync::Arc; use zeroize::Zeroizing; +use rand_core::OsRng; + use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use tokio::sync::broadcast; use scale::{Encode, Decode}; use serai_client::{ - primitives::{SeraiAddress, Signature}, + primitives::Signature, validator_sets::primitives::{KeyPair, ValidatorSet}, Serai, }; @@ -67,8 +69,8 @@ pub trait PublishSeraiTransaction { &self, db: &(impl Sync + Get), set: ValidatorSet, - removed: Vec, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ); } @@ -129,17 +131,12 @@ mod impl_pst_for_serai { &self, db: &(impl Sync + Get), set: ValidatorSet, - removed: Vec, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) { - // TODO: BoundedVec as an arg to avoid this expect - let tx = SeraiValidatorSets::set_keys( - set.network, - removed.try_into().expect("removing more than allowed"), - key_pair, - signature, - ); + let tx = + SeraiValidatorSets::set_keys(set.network, key_pair, signature_participants, signature); async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool { if matches!(serai.keys(set).await, Ok(Some(_))) { log::info!("another coordinator set key pair for {:?}", set); @@ -249,18 +246,15 @@ impl< let genesis = self.spec.genesis(); - let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis); - // Calculate the shares still present, spinning if not enough are - // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if - // we should spin, hence storing it in a variable here - let still_present_shares = { + { // Start with the original n value - let mut present_shares = self.spec.n(&[]); + let mut present_shares = self.spec.n(); // Remove everyone fatally slashed + let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis); for removed in ¤t_fatal_slashes { let original_i_for_removed = - self.spec.i(&[], *removed).expect("removed party was never present"); + self.spec.i(*removed).expect("removed party was never present"); let removed_shares = u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start); present_shares -= removed_shares; @@ -276,79 +270,17 @@ impl< tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } - - present_shares - }; + } for topic in ReattemptDb::take(self.txn, genesis, self.block_number) { let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic); - log::info!("re-attempting {topic:?} with attempt {attempt}"); + log::info!("potentially re-attempting {topic:?} with attempt {attempt}"); // Slash people who failed to participate as expected in the prior attempt { let prior_attempt = attempt - 1; - let (removed, expected_participants) = match topic { - Topic::Dkg => { - // Every validator who wasn't removed is expected to have participated - let removed = - 
crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt) - .expect("prior attempt didn't have its removed saved to disk"); - let removed_set = removed.iter().copied().collect::>(); - ( - removed, - self - .spec - .validators() - .into_iter() - .filter_map(|(validator, _)| { - Some(validator).filter(|validator| !removed_set.contains(validator)) - }) - .collect(), - ) - } - Topic::DkgConfirmation => { - panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg") - } - Topic::SubstrateSign(_) | Topic::Sign(_) => { - let removed = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - .expect("SubstrateSign/Sign yet have yet to set keys"); - // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![] - let expected_participants = vec![]; - (removed, expected_participants) - } - }; - - let (expected_topic, expected_label) = match topic { - Topic::Dkg => { - let n = self.spec.n(&removed); - // If we got all the DKG shares, we should be on DKG confirmation - let share_spec = - DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt }; - if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n { - // Label::Share since there is no Label::Preprocess for DkgConfirmation since the - // preprocess is part of Topic::Dkg Label::Share - (Topic::DkgConfirmation, Label::Share) - } else { - let preprocess_spec = DataSpecification { - topic: Topic::Dkg, - label: Label::Preprocess, - attempt: prior_attempt, - }; - // If we got all the DKG preprocesses, DKG shares - if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n { - // Label::Share since there is no Label::Preprocess for DkgConfirmation since the - // preprocess is part of Topic::Dkg Label::Share - (Topic::Dkg, Label::Share) - } else { - (Topic::Dkg, Label::Preprocess) - } - } - } - Topic::DkgConfirmation => unreachable!(), - // If we got enough participants to move forward, then we expect shares from them all - Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share), - }; + // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![] + let expected_participants: Vec<::G> = vec![]; let mut did_not_participate = vec![]; for expected_participant in expected_participants { @@ -356,8 +288,9 @@ impl< self.txn, genesis, &DataSpecification { - topic: expected_topic, - label: expected_label, + topic, + // Since we got the preprocesses, we were supposed to get the shares + label: Label::Share, attempt: prior_attempt, }, &expected_participant.to_bytes(), @@ -373,15 +306,8 @@ impl< // Accordingly, clear did_not_participate // TODO - // If during the DKG, explicitly mark these people as having been offline - // TODO: If they were offline sufficiently long ago, don't strike them off - if topic == Topic::Dkg { - let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]); - for did_not_participate in did_not_participate { - existing.push(did_not_participate.to_bytes()); - } - OfflineDuringDkg::set(self.txn, genesis, &existing); - } + // TODO: Increment the slash points of people who didn't preprocess in some expected window + // of time // Slash everyone who didn't participate as expected // This may be overzealous as if a minority detects a completion, they'll abort yet the @@ -411,75 +337,22 @@ impl< then preprocesses. This only sends preprocesses). 
*/ match topic { - Topic::Dkg => { - let mut removed = current_fatal_slashes.clone(); - - let t = self.spec.t(); - { - let mut present_shares = still_present_shares; - - // Load the parties marked as offline across the various attempts - let mut offline = OfflineDuringDkg::get(self.txn, genesis) - .unwrap_or(vec![]) - .iter() - .map(|key| ::G::from_bytes(key).unwrap()) - .collect::>(); - // Pop from the list to prioritize the removal of those recently offline - while let Some(offline) = offline.pop() { - // Make sure they weren't removed already (such as due to being fatally slashed) - // This also may trigger if they were offline across multiple attempts - if removed.contains(&offline) { - continue; - } - - // If we can remove them and still meet the threshold, do so - let original_i_for_offline = - self.spec.i(&[], offline).expect("offline was never present?"); - let offline_shares = - u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start); - if (present_shares - offline_shares) >= t { - present_shares -= offline_shares; - removed.push(offline); - } - - // If we've removed as many people as we can, break - if present_shares == t { - break; - } - } - } - - RemovedAsOfDkgAttempt::set( - self.txn, - genesis, - attempt, - &removed.iter().map(::G::to_bytes).collect(), - ); - - if DkgLocallyCompleted::get(self.txn, genesis).is_none() { - let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) - else { - continue; + Topic::DkgConfirmation => { + if SeraiDkgCompleted::get(self.txn, self.spec.set()).is_none() { + log::info!("re-attempting DKG confirmation with attempt {attempt}"); + + // Since it wasn't completed, publish our nonces for the next attempt + let confirmation_nonces = + crate::tributary::dkg_confirmation_nonces(self.our_key, self.spec, self.txn, attempt); + let mut tx = Transaction::DkgConfirmationNonces { + attempt, + confirmation_nonces, + signed: Transaction::empty_signed(), }; - - // Since it wasn't completed, instruct the processor to start the next attempt - let id = - processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt }; - - let params = - frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap(); - let shares = u16::from(our_i.end) - u16::from(our_i.start); - - self - .processors - .send( - self.spec.set().network, - processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares }, - ) - .await; + tx.sign(&mut OsRng, genesis, self.our_key); + self.publish_tributary_tx.publish_tributary_tx(tx).await; } } - Topic::DkgConfirmation => unreachable!(), Topic::SubstrateSign(inner_id) => { let id = processor_messages::coordinator::SubstrateSignId { session: self.spec.set().session, @@ -496,6 +369,8 @@ impl< crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network) .map_or(0, |cosign| cosign.block_number); if latest_cosign < block_number { + log::info!("re-attempting cosigning {block_number:?} with attempt {attempt}"); + // Instruct the processor to start the next attempt self .processors @@ -512,6 +387,8 @@ impl< SubstrateSignableId::Batch(batch) => { // If the Batch hasn't appeared on-chain... 
if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() { + log::info!("re-attempting signing batch {batch:?} with attempt {attempt}"); + // Instruct the processor to start the next attempt // The processor won't continue if it's already signed a Batch // Prior checking if the Batch is on-chain just may reduce the non-participating @@ -529,6 +406,11 @@ impl< // If this Tributary hasn't been retired... // (published SlashReport/took too long to do so) if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() { + log::info!( + "re-attempting signing slash report for {:?} with attempt {attempt}", + self.spec.set() + ); + let report = SlashReport::get(self.txn, self.spec.set()) .expect("re-attempting signing a SlashReport we don't have?"); self @@ -575,8 +457,7 @@ impl< }; // Assign them 0 points for themselves report.insert(i, 0); - // Uses &[] as we only need the length which is independent to who else was removed - let signer_i = self.spec.i(&[], validator).unwrap(); + let signer_i = self.spec.i(validator).unwrap(); let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start); // Push `n` copies, one for each of their shares for _ in 0 .. signer_len { diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs index a90ed4799..af334149b 100644 --- a/coordinator/src/tributary/signing_protocol.rs +++ b/coordinator/src/tributary/signing_protocol.rs @@ -55,7 +55,7 @@ */ use core::ops::Deref; -use std::collections::HashMap; +use std::collections::{HashSet, HashMap}; use zeroize::{Zeroize, Zeroizing}; @@ -63,10 +63,7 @@ use rand_core::OsRng; use blake2::{Digest, Blake2s256}; -use ciphersuite::{ - group::{ff::PrimeField, GroupEncoding}, - Ciphersuite, Ristretto, -}; +use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; use frost::{ FrostError, dkg::{Participant, musig::musig}, @@ -77,10 +74,8 @@ use frost_schnorrkel::Schnorrkel; use scale::Encode; -use serai_client::{ - Public, - validator_sets::primitives::{KeyPair, musig_context, set_keys_message}, -}; +#[rustfmt::skip] +use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message}; use serai_db::*; @@ -89,6 +84,7 @@ use crate::tributary::TributarySpec; create_db!( SigningProtocolDb { CachedPreprocesses: (context: &impl Encode) -> [u8; 32] + DataSignedWith: (context: &impl Encode) -> (Vec, HashMap>), } ); @@ -117,16 +113,22 @@ impl SigningProtocol<'_, T, C> { }; let encryption_key_slice: &mut [u8] = encryption_key.as_mut(); - let algorithm = Schnorrkel::new(b"substrate"); + // Create the MuSig keys let keys: ThresholdKeys = musig(&musig_context(self.spec.set()), self.key, participants) .expect("signing for a set we aren't in/validator present multiple times") .into(); + // Define the algorithm + let algorithm = Schnorrkel::new(b"substrate"); + + // Check if we've prior preprocessed if CachedPreprocesses::get(self.txn, &self.context).is_none() { + // If we haven't, we create a machine solely to obtain the preprocess with let (machine, _) = AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng); + // Cache and save the preprocess to disk let mut cache = machine.cache(); assert_eq!(cache.0.len(), 32); #[allow(clippy::needless_range_loop)] @@ -137,13 +139,15 @@ impl SigningProtocol<'_, T, C> { CachedPreprocesses::set(self.txn, &self.context, &cache.0); } + // We're now guaranteed to have the preprocess, hence why this `unwrap` is safe let cached = 
CachedPreprocesses::get(self.txn, &self.context).unwrap(); - let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached); + let mut cached = Zeroizing::new(cached); #[allow(clippy::needless_range_loop)] for b in 0 .. 32 { cached[b] ^= encryption_key_slice[b]; } encryption_key_slice.zeroize(); + // Create the machine from the cached preprocess let (machine, preprocess) = AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached)); @@ -156,8 +160,29 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> { mut serialized_preprocesses: HashMap<Participant, Vec<u8>>, msg: &[u8], ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> { - let machine = self.preprocess_internal(participants).0; + // We can't clear the preprocess as we still need it to accumulate all of the shares + // We do save the message we signed so any future calls with distinct messages panic + // This assumes the txn deciding this data is committed before the share is broadcast + if let Some((existing_msg, existing_preprocesses)) = + DataSignedWith::get(self.txn, &self.context) + { + assert_eq!(msg, &existing_msg, "obtaining a signature share for a distinct message"); + assert_eq!( + &serialized_preprocesses, &existing_preprocesses, + "obtaining a signature share with a distinct set of preprocesses" + ); + } else { + DataSignedWith::set( + self.txn, + &self.context, + &(msg.to_vec(), serialized_preprocesses.clone()), + ); + } + + // Get the preprocessed machine + let (machine, _) = self.preprocess_internal(participants); + // Deserialize all the preprocesses let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>(); participants.sort(); let mut preprocesses = HashMap::new(); @@ -170,13 +195,14 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> { ); } + // Sign the share let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e { FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + FrostError::MissingParticipant(_) => panic!("unexpected error during sign: {e:?}"), FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, })?; @@ -207,24 +233,24 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> { } // Get the keys of the participants, noted by their threshold is, and return a new map indexed by -// the MuSig is. +// their MuSig is.
fn threshold_i_map_to_keys_and_musig_i_map( spec: &TributarySpec, - removed: &[<Ristretto as Ciphersuite>::G], our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>, mut map: HashMap<Participant, Vec<u8>>, ) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) { // Insert our own index so calculations aren't offset let our_threshold_i = spec - .i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref()) - .expect("MuSig t-of-n signing a for a protocol we were removed from") + .i(<Ristretto as Ciphersuite>::generator() * our_key.deref()) + .expect("not in a set we're signing for") .start; + // Asserts we weren't unexpectedly already present assert!(map.insert(our_threshold_i, vec![]).is_none()); let spec_validators = spec.validators(); let key_from_threshold_i = |threshold_i| { for (key, _) in &spec_validators { - if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start { + if threshold_i == spec.i(*key).expect("validator wasn't in a set they're in").start { return *key; } } @@ -235,29 +261,37 @@ fn threshold_i_map_to_keys_and_musig_i_map( let mut threshold_is = map.keys().copied().collect::<Vec<_>>(); threshold_is.sort(); for threshold_i in threshold_is { - sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); + sorted.push(( + threshold_i, + key_from_threshold_i(threshold_i), + map.remove(&threshold_i).unwrap(), + )); } // Now that signers are sorted, with their shares, create a map with the is needed for MuSig let mut participants = vec![]; let mut map = HashMap::new(); - for (raw_i, (key, share)) in sorted.into_iter().enumerate() { - let musig_i = u16::try_from(raw_i).unwrap() + 1; + let mut our_musig_i = None; + for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() { + let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap(); + if threshold_i == our_threshold_i { + our_musig_i = Some(musig_i); + } participants.push(key); - map.insert(Participant::new(musig_i).unwrap(), share); + map.insert(musig_i, share); } - map.remove(&our_threshold_i).unwrap(); + map.remove(&our_musig_i.unwrap()).unwrap(); (participants, map) } -type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>; +type DkgConfirmerSigningProtocol<'a, T> = + SigningProtocol<'a, T, (&'static [u8; 12], ValidatorSet, u32)>; pub(crate) struct DkgConfirmer<'a, T: DbTxn> { key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>, spec: &'a TributarySpec, - removed: Vec<<Ristretto as Ciphersuite>::G>, txn: &'a mut T, attempt: u32, } @@ -268,19 +302,19 @@ impl<T: DbTxn> DkgConfirmer<'_, T> { spec: &'a TributarySpec, txn: &'a mut T, attempt: u32, - ) -> Option<DkgConfirmer<'a, T>> { - // This relies on how confirmations are inlined into the DKG protocol and they accordingly - // share attempts - let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?; - Some(DkgConfirmer { key, spec, removed, txn, attempt }) + ) -> DkgConfirmer<'a, T> { + DkgConfirmer { key, spec, txn, attempt } } + fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> { - let context = (b"DkgConfirmer", self.attempt); + let context = (b"DkgConfirmer", self.spec.set(), self.attempt); SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } } fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) { - let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>(); + // This preprocesses with just us as we only decide the participants after obtaining + // preprocesses + let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()]; self.signing_protocol().preprocess_internal(&participants) } // Get the preprocess for this confirmation.
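As a standalone illustration of the re-indexing `threshold_i_map_to_keys_and_musig_i_map` performs above: the participating validators, keyed by the starts of their weight-derived threshold-index ranges, are sorted and then assigned contiguous 1-indexed MuSig indices. A minimal std-only sketch; `musig_indices` and the concrete index values are hypothetical, not part of this codebase.

// Maps each participating validator's threshold start-index to its MuSig index.
fn musig_indices(mut threshold_starts: Vec<u16>) -> Vec<(u16, u16)> {
  threshold_starts.sort_unstable();
  threshold_starts
    .into_iter()
    .enumerate()
    // 0-indexed enumeration to 1-indexed MuSig participant index
    .map(|(raw_i, threshold_start)| (threshold_start, u16::try_from(raw_i).unwrap() + 1))
    .collect()
}

fn main() {
  // Validators' ranges start at threshold indices 1, 4, and 5; only those at 5 and 1 participate
  assert_eq!(musig_indices(vec![5, 1]), vec![(1, 1), (5, 2)]);
}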
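The `DataSignedWith` guard in `share_internal` above exists because a cached preprocess is a Schnorr nonce: producing shares for two distinct messages with one nonce lets anyone recover the private key. A minimal sketch of that recovery, using the `ciphersuite` crate already used here; the equation `s = r + c * x` is the standard Schnorr response, and `nonce_reuse_recovers_key` plus all values are illustrative.

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use rand_core::OsRng;

fn nonce_reuse_recovers_key() {
  type F = <Ristretto as Ciphersuite>::F;
  let x = F::random(&mut OsRng); // the private key
  let r = F::random(&mut OsRng); // the reused nonce
  // Challenges for two distinct messages
  let (c1, c2) = (F::random(&mut OsRng), F::random(&mut OsRng));
  // s = r + c * x for each signature
  let (s1, s2) = (r + (c1 * x), r + (c2 * x));
  // s1 - s2 = (c1 - c2) * x, so x is recoverable from public data alone
  assert_eq!((s1 - s2) * (c1 - c2).invert().unwrap(), x);
}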
@@ -293,14 +327,9 @@ impl DkgConfirmer<'_, T> { preprocesses: HashMap>, key_pair: &KeyPair, ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); - let preprocesses = - threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1; - let msg = set_keys_message( - &self.spec.set(), - &self.removed.iter().map(|key| Public(key.to_bytes())).collect::>(), - key_pair, - ); + let (participants, preprocesses) = + threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses); + let msg = set_keys_message(&self.spec.set(), key_pair); self.signing_protocol().share_internal(&participants, preprocesses, &msg) } // Get the share for this confirmation, if the preprocesses are valid. @@ -318,8 +347,9 @@ impl DkgConfirmer<'_, T> { key_pair: &KeyPair, shares: HashMap>, ) -> Result<[u8; 64], Participant> { - let shares = - threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1; + assert_eq!(preprocesses.keys().collect::>(), shares.keys().collect::>()); + + let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1; let machine = self .share_internal(preprocesses, key_pair) diff --git a/coordinator/src/tributary/spec.rs b/coordinator/src/tributary/spec.rs index 92905490f..efc792e69 100644 --- a/coordinator/src/tributary/spec.rs +++ b/coordinator/src/tributary/spec.rs @@ -9,7 +9,7 @@ use frost::Participant; use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet}; +use serai_client::validator_sets::primitives::ValidatorSet; fn borsh_serialize_validators( validators: &Vec<(::G, u16)>, @@ -49,6 +49,7 @@ pub struct TributarySpec { deserialize_with = "borsh_deserialize_validators" )] validators: Vec<(::G, u16)>, + evrf_public_keys: Vec<([u8; 32], Vec)>, } impl TributarySpec { @@ -56,16 +57,10 @@ impl TributarySpec { serai_block: [u8; 32], start_time: u64, set: ValidatorSet, - set_participants: Vec<(PublicKey, u16)>, + validators: Vec<(::G, u16)>, + evrf_public_keys: Vec<([u8; 32], Vec)>, ) -> TributarySpec { - let mut validators = vec![]; - for (participant, shares) in set_participants { - let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) - .expect("invalid key registered as participant"); - validators.push((participant, shares)); - } - - Self { serai_block, start_time, set, validators } + Self { serai_block, start_time, set, validators, evrf_public_keys } } pub fn set(&self) -> ValidatorSet { @@ -88,24 +83,15 @@ impl TributarySpec { self.start_time } - pub fn n(&self, removed_validators: &[::G]) -> u16 { - self - .validators - .iter() - .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight }) - .sum() + pub fn n(&self) -> u16 { + self.validators.iter().map(|(_, weight)| *weight).sum() } pub fn t(&self) -> u16 { - // t doesn't change with regards to the amount of removed validators - ((2 * self.n(&[])) / 3) + 1 + ((2 * self.n()) / 3) + 1 } - pub fn i( - &self, - removed_validators: &[::G], - key: ::G, - ) -> Option> { + pub fn i(&self, key: ::G) -> Option> { let mut all_is = HashMap::new(); let mut i = 1; for (validator, weight) in &self.validators { @@ -116,34 +102,12 @@ impl TributarySpec { i += weight; } - let original_i = all_is.get(&key)?.clone(); - let mut result_i = original_i.clone(); - for removed_validator in removed_validators { - let removed_i = all_is - 
.get(removed_validator) - .expect("removed validator wasn't present in set to begin with"); - // If the queried key was removed, return None - if &original_i == removed_i { - return None; - } - - // If the removed was before the queried, shift the queried down accordingly - if removed_i.start < original_i.start { - let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start); - result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap(); - result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap(); - } - } - Some(result_i) + Some(all_is.get(&key)?.clone()) } - pub fn reverse_lookup_i( - &self, - removed_validators: &[::G], - i: Participant, - ) -> Option<::G> { + pub fn reverse_lookup_i(&self, i: Participant) -> Option<::G> { for (validator, _) in &self.validators { - if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) { + if self.i(*validator).map_or(false, |range| range.contains(&i)) { return Some(*validator); } } @@ -153,4 +117,8 @@ impl TributarySpec { pub fn validators(&self) -> Vec<(::G, u64)> { self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() } + + pub fn evrf_public_keys(&self) -> Vec<([u8; 32], Vec)> { + self.evrf_public_keys.clone() + } } diff --git a/coordinator/src/tributary/transaction.rs b/coordinator/src/tributary/transaction.rs index 8d8bdd4cd..860dbd0f7 100644 --- a/coordinator/src/tributary/transaction.rs +++ b/coordinator/src/tributary/transaction.rs @@ -12,7 +12,6 @@ use ciphersuite::{ Ciphersuite, Ristretto, }; use schnorr::SchnorrSignature; -use frost::Participant; use scale::{Encode, Decode}; use processor_messages::coordinator::SubstrateSignableId; @@ -130,32 +129,26 @@ impl SignData { #[derive(Clone, PartialEq, Eq)] pub enum Transaction { - RemoveParticipantDueToDkg { + RemoveParticipant { participant: ::G, signed: Signed, }, - DkgCommitments { - attempt: u32, - commitments: Vec>, + DkgParticipation { + participation: Vec, signed: Signed, }, - DkgShares { + DkgConfirmationNonces { + // The confirmation attempt attempt: u32, - // Sending Participant, Receiving Participant, Share - shares: Vec>>, + // The nonces for DKG confirmation attempt #attempt confirmation_nonces: [u8; 64], signed: Signed, }, - InvalidDkgShare { - attempt: u32, - accuser: Participant, - faulty: Participant, - blame: Option>, - signed: Signed, - }, - DkgConfirmed { + DkgConfirmationShare { + // The confirmation attempt attempt: u32, + // The share for DKG confirmation attempt #attempt confirmation_share: [u8; 32], signed: Signed, }, @@ -197,29 +190,22 @@ pub enum Transaction { impl Debug for Transaction { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt - .debug_struct("Transaction::RemoveParticipantDueToDkg") + Transaction::RemoveParticipant { participant, signed } => fmt + .debug_struct("Transaction::RemoveParticipant") .field("participant", &hex::encode(participant.to_bytes())) .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), - Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt - .debug_struct("Transaction::DkgCommitments") - .field("attempt", attempt) + Transaction::DkgParticipation { signed, .. 
} => fmt + .debug_struct("Transaction::DkgParticipation") .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), - Transaction::DkgShares { attempt, signed, .. } => fmt - .debug_struct("Transaction::DkgShares") + Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt + .debug_struct("Transaction::DkgConfirmationNonces") .field("attempt", attempt) .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), - Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt - .debug_struct("Transaction::InvalidDkgShare") - .field("attempt", attempt) - .field("accuser", accuser) - .field("faulty", faulty) - .finish_non_exhaustive(), - Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt - .debug_struct("Transaction::DkgConfirmed") + Transaction::DkgConfirmationShare { attempt, signed, .. } => fmt + .debug_struct("Transaction::DkgConfirmationShare") .field("attempt", attempt) .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), @@ -261,43 +247,32 @@ impl ReadWrite for Transaction { reader.read_exact(&mut kind)?; match kind[0] { - 0 => Ok(Transaction::RemoveParticipantDueToDkg { + 0 => Ok(Transaction::RemoveParticipant { participant: Ristretto::read_G(reader)?, signed: Signed::read_without_nonce(reader, 0)?, }), 1 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let commitments = { - let mut commitments_len = [0; 1]; - reader.read_exact(&mut commitments_len)?; - let commitments_len = usize::from(commitments_len[0]); - if commitments_len == 0 { - Err(io::Error::other("zero commitments in DkgCommitments"))?; - } + let participation = { + let mut participation_len = [0; 4]; + reader.read_exact(&mut participation_len)?; + let participation_len = u32::from_le_bytes(participation_len); - let mut each_commitments_len = [0; 2]; - reader.read_exact(&mut each_commitments_len)?; - let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len)); - if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT { + if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() { Err(io::Error::other( - "commitments present in transaction exceeded transaction size limit", + "participation present in transaction exceeded transaction size limit", ))?; } - let mut commitments = vec![vec![]; commitments_len]; - for commitments in &mut commitments { - *commitments = vec![0; each_commitments_len]; - reader.read_exact(commitments)?; - } - commitments + let participation_len = usize::try_from(participation_len).unwrap(); + + let mut participation = vec![0; participation_len]; + reader.read_exact(&mut participation)?; + participation }; let signed = Signed::read_without_nonce(reader, 0)?; - Ok(Transaction::DkgCommitments { attempt, commitments, signed }) + Ok(Transaction::DkgParticipation { participation, signed }) } 2 => { @@ -305,36 +280,12 @@ impl ReadWrite for Transaction { reader.read_exact(&mut attempt)?; let attempt = u32::from_le_bytes(attempt); - let shares = { - let mut share_quantity = [0; 1]; - reader.read_exact(&mut share_quantity)?; - - let mut key_share_quantity = [0; 1]; - reader.read_exact(&mut key_share_quantity)?; - - let mut share_len = [0; 2]; - reader.read_exact(&mut share_len)?; - let share_len = usize::from(u16::from_le_bytes(share_len)); - - let mut all_shares = vec![]; - for _ in 0 .. share_quantity[0] { - let mut shares = vec![]; - for _ in 0 .. 
key_share_quantity[0] { - let mut share = vec![0; share_len]; - reader.read_exact(&mut share)?; - shares.push(share); - } - all_shares.push(shares); - } - all_shares - }; - let mut confirmation_nonces = [0; 64]; reader.read_exact(&mut confirmation_nonces)?; - let signed = Signed::read_without_nonce(reader, 1)?; + let signed = Signed::read_without_nonce(reader, 0)?; - Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) + Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed }) } 3 => { @@ -342,53 +293,21 @@ impl ReadWrite for Transaction { reader.read_exact(&mut attempt)?; let attempt = u32::from_le_bytes(attempt); - let mut accuser = [0; 2]; - reader.read_exact(&mut accuser)?; - let accuser = Participant::new(u16::from_le_bytes(accuser)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut faulty = [0; 2]; - reader.read_exact(&mut faulty)?; - let faulty = Participant::new(u16::from_le_bytes(faulty)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut blame_len = [0; 2]; - reader.read_exact(&mut blame_len)?; - let mut blame = vec![0; u16::from_le_bytes(blame_len).into()]; - reader.read_exact(&mut blame)?; - - // This shares a nonce with DkgConfirmed as only one is expected - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::InvalidDkgShare { - attempt, - accuser, - faulty, - blame: Some(blame).filter(|blame| !blame.is_empty()), - signed, - }) - } - - 4 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - let mut confirmation_share = [0; 32]; reader.read_exact(&mut confirmation_share)?; - let signed = Signed::read_without_nonce(reader, 2)?; + let signed = Signed::read_without_nonce(reader, 1)?; - Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed }) + Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed }) } - 5 => { + 4 => { let mut block = [0; 32]; reader.read_exact(&mut block)?; Ok(Transaction::CosignSubstrateBlock(block)) } - 6 => { + 5 => { let mut block = [0; 32]; reader.read_exact(&mut block)?; let mut batch = [0; 4]; @@ -396,16 +315,16 @@ impl ReadWrite for Transaction { Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) }) } - 7 => { + 6 => { let mut block = [0; 8]; reader.read_exact(&mut block)?; Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) } - 8 => SignData::read(reader).map(Transaction::SubstrateSign), - 9 => SignData::read(reader).map(Transaction::Sign), + 7 => SignData::read(reader).map(Transaction::SubstrateSign), + 8 => SignData::read(reader).map(Transaction::Sign), - 10 => { + 9 => { let mut plan = [0; 32]; reader.read_exact(&mut plan)?; @@ -420,7 +339,7 @@ impl ReadWrite for Transaction { Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) } - 11 => { + 10 => { let mut len = [0]; reader.read_exact(&mut len)?; let len = len[0]; @@ -445,109 +364,59 @@ impl ReadWrite for Transaction { fn write(&self, writer: &mut W) -> io::Result<()> { match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { + Transaction::RemoveParticipant { participant, signed } => { writer.write_all(&[0])?; writer.write_all(&participant.to_bytes())?; signed.write_without_nonce(writer) } - Transaction::DkgCommitments { attempt, commitments, signed } => { + Transaction::DkgParticipation { participation, signed } => { writer.write_all(&[1])?; - writer.write_all(&attempt.to_le_bytes())?; - if 
commitments.is_empty() { - Err(io::Error::other("zero commitments in DkgCommitments"))? - } - writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; - for commitments_i in commitments { - if commitments_i.len() != commitments[0].len() { - Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? - } - } - writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; - for commitments in commitments { - writer.write_all(commitments)?; - } + writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?; + writer.write_all(participation)?; signed.write_without_nonce(writer) } - Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { + Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => { writer.write_all(&[2])?; writer.write_all(&attempt.to_le_bytes())?; - - // `shares` is a Vec which is supposed to map to a HashMap>. Since we - // bound participants to 150, this conversion is safe if a valid in-memory transaction. - writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; - // This assumes at least one share is being sent to another party - writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; - let share_len = shares[0][0].len(); - // For BLS12-381 G2, this would be: - // - A 32-byte share - // - A 96-byte ephemeral key - // - A 128-byte signature - // Hence why this has to be u16 - writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; - - for these_shares in shares { - assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); - for share in these_shares { - assert_eq!(share.len(), share_len, "sent shares were of variable length"); - writer.write_all(share)?; - } - } - writer.write_all(confirmation_nonces)?; signed.write_without_nonce(writer) } - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { + Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => { writer.write_all(&[3])?; writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(&u16::from(*accuser).to_le_bytes())?; - writer.write_all(&u16::from(*faulty).to_le_bytes())?; - - // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length - assert!(blame.as_ref().map_or(1, Vec::len) != 0); - let blame_len = - u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); - writer.write_all(&blame_len.to_le_bytes())?; - writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; - - signed.write_without_nonce(writer) - } - - Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { - writer.write_all(&[4])?; - writer.write_all(&attempt.to_le_bytes())?; writer.write_all(confirmation_share)?; signed.write_without_nonce(writer) } Transaction::CosignSubstrateBlock(block) => { - writer.write_all(&[5])?; + writer.write_all(&[4])?; writer.write_all(block) } Transaction::Batch { block, batch } => { - writer.write_all(&[6])?; + writer.write_all(&[5])?; writer.write_all(block)?; writer.write_all(&batch.to_le_bytes()) } Transaction::SubstrateBlock(block) => { - writer.write_all(&[7])?; + writer.write_all(&[6])?; writer.write_all(&block.to_le_bytes()) } Transaction::SubstrateSign(data) => { - writer.write_all(&[8])?; + writer.write_all(&[7])?; data.write(writer) } Transaction::Sign(data) => { - writer.write_all(&[9])?; + writer.write_all(&[8])?; data.write(writer) } Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { - writer.write_all(&[10])?; + 
writer.write_all(&[9])?; writer.write_all(plan)?; writer .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; @@ -556,7 +425,7 @@ impl ReadWrite for Transaction { signature.write(writer) } Transaction::SlashReport(points, signed) => { - writer.write_all(&[11])?; + writer.write_all(&[10])?; writer.write_all(&[u8::try_from(points.len()).unwrap()])?; for points in points { writer.write_all(&points.to_le_bytes())?; @@ -570,15 +439,16 @@ impl ReadWrite for Transaction { impl TransactionTrait for Transaction { fn kind(&self) -> TransactionKind<'_> { match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { + Transaction::RemoveParticipant { participant, signed } => { TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) } - Transaction::DkgCommitments { attempt, commitments: _, signed } | - Transaction::DkgShares { attempt, signed, .. } | - Transaction::InvalidDkgShare { attempt, signed, .. } | - Transaction::DkgConfirmed { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) + Transaction::DkgParticipation { signed, .. } => { + TransactionKind::Signed(b"dkg".to_vec(), signed) + } + Transaction::DkgConfirmationNonces { attempt, signed, .. } | + Transaction::DkgConfirmationShare { attempt, signed, .. } => { + TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed) } Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), @@ -645,11 +515,14 @@ impl Transaction { fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here let nonce = match tx { - Transaction::RemoveParticipantDueToDkg { .. } => 0, + Transaction::RemoveParticipant { .. } => 0, - Transaction::DkgCommitments { .. } => 0, - Transaction::DkgShares { .. } => 1, - Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, + Transaction::DkgParticipation { .. } => 0, + // Uses a nonce of 0 as it has an internal attempt counter we distinguish by + Transaction::DkgConfirmationNonces { .. } => 0, + // Uses a nonce of 1 due to internal attempt counter and due to following + // DkgConfirmationNonces + Transaction::DkgConfirmationShare { .. } => 1, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), @@ -668,11 +541,10 @@ impl Transaction { nonce, #[allow(clippy::match_same_arms)] match tx { - Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } | - Transaction::DkgCommitments { ref mut signed, .. } | - Transaction::DkgShares { ref mut signed, .. } | - Transaction::InvalidDkgShare { ref mut signed, .. } | - Transaction::DkgConfirmed { ref mut signed, .. } => signed, + Transaction::RemoveParticipant { ref mut signed, .. } | + Transaction::DkgParticipation { ref mut signed, .. } | + Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed, + Transaction::DkgConfirmationShare { ref mut signed, .. } => signed, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 0ea74bfe6..9b23dc6cd 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -50,13 +50,17 @@ pub(crate) use crate::tendermint::*; pub mod tests; /// Size limit for an individual transaction. -pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000; +// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking +// `MAX_KEY_LEN`. 
This also needs to be big enough to pariticpate in signing 520 Bitcoin inputs +// with 49 key shares, and signing 120 Monero inputs with 49 key shares. +// TODO: Add a test for these properties +pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000; /// Amount of transactions a single account may have in the mempool. pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; /// Block size limit. -// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious +// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious // participant from flooding disks and causing out of space errors in order processes. -pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; +pub const BLOCK_SIZE_LIMIT: usize = 2_001_000; pub(crate) const TENDERMINT_MESSAGE: u8 = 0; pub(crate) const TRANSACTION_MESSAGE: u8 = 1; diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml index 7ed301f56..cde0d1539 100644 --- a/crypto/dkg/Cargo.toml +++ b/crypto/dkg/Cargo.toml @@ -36,9 +36,26 @@ multiexp = { path = "../multiexp", version = "0.4", default-features = false } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false } dleq = { path = "../dleq", version = "^0.4.1", default-features = false } +# eVRF DKG dependencies +subtle = { version = "2", default-features = false, features = ["std"], optional = true } +generic-array = { version = "1", default-features = false, features = ["alloc"], optional = true } +blake2 = { version = "0.10", default-features = false, features = ["std"], optional = true } +rand_chacha = { version = "0.3", default-features = false, features = ["std"], optional = true } +generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", default-features = false, optional = true } +ec-divisors = { path = "../evrf/divisors", default-features = false, optional = true } +generalized-bulletproofs-circuit-abstraction = { path = "../evrf/circuit-abstraction", optional = true } +generalized-bulletproofs-ec-gadgets = { path = "../evrf/ec-gadgets", optional = true } + +secq256k1 = { path = "../evrf/secq256k1", optional = true } +embedwards25519 = { path = "../evrf/embedwards25519", optional = true } + [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } +rand = { version = "0.8", default-features = false, features = ["std"] } ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] } +generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] } +ec-divisors = { path = "../evrf/divisors", features = ["pasta"] } +pasta_curves = "0.5" [features] std = [ @@ -62,5 +79,22 @@ std = [ "dleq/serialize" ] borsh = ["dep:borsh"] +evrf = [ + "std", + + "dep:subtle", + "dep:generic-array", + + "dep:blake2", + "dep:rand_chacha", + + "dep:generalized-bulletproofs", + "dep:ec-divisors", + "dep:generalized-bulletproofs-circuit-abstraction", + "dep:generalized-bulletproofs-ec-gadgets", +] +evrf-secp256k1 = ["evrf", "ciphersuite/secp256k1", "secq256k1"] +evrf-ed25519 = ["evrf", "ciphersuite/ed25519", "embedwards25519"] +evrf-ristretto = ["evrf", "ciphersuite/ristretto", "embedwards25519"] tests = ["rand_core/getrandom"] default = ["std"] diff --git a/crypto/dkg/src/encryption.rs b/crypto/dkg/src/encryption.rs index 51cf6b060..1ad721f66 100644 --- a/crypto/dkg/src/encryption.rs +++ b/crypto/dkg/src/encryption.rs @@ -98,11 +98,11 @@ fn ecdh(private: &Zeroizing, public: C::G) -> Zeroizing(context: 
&str, ecdh: &Zeroizing<C::G>) -> ChaCha20 { +fn cipher<C: Ciphersuite>(context: [u8; 32], ecdh: &Zeroizing<C::G>) -> ChaCha20 { // Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly // TODO: https://github.com/serai-dex/serai/issues/151 let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript.domain_separate(b"encryption_key"); @@ -134,7 +134,7 @@ fn cipher<C: Ciphersuite>(context: &str, ecdh: &Zeroizing<C::G>) -> ChaCha20 { fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>( rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, to: C::G, mut msg: Zeroizing<E>, @@ -197,7 +197,7 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> { pub(crate) fn invalidate_msg<R: RngCore + CryptoRng>( &mut self, rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, ) { // Invalidate the message by specifying a new key/Schnorr PoP @@ -219,7 +219,7 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> { pub(crate) fn invalidate_share_serialization<R: RngCore + CryptoRng>( &mut self, rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, to: C::G, ) { @@ -243,7 +243,7 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> { pub(crate) fn invalidate_share_value<R: RngCore + CryptoRng>( &mut self, rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, to: C::G, ) { @@ -300,14 +300,14 @@ impl<C: Ciphersuite> EncryptionKeyProof<C> { // This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no // root of trust other than their existence in the assumed-to-exist external authenticated channel. fn pop_challenge<C: Ciphersuite>( - context: &str, + context: [u8; 32], nonce: C::G, key: C::G, sender: Participant, msg: &[u8], ) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript.domain_separate(b"proof_of_possession"); @@ -323,9 +323,9 @@ fn pop_challenge<C: Ciphersuite>( C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr")) } -fn encryption_key_transcript(context: &str) -> RecommendedTranscript { +fn encryption_key_transcript(context: [u8; 32]) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript } @@ -337,14 +337,74 @@ pub(crate) enum DecryptionError { InvalidProof, } +// A simple box for managing decryption. +#[derive(Clone, Debug)] +pub(crate) struct Decryption<C: Ciphersuite> { + context: [u8; 32], + enc_keys: HashMap<Participant, C::G>, +} + +impl<C: Ciphersuite> Decryption<C> { + pub(crate) fn new(context: [u8; 32]) -> Self { + Self { context, enc_keys: HashMap::new() } + } + pub(crate) fn register<M: Message>( + &mut self, + participant: Participant, + msg: EncryptionKeyMessage<C, M>, + ) -> M { + assert!( + !self.enc_keys.contains_key(&participant), + "Re-registering encryption key for a participant" + ); + self.enc_keys.insert(participant, msg.enc_key); + msg.msg + } + + // Given a message, and the intended decryptor, and a proof for its key, decrypt the message. + // Returns None if the key was wrong.
+ pub(crate) fn decrypt_with_proof<E: Encryptable>( + &self, + from: Participant, + decryptor: Participant, + mut msg: EncryptedMessage<C, E>, + // There's no encryption key proof if the accusation is of an invalid signature + proof: Option<EncryptionKeyProof<C>>, + ) -> Result<Zeroizing<E>, DecryptionError> { + if !msg.pop.verify( + msg.key, + pop_challenge::<C>(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), + ) { + Err(DecryptionError::InvalidSignature)?; + } + + if let Some(proof) = proof { + // Verify this is the decryption key for this message + proof + .dleq + .verify( + &mut encryption_key_transcript(self.context), + &[C::generator(), msg.key], + &[self.enc_keys[&decryptor], *proof.key], + ) + .map_err(|_| DecryptionError::InvalidProof)?; + + cipher::<C>(self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); + Ok(msg.msg) + } else { + Err(DecryptionError::InvalidProof) + } + } +} + // A simple box for managing encryption. #[derive(Clone)] pub(crate) struct Encryption<C: Ciphersuite> { - context: String, - i: Option<Participant>, + context: [u8; 32], + i: Participant, enc_key: Zeroizing<C::F>, enc_pub_key: C::G, - enc_keys: HashMap<Participant, C::G>, + decryption: Decryption<C>, } impl<C: Ciphersuite> fmt::Debug for Encryption<C> { @@ -354,7 +414,7 @@ impl<C: Ciphersuite> fmt::Debug for Encryption<C> { .field("context", &self.context) .field("i", &self.i) .field("enc_pub_key", &self.enc_pub_key) - .field("enc_keys", &self.enc_keys) + .field("decryption", &self.decryption) .finish_non_exhaustive() } } @@ -363,7 +423,7 @@ impl<C: Ciphersuite> Zeroize for Encryption<C> { fn zeroize(&mut self) { self.enc_key.zeroize(); self.enc_pub_key.zeroize(); - for (_, mut value) in self.enc_keys.drain() { + for (_, mut value) in self.decryption.enc_keys.drain() { value.zeroize(); } } } @@ -371,8 +431,8 @@ impl<C: Ciphersuite> Encryption<C> { pub(crate) fn new<R: RngCore + CryptoRng>( - context: String, - i: Option<Participant>, + context: [u8; 32], + i: Participant, rng: &mut R, ) -> Self { let enc_key = Zeroizing::new(C::random_nonzero_F(rng)); @@ -381,7 +441,7 @@ impl<C: Ciphersuite> Encryption<C> { i, enc_pub_key: C::generator() * enc_key.deref(), enc_key, - enc_keys: HashMap::new(), + decryption: Decryption::new(context), } } @@ -394,12 +454,7 @@ impl<C: Ciphersuite> Encryption<C> { participant: Participant, msg: EncryptionKeyMessage<C, M>, ) -> M { - assert!( - !self.enc_keys.contains_key(&participant), - "Re-registering encryption key for a participant" - ); - self.enc_keys.insert(participant, msg.enc_key); - msg.msg + self.decryption.register(participant, msg) } pub(crate) fn encrypt<R: RngCore + CryptoRng, E: Encryptable>( &mut self, rng: &mut R, participant: Participant, msg: Zeroizing<E>, ) -> EncryptedMessage<C, E> { - encrypt(rng, &self.context, self.i.unwrap(), self.enc_keys[&participant], msg) + encrypt(rng, self.context, self.i, self.decryption.enc_keys[&participant], msg) } pub(crate) fn decrypt( @@ -426,18 +481,18 @@ impl<C: Ciphersuite> Encryption<C> { batch, batch_id, msg.key, - pop_challenge::<C>(&self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), + pop_challenge::<C>(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), ); let key = ecdh::<C>(&self.enc_key, msg.key); - cipher::<C>(&self.context, &key).apply_keystream(msg.msg.as_mut().as_mut()); + cipher::<C>(self.context, &key).apply_keystream(msg.msg.as_mut().as_mut()); ( msg.msg, EncryptionKeyProof { key, dleq: DLEqProof::prove( rng, - &mut encryption_key_transcript(&self.context), + &mut encryption_key_transcript(self.context), &[C::generator(), msg.key], &self.enc_key, ), }, ) } - // Given a message, and the intended decryptor, and a proof for its key, decrypt the message. - // Returns None if the key was wrong.
- pub(crate) fn decrypt_with_proof( - &self, - from: Participant, - decryptor: Participant, - mut msg: EncryptedMessage, - // There's no encryption key proof if the accusation is of an invalid signature - proof: Option>, - ) -> Result, DecryptionError> { - if !msg.pop.verify( - msg.key, - pop_challenge::(&self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), - ) { - Err(DecryptionError::InvalidSignature)?; - } - - if let Some(proof) = proof { - // Verify this is the decryption key for this message - proof - .dleq - .verify( - &mut encryption_key_transcript(&self.context), - &[C::generator(), msg.key], - &[self.enc_keys[&decryptor], *proof.key], - ) - .map_err(|_| DecryptionError::InvalidProof)?; - - cipher::(&self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); - Ok(msg.msg) - } else { - Err(DecryptionError::InvalidProof) - } + pub(crate) fn into_decryption(self) -> Decryption { + self.decryption + } }
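The hunks above key the cipher and both transcripts on a fixed-width `[u8; 32]` context rather than a `&str`. As a rough, self-contained sketch of that derivation pattern (not this crate's API: `toy_cipher` is a hypothetical helper, and Blake2b stands in for `RecommendedTranscript`; assumes the `blake2` and `chacha20` crates), the same context and ECDH output must deterministically yield the same keystream for decryption to invert encryption:

```rust
use blake2::{Blake2b512, Digest};
use chacha20::{
  cipher::{KeyIvInit, StreamCipher},
  ChaCha20, Key, Nonce,
};

// Hypothetical stand-in for `cipher`: hash a domain label, the 32-byte
// context, and the ECDH output into a ChaCha20 key and nonce
fn toy_cipher(context: [u8; 32], ecdh: &[u8; 32]) -> ChaCha20 {
  let mut hasher = Blake2b512::new();
  hasher.update(b"DKG Encryption toy");
  hasher.update(context);
  hasher.update(ecdh);
  let digest = hasher.finalize();
  ChaCha20::new(Key::from_slice(&digest[.. 32]), Nonce::from_slice(&digest[32 .. 44]))
}

fn main() {
  let (context, ecdh) = ([1; 32], [2; 32]);
  let mut msg = *b"secret share";
  // Applying the keystream once encrypts...
  toy_cipher(context, &ecdh).apply_keystream(&mut msg);
  // ... and re-deriving the cipher and applying it again decrypts
  toy_cipher(context, &ecdh).apply_keystream(&mut msg);
  assert_eq!(&msg, b"secret share");
}
```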
diff --git a/crypto/dkg/src/evrf/mod.rs b/crypto/dkg/src/evrf/mod.rs new file mode 100644 index 000000000..3d043138a --- /dev/null +++ b/crypto/dkg/src/evrf/mod.rs @@ -0,0 +1,584 @@ +/* + We implement a DKG using an eVRF, as detailed in the eVRF paper. For the eVRF itself, we do not + use a Paillier-based construction, nor the detailed construction premised on a Bulletproof. + + For reference, the detailed construction premised on a Bulletproof involves two curves, notated + here as `C` and `E`, where the scalar field of `C` is the field of `E`. Accordingly, Bulletproofs + over `C` can efficiently perform group operations of points of curve `E`. Each participant has a + private point (`P_i`) on curve `E` committed to over curve `C`. The eVRF selects a pair of + scalars `a, b`, where the participant proves in-Bulletproof the points `A_i, B_i` are + `a * P_i, b * P_i`. The eVRF proceeds to commit to `A_i.x + B_i.x` in a Pedersen Commitment. + + Our eVRF uses + [Generalized Bulletproofs]( + https://repo.getmonero.org/monero-project/ccs-proposals + /uploads/a9baa50c38c6312efc0fea5c6a188bb9/gbp.pdf + ). + This allows us much larger witnesses without growing the reference string, and enables us to + efficiently sample challenges off in-circuit variables (via placing the variables in a vector + commitment, then challenging from a transcript of the commitments). We proceed to use + [elliptic curve divisors]( + https://repo.getmonero.org/-/project/54/ + uploads/eb1bf5b4d4855a3480c38abf895bd8e8/Veridise_Divisor_Proofs.pdf + ) + (which require the ability to sample a challenge off in-circuit variables) to prove discrete + logarithms efficiently. + + This is done via having a private scalar (`p_i`) on curve `E`, not a private point, and + publishing the public key for it (`P_i = p_i * G`, where `G` is a generator of `E`). The eVRF + samples two points with unknown discrete logarithms `A, B`, and the circuit proves a Pedersen + Commitment commits to `(p_i * A).x + (p_i * B).x`. + + With the eVRF established, we now detail our other novel aspect. The eVRF paper expects secret + shares to be sent to the other parties yet does not detail a precise way to do so. If we + encrypted the secret shares with some stream cipher, each recipient would have to attest validity + or accuse the sender of impropriety. We want an encryption scheme where anyone can verify the + secret shares were encrypted properly, without additional info, efficiently. + + Please note from the published commitments, it's possible to calculate a commitment to the + secret share each party should receive (`V_i`). + + We have the sender sample two scalars per recipient, denoted `x_i, y_i` (where `i` is the + recipient index). They perform the eVRF to prove a Pedersen Commitment commits to + `z_i = (x_i * P_i).x + (y_i * P_i).x` and `x_i, y_i` are the discrete logarithms of `X_i, Y_i` + over `G`. They then publish the encrypted share `s_i + z_i` and `X_i, Y_i`. + + The recipient is able to decrypt the share via calculating + `s_i - ((p_i * X_i).x + (p_i * Y_i).x)`. + + To verify the secret share, we have the `F` terms of the Pedersen Commitments revealed (where + `F, H` are generators of `C`, `F` is used for binding and `H` for blinding). This already needs + to be done for the eVRF outputs used within the DKG, in order to obtain the commitments to the + coefficients. When we have the commitment `Z_i = ((p_i * A).x + (p_i * B).x) * F`, we simply + check `s_i * F = Z_i + V_i`. + + In order to open the Pedersen Commitments to their `F` terms, we transcript the commitments and + the claimed openings, then assign random weights to each pair of `(commitment, opening)`. The + prover proves knowledge of the discrete logarithm of the sum of the weighted commitments, minus + the sum of the weighted openings, over `H`. + + The benefit to this construction is that given a broadcast channel which is reliable and + ordered, only `t` messages must be broadcast from honest parties in order to create a `t`-of-`n` + multisig. If the encrypted secret shares were not verifiable, one would need at least `t + n` + messages to ensure every participant has a correct dealing and can participate in future + reconstructions of the secret. This would also require all `n` parties be online, whereas this is + robust to threshold `t`. +*/ + +use core::ops::Deref; +use std::{ + io::{self, Read, Write}, + collections::{HashSet, HashMap}, +}; + +use rand_core::{RngCore, CryptoRng}; + +use zeroize::{Zeroize, Zeroizing}; + +use blake2::{Digest, Blake2s256}; +use ciphersuite::{ + group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, + }, + Ciphersuite, +}; + +use multiexp::multiexp_vartime; + +use generalized_bulletproofs::arithmetic_circuit_proof::*; +use ec_divisors::DivisorCurve; + +use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore, ThresholdKeys}; + +pub(crate) mod proof; +use proof::*; +pub use proof::{EvrfCurve, EvrfGenerators};
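Before the concrete types, a minimal numeric sketch of the mask and decryption identities above (`z_i = (x_i * P_i).x + (y_i * P_i).x`, recovered via `s_i - ((p_i * X_i).x + (p_i * Y_i).x)`). This is not the crate's API: it collapses both curves onto secp256k1 via the `k256` crate, and reducing x-coordinates into the scalar field is a stand-in (in the real construction the embedded curve's x-coordinates natively lie in `C::F`). It also omits the Bulletproof and the `s_i * F = Z_i + V_i` check entirely:

```rust
use k256::{
  elliptic_curve::{ops::Reduce, point::AffineCoordinates, Field},
  ProjectivePoint, Scalar, U256,
};
use rand_core::OsRng;

// Hypothetical helper: a point's x coordinate, reduced into the scalar field
fn x_coord(point: ProjectivePoint) -> Scalar {
  <Scalar as Reduce<U256>>::reduce_bytes(&point.to_affine().x())
}

fn main() {
  let g = ProjectivePoint::GENERATOR;

  // Recipient i's key pair: p_i and P_i = p_i * G
  let p_i = Scalar::random(&mut OsRng);
  let big_p_i = g * p_i;

  // The sender samples x_i, y_i and publishes X_i, Y_i
  let (x_i, y_i) = (Scalar::random(&mut OsRng), Scalar::random(&mut OsRng));
  let (big_x_i, big_y_i) = (g * x_i, g * y_i);

  // Mask z_i = (x_i * P_i).x + (y_i * P_i).x, published share s_i + z_i
  let s_i = Scalar::random(&mut OsRng);
  let z_i = x_coord(big_p_i * x_i) + x_coord(big_p_i * y_i);
  let enc_share = s_i + z_i;

  // The recipient decrypts via s_i = enc_share - ((p_i * X_i).x + (p_i * Y_i).x)
  let recovered = enc_share - (x_coord(big_x_i * p_i) + x_coord(big_y_i * p_i));
  assert_eq!(recovered, s_i);
}
```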
+ +/// Participation in the DKG. +/// +/// `Participation` is meant to be broadcast to all other participants over an authenticated, +/// reliable broadcast channel. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Participation { + proof: Vec, + encrypted_secret_shares: HashMap, +} + +impl Participation { + pub fn read(reader: &mut R, n: u16) -> io::Result { + // TODO: Replace `len` with some calculation deterministic to the params + let mut len = [0; 4]; + reader.read_exact(&mut len)?; + let len = usize::try_from(u32::from_le_bytes(len)).expect("<32-bit platform?"); + + // Don't allocate a buffer for the claimed length + // Read chunks until we reach the claimed length + // This means if we were told to read GB, we must actually be sent GB before allocating as such + const CHUNK_SIZE: usize = 1024; + let mut proof = Vec::with_capacity(len.min(CHUNK_SIZE)); + while proof.len() < len { + let next_chunk = (len - proof.len()).min(CHUNK_SIZE); + let old_proof_len = proof.len(); + proof.resize(old_proof_len + next_chunk, 0); + reader.read_exact(&mut proof[old_proof_len ..])?; + } + + let mut encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + for i in (1 ..= n).map(Participant) { + encrypted_secret_shares.insert(i, C::read_F(reader)?); + } + + Ok(Self { proof, encrypted_secret_shares }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&u32::try_from(self.proof.len()).unwrap().to_le_bytes())?; + writer.write_all(&self.proof)?; + for i in (1 ..= u16::try_from(self.encrypted_secret_shares.len()) + .expect("writing a Participation which has a n > u16::MAX")) + .map(Participant) + { + writer.write_all(self.encrypted_secret_shares[&i].to_repr().as_ref())?; + } + Ok(()) + } +} + +fn polynomial( + coefficients: &[Zeroizing], + l: Participant, +) -> Zeroizing { + let l = F::from(u64::from(u16::from(l))); + // This should never be reached since Participant is explicitly non-zero + assert!(l != F::ZERO, "zero participant passed to polynomial"); + let mut share = Zeroizing::new(F::ZERO); + for (idx, coefficient) in coefficients.iter().rev().enumerate() { + *share += coefficient.deref(); + if idx != (coefficients.len() - 1) { + *share *= l; + } + } + share +} + +#[allow(clippy::type_complexity)] +fn share_verification_statements( + rng: &mut (impl RngCore + CryptoRng), + commitments: &[C::G], + n: u16, + encryption_commitments: &[C::G], + encrypted_secret_shares: &HashMap, +) -> (C::F, Vec<(C::F, C::G)>) { + debug_assert_eq!(usize::from(n), encryption_commitments.len()); + debug_assert_eq!(usize::from(n), encrypted_secret_shares.len()); + + let mut g_scalar = C::F::ZERO; + let mut pairs = Vec::with_capacity(commitments.len() + encryption_commitments.len()); + for commitment in commitments { + pairs.push((C::F::ZERO, *commitment)); + } + + let mut weight; + for (i, enc_share) in encrypted_secret_shares { + let enc_commitment = encryption_commitments[usize::from(u16::from(*i)) - 1]; + + weight = C::F::random(&mut *rng); + + // s_i F + g_scalar += weight * enc_share; + // - Z_i + let weight = -weight; + pairs.push((weight, enc_commitment)); + // - V_i + { + let i = C::F::from(u64::from(u16::from(*i))); + // The first `commitments.len()` pairs are for the commitments + (0 .. commitments.len()).fold(weight, |exp, j| { + pairs[j].0 += exp; + exp * i + }); + } + } + + (g_scalar, pairs) +}
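The `polynomial` helper above evaluates the share polynomial `f(l) = c_0 + c_1 l + ... + c_{t-1} l^{t-1}` via a reversed-Horner loop. A plain-integer check (toy prime modulus, not `C::F`; `horner` mirrors the loop shape, it is not this crate's API) that the loop matches naive evaluation:

```rust
// f(l) = c_0 + c_1 l + c_2 l^2 over a toy prime modulus
const P: u64 = 65_521;

// The same reversed-Horner loop shape as `polynomial`
fn horner(coefficients: &[u64], l: u64) -> u64 {
  let mut share = 0;
  for (idx, coefficient) in coefficients.iter().rev().enumerate() {
    share = (share + coefficient) % P;
    if idx != (coefficients.len() - 1) {
      share = (share * l) % P;
    }
  }
  share
}

fn main() {
  let coefficients = [7, 11, 13];
  for l in 1 ..= 5 {
    let naive = (7 + (11 * l) + (13 * l * l)) % P;
    assert_eq!(horner(&coefficients, l), naive);
  }
}
```

+ +/// Errors from the eVRF DKG.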
+#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] +pub enum EvrfError { + #[error("n, the amount of participants, exceeded a u16")] + TooManyParticipants, + #[error("the threshold t wasn't in range 1 <= t <= n")] + InvalidThreshold, + #[error("a public key was the identity point")] + PublicKeyWasIdentity, + #[error("participating in a DKG we aren't a participant in")] + NotAParticipant, + #[error("a participant with an unrecognized ID participated")] + NonExistentParticipant, + #[error("the passed in generators did not have enough generators for this DKG")] + NotEnoughGenerators, +} + +/// The result of calling EvrfDkg::verify. +pub enum VerifyResult { + Valid(EvrfDkg), + Invalid(Vec), + NotEnoughParticipants, +} + +/// Struct to perform/verify the DKG with. +#[derive(Debug)] +pub struct EvrfDkg { + t: u16, + n: u16, + evrf_public_keys: Vec<::G>, + group_key: C::G, + verification_shares: HashMap, + #[allow(clippy::type_complexity)] + encrypted_secret_shares: + HashMap::G; 2], C::F)>>, +} + +impl EvrfDkg { + // Form the initial transcript for the proofs. + fn initial_transcript( + invocation: [u8; 32], + evrf_public_keys: &[::G], + t: u16, + ) -> [u8; 32] { + let mut transcript = Blake2s256::new(); + transcript.update(invocation); + for key in evrf_public_keys { + transcript.update(key.to_bytes().as_ref()); + } + transcript.update(t.to_le_bytes()); + transcript.finalize().into() + } + + /// Participate in performing the DKG for the specified parameters. + /// + /// The context MUST be unique across invocations. Reuse of context will lead to sharing + /// prior-shared secrets. + /// + /// Public keys are not allowed to be the identity point. This will error if any are. + pub fn participate( + rng: &mut (impl RngCore + CryptoRng), + generators: &EvrfGenerators, + context: [u8; 32], + t: u16, + evrf_public_keys: &[::G], + evrf_private_key: &Zeroizing<::F>, + ) -> Result, EvrfError> { + let Ok(n) = u16::try_from(evrf_public_keys.len()) else { Err(EvrfError::TooManyParticipants)? 
}; + if (t == 0) || (t > n) { + Err(EvrfError::InvalidThreshold)?; + } + if evrf_public_keys.iter().any(|key| bool::from(key.is_identity())) { + Err(EvrfError::PublicKeyWasIdentity)?; + }; + let evrf_public_key = ::generator() * evrf_private_key.deref(); + if !evrf_public_keys.iter().any(|key| *key == evrf_public_key) { + Err(EvrfError::NotAParticipant)?; + }; + + let transcript = Self::initial_transcript(context, evrf_public_keys, t); + // Further bind to the participant index so each index gets unique generators + // This allows reusing eVRF public keys as the prover + let mut per_proof_transcript = Blake2s256::new(); + per_proof_transcript.update(transcript); + per_proof_transcript.update(evrf_public_key.to_bytes()); + + // The above transcript is expected to be binding to all arguments here + // The generators are constant to this ciphersuite's generator, and the parameters are + // transcripted + let EvrfProveResult { coefficients, encryption_masks, proof } = match Evrf::prove( + rng, + &generators.0, + per_proof_transcript.finalize().into(), + usize::from(t), + evrf_public_keys, + evrf_private_key, + ) { + Ok(res) => res, + Err(AcError::NotEnoughGenerators) => Err(EvrfError::NotEnoughGenerators)?, + Err( + AcError::DifferingLrLengths | + AcError::InconsistentAmountOfConstraints | + AcError::ConstrainedNonExistentTerm | + AcError::ConstrainedNonExistentCommitment | + AcError::InconsistentWitness | + AcError::Ip(_) | + AcError::IncompleteProof, + ) => { + panic!("failed to prove for the eVRF proof") + } + }; + + let mut encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + for (l, encryption_mask) in (1 ..= n).map(Participant).zip(encryption_masks) { + let share = polynomial::(&coefficients, l); + encrypted_secret_shares.insert(l, *share + *encryption_mask); + } + + Ok(Participation { proof, encrypted_secret_shares }) + } + + /// Check if a batch of `Participation`s are valid. + /// + /// If any `Participation` is invalid, the list of all invalid participants will be returned. + /// If all `Participation`s are valid and there's at least `t`, an instance of this struct + /// (usable to obtain a threshold share of generated key) is returned. If all are valid and + /// there's not at least `t`, `VerifyResult::NotEnoughParticipants` is returned. + /// + /// This DKG is unbiased if all `n` people participate. This DKG is biased if only a threshold + /// participate. + pub fn verify( + rng: &mut (impl RngCore + CryptoRng), + generators: &EvrfGenerators, + context: [u8; 32], + t: u16, + evrf_public_keys: &[::G], + participations: &HashMap>, + ) -> Result, EvrfError> { + let Ok(n) = u16::try_from(evrf_public_keys.len()) else { Err(EvrfError::TooManyParticipants)? 
}; + if (t == 0) || (t > n) { + Err(EvrfError::InvalidThreshold)?; + } + if evrf_public_keys.iter().any(|key| bool::from(key.is_identity())) { + Err(EvrfError::PublicKeyWasIdentity)?; + }; + for i in participations.keys() { + if u16::from(*i) > n { + Err(EvrfError::NonExistentParticipant)?; + } + } + + let mut valid = HashMap::with_capacity(participations.len()); + let mut faulty = HashSet::new(); + + let transcript = Self::initial_transcript(context, evrf_public_keys, t); + + let mut evrf_verifier = generators.0.batch_verifier(); + for (i, participation) in participations { + let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1]; + + let mut per_proof_transcript = Blake2s256::new(); + per_proof_transcript.update(transcript); + per_proof_transcript.update(evrf_public_key.to_bytes()); + + // Clone the verifier so if this proof is faulty, it doesn't corrupt the verifier + let mut verifier_clone = evrf_verifier.clone(); + let Ok(data) = Evrf::::verify( + rng, + &generators.0, + &mut verifier_clone, + per_proof_transcript.finalize().into(), + usize::from(t), + evrf_public_keys, + evrf_public_key, + &participation.proof, + ) else { + faulty.insert(*i); + continue; + }; + evrf_verifier = verifier_clone; + + valid.insert(*i, (participation.encrypted_secret_shares.clone(), data)); + } + debug_assert_eq!(valid.len() + faulty.len(), participations.len()); + + // Perform the batch verification of the eVRFs + if !generators.0.verify(evrf_verifier) { + // If the batch failed, verify them each individually + for (i, participation) in participations { + if faulty.contains(i) { + continue; + } + let mut evrf_verifier = generators.0.batch_verifier(); + Evrf::::verify( + rng, + &generators.0, + &mut evrf_verifier, + context, + usize::from(t), + evrf_public_keys, + evrf_public_keys[usize::from(u16::from(*i)) - 1], + &participation.proof, + ) + .expect("evrf failed basic checks yet prover wasn't prior marked faulty"); + if !generators.0.verify(evrf_verifier) { + valid.remove(i); + faulty.insert(*i); + } + } + } + debug_assert_eq!(valid.len() + faulty.len(), participations.len()); + + // Perform the batch verification of the shares + let mut sum_encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + let mut sum_masks = HashMap::with_capacity(usize::from(n)); + let mut all_encrypted_secret_shares = HashMap::with_capacity(usize::from(t)); + { + let mut share_verification_statements_actual = HashMap::with_capacity(valid.len()); + if !{ + let mut g_scalar = C::F::ZERO; + let mut pairs = Vec::with_capacity(valid.len() * (usize::from(t) + evrf_public_keys.len())); + for (i, (encrypted_secret_shares, data)) in &valid { + let (this_g_scalar, mut these_pairs) = share_verification_statements::( + &mut *rng, + &data.coefficients, + evrf_public_keys + .len() + .try_into() + .expect("n prior checked to be <= u16::MAX couldn't be converted to a u16"), + &data.encryption_commitments, + encrypted_secret_shares, + ); + // Queue this into our batch + g_scalar += this_g_scalar; + pairs.extend(&these_pairs); + + // Also push this g_scalar onto these_pairs so these_pairs can be verified individually + // upon error + these_pairs.push((this_g_scalar, generators.0.g())); + share_verification_statements_actual.insert(*i, these_pairs); + + // Also format this data as we'd need it upon success + let mut formatted_encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + for (j, enc_share) in encrypted_secret_shares { + /* + We calculate verification shares as the sum of the encrypted scalars,
minus their + masks. This only does one scalar multiplication, and `1+t` point additions (with + one negation), and is accordingly much cheaper than interpolating the commitments. + This is only possible because we already interpolated the commitments to verify the + encrypted secret share. + */ + let sum_encrypted_secret_share = + sum_encrypted_secret_shares.get(j).copied().unwrap_or(C::F::ZERO); + let sum_mask = sum_masks.get(j).copied().unwrap_or(C::G::identity()); + sum_encrypted_secret_shares.insert(*j, sum_encrypted_secret_share + enc_share); + + let j_index = usize::from(u16::from(*j)) - 1; + sum_masks.insert(*j, sum_mask + data.encryption_commitments[j_index]); + + formatted_encrypted_secret_shares.insert(*j, (data.ecdh_keys[j_index], *enc_share)); + } + all_encrypted_secret_shares.insert(*i, formatted_encrypted_secret_shares); + } + pairs.push((g_scalar, generators.0.g())); + bool::from(multiexp_vartime(&pairs).is_identity()) + } { + // If the batch failed, verify them each individually + for (i, pairs) in share_verification_statements_actual { + if !bool::from(multiexp_vartime(&pairs).is_identity()) { + valid.remove(&i); + faulty.insert(i); + } + } + } + } + debug_assert_eq!(valid.len() + faulty.len(), participations.len()); + + let mut faulty = faulty.into_iter().collect::>(); + if !faulty.is_empty() { + faulty.sort_unstable(); + return Ok(VerifyResult::Invalid(faulty)); + } + + // We check that participants representing at least t key shares have contributed entropy + // Since the key shares of these participants meet or exceed t, meaning they could reconstruct + // the key regardless if malicious, this is safe with regards to the threshold + { + let mut participating_weight = 0; + let mut evrf_public_keys_mut = evrf_public_keys.to_vec(); + for i in valid.keys() { + let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1]; + + // Remove this key from the Vec to prevent double-counting + /* + Double-counting would be a risk if multiple participants shared an eVRF public key and + participated. This code does still allow such participants (in order to let participants + be weighted), and any one of them participating will count as all participating. This is + fine as any one such participant will be able to decrypt the shares for themselves and + all other participants, so this is still a key generated by a number of participants who + could simply reconstruct the key.
+ */ + let start_len = evrf_public_keys_mut.len(); + evrf_public_keys_mut.retain(|key| *key != evrf_public_key); + let end_len = evrf_public_keys_mut.len(); + let count = start_len - end_len; + + participating_weight += count; + } + if participating_weight < usize::from(t) { + return Ok(VerifyResult::NotEnoughParticipants); + } + } + + // If we now have >= t participations, calculate the group key and verification shares + + // The group key is the sum of the zero coefficients + let group_key = valid.values().map(|(_, evrf_data)| evrf_data.coefficients[0]).sum::(); + + // Calculate each user's verification share + let mut verification_shares = HashMap::with_capacity(usize::from(n)); + for i in (1 ..= n).map(Participant) { + verification_shares + .insert(i, (C::generator() * sum_encrypted_secret_shares[&i]) - sum_masks[&i]); + } + + Ok(VerifyResult::Valid(EvrfDkg { + t, + n, + evrf_public_keys: evrf_public_keys.to_vec(), + group_key, + verification_shares, + encrypted_secret_shares: all_encrypted_secret_shares, + })) + } + + pub fn keys( + &self, + evrf_private_key: &Zeroizing<::F>, + ) -> Vec> { + let evrf_public_key = ::generator() * evrf_private_key.deref(); + let mut is = Vec::with_capacity(1); + for (i, evrf_key) in self.evrf_public_keys.iter().enumerate() { + if *evrf_key == evrf_public_key { + let i = u16::try_from(i).expect("n <= u16::MAX yet i > u16::MAX?"); + let i = Participant(1 + i); + is.push(i); + } + } + + let mut res = Vec::with_capacity(is.len()); + for i in is { + let mut secret_share = Zeroizing::new(C::F::ZERO); + for shares in self.encrypted_secret_shares.values() { + let (ecdh_keys, enc_share) = shares[&i]; + + let mut ecdh = Zeroizing::new(C::F::ZERO); + for point in ecdh_keys { + let (mut x, mut y) = + ::G::to_xy(point * evrf_private_key.deref()).unwrap(); + *ecdh += x; + x.zeroize(); + y.zeroize(); + } + *secret_share += enc_share - ecdh.deref(); + } + + debug_assert_eq!(self.verification_shares[&i], C::generator() * secret_share.deref()); + + res.push(ThresholdKeys::from(ThresholdCore { + params: ThresholdParams::new(self.t, self.n, i).unwrap(), + interpolation: Interpolation::Lagrange, + secret_share, + group_key: self.group_key, + verification_shares: self.verification_shares.clone(), + })); + } + res + } +} diff --git a/crypto/dkg/src/evrf/proof.rs b/crypto/dkg/src/evrf/proof.rs new file mode 100644 index 000000000..ce9c57d14 --- /dev/null +++ b/crypto/dkg/src/evrf/proof.rs @@ -0,0 +1,861 @@ +use core::{marker::PhantomData, ops::Deref, fmt}; + +use subtle::*; +use zeroize::{Zeroize, Zeroizing}; + +use rand_core::{RngCore, CryptoRng, SeedableRng}; +use rand_chacha::ChaCha20Rng; + +use generic_array::{typenum::Unsigned, ArrayLength, GenericArray}; + +use blake2::{Digest, Blake2s256}; +use ciphersuite::{ + group::{ + ff::{Field, PrimeField, PrimeFieldBits}, + Group, GroupEncoding, + }, + Ciphersuite, +}; + +use generalized_bulletproofs::{ + *, + transcript::{Transcript as ProverTranscript, VerifierTranscript}, + arithmetic_circuit_proof::*, +}; +use generalized_bulletproofs_circuit_abstraction::*; + +use ec_divisors::{DivisorCurve, new_divisor}; +use generalized_bulletproofs_ec_gadgets::*; + +/// A pair of curves to perform the eVRF with. 
+pub trait EvrfCurve: Ciphersuite { + type EmbeddedCurve: Ciphersuite::F>>; + type EmbeddedCurveParameters: DiscreteLogParameters; +} + +#[cfg(feature = "evrf-secp256k1")] +impl EvrfCurve for ciphersuite::Secp256k1 { + type EmbeddedCurve = secq256k1::Secq256k1; + type EmbeddedCurveParameters = secq256k1::Secq256k1; +} + +#[cfg(feature = "evrf-ed25519")] +impl EvrfCurve for ciphersuite::Ed25519 { + type EmbeddedCurve = embedwards25519::Embedwards25519; + type EmbeddedCurveParameters = embedwards25519::Embedwards25519; +} + +#[cfg(feature = "evrf-ristretto")] +impl EvrfCurve for ciphersuite::Ristretto { + type EmbeddedCurve = embedwards25519::Embedwards25519; + type EmbeddedCurveParameters = embedwards25519::Embedwards25519; +} + +fn sample_point(rng: &mut (impl RngCore + CryptoRng)) -> C::G { + let mut repr = ::Repr::default(); + loop { + rng.fill_bytes(repr.as_mut()); + if let Ok(point) = C::read_G(&mut repr.as_ref()) { + if bool::from(!point.is_identity()) { + return point; + } + } + } +} + +/// Generators for eVRF proof. +#[derive(Clone, Debug)] +pub struct EvrfGenerators(pub(crate) Generators); + +impl EvrfGenerators { + /// Create a new set of generators. + pub fn new(max_threshold: u16, max_participants: u16) -> EvrfGenerators { + let g = C::generator(); + let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(g.to_bytes()).into()); + let h = sample_point::(&mut rng); + let (_, generators) = + Evrf::::muls_and_generators_to_use(max_threshold.into(), max_participants.into()); + let mut g_bold = vec![]; + let mut h_bold = vec![]; + for _ in 0 .. generators { + g_bold.push(sample_point::(&mut rng)); + h_bold.push(sample_point::(&mut rng)); + } + Self(Generators::new(g, h, g_bold, h_bold).unwrap()) + } +} + +/// The result of proving for an eVRF. +pub(crate) struct EvrfProveResult { + /// The coefficients for use in the DKG. + pub(crate) coefficients: Vec>, + /// The masks to encrypt secret shares with. + pub(crate) encryption_masks: Vec>, + /// The proof itself. + pub(crate) proof: Vec, +} + +/// The result of verifying an eVRF. +pub(crate) struct EvrfVerifyResult { + /// The commitments to the coefficients for use in the DKG. + pub(crate) coefficients: Vec, + /// The ephemeral public keys to perform ECDHs with + pub(crate) ecdh_keys: Vec<[::G; 2]>, + /// The commitments to the masks used to encrypt secret shares with. + pub(crate) encryption_commitments: Vec, +} + +impl fmt::Debug for EvrfVerifyResult { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("EvrfVerifyResult").finish_non_exhaustive() + } +} + +/// A struct to prove/verify eVRFs with. +pub(crate) struct Evrf(PhantomData); +impl Evrf { + // Sample uniform points (via rejection-sampling) on the embedded elliptic curve + fn transcript_to_points( + seed: [u8; 32], + coefficients: usize, + ) -> Vec<::G> { + // We need to do two Diffie-Hellman's per coefficient in order to achieve an unbiased result + let quantity = 2 * coefficients; + + let mut rng = ChaCha20Rng::from_seed(seed); + let mut res = Vec::with_capacity(quantity); + for _ in 0 .. 
quantity { + res.push(sample_point::(&mut rng)); + } + res + } + + /// Read a Variable from a theoretical vector commitment tape + fn read_one_from_tape(generators_to_use: usize, start: &mut usize) -> Variable { + // Each commitment has twice as many variables as generators in use + let commitment = *start / (2 * generators_to_use); + // The index will be less than the amount of generators in use, as half are left and half are + // right + let index = *start % generators_to_use; + let res = if (*start / generators_to_use) % 2 == 0 { + Variable::CG { commitment, index } + } else { + Variable::CH { commitment, index } + }; + *start += 1; + res + } + + /// Read a set of variables from a theoretical vector commitment tape + fn read_from_tape( + generators_to_use: usize, + start: &mut usize, + ) -> GenericArray { + let mut buf = Vec::with_capacity(N::USIZE); + for _ in 0 .. N::USIZE { + buf.push(Self::read_one_from_tape(generators_to_use, start)); + } + GenericArray::from_slice(&buf).clone() + } + + /// Read `PointWithDlog`s, which share a discrete logarithm, from the theoretical vector + /// commitment tape. + fn point_with_dlogs( + start: &mut usize, + quantity: usize, + generators_to_use: usize, + ) -> Vec> { + // We define a serialized tape of the discrete logarithm, then for each divisor/point, we push: + // zero, x**i, y x**i, y, x_coord, y_coord + // We then chunk that into vector commitments + // Here, we take the assumed layout and generate the expected `Variable`s for this layout + + let dlog = Self::read_from_tape(generators_to_use, start); + + let mut res = Vec::with_capacity(quantity); + let mut read_point_with_dlog = || { + let zero = Self::read_one_from_tape(generators_to_use, start); + let x_from_power_of_2 = Self::read_from_tape(generators_to_use, start); + let yx = Self::read_from_tape(generators_to_use, start); + let y = Self::read_one_from_tape(generators_to_use, start); + let divisor = Divisor { zero, x_from_power_of_2, yx, y }; + + let point = ( + Self::read_one_from_tape(generators_to_use, start), + Self::read_one_from_tape(generators_to_use, start), + ); + + res.push(PointWithDlog { dlog: dlog.clone(), divisor, point }); + }; + + for _ in 0 .. 
quantity { + read_point_with_dlog(); + } + res + } + + fn muls_and_generators_to_use(coefficients: usize, ecdhs: usize) -> (usize, usize) { + const MULS_PER_DH: usize = 7; + // 1 DH to prove the discrete logarithm corresponds to the eVRF public key + // 2 DHs per generated coefficient + // 2 DHs per generated ECDH + let expected_muls = MULS_PER_DH * (1 + (2 * coefficients) + (2 * 2 * ecdhs)); + let generators_to_use = { + let mut padded_pow_of_2 = 1; + while padded_pow_of_2 < expected_muls { + padded_pow_of_2 <<= 1; + } + // This may be as small as 16, which would create an excessive amount of vector commitments + // We set a floor of 1024 rows for bandwidth reasons + padded_pow_of_2.max(1024) + }; + (expected_muls, generators_to_use) + } + + fn circuit( + curve_spec: &CurveSpec, + evrf_public_key: (C::F, C::F), + coefficients: usize, + ecdh_commitments: &[[(C::F, C::F); 2]], + generator_tables: &[GeneratorTable], + circuit: &mut Circuit, + transcript: &mut impl Transcript, + ) { + let (expected_muls, generators_to_use) = + Self::muls_and_generators_to_use(coefficients, ecdh_commitments.len()); + let (challenge, challenged_generators) = + circuit.discrete_log_challenge(transcript, curve_spec, generator_tables); + debug_assert_eq!(challenged_generators.len(), 1 + (2 * coefficients) + ecdh_commitments.len()); + + // The generator tables/challenged generators are expected to have the following layouts + // G, coefficients * [A, B], ecdhs * [P] + #[allow(non_snake_case)] + let challenged_G = &challenged_generators[0]; + + // Execute the circuit for the coefficients + let mut tape_pos = 0; + { + let mut point_with_dlogs = + Self::point_with_dlogs(&mut tape_pos, 1 + (2 * coefficients), generators_to_use) + .into_iter(); + + // Verify the discrete logarithm is in fact the discrete logarithm of the eVRF public key + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_G, + ); + circuit.equality(LinComb::from(point.x()), &LinComb::empty().constant(evrf_public_key.0)); + circuit.equality(LinComb::from(point.y()), &LinComb::empty().constant(evrf_public_key.1)); + + // Verify the DLog claims against the sampled points + for (i, pair) in challenged_generators[1 ..].chunks(2).take(coefficients).enumerate() { + let mut lincomb = LinComb::empty(); + debug_assert_eq!(pair.len(), 2); + for challenged_generator in pair { + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_generator, + ); + // For each point in this pair, add its x coordinate to a lincomb + lincomb = lincomb.term(C::F::ONE, point.x()); + } + // Constrain the sum of the two x coordinates to be equal to the value in the Pedersen + // commitment + circuit.equality(lincomb, &LinComb::from(Variable::V(i))); + } + debug_assert!(point_with_dlogs.next().is_none()); + } + + // Now execute the circuit for the ECDHs + let mut challenged_generators = challenged_generators.iter().skip(1 + (2 * coefficients)); + for (i, ecdh) in ecdh_commitments.iter().enumerate() { + let challenged_generator = challenged_generators.next().unwrap(); + let mut lincomb = LinComb::empty(); + for ecdh in ecdh { + let mut point_with_dlogs = + Self::point_with_dlogs(&mut tape_pos, 2, generators_to_use).into_iter(); + + // One proof of the ECDH secret * G for the commitment published + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_G, + ); + circuit.equality(LinComb::from(point.x()),
&LinComb::empty().constant(ecdh.0)); + circuit.equality(LinComb::from(point.y()), &LinComb::empty().constant(ecdh.1)); + + // One proof of the ECDH secret * P for the ECDH + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_generator, + ); + // For each point in this pair, add its x coordinate to a lincomb + lincomb = lincomb.term(C::F::ONE, point.x()); + } + + // Constrain the sum of the two x coordinates to be equal to the value in the Pedersen + // commitment + circuit.equality(lincomb, &LinComb::from(Variable::V(coefficients + i))); + } + + debug_assert_eq!(expected_muls, circuit.muls()); + debug_assert!(challenged_generators.next().is_none()); + } + + /// Convert a scalar to a sequence of coefficients for the polynomial 2**i, where the sum of the + /// coefficients is F::NUM_BITS. + /// + /// Despite the name, the returned coefficients are not guaranteed to be bits (0 or 1). + /// + /// This scalar will presumably be used in a discrete log proof. That requires calculating a + /// divisor which is variable-time with respect to the amount of points interpolated. Since the + /// amount of points interpolated is equal to the sum of the coefficients in the polynomial, we + /// need all scalars to have a constant sum of their coefficients (instead of one variable to its + /// bits). + /// + /// We achieve this by finding the highest non-0 coefficient, decrementing it, and increasing the + /// immediately less significant coefficient by 2. This increases the sum of the coefficients by + /// 1 (-1+2=1). + fn scalar_to_bits(scalar: &::F) -> Vec { + let num_bits = u64::from(<::EmbeddedCurve as Ciphersuite>::F::NUM_BITS); + + // Obtain the bits of the private key + let num_bits_usize = usize::try_from(num_bits).unwrap(); + let mut decomposition = vec![0; num_bits_usize]; + for (i, bit) in scalar.to_le_bits().into_iter().take(num_bits_usize).enumerate() { + let bit = u64::from(u8::from(bit)); + decomposition[i] = bit; + } + + // The following algorithm only works if the value of the scalar exceeds num_bits + // If it doesn't, we increase it by the modulus such that it does exceed num_bits + { + let mut less_than_num_bits = Choice::from(0); + for i in 0 .. num_bits { + less_than_num_bits |= scalar.ct_eq(&::F::from(i)); + } + let mut decomposition_of_modulus = vec![0; num_bits_usize]; + // Decompose negative one + for (i, bit) in (-::F::ONE) + .to_le_bits() + .into_iter() + .take(num_bits_usize) + .enumerate() + { + let bit = u64::from(u8::from(bit)); + decomposition_of_modulus[i] = bit; + } + // Increment it by one + decomposition_of_modulus[0] += 1; + + // Add the decomposition onto the decomposition of the modulus + for i in 0 .. num_bits_usize { + let new_decomposition = <_>::conditional_select( + &decomposition[i], + &(decomposition[i] + decomposition_of_modulus[i]), + less_than_num_bits, + ); + decomposition[i] = new_decomposition; + } + } + + // Calculate the sum of the coefficients + let mut sum_of_coefficients: u64 = 0; + for decomposition in &decomposition { + sum_of_coefficients += *decomposition; + } + + /* + Now, because we added a log2(k)-bit number to a k-bit number, we may have our sum of + coefficients be *too high*. We attempt to reduce the sum of the coefficients accordingly. + + This algorithm is guaranteed to complete as expected. Take the sequence `222`. `222` becomes + `032` becomes `013`.
Even if the next coefficient in the sequence is `2`, the third + coefficient will be reduced once and the next coefficient (`2`, increased to `3`) will only + be eligible for reduction once. This demonstrates that, even for a worst case of log2(k) `2`s + followed by `1`s (as possible if the modulus is a Mersenne prime), the log2(k) `2`s can be + reduced as necessary so long as there is a single coefficient after them (requiring the entire + sequence be at least of length log2(k) + 1). For a 2-bit number, log2(k) + 1 == 2, so this + holds for any odd prime field. + + To fully type out the demonstration for the Mersenne prime 3, with the scalar to encode being + 1 (the highest value less than the number of bits): + + 10 - Little-endian bits of 1 + 21 - Little-endian bits of 1, plus the modulus + 02 - After one reduction, where the sum of the coefficients does in fact equal 2 (the target) + */ + { + let mut log2_num_bits = 0; + while (1 << log2_num_bits) < num_bits { + log2_num_bits += 1; + } + + for _ in 0 .. log2_num_bits { + // If the sum of coefficients is the amount of bits, we're done + let mut done = sum_of_coefficients.ct_eq(&num_bits); + + for i in 0 .. (num_bits_usize - 1) { + let should_act = (!done) & decomposition[i].ct_gt(&1); + // Subtract 2 from this coefficient + let amount_to_sub = <_>::conditional_select(&0, &2, should_act); + decomposition[i] -= amount_to_sub; + // Add 1 to the next coefficient + let amount_to_add = <_>::conditional_select(&0, &1, should_act); + decomposition[i + 1] += amount_to_add; + + // Also update the sum of coefficients + sum_of_coefficients -= <_>::conditional_select(&0, &1, should_act); + + // If we updated the coefficients this loop iter, we're done for this loop iter + done |= should_act; + } + } + } + + for _ in 0 .. num_bits { + // If the sum of coefficients is the amount of bits, we're done + let mut done = sum_of_coefficients.ct_eq(&num_bits); + + // Find the highest coefficient currently non-zero + for i in (1 .. decomposition.len()).rev() { + // If this is non-zero, we should decrement this coefficient if we haven't already + // decremented a coefficient this round + let is_non_zero = !(0.ct_eq(&decomposition[i])); + let should_act = (!done) & is_non_zero; + + // Update this coefficient and the prior coefficient + let amount_to_sub = <_>::conditional_select(&0, &1, should_act); + decomposition[i] -= amount_to_sub; + + let amount_to_add = <_>::conditional_select(&0, &2, should_act); + // i must be at least 1, so i - 1 will be at least 0 (meaning it's safe to index with) + decomposition[i - 1] += amount_to_add; + + // Also update the sum of coefficients + sum_of_coefficients += <_>::conditional_select(&0, &1, should_act); + + // If we updated the coefficients this loop iter, we're done for this loop iter + done |= should_act; + } + } + debug_assert!(bool::from(decomposition.iter().sum::().ct_eq(&num_bits))); + + decomposition + }
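The `10 -> 21 -> 02` walkthrough above can be run with plain integers. A minimal check (toy 2-bit field, plain `u64`s rather than the embedded curve's scalar field; `value` is a hypothetical helper) that one reduction step preserves the encoded value while bringing the sum of coefficients to `num_bits`:

```rust
// Value encoded by little-endian coefficients of the polynomial 2**i
fn value(decomposition: &[u64]) -> u64 {
  decomposition.iter().enumerate().map(|(i, c)| c << i).sum()
}

fn main() {
  // `21`: little-endian bits of 1, plus the modulus 3 (so 2 * 1 + 1 * 2 = 4)
  let mut decomposition = vec![2u64, 1];
  assert_eq!(value(&decomposition), 4);

  // One reduction: subtract 2 from the low coefficient, add 1 to the next,
  // yielding `02` (sum of coefficients decreases by 1, value is preserved)
  decomposition[0] -= 2;
  decomposition[1] += 1;
  assert_eq!(decomposition, vec![0, 2]);
  assert_eq!(value(&decomposition), 4);
  assert_eq!(decomposition.iter().sum::<u64>(), 2); // num_bits for this toy field
}
```

+ + /// Prove a point on an elliptic curve had its discrete logarithm generated via an eVRF.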
+ pub(crate) fn prove( + rng: &mut (impl RngCore + CryptoRng), + generators: &Generators, + transcript: [u8; 32], + coefficients: usize, + ecdh_public_keys: &[<::EmbeddedCurve as Ciphersuite>::G], + evrf_private_key: &Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + ) -> Result, AcError> { + let curve_spec = CurveSpec { + a: <::EmbeddedCurve as Ciphersuite>::G::a(), + b: <::EmbeddedCurve as Ciphersuite>::G::b(), + }; + + // A tape of the discrete logarithm, then [zero, x**i, y x**i, y, x_coord, y_coord] + let mut vector_commitment_tape = vec![]; + + let mut generator_tables = Vec::with_capacity(1 + (2 * coefficients) + ecdh_public_keys.len()); + + // A function to calculate a divisor and push it onto the tape + // This defines a vec, divisor_points, outside of the fn to reuse its allocation + let mut divisor_points = + Vec::with_capacity((::F::NUM_BITS as usize) + 1); + let mut divisor = + |vector_commitment_tape: &mut Vec<_>, + dlog: &[u64], + push_generator: bool, + generator: <::EmbeddedCurve as Ciphersuite>::G, + dh: <::EmbeddedCurve as Ciphersuite>::G| { + if push_generator { + let (x, y) = ::G::to_xy(generator).unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + + { + let mut generator = generator; + for coefficient in dlog { + let mut coefficient = *coefficient; + while coefficient != 0 { + coefficient -= 1; + divisor_points.push(generator); + } + generator = generator.double(); + } + debug_assert_eq!( + dlog.iter().sum::(), + u64::from(::F::NUM_BITS) + ); + } + divisor_points.push(-dh); + let mut divisor = new_divisor(&divisor_points).unwrap().normalize_x_coefficient(); + divisor_points.zeroize(); + + vector_commitment_tape.push(divisor.zero_coefficient); + + for coefficient in divisor.x_coefficients.iter().skip(1) { + vector_commitment_tape.push(*coefficient); + } + for _ in divisor.x_coefficients.len() .. + ::XCoefficientsMinusOne::USIZE + { + vector_commitment_tape.push(::F::ZERO); + } + + for coefficient in divisor.yx_coefficients.first().unwrap_or(&vec![]) { + vector_commitment_tape.push(*coefficient); + } + for _ in divisor.yx_coefficients.first().unwrap_or(&vec![]).len() .. 
+ ::YxCoefficients::USIZE + { + vector_commitment_tape.push(::F::ZERO); + } + + vector_commitment_tape + .push(divisor.y_coefficients.first().copied().unwrap_or(::F::ZERO)); + + divisor.zeroize(); + drop(divisor); + + let (x, y) = ::G::to_xy(dh).unwrap(); + vector_commitment_tape.push(x); + vector_commitment_tape.push(y); + + (x, y) + }; + + // Start with the coefficients + let evrf_public_key; + let mut actual_coefficients = Vec::with_capacity(coefficients); + { + let mut dlog = Self::scalar_to_bits(evrf_private_key); + let points = Self::transcript_to_points(transcript, coefficients); + + // Start by pushing the discrete logarithm onto the tape + for coefficient in &dlog { + vector_commitment_tape.push(<_>::from(*coefficient)); + } + + // Push a divisor for proving that we're using the correct scalar + evrf_public_key = divisor( + &mut vector_commitment_tape, + &dlog, + true, + <::EmbeddedCurve as Ciphersuite>::generator(), + <::EmbeddedCurve as Ciphersuite>::generator() * evrf_private_key.deref(), + ); + + // Push a divisor for each point we use in the eVRF + for pair in points.chunks(2) { + let mut res = Zeroizing::new(C::F::ZERO); + for point in pair { + let (dh_x, _) = divisor( + &mut vector_commitment_tape, + &dlog, + true, + *point, + *point * evrf_private_key.deref(), + ); + *res += dh_x; + } + actual_coefficients.push(res); + } + debug_assert_eq!(actual_coefficients.len(), coefficients); + + dlog.zeroize(); + } + + // Now do the ECDHs for the encryption + let mut encryption_masks = Vec::with_capacity(ecdh_public_keys.len()); + let mut ecdh_commitments = Vec::with_capacity(2 * ecdh_public_keys.len()); + let mut ecdh_commitments_xy = Vec::with_capacity(ecdh_public_keys.len()); + for ecdh_public_key in ecdh_public_keys { + ecdh_commitments_xy.push([(C::F::ZERO, C::F::ZERO); 2]); + + let mut res = Zeroizing::new(C::F::ZERO); + for j in 0 .. 
2 { + let mut ecdh_private_key; + loop { + ecdh_private_key = ::F::random(&mut *rng); + // Generate a non-0 ECDH private key, as necessary to not produce an identity output + // Identity isn't representable with the divisors, hence the explicit effort + if bool::from(!ecdh_private_key.is_zero()) { + break; + } + } + let mut dlog = Self::scalar_to_bits(&ecdh_private_key); + let ecdh_commitment = ::generator() * ecdh_private_key; + ecdh_commitments.push(ecdh_commitment); + ecdh_commitments_xy.last_mut().unwrap()[j] = + <::G as DivisorCurve>::to_xy(ecdh_commitment).unwrap(); + + // Start by pushing the discrete logarithm onto the tape + for coefficient in &dlog { + vector_commitment_tape.push(<_>::from(*coefficient)); + } + + // Push a divisor for proving that we're using the correct scalar for the commitment + divisor( + &mut vector_commitment_tape, + &dlog, + false, + <::EmbeddedCurve as Ciphersuite>::generator(), + <::EmbeddedCurve as Ciphersuite>::generator() * ecdh_private_key, + ); + // Push a divisor for the key we're performing the ECDH with + let (dh_x, _) = divisor( + &mut vector_commitment_tape, + &dlog, + j == 0, + *ecdh_public_key, + *ecdh_public_key * ecdh_private_key, + ); + *res += dh_x; + + ecdh_private_key.zeroize(); + dlog.zeroize(); + } + encryption_masks.push(res); + } + debug_assert_eq!(encryption_masks.len(), ecdh_public_keys.len()); + + // Now that we have the vector commitment tape, chunk it + let (_, generators_to_use) = + Self::muls_and_generators_to_use(coefficients, ecdh_public_keys.len()); + + let mut vector_commitments = + Vec::with_capacity(vector_commitment_tape.len().div_ceil(2 * generators_to_use)); + for chunk in vector_commitment_tape.chunks(2 * generators_to_use) { + let g_values = chunk[.. generators_to_use.min(chunk.len())].to_vec().into(); + let h_values = chunk[generators_to_use.min(chunk.len()) ..].to_vec().into(); + vector_commitments.push(PedersenVectorCommitment { + g_values, + h_values, + mask: C::F::random(&mut *rng), + }); + } + + vector_commitment_tape.zeroize(); + drop(vector_commitment_tape); + + let mut commitments = Vec::with_capacity(coefficients + ecdh_public_keys.len()); + for coefficient in &actual_coefficients { + commitments.push(PedersenCommitment { value: **coefficient, mask: C::F::random(&mut *rng) }); + } + for enc_mask in &encryption_masks { + commitments.push(PedersenCommitment { value: **enc_mask, mask: C::F::random(&mut *rng) }); + } + + let mut transcript = ProverTranscript::new(transcript); + let commited_commitments = transcript.write_commitments( + vector_commitments + .iter() + .map(|commitment| { + commitment + .commit(generators.g_bold_slice(), generators.h_bold_slice(), generators.h()) + .ok_or(AcError::NotEnoughGenerators) + }) + .collect::>()?, + commitments + .iter() + .map(|commitment| commitment.commit(generators.g(), generators.h())) + .collect(), + ); + for ecdh_commitment in ecdh_commitments { + transcript.push_point(ecdh_commitment); + } + + let mut circuit = Circuit::prove(vector_commitments, commitments.clone()); + Self::circuit( + &curve_spec, + evrf_public_key, + coefficients, + &ecdh_commitments_xy, + &generator_tables, + &mut circuit, + &mut transcript, + ); + + let (statement, Some(witness)) = circuit + .statement( + generators.reduce(generators_to_use).ok_or(AcError::NotEnoughGenerators)?, + commited_commitments, + ) + .unwrap() + else { + panic!("proving yet wasn't yielded the witness"); + }; + statement.prove(&mut *rng, &mut transcript, witness).unwrap(); + + // Push the reveal onto the transcript + 
for commitment in &commitments { + transcript.push_point(generators.g() * commitment.value); + } + + // Define a weight to aggregate the commitments with + let mut agg_weights = Vec::with_capacity(commitments.len()); + agg_weights.push(C::F::ONE); + while agg_weights.len() < commitments.len() { + agg_weights.push(transcript.challenge::()); + } + let mut x = commitments + .iter() + .zip(&agg_weights) + .map(|(commitment, weight)| commitment.mask * *weight) + .sum::(); + + // Do a Schnorr PoK for the randomness of the aggregated Pedersen commitment + let mut r = C::F::random(&mut *rng); + transcript.push_point(generators.h() * r); + let c = transcript.challenge::(); + transcript.push_scalar(r + (c * x)); + r.zeroize(); + x.zeroize(); + + Ok(EvrfProveResult { + coefficients: actual_coefficients, + encryption_masks, + proof: transcript.complete(), + }) + } + + /// Verify an eVRF proof, returning the commitments output. + #[allow(clippy::too_many_arguments)] + pub(crate) fn verify( + rng: &mut (impl RngCore + CryptoRng), + generators: &Generators, + verifier: &mut BatchVerifier, + transcript: [u8; 32], + coefficients: usize, + ecdh_public_keys: &[<::EmbeddedCurve as Ciphersuite>::G], + evrf_public_key: <::EmbeddedCurve as Ciphersuite>::G, + proof: &[u8], + ) -> Result, ()> { + let curve_spec = CurveSpec { + a: <::EmbeddedCurve as Ciphersuite>::G::a(), + b: <::EmbeddedCurve as Ciphersuite>::G::b(), + }; + + let mut generator_tables = Vec::with_capacity(1 + (2 * coefficients) + ecdh_public_keys.len()); + { + let (x, y) = + ::G::to_xy(::generator()) + .unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + let points = Self::transcript_to_points(transcript, coefficients); + for generator in points { + let (x, y) = ::G::to_xy(generator).unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + for generator in ecdh_public_keys { + let (x, y) = ::G::to_xy(*generator).unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + + let (_, generators_to_use) = + Self::muls_and_generators_to_use(coefficients, ecdh_public_keys.len()); + + let mut transcript = VerifierTranscript::new(transcript, proof); + + let dlog_len = ::ScalarBits::USIZE; + let divisor_len = 1 + + ::XCoefficientsMinusOne::USIZE + + ::YxCoefficients::USIZE + + 1; + let dlog_proof_len = divisor_len + 2; + + let coeffs_vc_variables = dlog_len + ((1 + (2 * coefficients)) * dlog_proof_len); + let ecdhs_vc_variables = ((2 * ecdh_public_keys.len()) * dlog_len) + + ((2 * 2 * ecdh_public_keys.len()) * dlog_proof_len); + let vcs = (coeffs_vc_variables + ecdhs_vc_variables).div_ceil(2 * generators_to_use); + + let all_commitments = + transcript.read_commitments(vcs, coefficients + ecdh_public_keys.len()).map_err(|_| ())?; + let commitments = all_commitments.V().to_vec(); + + let mut ecdh_keys = Vec::with_capacity(ecdh_public_keys.len()); + let mut ecdh_keys_xy = Vec::with_capacity(ecdh_public_keys.len()); + for _ in 0 .. 
ecdh_public_keys.len() { + let ecdh_keys_i = [ + transcript.read_point::().map_err(|_| ())?, + transcript.read_point::().map_err(|_| ())?, + ]; + ecdh_keys.push(ecdh_keys_i); + // This bans zero ECDH keys + ecdh_keys_xy.push([ + <::G as DivisorCurve>::to_xy(ecdh_keys_i[0]).ok_or(())?, + <::G as DivisorCurve>::to_xy(ecdh_keys_i[1]).ok_or(())?, + ]); + } + + let mut circuit = Circuit::verify(); + Self::circuit( + &curve_spec, + ::G::to_xy(evrf_public_key).ok_or(())?, + coefficients, + &ecdh_keys_xy, + &generator_tables, + &mut circuit, + &mut transcript, + ); + + let (statement, None) = + circuit.statement(generators.reduce(generators_to_use).ok_or(())?, all_commitments).unwrap() + else { + panic!("verifying yet was yielded a witness"); + }; + + statement.verify(rng, verifier, &mut transcript).map_err(|_| ())?; + + // Read the openings for the commitments + let mut openings = Vec::with_capacity(commitments.len()); + for _ in 0 .. commitments.len() { + openings.push(transcript.read_point::().map_err(|_| ())?); + } + + // Verify the openings of the commitments + let mut agg_weights = Vec::with_capacity(commitments.len()); + agg_weights.push(C::F::ONE); + while agg_weights.len() < commitments.len() { + agg_weights.push(transcript.challenge::()); + } + + let sum_points = + openings.iter().zip(&agg_weights).map(|(point, weight)| *point * *weight).sum::(); + let sum_commitments = + commitments.into_iter().zip(agg_weights).map(|(point, weight)| point * weight).sum::(); + #[allow(non_snake_case)] + let A = sum_commitments - sum_points; + + #[allow(non_snake_case)] + let R = transcript.read_point::().map_err(|_| ())?; + let c = transcript.challenge::(); + let s = transcript.read_scalar::().map_err(|_| ())?; + + // Doesn't batch verify this as we can't access the internals of the GBP batch verifier + if (R + (A * c)) != (generators.h() * s) { + Err(())?; + } + + if !transcript.complete().is_empty() { + Err(())? + }; + + let encryption_commitments = openings[coefficients ..].to_vec(); + let coefficients = openings[.. coefficients].to_vec(); + Ok(EvrfVerifyResult { coefficients, ecdh_keys, encryption_commitments }) + } +} diff --git a/crypto/dkg/src/lib.rs b/crypto/dkg/src/lib.rs index 478f400f0..48037bcdf 100644 --- a/crypto/dkg/src/lib.rs +++ b/crypto/dkg/src/lib.rs @@ -21,6 +21,10 @@ pub mod encryption; #[cfg(feature = "std")] pub mod pedpop; +/// The one-round DKG described in the [eVRF paper](https://eprint.iacr.org/2024/397). +#[cfg(all(feature = "std", feature = "evrf"))] +pub mod evrf; + /// Promote keys between ciphersuites. #[cfg(feature = "std")] pub mod promote; @@ -205,25 +209,37 @@ mod lib { } } - /// Calculate the lagrange coefficient for a signing set. 
- pub fn lagrange(i: Participant, included: &[Participant]) -> F { - let i_f = F::from(u64::from(u16::from(i))); + #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] + pub(crate) enum Interpolation { + Constant(Vec), + Lagrange, + } - let mut num = F::ONE; - let mut denom = F::ONE; - for l in included { - if i == *l { - continue; + impl Interpolation { + pub(crate) fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F { + match self { + Interpolation::Constant(c) => c[usize::from(u16::from(i) - 1)], + Interpolation::Lagrange => { + let i_f = F::from(u64::from(u16::from(i))); + + let mut num = F::ONE; + let mut denom = F::ONE; + for l in included { + if i == *l { + continue; + } + + let share = F::from(u64::from(u16::from(*l))); + num *= share; + denom *= share - i_f; + } + + // Safe as this will only be 0 if we're part of the above loop + // (which we have an if case to avoid) + num * denom.invert().unwrap() + } } - - let share = F::from(u64::from(u16::from(*l))); - num *= share; - denom *= share - i_f; } - - // Safe as this will only be 0 if we're part of the above loop - // (which we have an if case to avoid) - num * denom.invert().unwrap() } /// Keys and verification shares generated by a DKG. @@ -232,6 +248,8 @@ mod lib { pub struct ThresholdCore { /// Threshold Parameters. pub(crate) params: ThresholdParams, + /// The interpolation method used. + pub(crate) interpolation: Interpolation, /// Secret share key. pub(crate) secret_share: Zeroizing, @@ -246,6 +264,7 @@ mod lib { fmt .debug_struct("ThresholdCore") .field("params", &self.params) + .field("interpolation", &self.interpolation) .field("group_key", &self.group_key) .field("verification_shares", &self.verification_shares) .finish_non_exhaustive() @@ -255,6 +274,7 @@ mod lib { impl Zeroize for ThresholdCore { fn zeroize(&mut self) { self.params.zeroize(); + self.interpolation.zeroize(); self.secret_share.zeroize(); self.group_key.zeroize(); for share in self.verification_shares.values_mut() { @@ -266,16 +286,14 @@ mod lib { impl ThresholdCore { pub(crate) fn new( params: ThresholdParams, + interpolation: Interpolation, secret_share: Zeroizing, verification_shares: HashMap, ) -> ThresholdCore { let t = (1 ..= params.t()).map(Participant).collect::>(); - ThresholdCore { - params, - secret_share, - group_key: t.iter().map(|i| verification_shares[i] * lagrange::(*i, &t)).sum(), - verification_shares, - } + let group_key = + t.iter().map(|i| verification_shares[i] * interpolation.interpolation_factor(*i, &t)).sum(); + ThresholdCore { params, interpolation, secret_share, group_key, verification_shares } } /// Parameters for these keys. @@ -304,6 +322,15 @@ mod lib { writer.write_all(&self.params.t.to_le_bytes())?; writer.write_all(&self.params.n.to_le_bytes())?; writer.write_all(&self.params.i.to_bytes())?; + match &self.interpolation { + Interpolation::Constant(c) => { + writer.write_all(&[0])?; + for c in c { + writer.write_all(c.to_repr().as_ref())?; + } + } + Interpolation::Lagrange => writer.write_all(&[1])?, + }; let mut share_bytes = self.secret_share.to_repr(); writer.write_all(share_bytes.as_ref())?; share_bytes.as_mut().zeroize(); @@ -352,6 +379,20 @@ mod lib { ) }; + let mut interpolation = [0]; + reader.read_exact(&mut interpolation)?; + let interpolation = match interpolation[0] { + 0 => Interpolation::Constant({ + let mut res = Vec::with_capacity(usize::from(n)); + for _ in 0 .. 
n { + res.push(C::read_F(reader)?); + } + res + }), + 1 => Interpolation::Lagrange, + _ => Err(io::Error::other("invalid interpolation method"))?, + }; + let secret_share = Zeroizing::new(C::read_F(reader)?); let mut verification_shares = HashMap::new(); @@ -361,6 +402,7 @@ mod lib { Ok(ThresholdCore::new( ThresholdParams::new(t, n, i).map_err(|_| io::Error::other("invalid parameters"))?, + interpolation, secret_share, verification_shares, )) @@ -383,6 +425,7 @@ mod lib { /// View of keys, interpolated and offset for usage. #[derive(Clone)] pub struct ThresholdView { + interpolation: Interpolation, offset: C::F, group_key: C::G, included: Vec, @@ -395,6 +438,7 @@ mod lib { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("ThresholdView") + .field("interpolation", &self.interpolation) .field("offset", &self.offset) .field("group_key", &self.group_key) .field("included", &self.included) @@ -480,12 +524,13 @@ mod lib { included.sort(); let mut secret_share = Zeroizing::new( - lagrange::(self.params().i(), &included) * self.secret_share().deref(), + self.core.interpolation.interpolation_factor(self.params().i(), &included) * + self.secret_share().deref(), ); let mut verification_shares = self.verification_shares(); for (i, share) in &mut verification_shares { - *share *= lagrange::(*i, &included); + *share *= self.core.interpolation.interpolation_factor(*i, &included); } // The offset is included by adding it to the participant with the lowest ID @@ -496,6 +541,7 @@ mod lib { *verification_shares.get_mut(&included[0]).unwrap() += C::generator() * offset; Ok(ThresholdView { + interpolation: self.core.interpolation.clone(), offset, group_key: self.group_key(), secret_share, @@ -528,6 +574,14 @@ mod lib { &self.included } + /// Return the interpolation factor for a signer. + pub fn interpolation_factor(&self, participant: Participant) -> Option { + if !self.included.contains(&participant) { + None? + } + Some(self.interpolation.interpolation_factor(participant, &self.included)) + } + /// Return the interpolated, offset secret share. 
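The `Interpolation::Lagrange` branch above computes `lambda_i = prod_{l != i} l / (l - i)`; for any polynomial of degree `< t`, the shares weighted by these factors sum to `f(0)`. A small numeric check (toy prime field and toy helpers `inv`/`lagrange`, not this crate's API):

```rust
const P: u64 = 101; // toy prime modulus

// Fermat inversion: a^(P-2) mod P
fn inv(a: u64) -> u64 {
  let (mut res, mut base, mut exp) = (1u64, a % P, P - 2);
  while exp != 0 {
    if (exp & 1) == 1 {
      res = (res * base) % P;
    }
    base = (base * base) % P;
    exp >>= 1;
  }
  res
}

// lambda_i = prod_{l != i} l / (l - i), as in the Lagrange branch
fn lagrange(i: u64, included: &[u64]) -> u64 {
  let (mut num, mut denom) = (1u64, 1u64);
  for l in included {
    if *l == i {
      continue;
    }
    num = (num * l) % P;
    denom = (denom * ((l + P - i) % P)) % P;
  }
  (num * inv(denom)) % P
}

fn main() {
  let f = |x: u64| (7 + (3 * x) + (5 * x * x)) % P; // f(0) = 7, degree 2
  let included = [1, 2, 3];
  let mut secret = 0u64;
  for i in included {
    secret = (secret + (lagrange(i, &included) * f(i))) % P;
  }
  assert_eq!(secret, 7); // the weighted shares reconstruct f(0)
}
```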
pub fn secret_share(&self) -> &Zeroizing { &self.secret_share diff --git a/crypto/dkg/src/musig.rs b/crypto/dkg/src/musig.rs index 4d6b54c8f..828432722 100644 --- a/crypto/dkg/src/musig.rs +++ b/crypto/dkg/src/musig.rs @@ -7,8 +7,6 @@ use std_shims::collections::HashMap; #[cfg(feature = "std")] use zeroize::Zeroizing; -#[cfg(feature = "std")] -use ciphersuite::group::ff::Field; use ciphersuite::{ group::{Group, GroupEncoding}, Ciphersuite, @@ -16,7 +14,7 @@ use ciphersuite::{ use crate::DkgError; #[cfg(feature = "std")] -use crate::{Participant, ThresholdParams, ThresholdCore, lagrange}; +use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore}; fn check_keys(keys: &[C::G]) -> Result> { if keys.is_empty() { @@ -67,6 +65,7 @@ pub fn musig_key(context: &[u8], keys: &[C::G]) -> Result(context, keys)?; let mut res = C::G::identity(); for i in 1 ..= keys_len { + // TODO: Calculate this with a multiexp res += keys[usize::from(i - 1)] * binding_factor::(transcript.clone(), i); } Ok(res) @@ -104,38 +103,26 @@ pub fn musig( binding.push(binding_factor::(transcript.clone(), i)); } - // Multiply our private key by our binding factor - let mut secret_share = private_key.clone(); - *secret_share *= binding[pos]; + // Our secret share is our private key + let secret_share = private_key.clone(); // Calculate verification shares let mut verification_shares = HashMap::new(); - // When this library offers a ThresholdView for a specific signing set, it applies the lagrange - // factor - // Since this is a n-of-n scheme, there's only one possible signing set, and one possible - // lagrange factor - // In the name of simplicity, we define the group key as the sum of all bound keys - // Accordingly, the secret share must be multiplied by the inverse of the lagrange factor, along - // with all verification shares - // This is less performant than simply defining the group key as the sum of all post-lagrange - // bound keys, yet the simplicity is preferred - let included = (1 ..= keys_len) - // This error also shouldn't be possible, for the same reasons as documented above - .map(|l| Participant::new(l).ok_or(DkgError::InvalidSigningSet)) - .collect::, _>>()?; let mut group_key = C::G::identity(); - for (l, p) in included.iter().enumerate() { - let bound = keys[l] * binding[l]; - group_key += bound; - - let lagrange_inv = lagrange::(*p, &included).invert().unwrap(); - if params.i() == *p { - *secret_share *= lagrange_inv; - } - verification_shares.insert(*p, bound * lagrange_inv); + for l in 1 ..= keys_len { + let key = keys[usize::from(l) - 1]; + group_key += key * binding[usize::from(l - 1)]; + + // These errors also shouldn't be possible, for the same reasons as documented above + verification_shares.insert(Participant::new(l).ok_or(DkgError::InvalidSigningSet)?, key); } debug_assert_eq!(C::generator() * secret_share.deref(), verification_shares[¶ms.i()]); debug_assert_eq!(musig_key::(context, keys).unwrap(), group_key); - Ok(ThresholdCore { params, secret_share, group_key, verification_shares }) + Ok(ThresholdCore::new( + params, + Interpolation::Constant(binding), + secret_share, + verification_shares, + )) } diff --git a/crypto/dkg/src/pedpop.rs b/crypto/dkg/src/pedpop.rs index 1faeebe56..adfc6958f 100644 --- a/crypto/dkg/src/pedpop.rs +++ b/crypto/dkg/src/pedpop.rs @@ -22,9 +22,9 @@ use multiexp::{multiexp_vartime, BatchVerifier}; use schnorr::SchnorrSignature; use crate::{ - Participant, DkgError, ThresholdParams, ThresholdCore, validate_map, + Participant, DkgError, ThresholdParams, 
Interpolation, ThresholdCore, validate_map, encryption::{ - ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, EncryptionKeyProof, + ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, Decryption, EncryptionKeyProof, DecryptionError, }, }; @@ -32,10 +32,10 @@ use crate::{ type FrostError = DkgError>; #[allow(non_snake_case)] -fn challenge(context: &str, l: Participant, R: &[u8], Am: &[u8]) -> C::F { +fn challenge(context: [u8; 32], l: Participant, R: &[u8], Am: &[u8]) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG FROST v0.2"); transcript.domain_separate(b"schnorr_proof_of_knowledge"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript.append_message(b"participant", l.to_bytes()); transcript.append_message(b"nonce", R); transcript.append_message(b"commitments", Am); @@ -86,15 +86,15 @@ impl ReadWrite for Commitments { #[derive(Debug, Zeroize)] pub struct KeyGenMachine { params: ThresholdParams, - context: String, + context: [u8; 32], _curve: PhantomData, } impl KeyGenMachine { /// Create a new machine to generate a key. /// - /// The context string should be unique among multisigs. - pub fn new(params: ThresholdParams, context: String) -> KeyGenMachine { + /// The context should be unique among multisigs. + pub fn new(params: ThresholdParams, context: [u8; 32]) -> KeyGenMachine { KeyGenMachine { params, context, _curve: PhantomData } } @@ -129,11 +129,11 @@ impl KeyGenMachine { // There's no reason to spend the time and effort to make this deterministic besides a // general obsession with canonicity and determinism though r, - challenge::(&self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg), + challenge::(self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg), ); // Additionally create an encryption mechanism to protect the secret shares - let encryption = Encryption::new(self.context.clone(), Some(self.params.i), rng); + let encryption = Encryption::new(self.context, self.params.i, rng); // Step 4: Broadcast let msg = @@ -225,7 +225,7 @@ impl ReadWrite for SecretShare { #[derive(Zeroize)] pub struct SecretShareMachine { params: ThresholdParams, - context: String, + context: [u8; 32], coefficients: Vec>, our_commitments: Vec, encryption: Encryption, @@ -274,7 +274,7 @@ impl SecretShareMachine { &mut batch, l, msg.commitments[0], - challenge::(&self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg), + challenge::(self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg), ); commitments.insert(l, msg.commitments.drain(..).collect::>()); @@ -472,9 +472,10 @@ impl KeyMachine { let KeyMachine { commitments, encryption, params, secret } = self; Ok(BlameMachine { commitments, - encryption, + encryption: encryption.into_decryption(), result: Some(ThresholdCore { params, + interpolation: Interpolation::Lagrange, secret_share: secret, group_key: stripes[0], verification_shares, @@ -486,7 +487,7 @@ impl KeyMachine { /// A machine capable of handling blame proofs. pub struct BlameMachine { commitments: HashMap>, - encryption: Encryption, + encryption: Decryption, result: Option>, } @@ -505,7 +506,6 @@ impl Zeroize for BlameMachine { for commitments in self.commitments.values_mut() { commitments.zeroize(); } - self.encryption.zeroize(); self.result.zeroize(); } } @@ -598,14 +598,13 @@ impl AdditionalBlameMachine { /// authenticated as having come from the supposed party and verified as valid. 
Usage of invalid /// commitments is considered undefined behavior, and may cause everything from inaccurate blame /// to panics. - pub fn new( - rng: &mut R, - context: String, + pub fn new( + context: [u8; 32], n: u16, mut commitment_msgs: HashMap>>, ) -> Result> { let mut commitments = HashMap::new(); - let mut encryption = Encryption::new(context, None, rng); + let mut encryption = Decryption::new(context); for i in 1 ..= n { let i = Participant::new(i).unwrap(); let Some(msg) = commitment_msgs.remove(&i) else { Err(DkgError::MissingParticipant(i))? }; diff --git a/crypto/dkg/src/promote.rs b/crypto/dkg/src/promote.rs index 7cad4f23f..c8dcaed0c 100644 --- a/crypto/dkg/src/promote.rs +++ b/crypto/dkg/src/promote.rs @@ -113,6 +113,7 @@ impl> GeneratorPromotion< Ok(ThresholdKeys { core: Arc::new(ThresholdCore::new( params, + self.base.core.interpolation.clone(), self.base.secret_share().clone(), verification_shares, )), diff --git a/crypto/dkg/src/tests/evrf/mod.rs b/crypto/dkg/src/tests/evrf/mod.rs new file mode 100644 index 000000000..e6fd22307 --- /dev/null +++ b/crypto/dkg/src/tests/evrf/mod.rs @@ -0,0 +1,79 @@ +use std::collections::HashMap; + +use zeroize::Zeroizing; +use rand_core::OsRng; +use rand::seq::SliceRandom; + +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use crate::{ + Participant, + evrf::*, + tests::{THRESHOLD, PARTICIPANTS, recover_key}, +}; + +mod proof; +use proof::{Pallas, Vesta}; + +#[test] +fn evrf_dkg() { + let generators = EvrfGenerators::::new(THRESHOLD, PARTICIPANTS); + let context = [0; 32]; + + let mut priv_keys = vec![]; + let mut pub_keys = vec![]; + for i in 0 .. PARTICIPANTS { + let priv_key = ::F::random(&mut OsRng); + pub_keys.push(::generator() * priv_key); + priv_keys.push((Participant::new(1 + i).unwrap(), Zeroizing::new(priv_key))); + } + + let mut participations = HashMap::new(); + // Shuffle the private keys so we iterate over a random subset of them + priv_keys.shuffle(&mut OsRng); + for (i, priv_key) in priv_keys.iter().take(usize::from(THRESHOLD)) { + participations.insert( + *i, + EvrfDkg::::participate( + &mut OsRng, + &generators, + context, + THRESHOLD, + &pub_keys, + priv_key, + ) + .unwrap(), + ); + } + + let VerifyResult::Valid(dkg) = EvrfDkg::::verify( + &mut OsRng, + &generators, + context, + THRESHOLD, + &pub_keys, + &participations, + ) + .unwrap() else { + panic!("verify didn't return VerifyResult::Valid") + }; + + let mut group_key = None; + let mut verification_shares = None; + let mut all_keys = HashMap::new(); + for (i, priv_key) in priv_keys { + let keys = dkg.keys(&priv_key).into_iter().next().unwrap(); + assert_eq!(keys.params().i(), i); + assert_eq!(keys.params().t(), THRESHOLD); + assert_eq!(keys.params().n(), PARTICIPANTS); + group_key = group_key.or(Some(keys.group_key())); + verification_shares = verification_shares.or(Some(keys.verification_shares())); + assert_eq!(Some(keys.group_key()), group_key); + assert_eq!(Some(keys.verification_shares()), verification_shares); + + all_keys.insert(i, keys); + } + + // TODO: Test for all possible combinations of keys + assert_eq!(Pallas::generator() * recover_key(&all_keys), group_key.unwrap()); +} diff --git a/crypto/dkg/src/tests/evrf/proof.rs b/crypto/dkg/src/tests/evrf/proof.rs new file mode 100644 index 000000000..5750c6c47 --- /dev/null +++ b/crypto/dkg/src/tests/evrf/proof.rs @@ -0,0 +1,118 @@ +use std::time::Instant; + +use rand_core::OsRng; + +use zeroize::{Zeroize, Zeroizing}; +use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2}; +use blake2::{Digest, 
Blake2b512};
+use ciphersuite::{
+  group::{
+    ff::{FromUniformBytes, Field, PrimeField},
+    Group,
+  },
+  Ciphersuite, Secp256k1, Ed25519, Ristretto,
+};
+use pasta_curves::{Ep, Eq, Fp, Fq};
+
+use generalized_bulletproofs::tests::generators;
+use generalized_bulletproofs_ec_gadgets::DiscreteLogParameters;
+
+use crate::evrf::proof::*;
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
+pub(crate) struct Pallas;
+impl Ciphersuite for Pallas {
+  type F = Fq;
+  type G = Ep;
+  type H = Blake2b512;
+  const ID: &'static [u8] = b"Pallas";
+  fn generator() -> Ep {
+    Ep::generator()
+  }
+  fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
+    // This naive concat may be insecure in a real world deployment
+    // This is solely test code so it's fine
+    Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into())
+  }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
+pub(crate) struct Vesta;
+impl Ciphersuite for Vesta {
+  type F = Fp;
+  type G = Eq;
+  type H = Blake2b512;
+  const ID: &'static [u8] = b"Vesta";
+  fn generator() -> Eq {
+    Eq::generator()
+  }
+  fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
+    // This naive concat may be insecure in a real world deployment
+    // This is solely test code so it's fine
+    Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into())
+  }
+}
+
+pub struct VestaParams;
+impl DiscreteLogParameters for VestaParams {
+  type ScalarBits = U<{ <<Vesta as Ciphersuite>::F as PrimeField>::NUM_BITS as usize }>;
+  type XCoefficients = Quot<Sum<Self::ScalarBits, U1>, U2>;
+  type XCoefficientsMinusOne = Diff<Self::XCoefficients, U1>;
+  type YxCoefficients = Diff<Quot<Sum<Sum<Self::ScalarBits, U1>, U1>, U2>, U2>;
+}
+
+impl EvrfCurve for Pallas {
+  type EmbeddedCurve = Vesta;
+  type EmbeddedCurveParameters = VestaParams;
+}
+
+fn evrf_proof_test<C: EvrfCurve>() {
+  let generators = generators(1024);
+  let vesta_private_key = Zeroizing::new(<C::EmbeddedCurve as Ciphersuite>::F::random(&mut OsRng));
+  let ecdh_public_keys = [
+    <C::EmbeddedCurve as Ciphersuite>::G::random(&mut OsRng),
+    <C::EmbeddedCurve as Ciphersuite>::G::random(&mut OsRng),
+  ];
+  let time = Instant::now();
+  let res =
+    Evrf::<C>::prove(&mut OsRng, &generators, [0; 32], 1, &ecdh_public_keys, &vesta_private_key)
+      .unwrap();
+  println!("Proving time: {:?}", time.elapsed());
+
+  let time = Instant::now();
+  let mut verifier = generators.batch_verifier();
+  Evrf::<C>::verify(
+    &mut OsRng,
+    &generators,
+    &mut verifier,
+    [0; 32],
+    1,
+    &ecdh_public_keys,
+    C::EmbeddedCurve::generator() * *vesta_private_key,
+    &res.proof,
+  )
+  .unwrap();
+  assert!(generators.verify(verifier));
+  println!("Verifying time: {:?}", time.elapsed());
+}
+
+#[test]
+fn pallas_evrf_proof_test() {
+  evrf_proof_test::<Pallas>();
+}
+
+#[test]
+fn secp256k1_evrf_proof_test() {
+  evrf_proof_test::<Secp256k1>();
+}
+
+#[test]
+fn ed25519_evrf_proof_test() {
+  evrf_proof_test::<Ed25519>();
+}
+
+#[test]
+fn ristretto_evrf_proof_test() {
+  evrf_proof_test::<Ristretto>();
+}
diff --git a/crypto/dkg/src/tests/mod.rs b/crypto/dkg/src/tests/mod.rs
index f21d72540..4399d72a2 100644
--- a/crypto/dkg/src/tests/mod.rs
+++ b/crypto/dkg/src/tests/mod.rs
@@ -6,7 +6,7 @@ use rand_core::{RngCore, CryptoRng};
 
 use ciphersuite::{group::ff::Field, Ciphersuite};
 
-use crate::{Participant, ThresholdCore, ThresholdKeys, lagrange, musig::musig as musig_fn};
+use crate::{Participant, ThresholdCore, ThresholdKeys, musig::musig as musig_fn};
 
 mod musig;
 pub use musig::test_musig;
@@ -19,6 +19,9 @@ use pedpop::pedpop_gen;
 
 mod promote;
 use promote::test_generator_promotion;
 
+#[cfg(all(test, feature = "evrf"))]
+mod evrf;
+
 /// Constant amount of participants to use when testing.
 pub const PARTICIPANTS: u16 = 5;
 /// Constant threshold of participants to use when testing.
@@ -43,7 +46,8 @@ pub fn recover_key<C: Ciphersuite>(keys: &HashMap<Participant, ThresholdKeys<C>
   let included = keys.keys().copied().collect::<Vec<_>>();
 
   let group_private = keys.iter().fold(C::F::ZERO, |accum, (i, keys)| {
-    accum + (lagrange::<C::F>(*i, &included) * keys.secret_share().deref())
+    accum +
+      (first.core.interpolation.interpolation_factor(*i, &included) * keys.secret_share().deref())
   });
   assert_eq!(C::generator() * group_private, first.group_key(), "failed to recover keys");
   group_private
diff --git a/crypto/dkg/src/tests/pedpop.rs b/crypto/dkg/src/tests/pedpop.rs
index 3ae383e33..42d7af671 100644
--- a/crypto/dkg/src/tests/pedpop.rs
+++ b/crypto/dkg/src/tests/pedpop.rs
@@ -14,7 +14,7 @@ use crate::{
 type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
 type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;
 
-const CONTEXT: &str = "DKG Test Key Generation";
+const CONTEXT: [u8; 32] = *b"DKG Test Key Generation         ";
 
 // Commit, then return commitment messages, enc keys, and shares
 #[allow(clippy::type_complexity)]
@@ -31,7 +31,7 @@ fn commit_enc_keys_and_shares(
   let mut enc_keys = HashMap::new();
   for i in (1 ..= PARTICIPANTS).map(Participant) {
     let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
-    let machine = KeyGenMachine::<C>::new(params, CONTEXT.to_string());
+    let machine = KeyGenMachine::<C>::new(params, CONTEXT);
     let (machine, these_commitments) = machine.generate_coefficients(rng);
     machines.insert(i, machine);
 
@@ -147,14 +147,12 @@ mod literal {
 
     // Verify machines constructed with AdditionalBlameMachine::new work
     assert_eq!(
-      AdditionalBlameMachine::new(
-        &mut OsRng,
-        CONTEXT.to_string(),
-        PARTICIPANTS,
-        commitment_msgs.clone()
-      )
-      .unwrap()
-      .blame(ONE, TWO, msg.clone(), blame.clone()),
+      AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(
+        ONE,
+        TWO,
+        msg.clone(),
+        blame.clone()
+      ),
       ONE,
     );
   }
 }
diff --git a/crypto/evrf/circuit-abstraction/Cargo.toml b/crypto/evrf/circuit-abstraction/Cargo.toml
new file mode 100644
index 000000000..1346be49f
--- /dev/null
+++ b/crypto/evrf/circuit-abstraction/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "generalized-bulletproofs-circuit-abstraction"
+version = "0.1.0"
+description = "An abstraction for arithmetic circuits over Generalized Bulletproofs"
+license = "MIT"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/circuit-abstraction"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = ["bulletproofs", "circuit"]
+edition = "2021"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
+
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
+
+generalized-bulletproofs = { path = "../generalized-bulletproofs" }
diff --git a/crypto/evrf/circuit-abstraction/LICENSE b/crypto/evrf/circuit-abstraction/LICENSE
new file mode 100644
index 000000000..659881f1a
--- /dev/null
+++ b/crypto/evrf/circuit-abstraction/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Luke Parker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crypto/evrf/circuit-abstraction/README.md b/crypto/evrf/circuit-abstraction/README.md
new file mode 100644
index 000000000..95149d93c
--- /dev/null
+++ b/crypto/evrf/circuit-abstraction/README.md
@@ -0,0 +1,3 @@
+# Generalized Bulletproofs Circuit Abstraction
+
+A circuit abstraction around `generalized-bulletproofs`.
diff --git a/crypto/evrf/circuit-abstraction/src/gadgets.rs b/crypto/evrf/circuit-abstraction/src/gadgets.rs
new file mode 100644
index 000000000..08e5214e0
--- /dev/null
+++ b/crypto/evrf/circuit-abstraction/src/gadgets.rs
@@ -0,0 +1,39 @@
+use ciphersuite::{group::ff::Field, Ciphersuite};
+
+use crate::*;
+
+impl<C: Ciphersuite> Circuit<C> {
+  /// Constrain two linear combinations to be equal.
+  pub fn equality(&mut self, a: LinComb<C::F>, b: &LinComb<C::F>) {
+    self.constrain_equal_to_zero(a - b);
+  }
+
+  /// Calculate (and constrain) the inverse of a value.
+  ///
+  /// A linear combination may optionally be passed as a constraint for the value being inverted.
+  /// A reference to the inverted value and its inverse is returned.
+  ///
+  /// May panic if any linear combinations reference non-existent terms, the witness isn't provided
+  /// when proving/is provided when verifying, or if the witness is 0 (and accordingly doesn't have
+  /// an inverse).
+  pub fn inverse(
+    &mut self,
+    lincomb: Option<LinComb<C::F>>,
+    witness: Option<C::F>,
+  ) -> (Variable, Variable) {
+    let (l, r, o) = self.mul(lincomb, None, witness.map(|f| (f, f.invert().unwrap())));
+    // The output of a value multiplied by its inverse is 1
+    // Constrain `1 o - 1 = 0`
+    self.constrain_equal_to_zero(LinComb::from(o).constant(-C::F::ONE));
+    (l, r)
+  }
+
+  /// Constrain two linear combinations as inequal.
+  ///
+  /// May panic if any linear combinations reference non-existent terms.
+  pub fn inequality(&mut self, a: LinComb<C::F>, b: &LinComb<C::F>, witness: Option<(C::F, C::F)>) {
+    let l_constraint = a - b;
+    // The existence of a multiplicative inverse means a-b != 0, which means a != b
+    self.inverse(Some(l_constraint), witness.map(|(a, b)| a - b));
+  }
+}
diff --git a/crypto/evrf/circuit-abstraction/src/lib.rs b/crypto/evrf/circuit-abstraction/src/lib.rs
new file mode 100644
index 000000000..9971480d8
--- /dev/null
+++ b/crypto/evrf/circuit-abstraction/src/lib.rs
@@ -0,0 +1,192 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+#![allow(non_snake_case)]
+
+use zeroize::{Zeroize, ZeroizeOnDrop};
+
+use ciphersuite::{
+  group::ff::{Field, PrimeField},
+  Ciphersuite,
+};
+
+use generalized_bulletproofs::{
+  ScalarVector, PedersenCommitment, PedersenVectorCommitment, ProofGenerators,
+  transcript::{Transcript as ProverTranscript, VerifierTranscript, Commitments},
+  arithmetic_circuit_proof::{AcError, ArithmeticCircuitStatement, ArithmeticCircuitWitness},
+};
+pub use generalized_bulletproofs::arithmetic_circuit_proof::{Variable, LinComb};
+
+mod gadgets;
+
+/// A trait for the transcript, whether proving or verifying, as necessary for sampling
+/// challenges.
+pub trait Transcript {
+  /// Sample a challenge from the transcript.
+  ///
+  /// It is the caller's responsibility to have properly transcripted all variables prior to
+  /// sampling this challenge.
+  fn challenge<F: PrimeField>(&mut self) -> F;
+}
+impl Transcript for ProverTranscript {
+  fn challenge<F: PrimeField>(&mut self) -> F {
+    self.challenge()
+  }
+}
+impl Transcript for VerifierTranscript<'_> {
+  fn challenge<F: PrimeField>(&mut self) -> F {
+    self.challenge()
+  }
+}
+
+/// The witness for the satisfaction of this circuit.
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
+struct ProverData<C: Ciphersuite> {
+  aL: Vec<C::F>,
+  aR: Vec<C::F>,
+  C: Vec<PedersenVectorCommitment<C>>,
+  V: Vec<PedersenCommitment<C>>,
+}
+
+/// A struct representing a circuit.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Circuit<C: Ciphersuite> {
+  muls: usize,
+  // A series of linear combinations which must evaluate to 0.
+  constraints: Vec<LinComb<C::F>>,
+  prover: Option<ProverData<C>>,
+}
+
+impl<C: Ciphersuite> Circuit<C> {
+  /// Returns the amount of multiplications used by this circuit.
+  pub fn muls(&self) -> usize {
+    self.muls
+  }
+
+  /// Create an instance to prove satisfaction of a circuit with.
+  // TODO: Take the transcript here
+  #[allow(clippy::type_complexity)]
+  pub fn prove(
+    vector_commitments: Vec<PedersenVectorCommitment<C>>,
+    commitments: Vec<PedersenCommitment<C>>,
+  ) -> Self {
+    Self {
+      muls: 0,
+      constraints: vec![],
+      prover: Some(ProverData { aL: vec![], aR: vec![], C: vector_commitments, V: commitments }),
+    }
+  }
+
+  /// Create an instance to verify a proof with.
+  // TODO: Take the transcript here
+  pub fn verify() -> Self {
+    Self { muls: 0, constraints: vec![], prover: None }
+  }
+
+  /// Evaluate a linear combination.
+  ///
+  /// Yields WL aL + WR aR + WO aO + WCG CG + WCH CH + WV V + c.
+  ///
+  /// May panic if the linear combination references non-existent terms.
+  ///
+  /// Returns None if not a prover.
+  pub fn eval(&self, lincomb: &LinComb<C::F>) -> Option<C::F> {
+    self.prover.as_ref().map(|prover| {
+      let mut res = lincomb.c();
+      for (index, weight) in lincomb.WL() {
+        res += prover.aL[*index] * weight;
+      }
+      for (index, weight) in lincomb.WR() {
+        res += prover.aR[*index] * weight;
+      }
+      for (index, weight) in lincomb.WO() {
+        res += prover.aL[*index] * prover.aR[*index] * weight;
+      }
+      for (WCG, C) in lincomb.WCG().iter().zip(&prover.C) {
+        for (j, weight) in WCG {
+          res += C.g_values[*j] * weight;
+        }
+      }
+      for (WCH, C) in lincomb.WCH().iter().zip(&prover.C) {
+        for (j, weight) in WCH {
+          res += C.h_values[*j] * weight;
+        }
+      }
+      for (index, weight) in lincomb.WV() {
+        res += prover.V[*index].value * weight;
+      }
+      res
+    })
+  }
+
+  /// Multiply two values, optionally constrained, returning the constrainable left/right/out
+  /// terms.
+  ///
+  /// May panic if any linear combinations reference non-existent terms or if the witness isn't
+  /// provided when proving/is provided when verifying.
+  pub fn mul(
+    &mut self,
+    a: Option<LinComb<C::F>>,
+    b: Option<LinComb<C::F>>,
+    witness: Option<(C::F, C::F)>,
+  ) -> (Variable, Variable, Variable) {
+    let l = Variable::aL(self.muls);
+    let r = Variable::aR(self.muls);
+    let o = Variable::aO(self.muls);
+    self.muls += 1;
+
+    debug_assert_eq!(self.prover.is_some(), witness.is_some());
+    if let Some(witness) = witness {
+      let prover = self.prover.as_mut().unwrap();
+      prover.aL.push(witness.0);
+      prover.aR.push(witness.1);
+    }
+
+    if let Some(a) = a {
+      self.constrain_equal_to_zero(a.term(-C::F::ONE, l));
+    }
+    if let Some(b) = b {
+      self.constrain_equal_to_zero(b.term(-C::F::ONE, r));
+    }
+
+    (l, r, o)
+  }
+
+  /// Constrain a linear combination to be equal to 0.
+  ///
+  /// May panic if the linear combination references non-existent terms.
+  pub fn constrain_equal_to_zero(&mut self, lincomb: LinComb<C::F>) {
+    self.constraints.push(lincomb);
+  }
+
+  /// Obtain the statement for this circuit.
+  ///
+  /// If configured as the prover, the witness to use is also returned.
+  #[allow(clippy::type_complexity)]
+  pub fn statement(
+    self,
+    generators: ProofGenerators<'_, C>,
+    commitments: Commitments<C>,
+  ) -> Result<(ArithmeticCircuitStatement<'_, C>, Option<ArithmeticCircuitWitness<C>>), AcError> {
+    let statement = ArithmeticCircuitStatement::new(generators, self.constraints, commitments)?;
+
+    let witness = self
+      .prover
+      .map(|mut prover| {
+        // We can't deconstruct the witness as it implements Drop (per ZeroizeOnDrop)
+        // Accordingly, we take the values within it and move forward with those
+        let mut aL = vec![];
+        std::mem::swap(&mut prover.aL, &mut aL);
+        let mut aR = vec![];
+        std::mem::swap(&mut prover.aR, &mut aR);
+        let mut C = vec![];
+        std::mem::swap(&mut prover.C, &mut C);
+        let mut V = vec![];
+        std::mem::swap(&mut prover.V, &mut V);
+        ArithmeticCircuitWitness::new(ScalarVector::from(aL), ScalarVector::from(aR), C, V)
+      })
+      .transpose()?;
+
+    Ok((statement, witness))
+  }
+}
diff --git a/crypto/evrf/divisors/Cargo.toml b/crypto/evrf/divisors/Cargo.toml
new file mode 100644
index 000000000..d4e3a2d0d
--- /dev/null
+++ b/crypto/evrf/divisors/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "ec-divisors"
+version = "0.1.0"
+description = "A library for calculating elliptic curve divisors"
+license = "MIT"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/divisors"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = ["ciphersuite", "ff", "group"]
+edition = "2021"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+rand_core = { version = "0.6", default-features = false }
+zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
+
+group = "0.13"
+
+hex = { version = "0.4", optional = true }
+dalek-ff-group = { path = "../../dalek-ff-group", features = ["std"], optional = true }
+pasta_curves = { version = "0.5", default-features = false, features = ["bits", "alloc"], optional = true }
+
+[dev-dependencies]
+rand_core = { version = "0.6", features = ["getrandom"] }
+
+hex = "0.4"
+dalek-ff-group = { path = "../../dalek-ff-group", features = ["std"] }
+pasta_curves = { version = "0.5", default-features = false, features = ["bits", "alloc"] }
+
+[features]
+ed25519 = ["hex", "dalek-ff-group"]
+pasta = ["pasta_curves"]
diff --git a/crypto/evrf/divisors/LICENSE b/crypto/evrf/divisors/LICENSE
new file mode 100644
index 000000000..36fd4d600
--- /dev/null
+++ b/crypto/evrf/divisors/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023-2024 Luke Parker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
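For orientation, here is a minimal sketch of driving the circuit abstraction defined above. It is illustrative only (not part of this diff) and uses only the APIs shown in `lib.rs`/`gadgets.rs`, with `Ristretto` assumed as an arbitrary stand-in for any `Ciphersuite` accepted by the generics:

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use generalized_bulletproofs_circuit_abstraction::{Circuit, LinComb};

type F = <Ristretto as Ciphersuite>::F;

// Constrain knowledge of x such that x * x = 9.
// The prover runs this with witness = Some(x) and the verifier with witness = None,
// so both derive the identical constraint system.
fn x_squared_is_nine(circuit: &mut Circuit<Ristretto>, witness: Option<F>) {
  let (l, r, o) = circuit.mul(None, None, witness.map(|x| (x, x)));
  // The left and right terms are the same secret: constrain l - r = 0
  circuit.equality(LinComb::from(l), &LinComb::from(r));
  // The output term must equal 9: constrain o - 9 = 0
  circuit.constrain_equal_to_zero(LinComb::from(o).constant(-F::from(9u64)));
}

The prover would construct the circuit via `Circuit::prove(...)` with its commitment openings, the verifier via `Circuit::verify()`, and both would finish by calling `statement(...)` to obtain the `ArithmeticCircuitStatement` (plus the `ArithmeticCircuitWitness`, when proving).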
diff --git a/crypto/evrf/divisors/README.md b/crypto/evrf/divisors/README.md
new file mode 100644
index 000000000..51ba542ae
--- /dev/null
+++ b/crypto/evrf/divisors/README.md
@@ -0,0 +1,4 @@
+# Elliptic Curve Divisors
+
+An implementation of a representation for and construction of elliptic curve
+divisors, intended for Eagen's [EC IP work](https://eprint.iacr.org/2022/596).
diff --git a/crypto/evrf/divisors/src/lib.rs b/crypto/evrf/divisors/src/lib.rs
new file mode 100644
index 000000000..d71aa8a4d
--- /dev/null
+++ b/crypto/evrf/divisors/src/lib.rs
@@ -0,0 +1,287 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+#![allow(non_snake_case)]
+
+use group::{
+  ff::{Field, PrimeField},
+  Group,
+};
+
+mod poly;
+pub use poly::*;
+
+#[cfg(test)]
+mod tests;
+
+/// A curve usable with this library.
+pub trait DivisorCurve: Group {
+  /// An element of the field this curve is defined over.
+  type FieldElement: PrimeField;
+
+  /// The A in the curve equation y^2 = x^3 + A x + B.
+  fn a() -> Self::FieldElement;
+  /// The B in the curve equation y^2 = x^3 + A x + B.
+  fn b() -> Self::FieldElement;
+
+  /// y^2 - x^3 - A x - B
+  ///
+  /// Section 2 of the security proofs defines this modulus.
+  ///
+  /// This MUST NOT be overridden.
+  // TODO: Move to an extension trait
+  fn divisor_modulus() -> Poly<Self::FieldElement> {
+    Poly {
+      // 0 y**1, 1 y**2
+      y_coefficients: vec![Self::FieldElement::ZERO, Self::FieldElement::ONE],
+      yx_coefficients: vec![],
+      x_coefficients: vec![
+        // - A x
+        -Self::a(),
+        // 0 x^2
+        Self::FieldElement::ZERO,
+        // - x^3
+        -Self::FieldElement::ONE,
+      ],
+      // - B
+      zero_coefficient: -Self::b(),
+    }
+  }
+
+  /// Convert a point to its x and y coordinates.
+  ///
+  /// Returns None if passed the point at infinity.
+  fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)>;
+}
+
+/// Calculate the slope and intercept between two points.
+///
+/// This function panics when `a @ infinity`, `b @ infinity`, `a == b`, or when `a == -b`.
+pub(crate) fn slope_intercept<C: DivisorCurve>(a: C, b: C) -> (C::FieldElement, C::FieldElement) {
+  let (ax, ay) = C::to_xy(a).unwrap();
+  debug_assert_eq!(C::divisor_modulus().eval(ax, ay), C::FieldElement::ZERO);
+  let (bx, by) = C::to_xy(b).unwrap();
+  debug_assert_eq!(C::divisor_modulus().eval(bx, by), C::FieldElement::ZERO);
+  let slope = (by - ay) *
+    Option::<C::FieldElement>::from((bx - ax).invert())
+      .expect("trying to get slope/intercept of points sharing an x coordinate");
+  let intercept = by - (slope * bx);
+  debug_assert!(bool::from((ay - (slope * ax) - intercept).is_zero()));
+  debug_assert!(bool::from((by - (slope * bx) - intercept).is_zero()));
+  (slope, intercept)
+}
+
+// The line interpolating two points.
+fn line<C: DivisorCurve>(a: C, mut b: C) -> Poly<C::FieldElement> {
+  // If they're both the point at infinity, we simply set the line to one
+  if bool::from(a.is_identity() & b.is_identity()) {
+    return Poly {
+      y_coefficients: vec![],
+      yx_coefficients: vec![],
+      x_coefficients: vec![],
+      zero_coefficient: C::FieldElement::ONE,
+    };
+  }
+
+  // If either point is the point at infinity, or these are additive inverses, the line is
+  // `1 * x - x`. The first `x` is a term in the polynomial, the second `x` is the `x` coordinate
+  // of these points (of which there is one, as the second point is either at infinity or has a
+  // matching `x` coordinate).
+  if bool::from(a.is_identity() | b.is_identity()) || (a == -b) {
+    let (x, _) = C::to_xy(if !bool::from(a.is_identity()) { a } else { b }).unwrap();
+    return Poly {
+      y_coefficients: vec![],
+      yx_coefficients: vec![],
+      x_coefficients: vec![C::FieldElement::ONE],
+      zero_coefficient: -x,
+    };
+  }
+
+  // If the points are equal, we use the line interpolating the sum of these points with the point
+  // at infinity
+  if a == b {
+    b = -a.double();
+  }
+
+  let (slope, intercept) = slope_intercept::<C>(a, b);
+
+  // Section 4 of the proofs explicitly states the line `L = y - lambda * x - mu`
+  // y - (slope * x) - intercept
+  Poly {
+    y_coefficients: vec![C::FieldElement::ONE],
+    yx_coefficients: vec![],
+    x_coefficients: vec![-slope],
+    zero_coefficient: -intercept,
+  }
+}
+
+/// Create a divisor interpolating the following points.
+///
+/// Returns None if:
+///   - No points were passed in
+///   - The points don't sum to the point at infinity
+///   - A passed in point was the point at infinity
+#[allow(clippy::new_ret_no_self)]
+pub fn new_divisor<C: DivisorCurve>(points: &[C]) -> Option<Poly<C::FieldElement>> {
+  // A single point is either the point at infinity, or this doesn't sum to the point at infinity
+  // Both cause us to return None
+  if points.len() < 2 {
+    None?;
+  }
+  if points.iter().sum::<C>() != C::identity() {
+    None?;
+  }
+
+  // Create the initial set of divisors
+  let mut divs = vec![];
+  let mut iter = points.iter().copied();
+  while let Some(a) = iter.next() {
+    if a == C::identity() {
+      None?;
+    }
+
+    let b = iter.next();
+    if b == Some(C::identity()) {
+      None?;
+    }
+
+    // Draw the line between those points
+    divs.push((a + b.unwrap_or(C::identity()), line::<C>(a, b.unwrap_or(-a))));
+  }
+
+  let modulus = C::divisor_modulus();
+
+  // Pair them off until only one remains
+  while divs.len() > 1 {
+    let mut next_divs = vec![];
+    // If there's an odd amount of divisors, carry the odd one out to the next iteration
+    if (divs.len() % 2) == 1 {
+      next_divs.push(divs.pop().unwrap());
+    }
+
+    while let Some((a, a_div)) = divs.pop() {
+      let (b, b_div) = divs.pop().unwrap();
+
+      // Merge the two divisors
+      let numerator = a_div.mul_mod(b_div, &modulus).mul_mod(line::<C>(a, b), &modulus);
+      let denominator = line::<C>(a, -a).mul_mod(line::<C>(b, -b), &modulus);
+      let (q, r) = numerator.div_rem(&denominator);
+      assert_eq!(r, Poly::zero());
+
+      next_divs.push((a + b, q));
+    }
+
+    divs = next_divs;
+  }
+
+  // Return the unified divisor
+  Some(divs.remove(0).1)
+}
+
+#[cfg(any(test, feature = "pasta"))]
+mod pasta {
+  use group::{ff::Field, Curve};
+  use pasta_curves::{
+    arithmetic::{Coordinates, CurveAffine},
+    Ep, Fp, Eq, Fq,
+  };
+  use crate::DivisorCurve;
+
+  impl DivisorCurve for Ep {
+    type FieldElement = Fp;
+
+    fn a() -> Self::FieldElement {
+      Self::FieldElement::ZERO
+    }
+    fn b() -> Self::FieldElement {
+      Self::FieldElement::from(5u64)
+    }
+
+    fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> {
+      Option::<Coordinates<_>>::from(point.to_affine().coordinates())
+        .map(|coords| (*coords.x(), *coords.y()))
+    }
+  }
+
+  impl DivisorCurve for Eq {
+    type FieldElement = Fq;
+
+    fn a() -> Self::FieldElement {
+      Self::FieldElement::ZERO
+    }
+    fn b() -> Self::FieldElement {
+      Self::FieldElement::from(5u64)
+    }
+
+    fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> {
+      Option::<Coordinates<_>>::from(point.to_affine().coordinates())
+        .map(|coords| (*coords.x(), *coords.y()))
+    }
+  }
+}
+
+#[cfg(any(test, feature = "ed25519"))]
+mod ed25519 {
+  use group::{
+    ff::{Field, PrimeField},
+    Group, GroupEncoding,
+  };
+  use dalek_ff_group::{FieldElement, EdwardsPoint};
+
+  impl crate::DivisorCurve for EdwardsPoint {
+    type FieldElement = FieldElement;
+
+    // Wei25519 a/b
+    // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.3
+    fn a() -> Self::FieldElement {
+      let mut be_bytes =
+        hex::decode("2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa984914a144").unwrap();
+      be_bytes.reverse();
+      let le_bytes = be_bytes;
+      Self::FieldElement::from_repr(le_bytes.try_into().unwrap()).unwrap()
+    }
+    fn b() -> Self::FieldElement {
+      let mut be_bytes =
+        hex::decode("7b425ed097b425ed097b425ed097b425ed097b425ed097b4260b5e9c7710c864").unwrap();
+      be_bytes.reverse();
+      let le_bytes = be_bytes;
+
+      Self::FieldElement::from_repr(le_bytes.try_into().unwrap()).unwrap()
+    }
+
+    // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2
+    fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> {
+      if bool::from(point.is_identity()) {
+        None?;
+      }
+
+      // Extract the y coordinate from the compressed point
+      let mut edwards_y = point.to_bytes();
+      let x_is_odd = edwards_y[31] >> 7;
+      edwards_y[31] &= (1 << 7) - 1;
+      let edwards_y = Self::FieldElement::from_repr(edwards_y).unwrap();
+
+      // Recover the x coordinate
+      let edwards_y_sq = edwards_y * edwards_y;
+      let D = -Self::FieldElement::from(121665u64) *
+        Self::FieldElement::from(121666u64).invert().unwrap();
+      let mut edwards_x = ((edwards_y_sq - Self::FieldElement::ONE) *
+        ((D * edwards_y_sq) + Self::FieldElement::ONE).invert().unwrap())
+      .sqrt()
+      .unwrap();
+      if u8::from(bool::from(edwards_x.is_odd())) != x_is_odd {
+        edwards_x = -edwards_x;
+      }
+
+      // Calculate the x and y coordinates for Wei25519
+      let edwards_y_plus_one = Self::FieldElement::ONE + edwards_y;
+      let one_minus_edwards_y = Self::FieldElement::ONE - edwards_y;
+      let wei_x = (edwards_y_plus_one * one_minus_edwards_y.invert().unwrap()) +
+        (Self::FieldElement::from(486662u64) * Self::FieldElement::from(3u64).invert().unwrap());
+      let c =
+        (-(Self::FieldElement::from(486662u64) + Self::FieldElement::from(2u64))).sqrt().unwrap();
+      let wei_y = c * edwards_y_plus_one * (one_minus_edwards_y * edwards_x).invert().unwrap();
+      Some((wei_x, wei_y))
+    }
+  }
+}
diff --git a/crypto/evrf/divisors/src/poly.rs b/crypto/evrf/divisors/src/poly.rs
new file mode 100644
index 000000000..b818433bc
--- /dev/null
+++ b/crypto/evrf/divisors/src/poly.rs
@@ -0,0 +1,430 @@
+use core::ops::{Add, Neg, Sub, Mul, Rem};
+
+use zeroize::Zeroize;
+
+use group::ff::PrimeField;
+
+/// A structure representing a Polynomial with x**i, y**i, and y**i * x**j terms.
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
+pub struct Poly<F: From<u64> + Zeroize + PrimeField> {
+  /// c[i] * y ** (i + 1)
+  pub y_coefficients: Vec<F>,
+  /// c[i][j] * y ** (i + 1) x ** (j + 1)
+  pub yx_coefficients: Vec<Vec<F>>,
+  /// c[i] * x ** (i + 1)
+  pub x_coefficients: Vec<F>,
+  /// Coefficient for x ** 0, y ** 0, and x ** 0 y ** 0 (the coefficient for 1)
+  pub zero_coefficient: F,
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
+  /// A polynomial for zero.
+  pub fn zero() -> Self {
+    Poly {
+      y_coefficients: vec![],
+      yx_coefficients: vec![],
+      x_coefficients: vec![],
+      zero_coefficient: F::ZERO,
+    }
+  }
+
+  /// The amount of terms in the polynomial.
+  #[allow(clippy::len_without_is_empty)]
+  #[must_use]
+  pub fn len(&self) -> usize {
+    self.y_coefficients.len() +
+      self.yx_coefficients.iter().map(Vec::len).sum::<usize>() +
+      self.x_coefficients.len() +
+      usize::from(u8::from(self.zero_coefficient != F::ZERO))
+  }
+
+  // Remove high-order zero terms, allowing the length of the vectors to equal the amount of terms.
+  pub(crate) fn tidy(&mut self) {
+    let tidy = |vec: &mut Vec<F>| {
+      while vec.last() == Some(&F::ZERO) {
+        vec.pop();
+      }
+    };
+
+    tidy(&mut self.y_coefficients);
+    for vec in self.yx_coefficients.iter_mut() {
+      tidy(vec);
+    }
+    while self.yx_coefficients.last() == Some(&vec![]) {
+      self.yx_coefficients.pop();
+    }
+    tidy(&mut self.x_coefficients);
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Add<&Self> for Poly<F> {
+  type Output = Self;
+
+  fn add(mut self, other: &Self) -> Self {
+    // Expand to be the needed size
+    while self.y_coefficients.len() < other.y_coefficients.len() {
+      self.y_coefficients.push(F::ZERO);
+    }
+    while self.yx_coefficients.len() < other.yx_coefficients.len() {
+      self.yx_coefficients.push(vec![]);
+    }
+    for i in 0 .. other.yx_coefficients.len() {
+      while self.yx_coefficients[i].len() < other.yx_coefficients[i].len() {
+        self.yx_coefficients[i].push(F::ZERO);
+      }
+    }
+    while self.x_coefficients.len() < other.x_coefficients.len() {
+      self.x_coefficients.push(F::ZERO);
+    }
+
+    // Perform the addition
+    for (i, coeff) in other.y_coefficients.iter().enumerate() {
+      self.y_coefficients[i] += coeff;
+    }
+    for (i, coeffs) in other.yx_coefficients.iter().enumerate() {
+      for (j, coeff) in coeffs.iter().enumerate() {
+        self.yx_coefficients[i][j] += coeff;
+      }
+    }
+    for (i, coeff) in other.x_coefficients.iter().enumerate() {
+      self.x_coefficients[i] += coeff;
+    }
+    self.zero_coefficient += other.zero_coefficient;
+
+    self.tidy();
+    self
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Neg for Poly<F> {
+  type Output = Self;
+
+  fn neg(mut self) -> Self {
+    for y_coeff in self.y_coefficients.iter_mut() {
+      *y_coeff = -*y_coeff;
+    }
+    for yx_coeffs in self.yx_coefficients.iter_mut() {
+      for yx_coeff in yx_coeffs.iter_mut() {
+        *yx_coeff = -*yx_coeff;
+      }
+    }
+    for x_coeff in self.x_coefficients.iter_mut() {
+      *x_coeff = -*x_coeff;
+    }
+    self.zero_coefficient = -self.zero_coefficient;
+
+    self
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Sub for Poly<F> {
+  type Output = Self;
+
+  fn sub(self, other: Self) -> Self {
+    self + &-other
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Mul<F> for Poly<F> {
+  type Output = Self;
+
+  fn mul(mut self, scalar: F) -> Self {
+    if scalar == F::ZERO {
+      return Poly::zero();
+    }
+
+    for y_coeff in self.y_coefficients.iter_mut() {
+      *y_coeff *= scalar;
+    }
+    for coeffs in self.yx_coefficients.iter_mut() {
+      for coeff in coeffs.iter_mut() {
+        *coeff *= scalar;
+      }
+    }
+    for x_coeff in self.x_coefficients.iter_mut() {
+      *x_coeff *= scalar;
+    }
+    self.zero_coefficient *= scalar;
+    self
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
+  #[must_use]
+  fn shift_by_x(mut self, power_of_x: usize) -> Self {
+    if power_of_x == 0 {
+      return self;
+    }
+
+    // Shift up every x coefficient
+    for _ in 0 .. power_of_x {
+      self.x_coefficients.insert(0, F::ZERO);
+      for yx_coeffs in &mut self.yx_coefficients {
+        yx_coeffs.insert(0, F::ZERO);
+      }
+    }
+
+    // Move the zero coefficient
+    self.x_coefficients[power_of_x - 1] = self.zero_coefficient;
+    self.zero_coefficient = F::ZERO;
+
+    // Move the y coefficients
+    // Start by creating yx coefficients with the necessary powers of x
+    let mut yx_coefficients_to_push = vec![];
+    while yx_coefficients_to_push.len() < power_of_x {
+      yx_coefficients_to_push.push(F::ZERO);
+    }
+    // Now, ensure the yx coefficients have the slots for the y coefficients we're moving
+    while self.yx_coefficients.len() < self.y_coefficients.len() {
+      self.yx_coefficients.push(yx_coefficients_to_push.clone());
+    }
+    // Perform the move
+    for (i, y_coeff) in self.y_coefficients.drain(..).enumerate() {
+      self.yx_coefficients[i][power_of_x - 1] = y_coeff;
+    }
+
+    self
+  }
+
+  #[must_use]
+  fn shift_by_y(mut self, power_of_y: usize) -> Self {
+    if power_of_y == 0 {
+      return self;
+    }
+
+    // Shift up every y coefficient
+    for _ in 0 .. power_of_y {
+      self.y_coefficients.insert(0, F::ZERO);
+      self.yx_coefficients.insert(0, vec![]);
+    }
+
+    // Move the zero coefficient
+    self.y_coefficients[power_of_y - 1] = self.zero_coefficient;
+    self.zero_coefficient = F::ZERO;
+
+    // Move the x coefficients
+    self.yx_coefficients[power_of_y - 1] = self.x_coefficients;
+    self.x_coefficients = vec![];
+
+    self
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Mul for Poly<F> {
+  type Output = Self;
+
+  fn mul(self, other: Self) -> Self {
+    let mut res = self.clone() * other.zero_coefficient;
+
+    for (i, y_coeff) in other.y_coefficients.iter().enumerate() {
+      let scaled = self.clone() * *y_coeff;
+      res = res + &scaled.shift_by_y(i + 1);
+    }
+
+    for (y_i, yx_coeffs) in other.yx_coefficients.iter().enumerate() {
+      for (x_i, yx_coeff) in yx_coeffs.iter().enumerate() {
+        let scaled = self.clone() * *yx_coeff;
+        res = res + &scaled.shift_by_y(y_i + 1).shift_by_x(x_i + 1);
+      }
+    }
+
+    for (i, x_coeff) in other.x_coefficients.iter().enumerate() {
+      let scaled = self.clone() * *x_coeff;
+      res = res + &scaled.shift_by_x(i + 1);
+    }
+
+    res.tidy();
+    res
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
+  /// Perform multiplication mod `modulus`.
+  #[must_use]
+  pub fn mul_mod(self, other: Self, modulus: &Self) -> Self {
+    ((self % modulus) * (other % modulus)) % modulus
+  }
+
+  /// Perform division, returning the result and remainder.
+  ///
+  /// Panics upon division by zero, with undefined behavior if a non-tidy divisor is used.
+  #[must_use]
+  pub fn div_rem(self, divisor: &Self) -> (Self, Self) {
+    // The leading y coefficient and associated x coefficient.
+    let leading_y = |poly: &Self| -> (_, _) {
+      if poly.y_coefficients.len() > poly.yx_coefficients.len() {
+        (poly.y_coefficients.len(), 0)
+      } else if !poly.yx_coefficients.is_empty() {
+        (poly.yx_coefficients.len(), poly.yx_coefficients.last().unwrap().len())
+      } else {
+        (0, poly.x_coefficients.len())
+      }
+    };
+
+    let (div_y, div_x) = leading_y(divisor);
+    // If this divisor is actually a scalar, don't perform long division
+    if (div_y == 0) && (div_x == 0) {
+      return (self * divisor.zero_coefficient.invert().unwrap(), Poly::zero());
+    }
+
+    // Remove leading terms until the value is less than the divisor
+    let mut quotient: Poly<F> = Poly::zero();
+    let mut remainder = self.clone();
+    loop {
+      // If there's nothing left to divide, return
+      if remainder == Poly::zero() {
+        break;
+      }
+
+      let (rem_y, rem_x) = leading_y(&remainder);
+      if (rem_y < div_y) || (rem_x < div_x) {
+        break;
+      }
+
+      let get = |poly: &Poly<F>, y_pow: usize, x_pow: usize| -> F {
+        if (y_pow == 0) && (x_pow == 0) {
+          poly.zero_coefficient
+        } else if x_pow == 0 {
+          poly.y_coefficients[y_pow - 1]
+        } else if y_pow == 0 {
+          poly.x_coefficients[x_pow - 1]
+        } else {
+          poly.yx_coefficients[y_pow - 1][x_pow - 1]
+        }
+      };
+      let coeff_numerator = get(&remainder, rem_y, rem_x);
+      let coeff_denominator = get(divisor, div_y, div_x);
+
+      // We want coeff_denominator scaled by x to equal coeff_numerator
+      // x * d = n
+      // n / d = x
+      let mut quotient_term = Poly::zero();
+      // Because this is the coefficient for the leading term of a tidied polynomial, it must be
+      // non-zero
+      quotient_term.zero_coefficient = coeff_numerator * coeff_denominator.invert().unwrap();
+
+      // Add the necessary yx powers
+      let delta_y = rem_y - div_y;
+      let delta_x = rem_x - div_x;
+      let quotient_term = quotient_term.shift_by_y(delta_y).shift_by_x(delta_x);
+
+      let to_remove = quotient_term.clone() * divisor.clone();
+      debug_assert_eq!(get(&to_remove, rem_y, rem_x), coeff_numerator);
+
+      remainder = remainder - to_remove;
+      quotient = quotient + &quotient_term;
+    }
+    debug_assert_eq!((quotient.clone() * divisor.clone()) + &remainder, self);
+
+    (quotient, remainder)
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Rem<&Self> for Poly<F> {
+  type Output = Self;
+
+  fn rem(self, modulus: &Self) -> Self {
+    self.div_rem(modulus).1
+  }
+}
+
+impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
+  /// Evaluate this polynomial with the specified x/y values.
+  ///
+  /// Panics on polynomials with terms whose powers exceed 2**64.
+  #[must_use]
+  pub fn eval(&self, x: F, y: F) -> F {
+    let mut res = self.zero_coefficient;
+    for (pow, coeff) in
+      self.y_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
+    {
+      res += y.pow([pow]) * coeff;
+    }
+    for (y_pow, coeffs) in
+      self.yx_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
+    {
+      let y_pow = y.pow([y_pow]);
+      for (x_pow, coeff) in
+        coeffs.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
+      {
+        res += y_pow * x.pow([x_pow]) * coeff;
+      }
+    }
+    for (pow, coeff) in
+      self.x_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
+    {
+      res += x.pow([pow]) * coeff;
+    }
+    res
+  }
+
+  /// Differentiate a polynomial, reduced by a modulus with a leading y term y**2 x**0, by x and y.
+  ///
+  /// This function panics if a y**2 term is present within the polynomial.
+  #[must_use]
+  pub fn differentiate(&self) -> (Poly<F>, Poly<F>) {
+    assert!(self.y_coefficients.len() <= 1);
+    assert!(self.yx_coefficients.len() <= 1);
+
+    // Differentiation by x practically involves:
+    // - Dropping everything without an x component
+    // - Shifting everything down a power of x
+    // - Multiplying the new coefficient by the power it was previously used with
+    let diff_x = {
+      let mut diff_x = Poly {
+        y_coefficients: vec![],
+        yx_coefficients: vec![],
+        x_coefficients: vec![],
+        zero_coefficient: F::ZERO,
+      };
+      if !self.x_coefficients.is_empty() {
+        let mut x_coeffs = self.x_coefficients.clone();
+        diff_x.zero_coefficient = x_coeffs.remove(0);
+        diff_x.x_coefficients = x_coeffs;
+
+        let mut prior_x_power = F::from(2);
+        for x_coeff in &mut diff_x.x_coefficients {
+          *x_coeff *= prior_x_power;
+          prior_x_power += F::ONE;
+        }
+      }
+
+      if !self.yx_coefficients.is_empty() {
+        let mut yx_coeffs = self.yx_coefficients[0].clone();
+        diff_x.y_coefficients = vec![yx_coeffs.remove(0)];
+        diff_x.yx_coefficients = vec![yx_coeffs];
+
+        let mut prior_x_power = F::from(2);
+        for yx_coeff in &mut diff_x.yx_coefficients[0] {
+          *yx_coeff *= prior_x_power;
+          prior_x_power += F::ONE;
+        }
+      }
+
+      diff_x.tidy();
+      diff_x
+    };
+
+    // Differentiation by y is trivial
+    // It's the y coefficient as the zero coefficient, and the yx coefficients as the x
+    // coefficients
+    // This is thanks to any y term over y^2 being reduced out
+    let diff_y = Poly {
+      y_coefficients: vec![],
+      yx_coefficients: vec![],
+      x_coefficients: self.yx_coefficients.first().cloned().unwrap_or(vec![]),
+      zero_coefficient: self.y_coefficients.first().cloned().unwrap_or(F::ZERO),
+    };
+
+    (diff_x, diff_y)
+  }
+
+  /// Normalize the x coefficient to 1.
+  ///
+  /// Panics if there is no x coefficient to normalize or if it cannot be normalized to 1.
+  #[must_use]
+  pub fn normalize_x_coefficient(self) -> Self {
+    let scalar = self.x_coefficients[0].invert().unwrap();
+    self * scalar
+  }
+}
diff --git a/crypto/evrf/divisors/src/tests/mod.rs b/crypto/evrf/divisors/src/tests/mod.rs
new file mode 100644
index 000000000..bd8de441a
--- /dev/null
+++ b/crypto/evrf/divisors/src/tests/mod.rs
@@ -0,0 +1,235 @@
+use rand_core::OsRng;
+
+use group::{ff::Field, Group};
+use dalek_ff_group::EdwardsPoint;
+use pasta_curves::{Ep, Eq};
+
+use crate::{DivisorCurve, Poly, new_divisor};
+
+// Equation 4 in the security proofs
+fn check_divisor<C: DivisorCurve>(points: Vec<C>) {
+  // Create the divisor
+  let divisor = new_divisor::<C>(&points).unwrap();
+  let eval = |c| {
+    let (x, y) = C::to_xy(c).unwrap();
+    divisor.eval(x, y)
+  };
+
+  // Decide challenges
+  let c0 = C::random(&mut OsRng);
+  let c1 = C::random(&mut OsRng);
+  let c2 = -(c0 + c1);
+  let (slope, intercept) = crate::slope_intercept::<C>(c0, c1);
+
+  let mut rhs = <C as DivisorCurve>::FieldElement::ONE;
+  for point in points {
+    let (x, y) = C::to_xy(point).unwrap();
+    rhs *= intercept - (y - (slope * x));
+  }
+  assert_eq!(eval(c0) * eval(c1) * eval(c2), rhs);
+}
+
+fn test_divisor<C: DivisorCurve>() {
+  for i in 1 ..= 255 {
+    println!("Test iteration {i}");
+
+    // Select points
+    let mut points = vec![];
+    for _ in 0 .. i {
+      points.push(C::random(&mut OsRng));
+    }
+    points.push(-points.iter().sum::<C>());
+    println!("Points {}", points.len());
+
+    // Perform the original check
+    check_divisor(points.clone());
+
+    // Create the divisor
+    let divisor = new_divisor::<C>(&points).unwrap();
+
+    // For a divisor interpolating 256 points, as one does when interpreting a 255-bit discrete log
+    // with the result of its scalar multiplication against a fixed generator, the lengths of the
+    // yx/x coefficients shouldn't supersede the following bounds
+    assert!((divisor.yx_coefficients.first().unwrap_or(&vec![]).len()) <= 126);
+    assert!((divisor.x_coefficients.len() - 1) <= 127);
+    assert!(
+      (1 + divisor.yx_coefficients.first().unwrap_or(&vec![]).len() +
+        (divisor.x_coefficients.len() - 1) +
+        1) <=
+        255
+    );
+
+    // Decide challenges
+    let c0 = C::random(&mut OsRng);
+    let c1 = C::random(&mut OsRng);
+    let c2 = -(c0 + c1);
+    let (slope, intercept) = crate::slope_intercept::<C>(c0, c1);
+
+    // Perform the logarithmic derivative check
+    {
+      let dx_over_dz = {
+        let dx = Poly {
+          y_coefficients: vec![],
+          yx_coefficients: vec![],
+          x_coefficients: vec![C::FieldElement::ZERO, C::FieldElement::from(3)],
+          zero_coefficient: C::a(),
+        };
+
+        let dy = Poly {
+          y_coefficients: vec![C::FieldElement::from(2)],
+          yx_coefficients: vec![],
+          x_coefficients: vec![],
+          zero_coefficient: C::FieldElement::ZERO,
+        };
+
+        let dz = (dy.clone() * -slope) + &dx;
+
+        // We want dx/dz, and dz/dx is equal to dy/dx - slope
+        // Sagemath claims this, dy / dz, is the proper inverse
+        (dy, dz)
+      };
+
+      {
+        let sanity_eval = |c| {
+          let (x, y) = C::to_xy(c).unwrap();
+          dx_over_dz.0.eval(x, y) * dx_over_dz.1.eval(x, y).invert().unwrap()
+        };
+        let sanity = sanity_eval(c0) + sanity_eval(c1) + sanity_eval(c2);
+        // This verifies the dx/dz polynomial is correct
+        assert_eq!(sanity, C::FieldElement::ZERO);
+      }
+
+      // Logarithmic derivative check
+      let test = |divisor: Poly<_>| {
+        let (dx, dy) = divisor.differentiate();
+
+        let lhs = |c| {
+          let (x, y) = C::to_xy(c).unwrap();
+
+          let n_0 = (C::FieldElement::from(3) * (x * x)) + C::a();
+          let d_0 = (C::FieldElement::from(2) * y).invert().unwrap();
+          let p_0_n_0 = n_0 * d_0;
+
+          let n_1 = dy.eval(x, y);
+          let first = p_0_n_0 * n_1;
+
+          let second = dx.eval(x, y);
+
+          let d_1 = divisor.eval(x, y);
+
+          let fraction_1_n = first + second;
+          let fraction_1_d = d_1;
+
+          let fraction_2_n = dx_over_dz.0.eval(x, y);
+          let fraction_2_d = dx_over_dz.1.eval(x, y);
+
+          fraction_1_n * fraction_2_n * (fraction_1_d * fraction_2_d).invert().unwrap()
+        };
+        let lhs = lhs(c0) + lhs(c1) + lhs(c2);
+
+        let mut rhs = C::FieldElement::ZERO;
+        for point in &points {
+          let (x, y) = <C as DivisorCurve>::to_xy(*point).unwrap();
+          rhs += (intercept - (y - (slope * x))).invert().unwrap();
+        }
+
+        assert_eq!(lhs, rhs);
+      };
+      // Test the divisor and the divisor with a normalized x coefficient
+      test(divisor.clone());
+      test(divisor.normalize_x_coefficient());
+    }
+  }
+}
+
+fn test_same_point<C: DivisorCurve>() {
+  let mut points = vec![C::random(&mut OsRng)];
+  points.push(points[0]);
+  points.push(-points.iter().sum::<C>());
+  check_divisor(points);
+}
+
+fn test_subset_sum_to_infinity<C: DivisorCurve>() {
+  // Internally, a binary tree algorithm is used
+  // This executes the first pass to end up with [0, 0] for further reductions
+  {
+    let mut points = vec![C::random(&mut OsRng)];
+    points.push(-points[0]);
+
+    let next = C::random(&mut OsRng);
+    points.push(next);
+    points.push(-next);
+    check_divisor(points);
+  }
+
+  // This executes the first pass to end up with [0, X, -X, 0]
+  {
+    let mut points = vec![C::random(&mut OsRng)];
+    points.push(-points[0]);
+
+    let x_1 = C::random(&mut OsRng);
+    let x_2 = C::random(&mut OsRng);
+    points.push(x_1);
+    points.push(x_2);
+
+    points.push(-x_1);
+    points.push(-x_2);
+
+    let next = C::random(&mut OsRng);
+    points.push(next);
+    points.push(-next);
+    check_divisor(points);
+  }
+}
+
+#[test]
+fn test_divisor_pallas() {
+  test_divisor::<Ep>();
+  test_same_point::<Ep>();
+  test_subset_sum_to_infinity::<Ep>();
+}
+
+#[test]
+fn test_divisor_vesta() {
+  test_divisor::<Eq>();
+  test_same_point::<Eq>();
+  test_subset_sum_to_infinity::<Eq>();
+}
+
+#[test]
+fn test_divisor_ed25519() {
+  // Since we're implementing Wei25519 ourselves, check the isomorphism works as expected
+  {
+    let incomplete_add = |p1, p2| {
+      let (x1, y1) = EdwardsPoint::to_xy(p1).unwrap();
+      let (x2, y2) = EdwardsPoint::to_xy(p2).unwrap();
+
+      // mmadd-1998-cmo
+      let u = y2 - y1;
+      let uu = u * u;
+      let v = x2 - x1;
+      let vv = v * v;
+      let vvv = v * vv;
+      let R = vv * x1;
+      let A = uu - vvv - R.double();
+      let x3 = v * A;
+      let y3 = (u * (R - A)) - (vvv * y1);
+      let z3 = vvv;
+
+      // Normalize from XYZ to XY
+      let x3 = x3 * z3.invert().unwrap();
+      let y3 = y3 * z3.invert().unwrap();
+
+      // Edwards addition -> Wei25519 coordinates should be equivalent to Wei25519 addition
+      assert_eq!(EdwardsPoint::to_xy(p1 + p2).unwrap(), (x3, y3));
+    };
+
+    for _ in 0 .. 256 {
+      incomplete_add(EdwardsPoint::random(&mut OsRng), EdwardsPoint::random(&mut OsRng));
+    }
+  }
+
+  test_divisor::<EdwardsPoint>();
+  test_same_point::<EdwardsPoint>();
+  test_subset_sum_to_infinity::<EdwardsPoint>();
}
diff --git a/crypto/evrf/divisors/src/tests/poly.rs b/crypto/evrf/divisors/src/tests/poly.rs
new file mode 100644
index 000000000..c630a69e5
--- /dev/null
+++ b/crypto/evrf/divisors/src/tests/poly.rs
@@ -0,0 +1,129 @@
+use rand_core::OsRng;
+
+use group::ff::Field;
+use pasta_curves::Ep;
+
+use crate::{DivisorCurve, Poly};
+
+type F = <Ep as DivisorCurve>::FieldElement;
+
+#[test]
+fn test_poly() {
+  let zero = F::ZERO;
+  let one = F::ONE;
+
+  {
+    let mut poly = Poly::zero();
+    poly.y_coefficients = vec![zero, one];
+
+    let mut modulus = Poly::zero();
+    modulus.y_coefficients = vec![one];
+    assert_eq!(poly % &modulus, Poly::zero());
+  }
+
+  {
+    let mut poly = Poly::zero();
+    poly.y_coefficients = vec![zero, one];
+
+    let mut squared = Poly::zero();
+    squared.y_coefficients = vec![zero, zero, zero, one];
+    assert_eq!(poly.clone() * poly.clone(), squared);
+  }
+
+  {
+    let mut a = Poly::zero();
+    a.zero_coefficient = F::from(2u64);
+
+    let mut b = Poly::zero();
+    b.zero_coefficient = F::from(3u64);
+
+    let mut res = Poly::zero();
+    res.zero_coefficient = F::from(6u64);
+    assert_eq!(a.clone() * b.clone(), res);
+
+    b.y_coefficients = vec![F::from(4u64)];
+    res.y_coefficients = vec![F::from(8u64)];
+    assert_eq!(a.clone() * b.clone(), res);
+    assert_eq!(b.clone() * a.clone(), res);
+
+    a.x_coefficients = vec![F::from(5u64)];
+    res.x_coefficients = vec![F::from(15u64)];
+    res.yx_coefficients = vec![vec![F::from(20u64)]];
+    assert_eq!(a.clone() * b.clone(), res);
+    assert_eq!(b * a.clone(), res);
+
+    // res is now 20xy + 8*y + 15*x + 6
+    // res ** 2 =
+    // 400*x^2*y^2 + 320*x*y^2 + 64*y^2 + 600*x^2*y + 480*x*y + 96*y + 225*x^2 + 180*x + 36
+
+    let mut squared = Poly::zero();
+    squared.y_coefficients = vec![F::from(96u64), F::from(64u64)];
+    squared.yx_coefficients =
+      vec![vec![F::from(480u64), F::from(600u64)], vec![F::from(320u64), F::from(400u64)]];
+    squared.x_coefficients = vec![F::from(180u64), F::from(225u64)];
+    squared.zero_coefficient = F::from(36u64);
+    assert_eq!(res.clone() * res, squared);
+  }
+}
+
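An illustrative aside (not a test present in this PR): `div_rem` upholds `numerator = quotient * divisor + remainder`, checked here by dividing x**2 - 1 by x - 1, whose quotient is x + 1 with remainder 0. Only the `Poly` API from `poly.rs` above is used, a sketch of what an additional test could look like:

#[test]
fn div_rem_example() {
  // x**2 - 1
  let mut numerator = Poly::zero();
  numerator.x_coefficients = vec![F::ZERO, F::ONE];
  numerator.zero_coefficient = -F::ONE;

  // x - 1
  let mut divisor = Poly::zero();
  divisor.x_coefficients = vec![F::ONE];
  divisor.zero_coefficient = -F::ONE;

  let (quotient, remainder) = numerator.clone().div_rem(&divisor);
  // The defining property of division
  assert_eq!((quotient.clone() * divisor.clone()) + &remainder, numerator);
  assert_eq!(remainder, Poly::zero());

  // x + 1
  let mut expected = Poly::zero();
  expected.x_coefficients = vec![F::ONE];
  expected.zero_coefficient = F::ONE;
  assert_eq!(quotient, expected);
}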
+#[test] +fn test_differentation() { + let random = || F::random(&mut OsRng); + + let input = Poly { + y_coefficients: vec![random()], + yx_coefficients: vec![vec![random()]], + x_coefficients: vec![random(), random(), random()], + zero_coefficient: random(), + }; + let (diff_x, diff_y) = input.differentiate(); + assert_eq!( + diff_x, + Poly { + y_coefficients: vec![input.yx_coefficients[0][0]], + yx_coefficients: vec![], + x_coefficients: vec![ + F::from(2) * input.x_coefficients[1], + F::from(3) * input.x_coefficients[2] + ], + zero_coefficient: input.x_coefficients[0], + } + ); + assert_eq!( + diff_y, + Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![input.yx_coefficients[0][0]], + zero_coefficient: input.y_coefficients[0], + } + ); + + let input = Poly { + y_coefficients: vec![random()], + yx_coefficients: vec![vec![random(), random()]], + x_coefficients: vec![random(), random(), random(), random()], + zero_coefficient: random(), + }; + let (diff_x, diff_y) = input.differentiate(); + assert_eq!( + diff_x, + Poly { + y_coefficients: vec![input.yx_coefficients[0][0]], + yx_coefficients: vec![vec![F::from(2) * input.yx_coefficients[0][1]]], + x_coefficients: vec![ + F::from(2) * input.x_coefficients[1], + F::from(3) * input.x_coefficients[2], + F::from(4) * input.x_coefficients[3], + ], + zero_coefficient: input.x_coefficients[0], + } + ); + assert_eq!( + diff_y, + Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![input.yx_coefficients[0][0], input.yx_coefficients[0][1]], + zero_coefficient: input.y_coefficients[0], + } + ); +} diff --git a/crypto/evrf/ec-gadgets/Cargo.toml b/crypto/evrf/ec-gadgets/Cargo.toml new file mode 100644 index 000000000..cbd356397 --- /dev/null +++ b/crypto/evrf/ec-gadgets/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "generalized-bulletproofs-ec-gadgets" +version = "0.1.0" +description = "Gadgets for working with an embedded Elliptic Curve in a Generalized Bulletproofs circuit" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/ec-gadgets" +authors = ["Luke Parker "] +keywords = ["bulletproofs", "circuit", "divisors"] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +generic-array = { version = "1", default-features = false, features = ["alloc"] } + +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] } + +generalized-bulletproofs-circuit-abstraction = { path = "../circuit-abstraction" } diff --git a/crypto/evrf/ec-gadgets/LICENSE b/crypto/evrf/ec-gadgets/LICENSE new file mode 100644 index 000000000..659881f1a --- /dev/null +++ b/crypto/evrf/ec-gadgets/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/ec-gadgets/README.md b/crypto/evrf/ec-gadgets/README.md new file mode 100644 index 000000000..95149d93c --- /dev/null +++ b/crypto/evrf/ec-gadgets/README.md @@ -0,0 +1,3 @@ +# Generalized Bulletproofs EC Gadgets + +Gadgets for working with an embedded elliptic curve in a Generalized Bulletproofs circuit. diff --git a/crypto/evrf/ec-gadgets/src/dlog.rs b/crypto/evrf/ec-gadgets/src/dlog.rs new file mode 100644 index 000000000..ef4b8c830 --- /dev/null +++ b/crypto/evrf/ec-gadgets/src/dlog.rs @@ -0,0 +1,529 @@ +use core::fmt; + +use ciphersuite::{ + group::ff::{Field, PrimeField, BatchInverter}, + Ciphersuite, +}; + +use generalized_bulletproofs_circuit_abstraction::*; + +use crate::*; + +/// Parameters for a discrete logarithm proof. +/// +/// This isn't required to be implemented by the Field/Group/Ciphersuite, solely a struct, to +/// enable parameterization of discrete log proofs to the bitlength of the discrete logarithm. +/// While that may be F::NUM_BITS, a discrete log proof for a full scalar, it could also be 64, +/// a discrete log proof for a u64 (such as if opening a Pedersen commitment in-circuit). +pub trait DiscreteLogParameters { + /// The amount of bits used to represent a scalar. + type ScalarBits: ArrayLength; + + /// The amount of x**i coefficients in a divisor. + /// + /// This is the amount of points in a divisor (the amount of bits in a scalar, plus one) divided + /// by two. + type XCoefficients: ArrayLength; + + /// The amount of x**i coefficients in a divisor, minus one. + type XCoefficientsMinusOne: ArrayLength; + + /// The amount of y x**i coefficients in a divisor. + /// + /// This is the amount of points in a divisor (the amount of bits in a scalar, plus one) plus + /// one, divided by two, minus two. + type YxCoefficients: ArrayLength; +} + +/// A tabled generator for proving/verifying discrete logarithm claims. +#[derive(Clone)] +pub struct GeneratorTable<F: PrimeField, Parameters: DiscreteLogParameters>( + GenericArray<(F, F), Parameters::ScalarBits>, +); + +impl<F: PrimeField, Parameters: DiscreteLogParameters> fmt::Debug + for GeneratorTable<F, Parameters> +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt + .debug_struct("GeneratorTable") + .field("x", &self.0[0].0) + .field("y", &self.0[0].1) + .finish_non_exhaustive() + } +} + +impl<F: PrimeField, Parameters: DiscreteLogParameters> GeneratorTable<F, Parameters> { + /// Create a new table for this generator. + /// + /// The generator is assumed to be well-formed and on-curve. This function may panic if it's not. + pub fn new(curve: &CurveSpec<F>, generator_x: F, generator_y: F) -> Self { + // mdbl-2007-bl + fn dbl<F: PrimeField>(a: F, x1: F, y1: F) -> (F, F) { + let xx = x1 * x1; + let w = a + (xx + xx.double()); + let y1y1 = y1 * y1; + let r = y1y1 + y1y1; + let sss = (y1 * r).double().double(); + let rr = r * r; + + let b = x1 + r; + let b = (b * b) - xx - rr; + + let h = (w * w) - b.double(); + let x3 = h.double() * y1; + let y3 = (w * (b - h)) - rr.double(); + let z3 = sss; + + // Normalize from XYZ to XY + let z3_inv = z3.invert().unwrap(); + let x3 = x3 * z3_inv; + let y3 = y3 * z3_inv; + + (x3, y3) + } + + let mut res = Self(GenericArray::default()); + res.0[0] = (generator_x, generator_y); + for i in 1 .. Parameters::ScalarBits::USIZE { + let last = res.0[i - 1]; + res.0[i] = dbl(curve.a, last.0, last.1); + } + + res + } +} + +/// A representation of the divisor. +/// +/// The coefficient for x**1 is explicitly excluded as it's expected to be normalized to 1. +#[derive(Clone)] +pub struct Divisor<Parameters: DiscreteLogParameters> { + /// The coefficient for the `y` term of the divisor. + /// + /// There is never more than one `y**i x**0` coefficient as the leading term of the modulus is + /// `y**2`. It's assumed the coefficient is non-zero (and present) as it will be for any divisor + /// exceeding trivial complexity. + pub y: Variable, + /// The coefficients for the `y**1 x**i` terms of the polynomial. + // This subtraction enforces the divisor to have at least 4 points which is acceptable. + // TODO: Double check these constants + pub yx: GenericArray<Variable, Parameters::YxCoefficients>, + /// The coefficients for the `x**i` terms of the polynomial, skipping x**1. + /// + /// x**1 is skipped as it's expected to be normalized to 1, and therefore constant, in order to + /// ensure the divisor is non-zero (as necessary for the proof to be complete). + // Subtract 1 from the length due to skipping the coefficient for x**1 + pub x_from_power_of_2: GenericArray<Variable, Parameters::XCoefficientsMinusOne>, + /// The constant term in the polynomial (alternatively, the coefficient for y**0 x**0). + pub zero: Variable, +} + +/// A point, its discrete logarithm, and the divisor to prove it. +#[derive(Clone)] +pub struct PointWithDlog<Parameters: DiscreteLogParameters> { + /// The point which is supposedly the result of scaling the generator by the discrete logarithm. + pub point: (Variable, Variable), + /// The discrete logarithm, represented as the coefficients for the powers of two (2**i). + pub dlog: GenericArray<Variable, Parameters::ScalarBits>, + /// The divisor interpolating the relevant doublings of the generator with the inverse of the + /// point. + pub divisor: Divisor<Parameters>, +} + +/// A struct containing a point used for the evaluation of a divisor. +/// +/// Preprocesses and caches as much of the calculation as possible to minimize work upon reuse of +/// challenge points. +struct ChallengePoint<F: PrimeField, Parameters: DiscreteLogParameters> { + y: F, + yx: GenericArray<F, Parameters::YxCoefficients>, + x: GenericArray<F, Parameters::XCoefficients>, + p_0_n_0: F, + x_p_0_n_0: GenericArray<F, Parameters::YxCoefficients>, + p_1_n: F, + p_1_d: F, +} + +impl<F: PrimeField, Parameters: DiscreteLogParameters> ChallengePoint<F, Parameters> { + fn new( + curve: &CurveSpec<F>, + // The slope between all of the challenge points + slope: F, + // The x and y coordinates + x: F, + y: F, + // The inversion of twice the y coordinate + // We accept this as an argument so that the caller can calculate these with a batch inversion + inv_two_y: F, + ) -> Self { + // Powers of x, skipping x**0 + let divisor_x_len = Parameters::XCoefficients::USIZE; + let mut x_pows = GenericArray::default(); + x_pows[0] = x; + for i in 1 .. divisor_x_len { + let last = x_pows[i - 1]; + x_pows[i] = last * x; + } + + // Powers of x multiplied by y + let divisor_yx_len = Parameters::YxCoefficients::USIZE; + let mut yx = GenericArray::default(); + // Skips x**0 + yx[0] = y * x; + for i in 1 .. 
divisor_yx_len { + let last = yx[i - 1]; + yx[i] = last * x; + } + + let x_sq = x.square(); + let three_x_sq = x_sq.double() + x_sq; + let three_x_sq_plus_a = three_x_sq + curve.a; + let two_y = y.double(); + + // p_0_n_0 from `DivisorChallenge` + let p_0_n_0 = three_x_sq_plus_a * inv_two_y; + let mut x_p_0_n_0 = GenericArray::default(); + // Since this iterates over x, which skips x**0, this also skips p_0_n_0 x**0 + for (i, x) in x_pows.iter().take(divisor_yx_len).enumerate() { + x_p_0_n_0[i] = p_0_n_0 * x; + } + + // p_1_n from `DivisorChallenge` + let p_1_n = two_y; + // p_1_d from `DivisorChallenge` + let p_1_d = (-slope * p_1_n) + three_x_sq_plus_a; + + ChallengePoint { x: x_pows, y, yx, p_0_n_0, x_p_0_n_0, p_1_n, p_1_d } + } +} + +// `DivisorChallenge` from the section `Discrete Log Proof` +fn divisor_challenge_eval( + circuit: &mut Circuit, + divisor: &Divisor, + challenge: &ChallengePoint, +) -> Variable { + // The evaluation of the divisor differentiated by y, further multiplied by p_0_n_0 + // Differentation drops everything without a y coefficient, and drops what remains by a power + // of y + // (y**1 -> y**0, yx**i -> x**i) + // This aligns with p_0_n_1 from `DivisorChallenge` + let p_0_n_1 = { + let mut p_0_n_1 = LinComb::empty().term(challenge.p_0_n_0, divisor.y); + for (j, var) in divisor.yx.iter().enumerate() { + // This does not raise by `j + 1` as x_p_0_n_0 omits x**0 + p_0_n_1 = p_0_n_1.term(challenge.x_p_0_n_0[j], *var); + } + p_0_n_1 + }; + + // The evaluation of the divisor differentiated by x + // This aligns with p_0_n_2 from `DivisorChallenge` + let p_0_n_2 = { + // The coefficient for x**1 is 1, so 1 becomes the new zero coefficient + let mut p_0_n_2 = LinComb::empty().constant(C::F::ONE); + + // Handle the new y coefficient + p_0_n_2 = p_0_n_2.term(challenge.y, divisor.yx[0]); + + // Handle the new yx coefficients + for (j, yx) in divisor.yx.iter().enumerate().skip(1) { + // For the power which was shifted down, we multiply this coefficient + // 3 x**2 -> 2 * 3 x**1 + let original_power_of_x = C::F::from(u64::try_from(j + 1).unwrap()); + // `j - 1` so `j = 1` indexes yx[0] as yx[0] is the y x**1 + // (yx omits y x**0) + let this_weight = original_power_of_x * challenge.yx[j - 1]; + p_0_n_2 = p_0_n_2.term(this_weight, *yx); + } + + // Handle the x coefficients + // We don't skip the first one as `x_from_power_of_2` already omits x**1 + for (i, x) in divisor.x_from_power_of_2.iter().enumerate() { + // i + 2 as the paper expects i to start from 1 and be + 1, yet we start from 0 + let original_power_of_x = C::F::from(u64::try_from(i + 2).unwrap()); + // Still x[i] as x[0] is x**1 + let this_weight = original_power_of_x * challenge.x[i]; + + p_0_n_2 = p_0_n_2.term(this_weight, *x); + } + + p_0_n_2 + }; + + // p_0_n from `DivisorChallenge` + let p_0_n = p_0_n_1 + &p_0_n_2; + + // Evaluation of the divisor + // p_0_d from `DivisorChallenge` + let p_0_d = { + let mut p_0_d = LinComb::empty().term(challenge.y, divisor.y); + + for (var, c_yx) in divisor.yx.iter().zip(&challenge.yx) { + p_0_d = p_0_d.term(*c_yx, *var); + } + + for (i, var) in divisor.x_from_power_of_2.iter().enumerate() { + // This `i+1` is preserved, despite most not being as x omits x**0, as this assumes we + // start with `i=1` + p_0_d = p_0_d.term(challenge.x[i + 1], *var); + } + + // Adding x effectively adds a `1 x` term, ensuring the divisor isn't 0 + p_0_d.term(C::F::ONE, divisor.zero).constant(challenge.x[0]) + }; + + // Calculate the joint numerator + // p_n from `DivisorChallenge` + let p_n = 
p_0_n * challenge.p_1_n; + // Calculate the joint denominator + // p_d from `DivisorChallenge` + let p_d = p_0_d * challenge.p_1_d; + + // We want `n / d = o` + // `n / d = o` == `n = d * o` + // These are safe unwraps as they're solely done by the prover and should always be non-zero + let witness = + circuit.eval(&p_d).map(|p_d| (p_d, circuit.eval(&p_n).unwrap() * p_d.invert().unwrap())); + let (_l, o, n_claim) = circuit.mul(Some(p_d), None, witness); + circuit.equality(p_n, &n_claim.into()); + o +} + +/// A challenge to evaluate divisors with. +/// +/// This challenge must be sampled after writing the commitments to the transcript. This challenge +/// is reusable across various divisors. +pub struct DiscreteLogChallenge { + c0: ChallengePoint, + c1: ChallengePoint, + c2: ChallengePoint, + slope: F, + intercept: F, +} + +/// A generator which has been challenged and is ready for use in evaluating discrete logarithm +/// claims. +pub struct ChallengedGenerator( + GenericArray, +); + +/// Gadgets for proving the discrete logarithm of points on an elliptic curve defined over the +/// scalar field of the curve of the Bulletproof. +pub trait EcDlogGadgets { + /// Sample a challenge for a series of discrete logarithm claims. + /// + /// This must be called after writing the commitments to the transcript. + /// + /// The generators are assumed to be non-empty. They are not transcripted. If your generators are + /// dynamic, they must be properly transcripted into the context. + /// + /// May panic/have undefined behavior if an assumption is broken. + #[allow(clippy::type_complexity)] + fn discrete_log_challenge( + &self, + transcript: &mut T, + curve: &CurveSpec, + generators: &[GeneratorTable], + ) -> (DiscreteLogChallenge, Vec>); + + /// Prove this point has the specified discrete logarithm over the specified generator. + /// + /// The discrete logarithm is not validated to be in a canonical form. The only guarantee made on + /// it is that it's a consistent representation of _a_ discrete logarithm (reuse won't enable + /// re-interpretation as a distinct discrete logarithm). + /// + /// This does ensure the point is on-curve. + /// + /// This MUST only be called with `Variable`s present within commitments. + /// + /// May panic/have undefined behavior if an assumption is broken, or if passed an invalid + /// witness. 
+ fn discrete_log<Parameters: DiscreteLogParameters>( + &mut self, + curve: &CurveSpec<C::F>, + point: PointWithDlog<Parameters>, + challenge: &DiscreteLogChallenge<C::F, Parameters>, + challenged_generator: &ChallengedGenerator<C::F, Parameters>, + ) -> OnCurve; +} + +impl<C: Ciphersuite> EcDlogGadgets<C> for Circuit<C> { + // This is part of `DiscreteLog` from `Discrete Log Proof`, specifically, the challenges and + // the calculations dependent solely on them + fn discrete_log_challenge<T: Transcript, Parameters: DiscreteLogParameters>( + &self, + transcript: &mut T, + curve: &CurveSpec<C::F>, + generators: &[GeneratorTable<C::F, Parameters>], + ) -> (DiscreteLogChallenge<C::F, Parameters>, Vec<ChallengedGenerator<C::F, Parameters>>) { + // Get the challenge points + // TODO: Implement a proper hash to curve + let (c0_x, c0_y) = loop { + let c0_x: C::F = transcript.challenge(); + let Some(c0_y) = + Option::<C::F>::from(((c0_x.square() * c0_x) + (curve.a * c0_x) + curve.b).sqrt()) + else { + continue; + }; + // Takes the even y coordinate so as to not be dependent on whichever root the above sqrt + // happens to return + // TODO: Randomly select which to take + break (c0_x, if bool::from(c0_y.is_odd()) { -c0_y } else { c0_y }); + }; + let (c1_x, c1_y) = loop { + let c1_x: C::F = transcript.challenge(); + let Some(c1_y) = + Option::<C::F>::from(((c1_x.square() * c1_x) + (curve.a * c1_x) + curve.b).sqrt()) + else { + continue; + }; + break (c1_x, if bool::from(c1_y.is_odd()) { -c1_y } else { c1_y }); + }; + + // mmadd-1998-cmo + fn incomplete_add<F: PrimeField>(x1: F, y1: F, x2: F, y2: F) -> Option<(F, F)> { + if x1 == x2 { + None? + } + + let u = y2 - y1; + let uu = u * u; + let v = x2 - x1; + let vv = v * v; + let vvv = v * vv; + let r = vv * x1; + let a = uu - vvv - r.double(); + let x3 = v * a; + let y3 = (u * (r - a)) - (vvv * y1); + let z3 = vvv; + + // Normalize from XYZ to XY + let z3_inv = Option::<F>::from(z3.invert())?; + let x3 = x3 * z3_inv; + let y3 = y3 * z3_inv; + + Some((x3, y3)) + } + + let (c2_x, c2_y) = incomplete_add::<C::F>(c0_x, c0_y, c1_x, c1_y) + .expect("randomly selected points shared an x coordinate"); + // We want C0, C1, C2 = -(C0 + C1) + let c2_y = -c2_y; + + // Calculate the slope and intercept + // Safe invert as these x coordinates must be distinct due to passing the above incomplete_add + let slope = (c1_y - c0_y) * (c1_x - c0_x).invert().unwrap(); + let intercept = c0_y - (slope * c0_x); + + // Calculate the inversions for 2 c_y (for each c) and all of the challenged generators + let mut inversions = vec![C::F::ZERO; 3 + (generators.len() * Parameters::ScalarBits::USIZE)]; + + // Needed for the left-hand side eval + { + inversions[0] = c0_y.double(); + inversions[1] = c1_y.double(); + inversions[2] = c2_y.double(); + } + + // Perform the inversions for the generators + for (i, generator) in generators.iter().enumerate() { + // Needed for the right-hand side eval + for (j, generator) in generator.0.iter().enumerate() { + // `DiscreteLog` has weights of `(mu - (G_i.y + (slope * G_i.x)))**-1` in its last line + inversions[3 + (i * Parameters::ScalarBits::USIZE) + j] = + intercept - (generator.1 - (slope * generator.0)); + } + } + for challenge_inversion in &inversions { + // This should be unreachable barring negligible probability + if challenge_inversion.is_zero().into() { + panic!("trying to invert 0"); + } + } + let mut scratch = vec![C::F::ZERO; inversions.len()]; + let _ = BatchInverter::invert_with_external_scratch(&mut inversions, &mut scratch); + + let mut inversions = inversions.into_iter(); + let inv_c0_two_y = inversions.next().unwrap(); + let inv_c1_two_y = inversions.next().unwrap(); + let inv_c2_two_y = inversions.next().unwrap(); + + let c0 = ChallengePoint::new(curve, slope, c0_x, c0_y, inv_c0_two_y); + let c1 = 
ChallengePoint::new(curve, slope, c1_x, c1_y, inv_c1_two_y); + let c2 = ChallengePoint::new(curve, slope, c2_x, c2_y, inv_c2_two_y); + + // Fill in the inverted values + let mut challenged_generators = Vec::with_capacity(generators.len()); + for _ in 0 .. generators.len() { + let mut challenged_generator = GenericArray::default(); + for i in 0 .. Parameters::ScalarBits::USIZE { + challenged_generator[i] = inversions.next().unwrap(); + } + challenged_generators.push(ChallengedGenerator(challenged_generator)); + } + + (DiscreteLogChallenge { c0, c1, c2, slope, intercept }, challenged_generators) + } + + // `DiscreteLog` from `Discrete Log Proof` + fn discrete_log( + &mut self, + curve: &CurveSpec, + point: PointWithDlog, + challenge: &DiscreteLogChallenge, + challenged_generator: &ChallengedGenerator, + ) -> OnCurve { + let PointWithDlog { divisor, dlog, point } = point; + + // Ensure this is being safely called + let arg_iter = [point.0, point.1, divisor.y, divisor.zero]; + let arg_iter = arg_iter.iter().chain(divisor.yx.iter()); + let arg_iter = arg_iter.chain(divisor.x_from_power_of_2.iter()); + let arg_iter = arg_iter.chain(dlog.iter()); + for variable in arg_iter { + debug_assert!( + matches!(variable, Variable::CG { .. } | Variable::CH { .. } | Variable::V(_)), + "discrete log proofs requires all arguments belong to commitments", + ); + } + + // Check the point is on curve + let point = self.on_curve(curve, point); + + // The challenge has already been sampled so those lines aren't necessary + + // lhs from the paper, evaluating the divisor + let lhs_eval = LinComb::from(divisor_challenge_eval(self, &divisor, &challenge.c0)) + + &LinComb::from(divisor_challenge_eval(self, &divisor, &challenge.c1)) + + &LinComb::from(divisor_challenge_eval(self, &divisor, &challenge.c2)); + + // Interpolate the doublings of the generator + let mut rhs_eval = LinComb::empty(); + // We call this `bit` yet it's not constrained to being a bit + // It's presumed to be yet may be malleated + for (bit, weight) in dlog.into_iter().zip(&challenged_generator.0) { + rhs_eval = rhs_eval.term(*weight, bit); + } + + // Interpolate the output point + // intercept - (y - (slope * x)) + // intercept - y + (slope * x) + // -y + (slope * x) + intercept + // EXCEPT the output point we're proving the discrete log for isn't the one interpolated + // Its negative is, so -y becomes y + // y + (slope * x) + intercept + let output_interpolation = LinComb::empty() + .constant(challenge.intercept) + .term(C::F::ONE, point.y) + .term(challenge.slope, point.x); + let output_interpolation_eval = self.eval(&output_interpolation); + let (_output_interpolation, inverse) = + self.inverse(Some(output_interpolation), output_interpolation_eval); + rhs_eval = rhs_eval.term(C::F::ONE, inverse); + + self.equality(lhs_eval, &rhs_eval); + + point + } +} diff --git a/crypto/evrf/ec-gadgets/src/lib.rs b/crypto/evrf/ec-gadgets/src/lib.rs new file mode 100644 index 000000000..463eedd66 --- /dev/null +++ b/crypto/evrf/ec-gadgets/src/lib.rs @@ -0,0 +1,130 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use generic_array::{typenum::Unsigned, ArrayLength, GenericArray}; + +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use generalized_bulletproofs_circuit_abstraction::*; + +mod dlog; +pub use dlog::*; + +/// The specification of a short Weierstrass curve over the field `F`. 
+/// +/// The short Weierstrass curve is defined via the formula `y**2 = x**3 + a*x + b`. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct CurveSpec { + /// The `a` constant in the curve formula. + pub a: F, + /// The `b` constant in the curve formula. + pub b: F, +} + +/// A struct for a point on a towered curve which has been confirmed to be on-curve. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct OnCurve { + pub(crate) x: Variable, + pub(crate) y: Variable, +} + +impl OnCurve { + /// The variable for the x-coordinate. + pub fn x(&self) -> Variable { + self.x + } + /// The variable for the y-coordinate. + pub fn y(&self) -> Variable { + self.y + } +} + +/// Gadgets for working with points on an elliptic curve defined over the scalar field of the curve +/// of the Bulletproof. +pub trait EcGadgets { + /// Constrain an x and y coordinate as being on the specified curve. + /// + /// The specified curve is defined over the scalar field of the curve this proof is performed + /// over, offering efficient arithmetic. + /// + /// May panic if the prover and the point is not actually on-curve. + fn on_curve(&mut self, curve: &CurveSpec, point: (Variable, Variable)) -> OnCurve; + + /// Perform incomplete addition for a fixed point and an on-curve point. + /// + /// `a` is the x and y coordinates of the fixed point, assumed to be on-curve. + /// + /// `b` is a point prior checked to be on-curve. + /// + /// `c` is a point prior checked to be on-curve, constrained to be the sum of `a` and `b`. + /// + /// `a` and `b` are checked to have distinct x coordinates. + /// + /// This function may panic if `a` is malformed or if the prover and `c` is not actually the sum + /// of `a` and `b`. + fn incomplete_add_fixed(&mut self, a: (C::F, C::F), b: OnCurve, c: OnCurve) -> OnCurve; +} + +impl EcGadgets for Circuit { + fn on_curve(&mut self, curve: &CurveSpec, (x, y): (Variable, Variable)) -> OnCurve { + let x_eval = self.eval(&LinComb::from(x)); + let (_x, _x_2, x2) = + self.mul(Some(LinComb::from(x)), Some(LinComb::from(x)), x_eval.map(|x| (x, x))); + let (_x, _x_2, x3) = + self.mul(Some(LinComb::from(x2)), Some(LinComb::from(x)), x_eval.map(|x| (x * x, x))); + let expected_y2 = LinComb::from(x3).term(curve.a, x).constant(curve.b); + + let y_eval = self.eval(&LinComb::from(y)); + let (_y, _y_2, y2) = + self.mul(Some(LinComb::from(y)), Some(LinComb::from(y)), y_eval.map(|y| (y, y))); + + self.equality(y2.into(), &expected_y2); + + OnCurve { x, y } + } + + fn incomplete_add_fixed(&mut self, a: (C::F, C::F), b: OnCurve, c: OnCurve) -> OnCurve { + // Check b.x != a.0 + { + let bx_lincomb = LinComb::from(b.x); + let bx_eval = self.eval(&bx_lincomb); + self.inequality(bx_lincomb, &LinComb::empty().constant(a.0), bx_eval.map(|bx| (bx, a.0))); + } + + let (x0, y0) = (a.0, a.1); + let (x1, y1) = (b.x, b.y); + let (x2, y2) = (c.x, c.y); + + let slope_eval = self.eval(&LinComb::from(x1)).map(|x1| { + let y1 = self.eval(&LinComb::from(b.y)).unwrap(); + + (y1 - y0) * (x1 - x0).invert().unwrap() + }); + + // slope * (x1 - x0) = y1 - y0 + let x1_minus_x0 = LinComb::from(x1).constant(-x0); + let x1_minus_x0_eval = self.eval(&x1_minus_x0); + let (slope, _r, o) = + self.mul(None, Some(x1_minus_x0), slope_eval.map(|slope| (slope, x1_minus_x0_eval.unwrap()))); + self.equality(LinComb::from(o), &LinComb::from(y1).constant(-y0)); + + // slope * (x2 - x0) = -y2 - y0 + let x2_minus_x0 = LinComb::from(x2).constant(-x0); + let x2_minus_x0_eval = self.eval(&x2_minus_x0); + let (_slope, _x2_minus_x0, o) = self.mul( 
+ Some(slope.into()), + Some(x2_minus_x0), + slope_eval.map(|slope| (slope, x2_minus_x0_eval.unwrap())), + ); + self.equality(o.into(), &LinComb::empty().term(-C::F::ONE, y2).constant(-y0)); + + // slope * slope = x0 + x1 + x2 + let (_slope, _slope_2, o) = + self.mul(Some(slope.into()), Some(slope.into()), slope_eval.map(|slope| (slope, slope))); + self.equality(o.into(), &LinComb::from(x1).term(C::F::ONE, x2).constant(x0)); + + OnCurve { x: x2, y: y2 } + } +} diff --git a/crypto/evrf/embedwards25519/Cargo.toml b/crypto/evrf/embedwards25519/Cargo.toml new file mode 100644 index 000000000..bbae482b1 --- /dev/null +++ b/crypto/evrf/embedwards25519/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "embedwards25519" +version = "0.1.0" +description = "A curve defined over the Ed25519 scalar field" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/embedwards25519" +authors = ["Luke Parker "] +keywords = ["curve25519", "ed25519", "ristretto255", "group"] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +rustversion = "1" +hex-literal = { version = "0.4", default-features = false } + +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] } +subtle = { version = "^2.4", default-features = false, features = ["std"] } + +generic-array = { version = "0.14", default-features = false } +crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] } + +dalek-ff-group = { path = "../../dalek-ff-group", version = "0.4", default-features = false } + +blake2 = { version = "0.10", default-features = false, features = ["std"] } +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] } +ec-divisors = { path = "../divisors" } +generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets" } + +[dev-dependencies] +hex = "0.4" + +rand_core = { version = "0.6", features = ["std"] } + +ff-group-tests = { path = "../../ff-group-tests" } diff --git a/crypto/evrf/embedwards25519/LICENSE b/crypto/evrf/embedwards25519/LICENSE new file mode 100644 index 000000000..91d893c11 --- /dev/null +++ b/crypto/evrf/embedwards25519/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
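As a sanity check on the constants this crate pins down (the Weierstrass `b` constant and generator coordinates appear in `point.rs` further below), the curve equation can be verified directly over the Ed25519 scalar field. A minimal sketch, not part of the diff, assuming only the constants quoted in it:

```rust
// Hedged sketch: the embedwards25519 generator (x = 1) should satisfy
// y^2 = x^3 - 3x + b over the Ed25519 scalar field, matching point.rs's recover_y
use ciphersuite::group::ff::{Field, PrimeField};
use dalek_ff_group::Scalar as FieldElement;

fn check_generator_on_curve() {
  let b = FieldElement::from_repr(hex_literal::hex!(
    "5f07603a853f20370b682036210d463e64903a23ea669d07ca26cfc13f594209"
  ))
  .unwrap();
  let x = FieldElement::ONE;
  let y = FieldElement::from_repr(hex_literal::hex!(
    "2e4118080a484a3dfbafe2199a0e36b7193581d676c0dadfa376b0265616020c"
  ))
  .unwrap();
  // recover_y in point.rs computes x^3 - 3x + b; the generator's y must square to it
  assert_eq!(y.square(), (x.square() * x) - (x.double() + x) + b);
}
```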
diff --git a/crypto/evrf/embedwards25519/README.md b/crypto/evrf/embedwards25519/README.md new file mode 100644 index 000000000..5f7f5e475 --- /dev/null +++ b/crypto/evrf/embedwards25519/README.md @@ -0,0 +1,21 @@ +# embedwards25519 + +A curve defined over the Ed25519 scalar field. + +This curve was found via +[tevador's script](https://gist.github.com/tevador/4524c2092178df08996487d4e272b096) +for finding curves (specifically, curve cycles), modified to search for curves +whose field is the Ed25519 scalar field (not the Ed25519 field). + +``` +p = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed +q = 0x0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b +D = -420435 +y^2 = x^3 - 3*x + 4188043517836764736459661287169077812555441231147410753119540549773825148767 +``` + +The embedding degree is `(q-1)/2`. + +This curve should not be used with single-coordinate ladders, and points should +always be represented in a compressed form (preventing receiving off-curve +points). diff --git a/crypto/evrf/embedwards25519/src/backend.rs b/crypto/evrf/embedwards25519/src/backend.rs new file mode 100644 index 000000000..304fa0bc5 --- /dev/null +++ b/crypto/evrf/embedwards25519/src/backend.rs @@ -0,0 +1,293 @@ +use zeroize::Zeroize; + +// Use black_box when possible +#[rustversion::since(1.66)] +use core::hint::black_box; +#[rustversion::before(1.66)] +fn black_box(val: T) -> T { + val +} + +pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 { + let bit_ref = black_box(bit_ref); + + let mut bit = black_box(*bit_ref); + let res = black_box(bit as u8); + bit.zeroize(); + debug_assert!((res | 1) == 1); + + bit_ref.zeroize(); + res +} + +macro_rules! math_op { + ( + $Value: ident, + $Other: ident, + $Op: ident, + $op_fn: ident, + $Assign: ident, + $assign_fn: ident, + $function: expr + ) => { + impl $Op<$Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl $Assign<$Other> for $Value { + fn $assign_fn(&mut self, other: $Other) { + self.0 = $function(self.0, other.0); + } + } + impl<'a> $Op<&'a $Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: &'a $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl<'a> $Assign<&'a $Other> for $Value { + fn $assign_fn(&mut self, other: &'a $Other) { + self.0 = $function(self.0, other.0); + } + } + }; +} + +macro_rules! from_wrapper { + ($wrapper: ident, $inner: ident, $uint: ident) => { + impl From<$uint> for $wrapper { + fn from(a: $uint) -> $wrapper { + Self(Residue::new(&$inner::from(a))) + } + } + }; +} + +macro_rules! field { + ( + $FieldName: ident, + $ResidueType: ident, + + $MODULUS_STR: ident, + $MODULUS: ident, + $WIDE_MODULUS: ident, + + $NUM_BITS: literal, + $MULTIPLICATIVE_GENERATOR: literal, + $S: literal, + $ROOT_OF_UNITY: literal, + $DELTA: literal, + ) => { + use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::{Sum, Product}, + }; + + use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable}; + use rand_core::RngCore; + + use crypto_bigint::{Integer, NonZero, Encoding, impl_modulus}; + + use ciphersuite::group::ff::{ + Field, PrimeField, FieldBits, PrimeFieldBits, helpers::sqrt_ratio_generic, + }; + + use $crate::backend::u8_from_bool; + + fn reduce(x: U512) -> U256 { + U256::from_le_slice(&x.rem(&NonZero::new($WIDE_MODULUS).unwrap()).to_le_bytes()[.. 
32]) + } + + impl ConstantTimeEq for $FieldName { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } + } + + impl ConditionallySelectable for $FieldName { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $FieldName(Residue::conditional_select(&a.0, &b.0, choice)) + } + } + + math_op!($FieldName, $FieldName, Add, add, AddAssign, add_assign, |x: $ResidueType, y| x + .add(&y)); + math_op!($FieldName, $FieldName, Sub, sub, SubAssign, sub_assign, |x: $ResidueType, y| x + .sub(&y)); + math_op!($FieldName, $FieldName, Mul, mul, MulAssign, mul_assign, |x: $ResidueType, y| x + .mul(&y)); + + from_wrapper!($FieldName, U256, u8); + from_wrapper!($FieldName, U256, u16); + from_wrapper!($FieldName, U256, u32); + from_wrapper!($FieldName, U256, u64); + from_wrapper!($FieldName, U256, u128); + + impl Neg for $FieldName { + type Output = $FieldName; + fn neg(self) -> $FieldName { + Self(self.0.neg()) + } + } + + impl<'a> Neg for &'a $FieldName { + type Output = $FieldName; + fn neg(self) -> Self::Output { + (*self).neg() + } + } + + impl $FieldName { + /// Perform an exponentation. + pub fn pow(&self, other: $FieldName) -> $FieldName { + let mut table = [Self(Residue::ONE); 16]; + table[1] = *self; + for i in 2 .. 16 { + table[i] = table[i - 1] * self; + } + + let mut res = Self(Residue::ONE); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 4 { + res *= res; + } + } + + let mut factor = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + factor = Self::conditional_select(&factor, &candidate, usize::from(bits).ct_eq(&j)); + } + res *= factor; + bits = 0; + } + } + res + } + } + + impl Field for $FieldName { + const ZERO: Self = Self(Residue::ZERO); + const ONE: Self = Self(Residue::ONE); + + fn random(mut rng: impl RngCore) -> Self { + let mut bytes = [0; 64]; + rng.fill_bytes(&mut bytes); + $FieldName(Residue::new(&reduce(U512::from_le_slice(bytes.as_ref())))) + } + + fn square(&self) -> Self { + Self(self.0.square()) + } + fn double(&self) -> Self { + *self + self + } + + fn invert(&self) -> CtOption { + let res = self.0.invert(); + CtOption::new(Self(res.0), res.1.into()) + } + + fn sqrt(&self) -> CtOption { + // (p + 1) // 4, as valid since p % 4 == 3 + let mod_plus_one_div_four = $MODULUS.saturating_add(&U256::ONE).wrapping_div(&(4u8.into())); + let res = self.pow(Self($ResidueType::new_checked(&mod_plus_one_div_four).unwrap())); + CtOption::new(res, res.square().ct_eq(self)) + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + sqrt_ratio_generic(num, div) + } + } + + impl PrimeField for $FieldName { + type Repr = [u8; 32]; + + const MODULUS: &'static str = $MODULUS_STR; + + const NUM_BITS: u32 = $NUM_BITS; + const CAPACITY: u32 = $NUM_BITS - 1; + + const TWO_INV: Self = $FieldName($ResidueType::new(&U256::from_u8(2)).invert().0); + + const MULTIPLICATIVE_GENERATOR: Self = + Self(Residue::new(&U256::from_u8($MULTIPLICATIVE_GENERATOR))); + const S: u32 = $S; + + const ROOT_OF_UNITY: Self = $FieldName(Residue::new(&U256::from_be_hex($ROOT_OF_UNITY))); + const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0); + + const DELTA: Self = $FieldName(Residue::new(&U256::from_be_hex($DELTA))); + + fn from_repr(bytes: Self::Repr) -> CtOption { + let res = U256::from_le_slice(&bytes); + 
CtOption::new($FieldName(Residue::new(&res)), res.ct_lt(&$MODULUS)) + } + fn to_repr(&self) -> Self::Repr { + let mut repr = [0; 32]; + repr.copy_from_slice(&self.0.retrieve().to_le_bytes()); + repr + } + + fn is_odd(&self) -> Choice { + self.0.retrieve().is_odd() + } + } + + impl PrimeFieldBits for $FieldName { + type ReprBits = [u8; 32]; + + fn to_le_bits(&self) -> FieldBits { + self.to_repr().into() + } + + fn char_le_bits() -> FieldBits { + let mut repr = [0; 32]; + repr.copy_from_slice(&MODULUS.to_le_bytes()); + repr.into() + } + } + + impl Sum<$FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + let mut res = $FieldName::ZERO; + for item in iter { + res += item; + } + res + } + } + + impl<'a> Sum<&'a $FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + iter.cloned().sum() + } + } + + impl Product<$FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + let mut res = $FieldName::ONE; + for item in iter { + res *= item; + } + res + } + } + + impl<'a> Product<&'a $FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + iter.cloned().product() + } + } + }; +} diff --git a/crypto/evrf/embedwards25519/src/lib.rs b/crypto/evrf/embedwards25519/src/lib.rs new file mode 100644 index 000000000..858f4ada4 --- /dev/null +++ b/crypto/evrf/embedwards25519/src/lib.rs @@ -0,0 +1,47 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] + +use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2}; +use ciphersuite::group::{ff::PrimeField, Group}; + +#[macro_use] +mod backend; + +mod scalar; +pub use scalar::Scalar; + +pub use dalek_ff_group::Scalar as FieldElement; + +mod point; +pub use point::Point; + +/// Ciphersuite for Embedwards25519. +/// +/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition +/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as +/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. 
+#[derive(Clone, Copy, PartialEq, Eq, Debug, zeroize::Zeroize)] +pub struct Embedwards25519; +impl ciphersuite::Ciphersuite for Embedwards25519 { + type F = Scalar; + type G = Point; + type H = blake2::Blake2b512; + + const ID: &'static [u8] = b"embedwards25519"; + + fn generator() -> Self::G { + Point::generator() + } + + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { + use blake2::Digest; + Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_slice().try_into().unwrap()) + } +} + +impl generalized_bulletproofs_ec_gadgets::DiscreteLogParameters for Embedwards25519 { + type ScalarBits = U<{ Scalar::NUM_BITS as usize }>; + type XCoefficients = Quot, U2>; + type XCoefficientsMinusOne = Diff; + type YxCoefficients = Diff, U1>, U2>, U2>; +} diff --git a/crypto/evrf/embedwards25519/src/point.rs b/crypto/evrf/embedwards25519/src/point.rs new file mode 100644 index 000000000..9d24e88ae --- /dev/null +++ b/crypto/evrf/embedwards25519/src/point.rs @@ -0,0 +1,415 @@ +use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::Sum, +}; + +use rand_core::RngCore; + +use zeroize::Zeroize; +use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable}; + +use ciphersuite::group::{ + ff::{Field, PrimeField, PrimeFieldBits}, + Group, GroupEncoding, + prime::PrimeGroup, +}; + +use crate::{backend::u8_from_bool, Scalar, FieldElement}; + +#[allow(non_snake_case)] +fn B() -> FieldElement { + FieldElement::from_repr(hex_literal::hex!( + "5f07603a853f20370b682036210d463e64903a23ea669d07ca26cfc13f594209" + )) + .unwrap() +} + +fn recover_y(x: FieldElement) -> CtOption { + // x**3 - 3 * x + B + ((x.square() * x) - (x.double() + x) + B()).sqrt() +} + +/// Point. +#[derive(Clone, Copy, Debug, Zeroize)] +#[repr(C)] +pub struct Point { + x: FieldElement, // / Z + y: FieldElement, // / Z + z: FieldElement, +} + +impl ConstantTimeEq for Point { + fn ct_eq(&self, other: &Self) -> Choice { + let x1 = self.x * other.z; + let x2 = other.x * self.z; + + let y1 = self.y * other.z; + let y2 = other.y * self.z; + + (self.x.is_zero() & other.x.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2)) + } +} + +impl PartialEq for Point { + fn eq(&self, other: &Point) -> bool { + self.ct_eq(other).into() + } +} + +impl Eq for Point {} + +impl ConditionallySelectable for Point { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Point { + x: FieldElement::conditional_select(&a.x, &b.x, choice), + y: FieldElement::conditional_select(&a.y, &b.y, choice), + z: FieldElement::conditional_select(&a.z, &b.z, choice), + } + } +} + +impl Add for Point { + type Output = Point; + #[allow(non_snake_case)] + fn add(self, other: Self) -> Self { + // add-2015-rcb + + let a = -FieldElement::from(3u64); + let B = B(); + let b3 = B + B + B; + + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + let X2 = other.x; + let Y2 = other.y; + let Z2 = other.z; + + let t0 = X1 * X2; + let t1 = Y1 * Y2; + let t2 = Z1 * Z2; + let t3 = X1 + Y1; + let t4 = X2 + Y2; + let t3 = t3 * t4; + let t4 = t0 + t1; + let t3 = t3 - t4; + let t4 = X1 + Z1; + let t5 = X2 + Z2; + let t4 = t4 * t5; + let t5 = t0 + t2; + let t4 = t4 - t5; + let t5 = Y1 + Z1; + let X3 = Y2 + Z2; + let t5 = t5 * X3; + let X3 = t1 + t2; + let t5 = t5 - X3; + let Z3 = a * t4; + let X3 = b3 * t2; + let Z3 = X3 + Z3; + let X3 = t1 - Z3; + let Z3 = t1 + Z3; + let Y3 = X3 * Z3; + let t1 = t0 + t0; + let t1 = t1 + t0; + let t2 = a * t2; + let t4 = b3 * t4; + let t1 = t1 + t2; + let t2 = t0 - t2; + let t2 = a * t2; + let t4 = t4 + t2; + 
let t0 = t1 * t4; + let Y3 = Y3 + t0; + let t0 = t5 * t4; + let X3 = t3 * X3; + let X3 = X3 - t0; + let t0 = t3 * t1; + let Z3 = t5 * Z3; + let Z3 = Z3 + t0; + Point { x: X3, y: Y3, z: Z3 } + } +} + +impl AddAssign for Point { + fn add_assign(&mut self, other: Point) { + *self = *self + other; + } +} + +impl Add<&Point> for Point { + type Output = Point; + fn add(self, other: &Point) -> Point { + self + *other + } +} + +impl AddAssign<&Point> for Point { + fn add_assign(&mut self, other: &Point) { + *self += *other; + } +} + +impl Neg for Point { + type Output = Point; + fn neg(self) -> Self { + Point { x: self.x, y: -self.y, z: self.z } + } +} + +impl Sub for Point { + type Output = Point; + #[allow(clippy::suspicious_arithmetic_impl)] + fn sub(self, other: Self) -> Self { + self + other.neg() + } +} + +impl SubAssign for Point { + fn sub_assign(&mut self, other: Point) { + *self = *self - other; + } +} + +impl Sub<&Point> for Point { + type Output = Point; + fn sub(self, other: &Point) -> Point { + self - *other + } +} + +impl SubAssign<&Point> for Point { + fn sub_assign(&mut self, other: &Point) { + *self -= *other; + } +} + +impl Group for Point { + type Scalar = Scalar; + fn random(mut rng: impl RngCore) -> Self { + loop { + let mut bytes = [0; 32]; + rng.fill_bytes(bytes.as_mut()); + let opt = Self::from_bytes(&bytes); + if opt.is_some().into() { + return opt.unwrap(); + } + } + } + fn identity() -> Self { + Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ZERO } + } + fn generator() -> Self { + Point { + x: FieldElement::from_repr(hex_literal::hex!( + "0100000000000000000000000000000000000000000000000000000000000000" + )) + .unwrap(), + y: FieldElement::from_repr(hex_literal::hex!( + "2e4118080a484a3dfbafe2199a0e36b7193581d676c0dadfa376b0265616020c" + )) + .unwrap(), + z: FieldElement::ONE, + } + } + fn is_identity(&self) -> Choice { + self.z.ct_eq(&FieldElement::ZERO) + } + #[allow(non_snake_case)] + fn double(&self) -> Self { + // dbl-2007-bl-2 + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + + let w = (X1 - Z1) * (X1 + Z1); + let w = w.double() + w; + let s = (Y1 * Z1).double(); + let ss = s.square(); + let sss = s * ss; + let R = Y1 * s; + let RR = R.square(); + let B_ = (X1 * R).double(); + let h = w.square() - B_.double(); + let X3 = h * s; + let Y3 = w * (B_ - h) - RR.double(); + let Z3 = sss; + + let res = Self { x: X3, y: Y3, z: Z3 }; + // If self is identity, res will not be well-formed + // Accordingly, we return self if self was the identity + Self::conditional_select(&res, self, self.is_identity()) + } +} + +impl Sum for Point { + fn sum>(iter: I) -> Point { + let mut res = Self::identity(); + for i in iter { + res += i; + } + res + } +} + +impl<'a> Sum<&'a Point> for Point { + fn sum>(iter: I) -> Point { + Point::sum(iter.cloned()) + } +} + +impl Mul for Point { + type Output = Point; + fn mul(self, mut other: Scalar) -> Point { + // Precompute the optimal amount that's a multiple of 2 + let mut table = [Point::identity(); 16]; + table[1] = self; + for i in 2 .. 16 { + table[i] = table[i - 1] + self; + } + + let mut res = Self::identity(); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 
4 { + res = res.double(); + } + } + + let mut term = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + term = Self::conditional_select(&term, candidate, usize::from(bits).ct_eq(&j)); + } + res += term; + bits = 0; + } + } + other.zeroize(); + res + } +} + +impl MulAssign for Point { + fn mul_assign(&mut self, other: Scalar) { + *self = *self * other; + } +} + +impl Mul<&Scalar> for Point { + type Output = Point; + fn mul(self, other: &Scalar) -> Point { + self * *other + } +} + +impl MulAssign<&Scalar> for Point { + fn mul_assign(&mut self, other: &Scalar) { + *self *= *other; + } +} + +impl GroupEncoding for Point { + type Repr = [u8; 32]; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + // Extract and clear the sign bit + let mut bytes = *bytes; + let sign = Choice::from(bytes[31] >> 7); + bytes[31] &= u8::MAX >> 1; + + // Parse x, recover y + FieldElement::from_repr(bytes).and_then(|x| { + let is_identity = x.is_zero(); + + let y = recover_y(x).map(|mut y| { + y = <_>::conditional_select(&y, &-y, y.is_odd().ct_eq(&!sign)); + y + }); + + // If this the identity, set y to 1 + let y = + CtOption::conditional_select(&y, &CtOption::new(FieldElement::ONE, 1.into()), is_identity); + // Create the point if we have a y solution + let point = y.map(|y| Point { x, y, z: FieldElement::ONE }); + + let not_negative_zero = !(is_identity & sign); + // Only return the point if it isn't -0 + CtOption::conditional_select( + &CtOption::new(Point::identity(), 0.into()), + &point, + not_negative_zero, + ) + }) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + Point::from_bytes(bytes) + } + + fn to_bytes(&self) -> Self::Repr { + let Some(z) = Option::::from(self.z.invert()) else { + return [0; 32]; + }; + let x = self.x * z; + let y = self.y * z; + + let mut res = [0; 32]; + res.as_mut().copy_from_slice(&x.to_repr()); + + // The following conditional select normalizes the sign to 0 when x is 0 + let y_sign = u8::conditional_select(&y.is_odd().unwrap_u8(), &0, x.ct_eq(&FieldElement::ZERO)); + res[31] |= y_sign << 7; + res + } +} + +impl PrimeGroup for Point {} + +impl ec_divisors::DivisorCurve for Point { + type FieldElement = FieldElement; + + fn a() -> Self::FieldElement { + -FieldElement::from(3u64) + } + fn b() -> Self::FieldElement { + B() + } + + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + let z: Self::FieldElement = Option::from(point.z.invert())?; + Some((point.x * z, point.y * z)) + } +} + +#[test] +fn test_curve() { + ff_group_tests::group::test_prime_group_bits::<_, Point>(&mut rand_core::OsRng); +} + +#[test] +fn generator() { + assert_eq!( + Point::generator(), + Point::from_bytes(&hex_literal::hex!( + "0100000000000000000000000000000000000000000000000000000000000000" + )) + .unwrap() + ); +} + +#[test] +fn zero_x_is_invalid() { + assert!(Option::::from(recover_y(FieldElement::ZERO)).is_none()); +} + +// Checks random won't infinitely loop +#[test] +fn random() { + Point::random(&mut rand_core::OsRng); +} diff --git a/crypto/evrf/embedwards25519/src/scalar.rs b/crypto/evrf/embedwards25519/src/scalar.rs new file mode 100644 index 000000000..f2d6e61f2 --- /dev/null +++ b/crypto/evrf/embedwards25519/src/scalar.rs @@ -0,0 +1,52 @@ +use zeroize::{DefaultIsZeroes, Zeroize}; + +use crypto_bigint::{ + U256, U512, + modular::constant_mod::{ResidueParams, Residue}, +}; + +const MODULUS_STR: &str = "0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b"; + +impl_modulus!(EmbedwardsQ, U256, MODULUS_STR); 
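// `impl_modulus!` is from crypto-bigint: it generates the `EmbedwardsQ` type implementing
// `ResidueParams` for the modulus above. `Residue` values parameterized by it (the alias
// below) are integers kept in Montgomery form, giving constant-time arithmetic modulo `q`.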
+type ResidueType = Residue<EmbedwardsQ, { EmbedwardsQ::LIMBS }>; + +/// The Scalar field of Embedwards25519. +/// +/// This is the field of prime order `q` documented in the README, distinct from the Ed25519 +/// scalar field the curve itself is defined over. +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +#[repr(C)] +pub struct Scalar(pub(crate) ResidueType); + +impl DefaultIsZeroes for Scalar {} + +pub(crate) const MODULUS: U256 = U256::from_be_hex(MODULUS_STR); + +const WIDE_MODULUS: U512 = U512::from_be_hex(concat!( + "0000000000000000000000000000000000000000000000000000000000000000", + "0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b", +)); + +field!( + Scalar, + ResidueType, + MODULUS_STR, + MODULUS, + WIDE_MODULUS, + 252, + 10, + 1, + "0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96a", + "0000000000000000000000000000000000000000000000000000000000000064", +); + +impl Scalar { + /// Perform a wide reduction, yielding a non-biased Scalar field element from uniform bytes. + pub fn wide_reduce(bytes: [u8; 64]) -> Scalar { + Scalar(Residue::new(&reduce(U512::from_le_slice(bytes.as_ref())))) + } +} + +#[test] +fn test_scalar_field() { + ff_group_tests::prime_field::test_prime_field_bits::<_, Scalar>(&mut rand_core::OsRng); +} diff --git a/crypto/evrf/generalized-bulletproofs/Cargo.toml b/crypto/evrf/generalized-bulletproofs/Cargo.toml new file mode 100644 index 000000000..9dfc95a53 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "generalized-bulletproofs" +version = "0.1.0" +description = "Generalized Bulletproofs" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/generalized-bulletproofs" +authors = ["Luke Parker "] +keywords = ["ciphersuite", "ff", "group"] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] } + +blake2 = { version = "0.10", default-features = false, features = ["std"] } + +multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] } +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] } + +[dev-dependencies] +rand_core = { version = "0.6", features = ["getrandom"] } + +transcript = { package = "flexible-transcript", path = "../../transcript", features = ["recommended"] } + +ciphersuite = { path = "../../ciphersuite", features = ["ristretto"] } + +[features] +tests = [] diff --git a/crypto/evrf/generalized-bulletproofs/LICENSE b/crypto/evrf/generalized-bulletproofs/LICENSE new file mode 100644 index 000000000..ad3c2fd59 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/generalized-bulletproofs/README.md b/crypto/evrf/generalized-bulletproofs/README.md new file mode 100644 index 000000000..da588b8d4 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/README.md @@ -0,0 +1,6 @@ +# Generalized Bulletproofs + +An implementation of +[Generalized Bulletproofs](https://repo.getmonero.org/monero-project/ccs-proposals/uploads/a9baa50c38c6312efc0fea5c6a188bb9/gbp.pdf), +a variant of the Bulletproofs arithmetic circuit statement to support Pedersen +vector commitments. diff --git a/crypto/evrf/generalized-bulletproofs/src/arithmetic_circuit_proof.rs b/crypto/evrf/generalized-bulletproofs/src/arithmetic_circuit_proof.rs new file mode 100644 index 000000000..e0c6e4647 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/arithmetic_circuit_proof.rs @@ -0,0 +1,679 @@ +use rand_core::{RngCore, CryptoRng}; + +use zeroize::{Zeroize, ZeroizeOnDrop}; + +use multiexp::{multiexp, multiexp_vartime}; +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use crate::{ + ScalarVector, PointVector, ProofGenerators, PedersenCommitment, PedersenVectorCommitment, + BatchVerifier, + transcript::*, + lincomb::accumulate_vector, + inner_product::{IpError, IpStatement, IpWitness, P}, +}; +pub use crate::lincomb::{Variable, LinComb}; + +/// An Arithmetic Circuit Statement. +/// +/// Bulletproofs' constraints are of the form +/// `aL * aR = aO, WL * aL + WR * aR + WO * aO = WV * V + c`. +/// +/// Generalized Bulletproofs modifies this to +/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G + WCH * C_H = WV * V + c`. +/// +/// We implement the latter, yet represented (for simplicity) as +/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G + WCH * C_H + WV * V + c = 0`. +#[derive(Clone, Debug)] +pub struct ArithmeticCircuitStatement<'a, C: Ciphersuite> { + generators: ProofGenerators<'a, C>, + + constraints: Vec>, + C: PointVector, + V: PointVector, +} + +impl<'a, C: Ciphersuite> Zeroize for ArithmeticCircuitStatement<'a, C> { + fn zeroize(&mut self) { + self.constraints.zeroize(); + self.C.zeroize(); + self.V.zeroize(); + } +} + +/// The witness for an arithmetic circuit statement. +#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] +pub struct ArithmeticCircuitWitness { + aL: ScalarVector, + aR: ScalarVector, + aO: ScalarVector, + + c: Vec>, + v: Vec>, +} + +/// An error incurred during arithmetic circuit proof operations. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum AcError { + /// The vectors of scalars which are multiplied against each other were of different lengths. + DifferingLrLengths, + /// The matrices of constraints are of different lengths. + InconsistentAmountOfConstraints, + /// A constraint referred to a non-existent term. + ConstrainedNonExistentTerm, + /// A constraint referred to a non-existent commitment. + ConstrainedNonExistentCommitment, + /// There weren't enough generators to prove for this statement. + NotEnoughGenerators, + /// The witness was inconsistent to the statement. + /// + /// Sanity checks on the witness are always performed. 
If the library is compiled with debug + /// assertions on, the satisfaction of all constraints and the validity of the commitments are + /// additionally checked. + InconsistentWitness, + /// There was an error from the inner-product proof. + Ip(IpError), + /// The proof wasn't complete and the necessary values could not be read from the transcript. + IncompleteProof, +} + +impl<C: Ciphersuite> ArithmeticCircuitWitness<C> { + /// Constructs a new witness instance. + pub fn new( + aL: ScalarVector<C::F>, + aR: ScalarVector<C::F>, + c: Vec<PedersenVectorCommitment<C>>, + v: Vec<PedersenCommitment<C>>, + ) -> Result<Self, AcError> { + if aL.len() != aR.len() { + Err(AcError::DifferingLrLengths)?; + } + + // The Pedersen Vector Commitments don't have their variables' lengths checked as they aren't + // paired off with each other as aL, aR are + + // The PVC commit function ensures there's enough generators for their amount of terms + // If there aren't enough/the same generators when this is proven for, it'll trigger + // InconsistentWitness + + let aO = aL.clone() * &aR; + Ok(ArithmeticCircuitWitness { aL, aR, aO, c, v }) + } +} + +struct YzChallenges<C: Ciphersuite> { + y_inv: ScalarVector<C::F>, + z: ScalarVector<C::F>, +} + +impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> { + // The amount of multiplications performed. + fn n(&self) -> usize { + self.generators.len() + } + + // The amount of constraints. + fn q(&self) -> usize { + self.constraints.len() + } + + // The amount of Pedersen vector commitments. + fn c(&self) -> usize { + self.C.len() + } + + // The amount of Pedersen commitments. + fn m(&self) -> usize { + self.V.len() + } + + /// Create a new ArithmeticCircuitStatement for the specified relationship. + /// + /// The `LinComb`s passed as `constraints` will be bound to evaluate to 0. + /// + /// The constraints are not transcripted. They're expected to be deterministic from the context + /// and higher-level statement. If your constraints are variable, you MUST transcript them before + /// calling prove/verify. + /// + /// The commitments are expected to have been transcripted externally to this statement's + /// invocation. That's practically ensured by taking a `Commitments` struct here, which is only + /// obtainable via a transcript. + pub fn new( + generators: ProofGenerators<'a, C>, + constraints: Vec<LinComb<C::F>>, + commitments: Commitments<C>, + ) -> Result<Self, AcError> { + let Commitments { C, V } = commitments; + + for constraint in &constraints { + if Some(generators.len()) <= constraint.highest_a_index { + Err(AcError::ConstrainedNonExistentTerm)?; + } + if Some(C.len()) <= constraint.highest_c_index { + Err(AcError::ConstrainedNonExistentCommitment)?; + } + if Some(V.len()) <= constraint.highest_v_index { + Err(AcError::ConstrainedNonExistentCommitment)?; + } + } + + Ok(Self { generators, constraints, C, V }) + } + + fn yz_challenges(&self, y: C::F, z_1: C::F) -> YzChallenges<C> { + let y_inv = y.invert().unwrap(); + let y_inv = ScalarVector::powers(y_inv, self.n()); + + // Powers of z *starting with z**1* + // We could reuse powers and remove the first element, yet this is cheaper than the shift that + // would require + let q = self.q(); + let mut z = ScalarVector(Vec::with_capacity(q)); + z.0.push(z_1); + for _ in 1 .. q { + z.0.push(*z.0.last().unwrap() * z_1); + } + z.0.truncate(q); + + YzChallenges { y_inv, z } + } + + /// Prove for this statement/witness. 
+ pub fn prove( + self, + rng: &mut R, + transcript: &mut Transcript, + mut witness: ArithmeticCircuitWitness, + ) -> Result<(), AcError> { + let n = self.n(); + let c = self.c(); + let m = self.m(); + + // Check the witness length and pad it to the necessary power of two + if witness.aL.len() > n { + Err(AcError::NotEnoughGenerators)?; + } + while witness.aL.len() < n { + witness.aL.0.push(C::F::ZERO); + witness.aR.0.push(C::F::ZERO); + witness.aO.0.push(C::F::ZERO); + } + for c in &mut witness.c { + if c.g_values.len() > n { + Err(AcError::NotEnoughGenerators)?; + } + if c.h_values.len() > n { + Err(AcError::NotEnoughGenerators)?; + } + // The Pedersen vector commitments internally have n terms + while c.g_values.len() < n { + c.g_values.0.push(C::F::ZERO); + } + while c.h_values.len() < n { + c.h_values.0.push(C::F::ZERO); + } + } + + // Check the witness's consistency with the statement + if (c != witness.c.len()) || (m != witness.v.len()) { + Err(AcError::InconsistentWitness)?; + } + + #[cfg(debug_assertions)] + { + for (commitment, opening) in self.V.0.iter().zip(witness.v.iter()) { + if *commitment != opening.commit(self.generators.g(), self.generators.h()) { + Err(AcError::InconsistentWitness)?; + } + } + for (commitment, opening) in self.C.0.iter().zip(witness.c.iter()) { + if Some(*commitment) != + opening.commit( + self.generators.g_bold_slice(), + self.generators.h_bold_slice(), + self.generators.h(), + ) + { + Err(AcError::InconsistentWitness)?; + } + } + for constraint in &self.constraints { + let eval = + constraint + .WL + .iter() + .map(|(i, weight)| *weight * witness.aL[*i]) + .chain(constraint.WR.iter().map(|(i, weight)| *weight * witness.aR[*i])) + .chain(constraint.WO.iter().map(|(i, weight)| *weight * witness.aO[*i])) + .chain( + constraint.WCG.iter().zip(&witness.c).flat_map(|(weights, c)| { + weights.iter().map(|(j, weight)| *weight * c.g_values[*j]) + }), + ) + .chain( + constraint.WCH.iter().zip(&witness.c).flat_map(|(weights, c)| { + weights.iter().map(|(j, weight)| *weight * c.h_values[*j]) + }), + ) + .chain(constraint.WV.iter().map(|(i, weight)| *weight * witness.v[*i].value)) + .chain(core::iter::once(constraint.c)) + .sum::(); + + if eval != C::F::ZERO { + Err(AcError::InconsistentWitness)?; + } + } + } + + let alpha = C::F::random(&mut *rng); + let beta = C::F::random(&mut *rng); + let rho = C::F::random(&mut *rng); + + let AI = { + let alg = witness.aL.0.iter().enumerate().map(|(i, aL)| (*aL, self.generators.g_bold(i))); + let arh = witness.aR.0.iter().enumerate().map(|(i, aR)| (*aR, self.generators.h_bold(i))); + let ah = core::iter::once((alpha, self.generators.h())); + let mut AI_terms = alg.chain(arh).chain(ah).collect::>(); + let AI = multiexp(&AI_terms); + AI_terms.zeroize(); + AI + }; + let AO = { + let aog = witness.aO.0.iter().enumerate().map(|(i, aO)| (*aO, self.generators.g_bold(i))); + let bh = core::iter::once((beta, self.generators.h())); + let mut AO_terms = aog.chain(bh).collect::>(); + let AO = multiexp(&AO_terms); + AO_terms.zeroize(); + AO + }; + + let mut sL = ScalarVector(Vec::with_capacity(n)); + let mut sR = ScalarVector(Vec::with_capacity(n)); + for _ in 0 .. 
n { + sL.0.push(C::F::random(&mut *rng)); + sR.0.push(C::F::random(&mut *rng)); + } + let S = { + let slg = sL.0.iter().enumerate().map(|(i, sL)| (*sL, self.generators.g_bold(i))); + let srh = sR.0.iter().enumerate().map(|(i, sR)| (*sR, self.generators.h_bold(i))); + let rh = core::iter::once((rho, self.generators.h())); + let mut S_terms = slg.chain(srh).chain(rh).collect::>(); + let S = multiexp(&S_terms); + S_terms.zeroize(); + S + }; + + transcript.push_point(AI); + transcript.push_point(AO); + transcript.push_point(S); + let y = transcript.challenge(); + let z = transcript.challenge(); + let YzChallenges { y_inv, z } = self.yz_challenges(y, z); + let y = ScalarVector::powers(y, n); + + // t is a n'-term polynomial + // While Bulletproofs discuss it as a 6-term polynomial, Generalized Bulletproofs re-defines it + // as `2(n' + 1)`-term, where `n'` is `2 (c + 1)`. + // When `c = 0`, `n' = 2`, and t is `6` (which lines up with Bulletproofs having a 6-term + // polynomial). + + // ni = n' + let ni = 2 * (c + 1); + // These indexes are from the Generalized Bulletproofs paper + #[rustfmt::skip] + let ilr = ni / 2; // 1 if c = 0 + #[rustfmt::skip] + let io = ni; // 2 if c = 0 + #[rustfmt::skip] + let is = ni + 1; // 3 if c = 0 + #[rustfmt::skip] + let jlr = ni / 2; // 1 if c = 0 + #[rustfmt::skip] + let jo = 0; // 0 if c = 0 + #[rustfmt::skip] + let js = ni + 1; // 3 if c = 0 + + // If c = 0, these indexes perfectly align with the stated powers of X from the Bulletproofs + // paper for the following coefficients + + // Declare the l and r polynomials, assigning the traditional coefficients to their positions + let mut l = vec![]; + let mut r = vec![]; + for _ in 0 .. (is + 1) { + l.push(ScalarVector::new(0)); + r.push(ScalarVector::new(0)); + } + + let mut l_weights = ScalarVector::new(n); + let mut r_weights = ScalarVector::new(n); + let mut o_weights = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + accumulate_vector(&mut l_weights, &constraint.WL, *z); + accumulate_vector(&mut r_weights, &constraint.WR, *z); + accumulate_vector(&mut o_weights, &constraint.WO, *z); + } + + l[ilr] = (r_weights * &y_inv) + &witness.aL; + l[io] = witness.aO.clone(); + l[is] = sL; + r[jlr] = l_weights + &(witness.aR.clone() * &y); + r[jo] = o_weights - &y; + r[js] = sR * &y; + + // Pad as expected + for l in &mut l { + debug_assert!((l.len() == 0) || (l.len() == n)); + if l.len() == 0 { + *l = ScalarVector::new(n); + } + } + for r in &mut r { + debug_assert!((r.len() == 0) || (r.len() == n)); + if r.len() == 0 { + *r = ScalarVector::new(n); + } + } + + // We now fill in the vector commitments + // We use unused coefficients of l increasing from 0 (skipping ilr), and unused coefficients of + // r decreasing from n' (skipping jlr) + + let mut cg_weights = Vec::with_capacity(witness.c.len()); + let mut ch_weights = Vec::with_capacity(witness.c.len()); + for i in 0 .. 
witness.c.len() { + let mut cg = ScalarVector::new(n); + let mut ch = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + if let Some(WCG) = constraint.WCG.get(i) { + accumulate_vector(&mut cg, WCG, *z); + } + if let Some(WCH) = constraint.WCH.get(i) { + accumulate_vector(&mut ch, WCH, *z); + } + } + cg_weights.push(cg); + ch_weights.push(ch); + } + + for (i, (c, (cg_weights, ch_weights))) in + witness.c.iter().zip(cg_weights.into_iter().zip(ch_weights)).enumerate() + { + let i = i + 1; + let j = ni - i; + + l[i] = c.g_values.clone(); + l[j] = ch_weights * &y_inv; + r[j] = cg_weights; + r[i] = (c.h_values.clone() * &y) + &r[i]; + } + + // Multiply them to obtain t + let mut t = ScalarVector::new(1 + (2 * (l.len() - 1))); + for (i, l) in l.iter().enumerate() { + for (j, r) in r.iter().enumerate() { + let new_coeff = i + j; + t[new_coeff] += l.inner_product(r.0.iter()); + } + } + + // Per Bulletproofs, calculate masks tau for each t where (i > 0) && (i != 2) + // Per Generalized Bulletproofs, calculate masks tau for each t where i != n' + // With Bulletproofs, t[0] is zero, hence its omission, yet Generalized Bulletproofs uses it + let mut tau_before_ni = vec![]; + for _ in 0 .. ni { + tau_before_ni.push(C::F::random(&mut *rng)); + } + let mut tau_after_ni = vec![]; + for _ in 0 .. t.0[(ni + 1) ..].len() { + tau_after_ni.push(C::F::random(&mut *rng)); + } + // Calculate commitments to the coefficients of t, blinded by tau + debug_assert_eq!(t.0[0 .. ni].len(), tau_before_ni.len()); + for (t, tau) in t.0[0 .. ni].iter().zip(tau_before_ni.iter()) { + transcript.push_point(multiexp(&[(*t, self.generators.g()), (*tau, self.generators.h())])); + } + debug_assert_eq!(t.0[(ni + 1) ..].len(), tau_after_ni.len()); + for (t, tau) in t.0[(ni + 1) ..].iter().zip(tau_after_ni.iter()) { + transcript.push_point(multiexp(&[(*t, self.generators.g()), (*tau, self.generators.h())])); + } + + let x: ScalarVector = ScalarVector::powers(transcript.challenge(), t.len()); + + let poly_eval = |poly: &[ScalarVector], x: &ScalarVector<_>| -> ScalarVector<_> { + let mut res = ScalarVector::::new(poly[0].0.len()); + for (i, coeff) in poly.iter().enumerate() { + res = res + &(coeff.clone() * x[i]); + } + res + }; + let l = poly_eval(&l, &x); + let r = poly_eval(&r, &x); + + let t_caret = l.inner_product(r.0.iter()); + + let mut V_weights = ScalarVector::new(self.V.len()); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + // We use `-z`, not `z`, as we write our constraint as `... 
+ WV V = 0` not `= WV V + ..` + // This means we need to subtract `WV V` from both sides, which we accomplish here + accumulate_vector(&mut V_weights, &constraint.WV, -*z); + } + + let tau_x = { + let mut tau_x_poly = vec![]; + tau_x_poly.extend(tau_before_ni); + tau_x_poly.push(V_weights.inner_product(witness.v.iter().map(|v| &v.mask))); + tau_x_poly.extend(tau_after_ni); + + let mut tau_x = C::F::ZERO; + for (i, coeff) in tau_x_poly.into_iter().enumerate() { + tau_x += coeff * x[i]; + } + tau_x + }; + + // Calculate u for the powers of x variable to ilr/io/is + let u = { + // Calculate the first part of u + let mut u = (alpha * x[ilr]) + (beta * x[io]) + (rho * x[is]); + + // Incorporate the commitment masks multiplied by the associated power of x + for (i, commitment) in witness.c.iter().enumerate() { + let i = i + 1; + u += x[i] * commitment.mask; + } + u + }; + + // Use the Inner-Product argument to prove for this + // P = t_caret * g + l * g_bold + r * (y_inv * h_bold) + + let mut P_terms = Vec::with_capacity(1 + (2 * self.generators.len())); + debug_assert_eq!(l.len(), r.len()); + for (i, (l, r)) in l.0.iter().zip(r.0.iter()).enumerate() { + P_terms.push((*l, self.generators.g_bold(i))); + P_terms.push((y_inv[i] * r, self.generators.h_bold(i))); + } + + // Protocol 1, inlined, since our IpStatement is for Protocol 2 + transcript.push_scalar(tau_x); + transcript.push_scalar(u); + transcript.push_scalar(t_caret); + let ip_x = transcript.challenge(); + P_terms.push((ip_x * t_caret, self.generators.g())); + IpStatement::new( + self.generators, + y_inv, + ip_x, + // Safe since IpStatement isn't a ZK proof + P::Prover(multiexp_vartime(&P_terms)), + ) + .unwrap() + .prove(transcript, IpWitness::new(l, r).unwrap()) + .map_err(AcError::Ip) + } + + /// Verify a proof for this statement. + pub fn verify( + self, + rng: &mut R, + verifier: &mut BatchVerifier, + transcript: &mut VerifierTranscript, + ) -> Result<(), AcError> { + let n = self.n(); + let c = self.c(); + + let ni = 2 * (c + 1); + + let ilr = ni / 2; + let io = ni; + let is = ni + 1; + let jlr = ni / 2; + + let l_r_poly_len = 1 + ni + 1; + let t_poly_len = (2 * l_r_poly_len) - 1; + + let AI = transcript.read_point::().map_err(|_| AcError::IncompleteProof)?; + let AO = transcript.read_point::().map_err(|_| AcError::IncompleteProof)?; + let S = transcript.read_point::().map_err(|_| AcError::IncompleteProof)?; + let y = transcript.challenge(); + let z = transcript.challenge(); + let YzChallenges { y_inv, z } = self.yz_challenges(y, z); + + let mut l_weights = ScalarVector::new(n); + let mut r_weights = ScalarVector::new(n); + let mut o_weights = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + accumulate_vector(&mut l_weights, &constraint.WL, *z); + accumulate_vector(&mut r_weights, &constraint.WR, *z); + accumulate_vector(&mut o_weights, &constraint.WO, *z); + } + let r_weights = r_weights * &y_inv; + + let delta = r_weights.inner_product(l_weights.0.iter()); + + let mut T_before_ni = Vec::with_capacity(ni); + let mut T_after_ni = Vec::with_capacity(t_poly_len - ni - 1); + for _ in 0 .. ni { + T_before_ni.push(transcript.read_point::().map_err(|_| AcError::IncompleteProof)?); + } + for _ in 0 .. 
(t_poly_len - ni - 1) { + T_after_ni.push(transcript.read_point::().map_err(|_| AcError::IncompleteProof)?); + } + let x: ScalarVector = ScalarVector::powers(transcript.challenge(), t_poly_len); + + let tau_x = transcript.read_scalar::().map_err(|_| AcError::IncompleteProof)?; + let u = transcript.read_scalar::().map_err(|_| AcError::IncompleteProof)?; + let t_caret = transcript.read_scalar::().map_err(|_| AcError::IncompleteProof)?; + + // Lines 88-90, modified per Generalized Bulletproofs as needed w.r.t. t + { + let verifier_weight = C::F::random(&mut *rng); + // lhs of the equation, weighted to enable batch verification + verifier.g += t_caret * verifier_weight; + verifier.h += tau_x * verifier_weight; + + let mut V_weights = ScalarVector::new(self.V.len()); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + // We use `-z`, not `z`, as we write our constraint as `... + WV V = 0` not `= WV V + ..` + // This means we need to subtract `WV V` from both sides, which we accomplish here + accumulate_vector(&mut V_weights, &constraint.WV, -*z); + } + V_weights = V_weights * x[ni]; + + // rhs of the equation, negated to cause a sum to zero + // `delta - z...`, instead of `delta + z...`, is done for the same reason as in the above WV + // matrix transform + verifier.g -= verifier_weight * + x[ni] * + (delta - z.inner_product(self.constraints.iter().map(|constraint| &constraint.c))); + for pair in V_weights.0.into_iter().zip(self.V.0) { + verifier.additional.push((-verifier_weight * pair.0, pair.1)); + } + for (i, T) in T_before_ni.into_iter().enumerate() { + verifier.additional.push((-verifier_weight * x[i], T)); + } + for (i, T) in T_after_ni.into_iter().enumerate() { + verifier.additional.push((-verifier_weight * x[ni + 1 + i], T)); + } + } + + let verifier_weight = C::F::random(&mut *rng); + // Multiply `x` by `verifier_weight` as this effects `verifier_weight` onto most scalars and + // saves a notable amount of operations + let x = x * verifier_weight; + + // This following block effectively calculates P, within the multiexp + { + verifier.additional.push((x[ilr], AI)); + verifier.additional.push((x[io], AO)); + // h' ** y is equivalent to h as h' is h ** y_inv + let mut log2_n = 0; + while (1 << log2_n) != n { + log2_n += 1; + } + verifier.h_sum[log2_n] -= verifier_weight; + verifier.additional.push((x[is], S)); + + // Lines 85-87 calculate WL, WR, WO + // We preserve them in terms of g_bold and h_bold for a more efficient multiexp + let mut h_bold_scalars = l_weights * x[jlr]; + for (i, wr) in (r_weights * x[jlr]).0.into_iter().enumerate() { + verifier.g_bold[i] += wr; + } + // WO is weighted by x**jo where jo == 0, hence why we can ignore the x term + h_bold_scalars = h_bold_scalars + &(o_weights * verifier_weight); + + let mut cg_weights = Vec::with_capacity(self.C.len()); + let mut ch_weights = Vec::with_capacity(self.C.len()); + for i in 0 .. 
self.C.len() { + let mut cg = ScalarVector::new(n); + let mut ch = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + if let Some(WCG) = constraint.WCG.get(i) { + accumulate_vector(&mut cg, WCG, *z); + } + if let Some(WCH) = constraint.WCH.get(i) { + accumulate_vector(&mut ch, WCH, *z); + } + } + cg_weights.push(cg); + ch_weights.push(ch); + } + + // Push the terms for C, which increment from 0, and the terms for WC, which decrement from + // n' + for (i, (C, (WCG, WCH))) in + self.C.0.into_iter().zip(cg_weights.into_iter().zip(ch_weights)).enumerate() + { + let i = i + 1; + let j = ni - i; + verifier.additional.push((x[i], C)); + h_bold_scalars = h_bold_scalars + &(WCG * x[j]); + for (i, scalar) in (WCH * &y_inv * x[j]).0.into_iter().enumerate() { + verifier.g_bold[i] += scalar; + } + } + + // All terms for h_bold here have actually been for h_bold', h_bold * y_inv + h_bold_scalars = h_bold_scalars * &y_inv; + for (i, scalar) in h_bold_scalars.0.into_iter().enumerate() { + verifier.h_bold[i] += scalar; + } + + // Remove u * h from P + verifier.h -= verifier_weight * u; + } + + // Prove for lines 88, 92 with an Inner-Product statement + // This inlines Protocol 1, as our IpStatement implements Protocol 2 + let ip_x = transcript.challenge(); + // P is amended with this additional term + verifier.g += verifier_weight * ip_x * t_caret; + IpStatement::new(self.generators, y_inv, ip_x, P::Verifier { verifier_weight }) + .unwrap() + .verify(verifier, transcript) + .map_err(AcError::Ip)?; + + Ok(()) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/inner_product.rs b/crypto/evrf/generalized-bulletproofs/src/inner_product.rs new file mode 100644 index 000000000..ae3ec8766 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/inner_product.rs @@ -0,0 +1,360 @@ +use multiexp::multiexp_vartime; +use ciphersuite::{group::ff::Field, Ciphersuite}; + +#[rustfmt::skip] +use crate::{ScalarVector, PointVector, ProofGenerators, BatchVerifier, transcript::*, padded_pow_of_2}; + +/// An error from proving/verifying Inner-Product statements. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum IpError { + /// An incorrect amount of generators was provided. + IncorrectAmountOfGenerators, + /// The witness was inconsistent to the statement. + /// + /// Sanity checks on the witness are always performed. If the library is compiled with debug + /// assertions on, whether or not this witness actually opens `P` is checked. + InconsistentWitness, + /// The proof wasn't complete and the necessary values could not be read from the transcript. + IncompleteProof, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) enum P { + Verifier { verifier_weight: C::F }, + Prover(C::G), +} + +/// The Bulletproofs Inner-Product statement. +/// +/// This is for usage with Protocol 2 from the Bulletproofs paper. +#[derive(Clone, Debug)] +pub(crate) struct IpStatement<'a, C: Ciphersuite> { + generators: ProofGenerators<'a, C>, + // Weights for h_bold + h_bold_weights: ScalarVector, + // u as the discrete logarithm of G + u: C::F, + // P + P: P, +} + +/// The witness for the Bulletproofs Inner-Product statement. +#[derive(Clone, Debug)] +pub(crate) struct IpWitness { + // a + a: ScalarVector, + // b + b: ScalarVector, +} + +impl IpWitness { + /// Construct a new witness for an Inner-Product statement. + /// + /// If the witness is less than a power of two, it is padded to the nearest power of two. 
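+  ///
+  /// Padding both vectors with zeroes preserves the inner product, as each appended term
+  /// contributes nothing: `(a1 * b1) + (a2 * b2)` equals
+  /// `(a1 * b1) + (a2 * b2) + (0 * 0) + (0 * 0)`, so the padded witness opens the same `P`.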
+  ///
+  /// This function returns None if the lengths of a, b are mismatched or either are empty.
+  pub(crate) fn new(mut a: ScalarVector<C::F>, mut b: ScalarVector<C::F>) -> Option<Self> {
+    if a.0.is_empty() || (a.len() != b.len()) {
+      None?;
+    }
+
+    // Pad to the nearest power of 2
+    let missing = padded_pow_of_2(a.len()) - a.len();
+    a.0.reserve(missing);
+    b.0.reserve(missing);
+    for _ in 0 .. missing {
+      a.0.push(C::F::ZERO);
+      b.0.push(C::F::ZERO);
+    }
+
+    Some(Self { a, b })
+  }
+}
+
+impl<'a, C: Ciphersuite> IpStatement<'a, C> {
+  /// Create a new Inner-Product statement.
+  ///
+  /// This does not perform any transcripting of any variables within this statement. They must be
+  /// deterministic to the existing transcript.
+  pub(crate) fn new(
+    generators: ProofGenerators<'a, C>,
+    h_bold_weights: ScalarVector<C::F>,
+    u: C::F,
+    P: P<C>,
+  ) -> Result<Self, IpError> {
+    if generators.h_bold_slice().len() != h_bold_weights.len() {
+      Err(IpError::IncorrectAmountOfGenerators)?
+    }
+    Ok(Self { generators, h_bold_weights, u, P })
+  }
+
+  /// Prove for this Inner-Product statement.
+  ///
+  /// Returns an error if this statement couldn't be proven for (such as if the witness isn't
+  /// consistent).
+  pub(crate) fn prove(
+    self,
+    transcript: &mut Transcript,
+    witness: IpWitness<C>,
+  ) -> Result<(), IpError> {
+    let (mut g_bold, mut h_bold, u, mut P, mut a, mut b) = {
+      let IpStatement { generators, h_bold_weights, u, P } = self;
+      let u = generators.g() * u;
+
+      // Ensure we have the exact amount of generators
+      if generators.g_bold_slice().len() != witness.a.len() {
+        Err(IpError::IncorrectAmountOfGenerators)?;
+      }
+      // Acquire a local copy of the generators
+      let g_bold = PointVector::<C>(generators.g_bold_slice().to_vec());
+      let h_bold = PointVector::<C>(generators.h_bold_slice().to_vec()).mul_vec(&h_bold_weights);
+
+      let IpWitness { a, b } = witness;
+
+      let P = match P {
+        P::Prover(point) => point,
+        P::Verifier { ..
} => { + panic!("prove called with a P specification which was for the verifier") + } + }; + + // Ensure this witness actually opens this statement + #[cfg(debug_assertions)] + { + let ag = a.0.iter().cloned().zip(g_bold.0.iter().cloned()); + let bh = b.0.iter().cloned().zip(h_bold.0.iter().cloned()); + let cu = core::iter::once((a.inner_product(b.0.iter()), u)); + if P != multiexp_vartime(&ag.chain(bh).chain(cu).collect::>()) { + Err(IpError::InconsistentWitness)?; + } + } + + (g_bold, h_bold, u, P, a, b) + }; + + // `else: (n > 1)` case, lines 18-35 of the Bulletproofs paper + // This interprets `g_bold.len()` as `n` + while g_bold.len() > 1 { + // Split a, b, g_bold, h_bold as needed for lines 20-24 + let (a1, a2) = a.clone().split(); + let (b1, b2) = b.clone().split(); + + let (g_bold1, g_bold2) = g_bold.split(); + let (h_bold1, h_bold2) = h_bold.split(); + + let n_hat = g_bold1.len(); + + // Sanity + debug_assert_eq!(a1.len(), n_hat); + debug_assert_eq!(a2.len(), n_hat); + debug_assert_eq!(b1.len(), n_hat); + debug_assert_eq!(b2.len(), n_hat); + debug_assert_eq!(g_bold1.len(), n_hat); + debug_assert_eq!(g_bold2.len(), n_hat); + debug_assert_eq!(h_bold1.len(), n_hat); + debug_assert_eq!(h_bold2.len(), n_hat); + + // cl, cr, lines 21-22 + let cl = a1.inner_product(b2.0.iter()); + let cr = a2.inner_product(b1.0.iter()); + + let L = { + let mut L_terms = Vec::with_capacity(1 + (2 * g_bold1.len())); + for (a, g) in a1.0.iter().zip(g_bold2.0.iter()) { + L_terms.push((*a, *g)); + } + for (b, h) in b2.0.iter().zip(h_bold1.0.iter()) { + L_terms.push((*b, *h)); + } + L_terms.push((cl, u)); + // Uses vartime since this isn't a ZK proof + multiexp_vartime(&L_terms) + }; + + let R = { + let mut R_terms = Vec::with_capacity(1 + (2 * g_bold1.len())); + for (a, g) in a2.0.iter().zip(g_bold1.0.iter()) { + R_terms.push((*a, *g)); + } + for (b, h) in b1.0.iter().zip(h_bold2.0.iter()) { + R_terms.push((*b, *h)); + } + R_terms.push((cr, u)); + multiexp_vartime(&R_terms) + }; + + // Now that we've calculate L, R, transcript them to receive x (26-27) + transcript.push_point(L); + transcript.push_point(R); + let x: C::F = transcript.challenge(); + let x_inv = x.invert().unwrap(); + + // The prover and verifier now calculate the following (28-31) + g_bold = PointVector(Vec::with_capacity(g_bold1.len())); + for (a, b) in g_bold1.0.into_iter().zip(g_bold2.0.into_iter()) { + g_bold.0.push(multiexp_vartime(&[(x_inv, a), (x, b)])); + } + h_bold = PointVector(Vec::with_capacity(h_bold1.len())); + for (a, b) in h_bold1.0.into_iter().zip(h_bold2.0.into_iter()) { + h_bold.0.push(multiexp_vartime(&[(x, a), (x_inv, b)])); + } + P = (L * (x * x)) + P + (R * (x_inv * x_inv)); + + // 32-34 + a = (a1 * x) + &(a2 * x_inv); + b = (b1 * x_inv) + &(b2 * x); + } + + // `if n = 1` case from line 14-17 + + // Sanity + debug_assert_eq!(g_bold.len(), 1); + debug_assert_eq!(h_bold.len(), 1); + debug_assert_eq!(a.len(), 1); + debug_assert_eq!(b.len(), 1); + + // We simply send a/b + transcript.push_scalar(a[0]); + transcript.push_scalar(b[0]); + Ok(()) + } + + /* + This has room for optimization worth investigating further. It currently takes + an iterative approach. It can be optimized further via divide and conquer. + + Assume there are 4 challenges. + + Iterative approach (current): + 1. Do the optimal multiplications across challenge column 0 and 1. + 2. Do the optimal multiplications across that result and column 2. + 3. Do the optimal multiplications across that result and column 3. 
+ + Divide and conquer (worth investigating further): + 1. Do the optimal multiplications across challenge column 0 and 1. + 2. Do the optimal multiplications across challenge column 2 and 3. + 3. Multiply both results together. + + When there are 4 challenges (n=16), the iterative approach does 28 multiplications + versus divide and conquer's 24. + */ + fn challenge_products(challenges: &[(C::F, C::F)]) -> Vec { + let mut products = vec![C::F::ONE; 1 << challenges.len()]; + + if !challenges.is_empty() { + products[0] = challenges[0].1; + products[1] = challenges[0].0; + + for (j, challenge) in challenges.iter().enumerate().skip(1) { + let mut slots = (1 << (j + 1)) - 1; + while slots > 0 { + products[slots] = products[slots / 2] * challenge.0; + products[slots - 1] = products[slots / 2] * challenge.1; + + slots = slots.saturating_sub(2); + } + } + + // Sanity check since if the above failed to populate, it'd be critical + for product in &products { + debug_assert!(!bool::from(product.is_zero())); + } + } + + products + } + + /// Queue an Inner-Product proof for batch verification. + /// + /// This will return Err if there is an error. This will return Ok if the proof was successfully + /// queued for batch verification. The caller is required to verify the batch in order to ensure + /// the proof is actually correct. + pub(crate) fn verify( + self, + verifier: &mut BatchVerifier, + transcript: &mut VerifierTranscript, + ) -> Result<(), IpError> { + let IpStatement { generators, h_bold_weights, u, P } = self; + + // Calculate the discrete log w.r.t. 2 for the amount of generators present + let mut lr_len = 0; + while (1 << lr_len) < generators.g_bold_slice().len() { + lr_len += 1; + } + + let weight = match P { + P::Prover(_) => panic!("prove called with a P specification which was for the prover"), + P::Verifier { verifier_weight } => verifier_weight, + }; + + // Again, we start with the `else: (n > 1)` case + + // We need x, x_inv per lines 25-27 for lines 28-31 + let mut L = Vec::with_capacity(lr_len); + let mut R = Vec::with_capacity(lr_len); + let mut xs: Vec = Vec::with_capacity(lr_len); + for _ in 0 .. 
lr_len {
+      L.push(transcript.read_point::<C>().map_err(|_| IpError::IncompleteProof)?);
+      R.push(transcript.read_point::<C>().map_err(|_| IpError::IncompleteProof)?);
+      xs.push(transcript.challenge());
+    }
+
+    // We calculate their inverse in batch
+    let mut x_invs = xs.clone();
+    {
+      let mut scratch = vec![C::F::ZERO; x_invs.len()];
+      ciphersuite::group::ff::BatchInverter::invert_with_external_scratch(
+        &mut x_invs,
+        &mut scratch,
+      );
+    }
+
+    // Now, with x and x_inv, we need to calculate g_bold', h_bold', P'
+    //
+    // For the sake of performance, we solely want to calculate all of these in terms of scalings
+    // for g_bold, h_bold, P, and don't want to actually perform intermediary scalings of the
+    // points
+    //
+    // L and R are easy, as it's simply x**2, x**-2
+    //
+    // For the series of g_bold, h_bold, we use the `challenge_products` function
+    // For how that works, please see its own documentation
+    let product_cache = {
+      let mut challenges = Vec::with_capacity(lr_len);
+
+      let x_iter = xs.into_iter().zip(x_invs);
+      let lr_iter = L.into_iter().zip(R);
+      for ((x, x_inv), (L, R)) in x_iter.zip(lr_iter) {
+        challenges.push((x, x_inv));
+        verifier.additional.push((weight * x.square(), L));
+        verifier.additional.push((weight * x_inv.square(), R));
+      }
+
+      Self::challenge_products(&challenges)
+    };
+
+    // And now for the `if n = 1` case
+    let a = transcript.read_scalar::<C>().map_err(|_| IpError::IncompleteProof)?;
+    let b = transcript.read_scalar::<C>().map_err(|_| IpError::IncompleteProof)?;
+    let c = a * b;
+
+    // The multiexp of these terms equates to the final permutation of P
+    // We now add terms for a * g_bold' + b * h_bold' + c * u, with the scalars negative such
+    // that the terms sum to 0 for an honest prover
+
+    // The g_bold * a term case from line 16
+    #[allow(clippy::needless_range_loop)]
+    for i in 0 .. generators.g_bold_slice().len() {
+      verifier.g_bold[i] -= weight * product_cache[i] * a;
+    }
+    // The h_bold * b term case from line 16
+    for i in 0 .. generators.h_bold_slice().len() {
+      verifier.h_bold[i] -=
+        weight * product_cache[product_cache.len() - 1 - i] * b * h_bold_weights[i];
+    }
+    // The c * u term case from line 16
+    verifier.g -= weight * c * u;
+
+    Ok(())
+  }
+}
diff --git a/crypto/evrf/generalized-bulletproofs/src/lib.rs b/crypto/evrf/generalized-bulletproofs/src/lib.rs
new file mode 100644
index 000000000..dc88e68c9
--- /dev/null
+++ b/crypto/evrf/generalized-bulletproofs/src/lib.rs
@@ -0,0 +1,328 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+#![allow(non_snake_case)]
+
+use core::fmt;
+use std::collections::HashSet;
+
+use zeroize::Zeroize;
+
+use multiexp::{multiexp, multiexp_vartime};
+use ciphersuite::{
+  group::{ff::Field, Group, GroupEncoding},
+  Ciphersuite,
+};
+
+mod scalar_vector;
+pub use scalar_vector::ScalarVector;
+mod point_vector;
+pub use point_vector::PointVector;
+
+/// The transcript formats.
+pub mod transcript;
+
+pub(crate) mod inner_product;
+
+pub(crate) mod lincomb;
+
+/// The arithmetic circuit proof.
+pub mod arithmetic_circuit_proof;
+
+/// Functionality useful when testing.
+#[cfg(any(test, feature = "tests"))]
+pub mod tests;
+
+/// Calculate the nearest power of two greater than or equal to the argument.
+pub(crate) fn padded_pow_of_2(i: usize) -> usize {
+  let mut next_pow_of_2 = 1;
+  while next_pow_of_2 < i {
+    next_pow_of_2 <<= 1;
+  }
+  next_pow_of_2
+}
+
+/// An error from working with generators.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum GeneratorsError { + /// The provided list of generators for `g` (bold) was empty. + GBoldEmpty, + /// The provided list of generators for `h` (bold) did not match `g` (bold) in length. + DifferingGhBoldLengths, + /// The amount of provided generators were not a power of two. + NotPowerOfTwo, + /// A generator was used multiple times. + DuplicatedGenerator, +} + +/// A full set of generators. +#[derive(Clone)] +pub struct Generators { + g: C::G, + h: C::G, + + g_bold: Vec, + h_bold: Vec, + h_sum: Vec, +} + +/// A batch verifier of proofs. +#[must_use] +#[derive(Clone)] +pub struct BatchVerifier { + g: C::F, + h: C::F, + + g_bold: Vec, + h_bold: Vec, + h_sum: Vec, + + additional: Vec<(C::F, C::G)>, +} + +impl fmt::Debug for Generators { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let g = self.g.to_bytes(); + let g: &[u8] = g.as_ref(); + + let h = self.h.to_bytes(); + let h: &[u8] = h.as_ref(); + + fmt.debug_struct("Generators").field("g", &g).field("h", &h).finish_non_exhaustive() + } +} + +/// The generators for a specific proof. +/// +/// This potentially have been reduced in size from the original set of generators, as beneficial +/// to performance. +#[derive(Copy, Clone)] +pub struct ProofGenerators<'a, C: Ciphersuite> { + g: &'a C::G, + h: &'a C::G, + + g_bold: &'a [C::G], + h_bold: &'a [C::G], +} + +impl fmt::Debug for ProofGenerators<'_, C> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let g = self.g.to_bytes(); + let g: &[u8] = g.as_ref(); + + let h = self.h.to_bytes(); + let h: &[u8] = h.as_ref(); + + fmt.debug_struct("ProofGenerators").field("g", &g).field("h", &h).finish_non_exhaustive() + } +} + +impl Generators { + /// Construct an instance of Generators for usage with Bulletproofs. + pub fn new( + g: C::G, + h: C::G, + g_bold: Vec, + h_bold: Vec, + ) -> Result { + if g_bold.is_empty() { + Err(GeneratorsError::GBoldEmpty)?; + } + if g_bold.len() != h_bold.len() { + Err(GeneratorsError::DifferingGhBoldLengths)?; + } + if padded_pow_of_2(g_bold.len()) != g_bold.len() { + Err(GeneratorsError::NotPowerOfTwo)?; + } + + let mut set = HashSet::new(); + let mut add_generator = |generator: &C::G| { + assert!(!bool::from(generator.is_identity())); + let bytes = generator.to_bytes(); + !set.insert(bytes.as_ref().to_vec()) + }; + + assert!(!add_generator(&g), "g was prior present in empty set"); + if add_generator(&h) { + Err(GeneratorsError::DuplicatedGenerator)?; + } + for g in &g_bold { + if add_generator(g) { + Err(GeneratorsError::DuplicatedGenerator)?; + } + } + for h in &h_bold { + if add_generator(h) { + Err(GeneratorsError::DuplicatedGenerator)?; + } + } + + let mut running_h_sum = C::G::identity(); + let mut h_sum = vec![]; + let mut next_pow_of_2 = 1; + for (i, h) in h_bold.iter().enumerate() { + running_h_sum += h; + if (i + 1) == next_pow_of_2 { + h_sum.push(running_h_sum); + next_pow_of_2 *= 2; + } + } + + Ok(Generators { g, h, g_bold, h_bold, h_sum }) + } + + /// Create a BatchVerifier for proofs which use these generators. + pub fn batch_verifier(&self) -> BatchVerifier { + BatchVerifier { + g: C::F::ZERO, + h: C::F::ZERO, + + g_bold: vec![C::F::ZERO; self.g_bold.len()], + h_bold: vec![C::F::ZERO; self.h_bold.len()], + h_sum: vec![C::F::ZERO; self.h_sum.len()], + + additional: Vec::with_capacity(128), + } + } + + /// Verify all proofs queued for batch verification in this BatchVerifier. 
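+  ///
+  /// This collapses every queued statement into one multiexponentiation, which evaluates to the
+  /// identity if all queued proofs were valid (and, except with negligible probability over the
+  /// random weights sampled at queue time, only then).
+  ///
+  /// A sketch of the intended flow, as in this crate's tests:
+  ///
+  /// ```ignore
+  /// let mut verifier = generators.batch_verifier();
+  /// statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap();
+  /// assert!(generators.verify(verifier));
+  /// ```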
+ #[must_use] + pub fn verify(&self, verifier: BatchVerifier) -> bool { + multiexp_vartime( + &[(verifier.g, self.g), (verifier.h, self.h)] + .into_iter() + .chain(verifier.g_bold.into_iter().zip(self.g_bold.iter().cloned())) + .chain(verifier.h_bold.into_iter().zip(self.h_bold.iter().cloned())) + .chain(verifier.h_sum.into_iter().zip(self.h_sum.iter().cloned())) + .chain(verifier.additional) + .collect::>(), + ) + .is_identity() + .into() + } + + /// The `g` generator. + pub fn g(&self) -> C::G { + self.g + } + + /// The `h` generator. + pub fn h(&self) -> C::G { + self.h + } + + /// A slice to view the `g` (bold) generators. + pub fn g_bold_slice(&self) -> &[C::G] { + &self.g_bold + } + + /// A slice to view the `h` (bold) generators. + pub fn h_bold_slice(&self) -> &[C::G] { + &self.h_bold + } + + /// Reduce a set of generators to the quantity necessary to support a certain amount of + /// in-circuit multiplications/terms in a Pedersen vector commitment. + /// + /// Returns None if reducing to 0 or if the generators reduced are insufficient to provide this + /// many generators. + pub fn reduce(&self, generators: usize) -> Option> { + if generators == 0 { + None?; + } + + // Round to the nearest power of 2 + let generators = padded_pow_of_2(generators); + if generators > self.g_bold.len() { + None?; + } + + Some(ProofGenerators { + g: &self.g, + h: &self.h, + + g_bold: &self.g_bold[.. generators], + h_bold: &self.h_bold[.. generators], + }) + } +} + +impl<'a, C: Ciphersuite> ProofGenerators<'a, C> { + pub(crate) fn len(&self) -> usize { + self.g_bold.len() + } + + pub(crate) fn g(&self) -> C::G { + *self.g + } + + pub(crate) fn h(&self) -> C::G { + *self.h + } + + pub(crate) fn g_bold(&self, i: usize) -> C::G { + self.g_bold[i] + } + + pub(crate) fn h_bold(&self, i: usize) -> C::G { + self.h_bold[i] + } + + pub(crate) fn g_bold_slice(&self) -> &[C::G] { + self.g_bold + } + + pub(crate) fn h_bold_slice(&self) -> &[C::G] { + self.h_bold + } +} + +/// The opening of a Pedersen commitment. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] +pub struct PedersenCommitment { + /// The value committed to. + pub value: C::F, + /// The mask blinding the value committed to. + pub mask: C::F, +} + +impl PedersenCommitment { + /// Commit to this value, yielding the Pedersen commitment. + pub fn commit(&self, g: C::G, h: C::G) -> C::G { + multiexp(&[(self.value, g), (self.mask, h)]) + } +} + +/// The opening of a Pedersen vector commitment. +#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +pub struct PedersenVectorCommitment { + /// The values committed to across the `g` (bold) generators. + pub g_values: ScalarVector, + /// The values committed to across the `h` (bold) generators. + pub h_values: ScalarVector, + /// The mask blinding the values committed to. + pub mask: C::F, +} + +impl PedersenVectorCommitment { + /// Commit to the vectors of values. + /// + /// This function returns None if the amount of generators is less than the amount of values + /// within the relevant vector. 
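+  ///
+  /// The commitment produced is `(mask * h) + sum(g_values[i] * g_bold[i]) +
+  /// sum(h_values[i] * h_bold[i])`, evaluated as a single multiexponentiation.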
+ pub fn commit(&self, g_bold: &[C::G], h_bold: &[C::G], h: C::G) -> Option { + if (g_bold.len() < self.g_values.len()) || (h_bold.len() < self.h_values.len()) { + None?; + }; + + let mut terms = vec![(self.mask, h)]; + for pair in self.g_values.0.iter().cloned().zip(g_bold.iter().cloned()) { + terms.push(pair); + } + for pair in self.h_values.0.iter().cloned().zip(h_bold.iter().cloned()) { + terms.push(pair); + } + let res = multiexp(&terms); + terms.zeroize(); + Some(res) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/lincomb.rs b/crypto/evrf/generalized-bulletproofs/src/lincomb.rs new file mode 100644 index 000000000..291b3b0b5 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/lincomb.rs @@ -0,0 +1,265 @@ +use core::ops::{Add, Sub, Mul}; + +use zeroize::Zeroize; + +use ciphersuite::group::ff::PrimeField; + +use crate::ScalarVector; + +/// A reference to a variable usable within linear combinations. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[allow(non_camel_case_types)] +pub enum Variable { + /// A variable within the left vector of vectors multiplied against each other. + aL(usize), + /// A variable within the right vector of vectors multiplied against each other. + aR(usize), + /// A variable within the output vector of the left vector multiplied by the right vector. + aO(usize), + /// A variable within a Pedersen vector commitment, committed to with a generator from `g` (bold). + CG { + /// The commitment being indexed. + commitment: usize, + /// The index of the variable. + index: usize, + }, + /// A variable within a Pedersen vector commitment, committed to with a generator from `h` (bold). + CH { + /// The commitment being indexed. + commitment: usize, + /// The index of the variable. + index: usize, + }, + /// A variable within a Pedersen commitment. + V(usize), +} + +// Does a NOP as there shouldn't be anything critical here +impl Zeroize for Variable { + fn zeroize(&mut self) {} +} + +/// A linear combination. +/// +/// Specifically, `WL aL + WR aR + WO aO + WCG C_G + WCH C_H + WV V + c`. 
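+///
+/// When passed to `ArithmeticCircuitStatement::new` as a constraint, the linear combination is
+/// bound to evaluate to zero.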
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +#[must_use] +pub struct LinComb { + pub(crate) highest_a_index: Option, + pub(crate) highest_c_index: Option, + pub(crate) highest_v_index: Option, + + // Sparse representation of WL/WR/WO + pub(crate) WL: Vec<(usize, F)>, + pub(crate) WR: Vec<(usize, F)>, + pub(crate) WO: Vec<(usize, F)>, + // Sparse representation once within a commitment + pub(crate) WCG: Vec>, + pub(crate) WCH: Vec>, + // Sparse representation of WV + pub(crate) WV: Vec<(usize, F)>, + pub(crate) c: F, +} + +impl From for LinComb { + fn from(constrainable: Variable) -> LinComb { + LinComb::empty().term(F::ONE, constrainable) + } +} + +impl Add<&LinComb> for LinComb { + type Output = Self; + + fn add(mut self, constraint: &Self) -> Self { + self.highest_a_index = self.highest_a_index.max(constraint.highest_a_index); + self.highest_c_index = self.highest_c_index.max(constraint.highest_c_index); + self.highest_v_index = self.highest_v_index.max(constraint.highest_v_index); + + self.WL.extend(&constraint.WL); + self.WR.extend(&constraint.WR); + self.WO.extend(&constraint.WO); + while self.WCG.len() < constraint.WCG.len() { + self.WCG.push(vec![]); + } + while self.WCH.len() < constraint.WCH.len() { + self.WCH.push(vec![]); + } + for (sWC, cWC) in self.WCG.iter_mut().zip(&constraint.WCG) { + sWC.extend(cWC); + } + for (sWC, cWC) in self.WCH.iter_mut().zip(&constraint.WCH) { + sWC.extend(cWC); + } + self.WV.extend(&constraint.WV); + self.c += constraint.c; + self + } +} + +impl Sub<&LinComb> for LinComb { + type Output = Self; + + fn sub(mut self, constraint: &Self) -> Self { + self.highest_a_index = self.highest_a_index.max(constraint.highest_a_index); + self.highest_c_index = self.highest_c_index.max(constraint.highest_c_index); + self.highest_v_index = self.highest_v_index.max(constraint.highest_v_index); + + self.WL.extend(constraint.WL.iter().map(|(i, weight)| (*i, -*weight))); + self.WR.extend(constraint.WR.iter().map(|(i, weight)| (*i, -*weight))); + self.WO.extend(constraint.WO.iter().map(|(i, weight)| (*i, -*weight))); + while self.WCG.len() < constraint.WCG.len() { + self.WCG.push(vec![]); + } + while self.WCH.len() < constraint.WCH.len() { + self.WCH.push(vec![]); + } + for (sWC, cWC) in self.WCG.iter_mut().zip(&constraint.WCG) { + sWC.extend(cWC.iter().map(|(i, weight)| (*i, -*weight))); + } + for (sWC, cWC) in self.WCH.iter_mut().zip(&constraint.WCH) { + sWC.extend(cWC.iter().map(|(i, weight)| (*i, -*weight))); + } + self.WV.extend(constraint.WV.iter().map(|(i, weight)| (*i, -*weight))); + self.c -= constraint.c; + self + } +} + +impl Mul for LinComb { + type Output = Self; + + fn mul(mut self, scalar: F) -> Self { + for (_, weight) in self.WL.iter_mut() { + *weight *= scalar; + } + for (_, weight) in self.WR.iter_mut() { + *weight *= scalar; + } + for (_, weight) in self.WO.iter_mut() { + *weight *= scalar; + } + for WC in self.WCG.iter_mut() { + for (_, weight) in WC { + *weight *= scalar; + } + } + for WC in self.WCH.iter_mut() { + for (_, weight) in WC { + *weight *= scalar; + } + } + for (_, weight) in self.WV.iter_mut() { + *weight *= scalar; + } + self.c *= scalar; + self + } +} + +impl LinComb { + /// Create an empty linear combination. + pub fn empty() -> Self { + Self { + highest_a_index: None, + highest_c_index: None, + highest_v_index: None, + WL: vec![], + WR: vec![], + WO: vec![], + WCG: vec![], + WCH: vec![], + WV: vec![], + c: F::ZERO, + } + } + + /// Add a new instance of a term to this linear combination. 
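+  ///
+  /// A small sketch of building a constraint term-by-term (the weights and indexes here are
+  /// purely illustrative):
+  ///
+  /// ```ignore
+  /// // Constrain aL[0] + (2 * aR[1]) - aO[0] to equal zero
+  /// let constraint = LinComb::empty()
+  ///   .term(F::ONE, Variable::aL(0))
+  ///   .term(F::from(2u64), Variable::aR(1))
+  ///   .term(-F::ONE, Variable::aO(0));
+  /// ```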
+ pub fn term(mut self, scalar: F, constrainable: Variable) -> Self { + match constrainable { + Variable::aL(i) => { + self.highest_a_index = self.highest_a_index.max(Some(i)); + self.WL.push((i, scalar)) + } + Variable::aR(i) => { + self.highest_a_index = self.highest_a_index.max(Some(i)); + self.WR.push((i, scalar)) + } + Variable::aO(i) => { + self.highest_a_index = self.highest_a_index.max(Some(i)); + self.WO.push((i, scalar)) + } + Variable::CG { commitment: i, index: j } => { + self.highest_c_index = self.highest_c_index.max(Some(i)); + self.highest_a_index = self.highest_a_index.max(Some(j)); + while self.WCG.len() <= i { + self.WCG.push(vec![]); + } + self.WCG[i].push((j, scalar)) + } + Variable::CH { commitment: i, index: j } => { + self.highest_c_index = self.highest_c_index.max(Some(i)); + self.highest_a_index = self.highest_a_index.max(Some(j)); + while self.WCH.len() <= i { + self.WCH.push(vec![]); + } + self.WCH[i].push((j, scalar)) + } + Variable::V(i) => { + self.highest_v_index = self.highest_v_index.max(Some(i)); + self.WV.push((i, scalar)); + } + }; + self + } + + /// Add to the constant c. + pub fn constant(mut self, scalar: F) -> Self { + self.c += scalar; + self + } + + /// View the current weights for aL. + pub fn WL(&self) -> &[(usize, F)] { + &self.WL + } + + /// View the current weights for aR. + pub fn WR(&self) -> &[(usize, F)] { + &self.WR + } + + /// View the current weights for aO. + pub fn WO(&self) -> &[(usize, F)] { + &self.WO + } + + /// View the current weights for CG. + pub fn WCG(&self) -> &[Vec<(usize, F)>] { + &self.WCG + } + + /// View the current weights for CH. + pub fn WCH(&self) -> &[Vec<(usize, F)>] { + &self.WCH + } + + /// View the current weights for V. + pub fn WV(&self) -> &[(usize, F)] { + &self.WV + } + + /// View the current constant. + pub fn c(&self) -> F { + self.c + } +} + +pub(crate) fn accumulate_vector( + accumulator: &mut ScalarVector, + values: &[(usize, F)], + weight: F, +) { + for (i, coeff) in values { + accumulator[*i] += *coeff * weight; + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/point_vector.rs b/crypto/evrf/generalized-bulletproofs/src/point_vector.rs new file mode 100644 index 000000000..82fad519c --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/point_vector.rs @@ -0,0 +1,121 @@ +use core::ops::{Index, IndexMut}; + +use zeroize::Zeroize; + +use ciphersuite::Ciphersuite; + +#[cfg(test)] +use multiexp::multiexp; + +use crate::ScalarVector; + +/// A point vector struct with the functionality necessary for Bulletproofs. +/// +/// The math operations for this panic upon any invalid operation, such as if vectors of different +/// lengths are added. The full extent of invalidity is not fully defined. Only field access is +/// guaranteed to have a safe, public API. 
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +pub struct PointVector(pub(crate) Vec); + +impl Index for PointVector { + type Output = C::G; + fn index(&self, index: usize) -> &C::G { + &self.0[index] + } +} + +impl IndexMut for PointVector { + fn index_mut(&mut self, index: usize) -> &mut C::G { + &mut self.0[index] + } +} + +impl PointVector { + /* + pub(crate) fn add(&self, point: impl AsRef) -> Self { + let mut res = self.clone(); + for val in res.0.iter_mut() { + *val += point.as_ref(); + } + res + } + pub(crate) fn sub(&self, point: impl AsRef) -> Self { + let mut res = self.clone(); + for val in res.0.iter_mut() { + *val -= point.as_ref(); + } + res + } + + pub(crate) fn mul(&self, scalar: impl core::borrow::Borrow) -> Self { + let mut res = self.clone(); + for val in res.0.iter_mut() { + *val *= scalar.borrow(); + } + res + } + + pub(crate) fn add_vec(&self, vector: &Self) -> Self { + debug_assert_eq!(self.len(), vector.len()); + let mut res = self.clone(); + for (i, val) in res.0.iter_mut().enumerate() { + *val += vector.0[i]; + } + res + } + + pub(crate) fn sub_vec(&self, vector: &Self) -> Self { + debug_assert_eq!(self.len(), vector.len()); + let mut res = self.clone(); + for (i, val) in res.0.iter_mut().enumerate() { + *val -= vector.0[i]; + } + res + } + */ + + pub(crate) fn mul_vec(&self, vector: &ScalarVector) -> Self { + debug_assert_eq!(self.len(), vector.len()); + let mut res = self.clone(); + for (i, val) in res.0.iter_mut().enumerate() { + *val *= vector.0[i]; + } + res + } + + #[cfg(test)] + pub(crate) fn multiexp(&self, vector: &crate::ScalarVector) -> C::G { + debug_assert_eq!(self.len(), vector.len()); + let mut res = Vec::with_capacity(self.len()); + for (point, scalar) in self.0.iter().copied().zip(vector.0.iter().copied()) { + res.push((scalar, point)); + } + multiexp(&res) + } + + /* + pub(crate) fn multiexp_vartime(&self, vector: &ScalarVector) -> C::G { + debug_assert_eq!(self.len(), vector.len()); + let mut res = Vec::with_capacity(self.len()); + for (point, scalar) in self.0.iter().copied().zip(vector.0.iter().copied()) { + res.push((scalar, point)); + } + multiexp_vartime(&res) + } + + pub(crate) fn sum(&self) -> C::G { + self.0.iter().sum() + } + */ + + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + pub(crate) fn split(mut self) -> (Self, Self) { + assert!(self.len() > 1); + let r = self.0.split_off(self.0.len() / 2); + debug_assert_eq!(self.len(), r.len()); + (self, PointVector(r)) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/scalar_vector.rs b/crypto/evrf/generalized-bulletproofs/src/scalar_vector.rs new file mode 100644 index 000000000..a9cf4365e --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/scalar_vector.rs @@ -0,0 +1,146 @@ +use core::ops::{Index, IndexMut, Add, Sub, Mul}; + +use zeroize::Zeroize; + +use ciphersuite::group::ff::PrimeField; + +/// A scalar vector struct with the functionality necessary for Bulletproofs. +/// +/// The math operations for this panic upon any invalid operation, such as if vectors of different +/// lengths are added. The full extent of invalidity is not fully defined. Only `new`, `len`, +/// and field access is guaranteed to have a safe, public API. 
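+///
+/// A brief sketch of that public surface (`F` standing in for any `PrimeField` implementor):
+///
+/// ```ignore
+/// let mut v = ScalarVector::<F>::new(4); // four zero scalars
+/// v[0] = F::ONE;                         // `IndexMut` is implemented
+/// assert_eq!(v.len(), 4);
+/// let w = ScalarVector::from(vec![F::ONE; 4]);
+/// let sum = v + &w;                      // element-wise addition
+/// ```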
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ScalarVector(pub(crate) Vec); + +impl Zeroize for ScalarVector { + fn zeroize(&mut self) { + self.0.zeroize() + } +} + +impl Index for ScalarVector { + type Output = F; + fn index(&self, index: usize) -> &F { + &self.0[index] + } +} +impl IndexMut for ScalarVector { + fn index_mut(&mut self, index: usize) -> &mut F { + &mut self.0[index] + } +} + +impl Add for ScalarVector { + type Output = ScalarVector; + fn add(mut self, scalar: F) -> Self { + for s in &mut self.0 { + *s += scalar; + } + self + } +} +impl Sub for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, scalar: F) -> Self { + for s in &mut self.0 { + *s -= scalar; + } + self + } +} +impl Mul for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, scalar: F) -> Self { + for s in &mut self.0 { + *s *= scalar; + } + self + } +} + +impl Add<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn add(mut self, other: &ScalarVector) -> Self { + assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s += o; + } + self + } +} +impl Sub<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, other: &ScalarVector) -> Self { + assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s -= o; + } + self + } +} +impl Mul<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, other: &ScalarVector) -> Self { + assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s *= o; + } + self + } +} + +impl ScalarVector { + /// Create a new scalar vector, initialized with `len` zero scalars. + pub fn new(len: usize) -> Self { + ScalarVector(vec![F::ZERO; len]) + } + + pub(crate) fn powers(x: F, len: usize) -> Self { + assert!(len != 0); + + let mut res = Vec::with_capacity(len); + res.push(F::ONE); + res.push(x); + for i in 2 .. len { + res.push(res[i - 1] * x); + } + res.truncate(len); + ScalarVector(res) + } + + /// The length of this scalar vector. 
+ #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.0.len() + } + + /* + pub(crate) fn sum(mut self) -> F { + self.0.drain(..).sum() + } + */ + + pub(crate) fn inner_product<'a, V: Iterator>(&self, vector: V) -> F { + let mut count = 0; + let mut res = F::ZERO; + for (a, b) in self.0.iter().zip(vector) { + res += *a * b; + count += 1; + } + debug_assert_eq!(self.len(), count); + res + } + + pub(crate) fn split(mut self) -> (Self, Self) { + assert!(self.len() > 1); + let r = self.0.split_off(self.0.len() / 2); + debug_assert_eq!(self.len(), r.len()); + (self, ScalarVector(r)) + } +} + +impl From> for ScalarVector { + fn from(vec: Vec) -> Self { + Self(vec) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/tests/arithmetic_circuit_proof.rs b/crypto/evrf/generalized-bulletproofs/src/tests/arithmetic_circuit_proof.rs new file mode 100644 index 000000000..588a6ae63 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/tests/arithmetic_circuit_proof.rs @@ -0,0 +1,250 @@ +use rand_core::{RngCore, OsRng}; + +use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto}; + +use crate::{ + ScalarVector, PedersenCommitment, PedersenVectorCommitment, + transcript::*, + arithmetic_circuit_proof::{ + Variable, LinComb, ArithmeticCircuitStatement, ArithmeticCircuitWitness, + }, + tests::generators, +}; + +#[test] +fn test_zero_arithmetic_circuit() { + let generators = generators(1); + + let value = ::F::random(&mut OsRng); + let gamma = ::F::random(&mut OsRng); + let commitment = (generators.g() * value) + (generators.h() * gamma); + let V = vec![commitment]; + + let aL = ScalarVector::<::F>(vec![::F::ZERO]); + let aR = aL.clone(); + + let mut transcript = Transcript::new([0; 32]); + let commitments = transcript.write_commitments(vec![], V); + let statement = ArithmeticCircuitStatement::::new( + generators.reduce(1).unwrap(), + vec![], + commitments.clone(), + ) + .unwrap(); + let witness = ArithmeticCircuitWitness::::new( + aL, + aR, + vec![], + vec![PedersenCommitment { value, mask: gamma }], + ) + .unwrap(); + + let proof = { + statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap(); + transcript.complete() + }; + let mut verifier = generators.batch_verifier(); + + let mut transcript = VerifierTranscript::new([0; 32], &proof); + let verifier_commmitments = transcript.read_commitments(0, 1); + assert_eq!(commitments, verifier_commmitments.unwrap()); + statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap(); + assert!(generators.verify(verifier)); +} + +#[test] +fn test_vector_commitment_arithmetic_circuit() { + let generators = generators(2); + let reduced = generators.reduce(2).unwrap(); + + let v1 = ::F::random(&mut OsRng); + let v2 = ::F::random(&mut OsRng); + let v3 = ::F::random(&mut OsRng); + let v4 = ::F::random(&mut OsRng); + let gamma = ::F::random(&mut OsRng); + let commitment = (reduced.g_bold(0) * v1) + + (reduced.g_bold(1) * v2) + + (reduced.h_bold(0) * v3) + + (reduced.h_bold(1) * v4) + + (generators.h() * gamma); + let V = vec![]; + let C = vec![commitment]; + + let zero_vec = + || ScalarVector::<::F>(vec![::F::ZERO]); + + let aL = zero_vec(); + let aR = zero_vec(); + + let mut transcript = Transcript::new([0; 32]); + let commitments = transcript.write_commitments(C, V); + let statement = ArithmeticCircuitStatement::::new( + reduced, + vec![LinComb::empty() + .term(::F::ONE, Variable::CG { commitment: 0, index: 0 }) + .term(::F::from(2u64), Variable::CG { commitment: 0, index: 1 }) + .term(::F::from(3u64), 
Variable::CH { commitment: 0, index: 0 }) + .term(::F::from(4u64), Variable::CH { commitment: 0, index: 1 }) + .constant(-(v1 + (v2 + v2) + (v3 + v3 + v3) + (v4 + v4 + v4 + v4)))], + commitments.clone(), + ) + .unwrap(); + let witness = ArithmeticCircuitWitness::::new( + aL, + aR, + vec![PedersenVectorCommitment { + g_values: ScalarVector(vec![v1, v2]), + h_values: ScalarVector(vec![v3, v4]), + mask: gamma, + }], + vec![], + ) + .unwrap(); + + let proof = { + statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap(); + transcript.complete() + }; + let mut verifier = generators.batch_verifier(); + + let mut transcript = VerifierTranscript::new([0; 32], &proof); + let verifier_commmitments = transcript.read_commitments(1, 0); + assert_eq!(commitments, verifier_commmitments.unwrap()); + statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap(); + assert!(generators.verify(verifier)); +} + +#[test] +fn fuzz_test_arithmetic_circuit() { + let generators = generators(32); + + for i in 0 .. 100 { + dbg!(i); + + // Create aL, aR, aO + let mut aL = ScalarVector(vec![]); + let mut aR = ScalarVector(vec![]); + while aL.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() { + aL.0.push(::F::random(&mut OsRng)); + } + while aR.len() < aL.len() { + aR.0.push(::F::random(&mut OsRng)); + } + let aO = aL.clone() * &aR; + + // Create C + let mut C = vec![]; + while C.len() < (OsRng.next_u64() % 16).try_into().unwrap() { + let mut g_values = ScalarVector(vec![]); + while g_values.0.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() { + g_values.0.push(::F::random(&mut OsRng)); + } + let mut h_values = ScalarVector(vec![]); + while h_values.0.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() { + h_values.0.push(::F::random(&mut OsRng)); + } + C.push(PedersenVectorCommitment { + g_values, + h_values, + mask: ::F::random(&mut OsRng), + }); + } + + // Create V + let mut V = vec![]; + while V.len() < (OsRng.next_u64() % 4).try_into().unwrap() { + V.push(PedersenCommitment { + value: ::F::random(&mut OsRng), + mask: ::F::random(&mut OsRng), + }); + } + + // Generate random constraints + let mut constraints = vec![]; + for _ in 0 .. (OsRng.next_u64() % 8).try_into().unwrap() { + let mut eval = ::F::ZERO; + let mut constraint = LinComb::empty(); + + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % aL.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::aL(index)); + eval += weight * aL[index]; + } + + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % aR.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::aR(index)); + eval += weight * aR[index]; + } + + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % aO.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::aO(index)); + eval += weight * aO[index]; + } + + for (commitment, C) in C.iter().enumerate() { + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % C.g_values.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::CG { commitment, index }); + eval += weight * C.g_values[index]; + } + + for _ in 0 .. 
(OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % C.h_values.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::CH { commitment, index }); + eval += weight * C.h_values[index]; + } + } + + if !V.is_empty() { + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % V.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::V(index)); + eval += weight * V[index].value; + } + } + + constraint = constraint.constant(-eval); + + constraints.push(constraint); + } + + let mut transcript = Transcript::new([0; 32]); + let commitments = transcript.write_commitments( + C.iter() + .map(|C| { + C.commit(generators.g_bold_slice(), generators.h_bold_slice(), generators.h()).unwrap() + }) + .collect(), + V.iter().map(|V| V.commit(generators.g(), generators.h())).collect(), + ); + + let statement = ArithmeticCircuitStatement::::new( + generators.reduce(16).unwrap(), + constraints, + commitments.clone(), + ) + .unwrap(); + + let witness = ArithmeticCircuitWitness::::new(aL, aR, C.clone(), V.clone()).unwrap(); + + let proof = { + statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap(); + transcript.complete() + }; + let mut verifier = generators.batch_verifier(); + + let mut transcript = VerifierTranscript::new([0; 32], &proof); + let verifier_commmitments = transcript.read_commitments(C.len(), V.len()); + assert_eq!(commitments, verifier_commmitments.unwrap()); + statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap(); + assert!(generators.verify(verifier)); + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/tests/inner_product.rs b/crypto/evrf/generalized-bulletproofs/src/tests/inner_product.rs new file mode 100644 index 000000000..49b5fc320 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/tests/inner_product.rs @@ -0,0 +1,113 @@ +// The inner product relation is P = sum(g_bold * a, h_bold * b, g * (a * b)) + +use rand_core::OsRng; + +use ciphersuite::{ + group::{ff::Field, Group}, + Ciphersuite, Ristretto, +}; + +use crate::{ + ScalarVector, PointVector, + transcript::*, + inner_product::{P, IpStatement, IpWitness}, + tests::generators, +}; + +#[test] +fn test_zero_inner_product() { + let P = ::G::identity(); + + let generators = generators::(1); + let reduced = generators.reduce(1).unwrap(); + let witness = IpWitness::::new( + ScalarVector::<::F>::new(1), + ScalarVector::<::F>::new(1), + ) + .unwrap(); + + let proof = { + let mut transcript = Transcript::new([0; 32]); + IpStatement::::new( + reduced, + ScalarVector(vec![::F::ONE; 1]), + ::F::ONE, + P::Prover(P), + ) + .unwrap() + .clone() + .prove(&mut transcript, witness) + .unwrap(); + transcript.complete() + }; + + let mut verifier = generators.batch_verifier(); + IpStatement::::new( + reduced, + ScalarVector(vec![::F::ONE; 1]), + ::F::ONE, + P::Verifier { verifier_weight: ::F::ONE }, + ) + .unwrap() + .verify(&mut verifier, &mut VerifierTranscript::new([0; 32], &proof)) + .unwrap(); + assert!(generators.verify(verifier)); +} + +#[test] +fn test_inner_product() { + // P = sum(g_bold * a, h_bold * b) + let generators = generators::(32); + let mut verifier = generators.batch_verifier(); + for i in [1, 2, 4, 8, 16, 32] { + let generators = generators.reduce(i).unwrap(); + let g = generators.g(); + assert_eq!(generators.len(), i); + let mut g_bold = vec![]; + let mut h_bold = vec![]; + for i in 0 .. 
i { + g_bold.push(generators.g_bold(i)); + h_bold.push(generators.h_bold(i)); + } + let g_bold = PointVector::(g_bold); + let h_bold = PointVector::(h_bold); + + let mut a = ScalarVector::<::F>::new(i); + let mut b = ScalarVector::<::F>::new(i); + + for i in 0 .. i { + a[i] = ::F::random(&mut OsRng); + b[i] = ::F::random(&mut OsRng); + } + + let P = g_bold.multiexp(&a) + h_bold.multiexp(&b) + (g * a.inner_product(b.0.iter())); + + let witness = IpWitness::::new(a, b).unwrap(); + + let proof = { + let mut transcript = Transcript::new([0; 32]); + IpStatement::::new( + generators, + ScalarVector(vec![::F::ONE; i]), + ::F::ONE, + P::Prover(P), + ) + .unwrap() + .prove(&mut transcript, witness) + .unwrap(); + transcript.complete() + }; + + verifier.additional.push((::F::ONE, P)); + IpStatement::::new( + generators, + ScalarVector(vec![::F::ONE; i]), + ::F::ONE, + P::Verifier { verifier_weight: ::F::ONE }, + ) + .unwrap() + .verify(&mut verifier, &mut VerifierTranscript::new([0; 32], &proof)) + .unwrap(); + } + assert!(generators.verify(verifier)); +} diff --git a/crypto/evrf/generalized-bulletproofs/src/tests/mod.rs b/crypto/evrf/generalized-bulletproofs/src/tests/mod.rs new file mode 100644 index 000000000..1b64d378d --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/tests/mod.rs @@ -0,0 +1,27 @@ +use rand_core::OsRng; + +use ciphersuite::{group::Group, Ciphersuite}; + +use crate::{Generators, padded_pow_of_2}; + +#[cfg(test)] +mod inner_product; + +#[cfg(test)] +mod arithmetic_circuit_proof; + +/// Generate a set of generators for testing purposes. +/// +/// This should not be considered secure. +pub fn generators(n: usize) -> Generators { + assert_eq!(padded_pow_of_2(n), n, "amount of generators wasn't a power of 2"); + + let gens = || { + let mut res = Vec::with_capacity(n); + for _ in 0 .. n { + res.push(C::G::random(&mut OsRng)); + } + res + }; + Generators::new(C::G::random(&mut OsRng), C::G::random(&mut OsRng), gens(), gens()).unwrap() +} diff --git a/crypto/evrf/generalized-bulletproofs/src/transcript.rs b/crypto/evrf/generalized-bulletproofs/src/transcript.rs new file mode 100644 index 000000000..75ef35c44 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/transcript.rs @@ -0,0 +1,188 @@ +use std::io; + +use blake2::{Digest, Blake2b512}; + +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, +}; + +use crate::PointVector; + +const SCALAR: u8 = 0; +const POINT: u8 = 1; +const CHALLENGE: u8 = 2; + +fn challenge(digest: &mut Blake2b512) -> F { + // Panic if this is such a wide field, we won't successfully perform a reduction into an unbiased + // scalar + debug_assert!((F::NUM_BITS + 128) < 512); + + digest.update([CHALLENGE]); + let chl = digest.clone().finalize(); + + let mut res = F::ZERO; + for (i, mut byte) in chl.iter().cloned().enumerate() { + for j in 0 .. 8 { + let lsb = byte & 1; + let mut bit = F::from(u64::from(lsb)); + for _ in 0 .. ((i * 8) + j) { + bit = bit.double(); + } + res += bit; + + byte >>= 1; + } + } + + // Negligible probability + if bool::from(res.is_zero()) { + panic!("zero challenge"); + } + + res +} + +/// Commitments written to/read from a transcript. +// We use a dedicated type for this to coerce the caller into transcripting the commitments as +// expected. +#[cfg_attr(test, derive(Clone, PartialEq, Debug))] +pub struct Commitments { + pub(crate) C: PointVector, + pub(crate) V: PointVector, +} + +impl Commitments { + /// The vector commitments. 
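// Editor's note on `challenge` above: the bit loop is a wide reduction. It
// reads the 512-bit Blake2b digest as a little-endian integer and folds it
// into F, which stays statistically unbiased given the
// `F::NUM_BITS + 128 < 512` assertion. A hedged sketch of the same
// computation, maintaining a running power of two instead of re-doubling
// from scratch (`wide_reduce_le` is an illustrative name, not crate API):

use ciphersuite::group::ff::PrimeField;

fn wide_reduce_le<F: PrimeField>(digest: &[u8; 64]) -> F {
  let mut res = F::ZERO;
  // 2^(8i + j) for byte i, bit j
  let mut power = F::ONE;
  for byte in digest.iter().copied() {
    for j in 0 .. 8 {
      if ((byte >> j) & 1) == 1 {
        res += power;
      }
      power = power.double();
    }
  }
  res
}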
+ pub fn C(&self) -> &[C::G] { + &self.C.0 + } + /// The non-vector commitments. + pub fn V(&self) -> &[C::G] { + &self.V.0 + } +} + +/// A transcript for proving proofs. +pub struct Transcript { + digest: Blake2b512, + transcript: Vec, +} + +/* + We define our proofs as Vec and derive our transcripts from the values we deserialize from + them. This format assumes the order of the values read, their size, and their quantity are + constant to the context. +*/ +impl Transcript { + /// Create a new transcript off some context. + pub fn new(context: [u8; 32]) -> Self { + let mut digest = Blake2b512::new(); + digest.update(context); + Self { digest, transcript: Vec::with_capacity(1024) } + } + + /// Push a scalar onto the transcript. + pub fn push_scalar(&mut self, scalar: impl PrimeField) { + self.digest.update([SCALAR]); + let bytes = scalar.to_repr(); + self.digest.update(bytes); + self.transcript.extend(bytes.as_ref()); + } + + /// Push a point onto the transcript. + pub fn push_point(&mut self, point: impl GroupEncoding) { + self.digest.update([POINT]); + let bytes = point.to_bytes(); + self.digest.update(bytes); + self.transcript.extend(bytes.as_ref()); + } + + /// Write the Pedersen (vector) commitments to this transcript. + pub fn write_commitments( + &mut self, + C: Vec, + V: Vec, + ) -> Commitments { + for C in &C { + self.push_point(*C); + } + for V in &V { + self.push_point(*V); + } + Commitments { C: PointVector(C), V: PointVector(V) } + } + + /// Sample a challenge. + pub fn challenge(&mut self) -> F { + challenge(&mut self.digest) + } + + /// Complete a transcript, yielding the fully serialized proof. + pub fn complete(self) -> Vec { + self.transcript + } +} + +/// A transcript for verifying proofs. +pub struct VerifierTranscript<'a> { + digest: Blake2b512, + transcript: &'a [u8], +} + +impl<'a> VerifierTranscript<'a> { + /// Create a new transcript to verify a proof with. + pub fn new(context: [u8; 32], proof: &'a [u8]) -> Self { + let mut digest = Blake2b512::new(); + digest.update(context); + Self { digest, transcript: proof } + } + + /// Read a scalar from the transcript. + pub fn read_scalar(&mut self) -> io::Result { + let scalar = C::read_F(&mut self.transcript)?; + self.digest.update([SCALAR]); + let bytes = scalar.to_repr(); + self.digest.update(bytes); + Ok(scalar) + } + + /// Read a point from the transcript. + pub fn read_point(&mut self) -> io::Result { + let point = C::read_G(&mut self.transcript)?; + self.digest.update([POINT]); + let bytes = point.to_bytes(); + self.digest.update(bytes); + Ok(point) + } + + /// Read the Pedersen (Vector) Commitments from the transcript. + /// + /// The lengths of the vectors are not transcripted. + #[allow(clippy::type_complexity)] + pub fn read_commitments( + &mut self, + C: usize, + V: usize, + ) -> io::Result> { + let mut C_vec = Vec::with_capacity(C); + for _ in 0 .. C { + C_vec.push(self.read_point::()?); + } + let mut V_vec = Vec::with_capacity(V); + for _ in 0 .. V { + V_vec.push(self.read_point::()?); + } + Ok(Commitments { C: PointVector(C_vec), V: PointVector(V_vec) }) + } + + /// Sample a challenge. + pub fn challenge(&mut self) -> F { + challenge(&mut self.digest) + } + + /// Complete the transcript, returning the advanced slice. 
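// Editor's note: a minimal round-trip sketch of the two transcripts above
// (test-style, Ristretto chosen arbitrarily; not part of this diff). The
// proof is simply the prover transcript's byte log, and the verifier derives
// identical challenges by replaying the reads in the same order and sizes,
// exactly the format assumption stated above.

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

fn transcript_round_trip() {
  let context = [0; 32];

  let mut transcript = Transcript::new(context);
  transcript.push_scalar(<Ristretto as Ciphersuite>::F::ONE);
  let prover_challenge: <Ristretto as Ciphersuite>::F = transcript.challenge();
  let proof = transcript.complete();

  // Reads must mirror the writes; the serialization isn't self-describing
  let mut transcript = VerifierTranscript::new(context, &proof);
  let scalar = transcript.read_scalar::<Ristretto>().unwrap();
  assert_eq!(scalar, <Ristretto as Ciphersuite>::F::ONE);
  let verifier_challenge: <Ristretto as Ciphersuite>::F = transcript.challenge();
  assert_eq!(prover_challenge, verifier_challenge);
}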
+  pub fn complete(self) -> &'a [u8] {
+    self.transcript
+  }
+}
diff --git a/crypto/evrf/secq256k1/Cargo.toml b/crypto/evrf/secq256k1/Cargo.toml
new file mode 100644
index 000000000..c363ca4f2
--- /dev/null
+++ b/crypto/evrf/secq256k1/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "secq256k1"
+version = "0.1.0"
+description = "An implementation of the curve that secp256k1 cycles with"
+license = "MIT"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/secq256k1"
+authors = ["Luke Parker"]
+keywords = ["secp256k1", "secq256k1", "group"]
+edition = "2021"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+rustversion = "1"
+hex-literal = { version = "0.4", default-features = false }
+
+rand_core = { version = "0.6", default-features = false, features = ["std"] }
+
+zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
+subtle = { version = "^2.4", default-features = false, features = ["std"] }
+
+generic-array = { version = "0.14", default-features = false }
+crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
+
+k256 = { version = "0.13", default-features = false, features = ["arithmetic"] }
+
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
+ec-divisors = { path = "../divisors" }
+generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets" }
+
+[dev-dependencies]
+hex = "0.4"
+
+rand_core = { version = "0.6", features = ["std"] }
+
+ff-group-tests = { path = "../../ff-group-tests" }
diff --git a/crypto/evrf/secq256k1/LICENSE b/crypto/evrf/secq256k1/LICENSE
new file mode 100644
index 000000000..91d893c11
--- /dev/null
+++ b/crypto/evrf/secq256k1/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022-2024 Luke Parker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crypto/evrf/secq256k1/README.md b/crypto/evrf/secq256k1/README.md
new file mode 100644
index 000000000..b20ee31fc
--- /dev/null
+++ b/crypto/evrf/secq256k1/README.md
@@ -0,0 +1,5 @@
+# secq256k1
+
+An implementation of the curve that secp256k1 cycles with.
+
+Scalars and field elements are encoded in their big-endian formats.
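// Editor's note, grounded in `src/scalar.rs` later in this diff: "cycles
// with" is literal. secq256k1 is defined over secp256k1's scalar field, and
// its own scalar field is secp256k1's base field (the SEC 2 prime p), so the
// two curves swap base and scalar fields. A test-style sketch of the half of
// that relation visible in this diff:

use ciphersuite::group::ff::PrimeField;

#[test]
fn scalar_modulus_is_secp256k1_p() {
  // secq256k1's Scalar modulus is secp256k1's base-field prime p,
  // matching MODULUS_STR in src/scalar.rs below
  assert_eq!(
    <secq256k1::Scalar as PrimeField>::MODULUS,
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F",
  );
}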
diff --git a/crypto/evrf/secq256k1/src/backend.rs b/crypto/evrf/secq256k1/src/backend.rs new file mode 100644 index 000000000..6f8653c8a --- /dev/null +++ b/crypto/evrf/secq256k1/src/backend.rs @@ -0,0 +1,295 @@ +use zeroize::Zeroize; + +// Use black_box when possible +#[rustversion::since(1.66)] +use core::hint::black_box; +#[rustversion::before(1.66)] +fn black_box(val: T) -> T { + val +} + +pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 { + let bit_ref = black_box(bit_ref); + + let mut bit = black_box(*bit_ref); + let res = black_box(bit as u8); + bit.zeroize(); + debug_assert!((res | 1) == 1); + + bit_ref.zeroize(); + res +} + +macro_rules! math_op { + ( + $Value: ident, + $Other: ident, + $Op: ident, + $op_fn: ident, + $Assign: ident, + $assign_fn: ident, + $function: expr + ) => { + impl $Op<$Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl $Assign<$Other> for $Value { + fn $assign_fn(&mut self, other: $Other) { + self.0 = $function(self.0, other.0); + } + } + impl<'a> $Op<&'a $Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: &'a $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl<'a> $Assign<&'a $Other> for $Value { + fn $assign_fn(&mut self, other: &'a $Other) { + self.0 = $function(self.0, other.0); + } + } + }; +} + +macro_rules! from_wrapper { + ($wrapper: ident, $inner: ident, $uint: ident) => { + impl From<$uint> for $wrapper { + fn from(a: $uint) -> $wrapper { + Self(Residue::new(&$inner::from(a))) + } + } + }; +} + +macro_rules! field { + ( + $FieldName: ident, + $ResidueType: ident, + + $MODULUS_STR: ident, + $MODULUS: ident, + $WIDE_MODULUS: ident, + + $NUM_BITS: literal, + $MULTIPLICATIVE_GENERATOR: literal, + $S: literal, + $ROOT_OF_UNITY: literal, + $DELTA: literal, + ) => { + use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::{Sum, Product}, + }; + + use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable}; + use rand_core::RngCore; + + use crypto_bigint::{Integer, NonZero, Encoding, impl_modulus}; + + use ciphersuite::group::ff::{ + Field, PrimeField, FieldBits, PrimeFieldBits, helpers::sqrt_ratio_generic, + }; + + use $crate::backend::u8_from_bool; + + fn reduce(x: U512) -> U256 { + U256::from_le_slice(&x.rem(&NonZero::new($WIDE_MODULUS).unwrap()).to_le_bytes()[.. 
32]) + } + + impl ConstantTimeEq for $FieldName { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } + } + + impl ConditionallySelectable for $FieldName { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $FieldName(Residue::conditional_select(&a.0, &b.0, choice)) + } + } + + math_op!($FieldName, $FieldName, Add, add, AddAssign, add_assign, |x: $ResidueType, y| x + .add(&y)); + math_op!($FieldName, $FieldName, Sub, sub, SubAssign, sub_assign, |x: $ResidueType, y| x + .sub(&y)); + math_op!($FieldName, $FieldName, Mul, mul, MulAssign, mul_assign, |x: $ResidueType, y| x + .mul(&y)); + + from_wrapper!($FieldName, U256, u8); + from_wrapper!($FieldName, U256, u16); + from_wrapper!($FieldName, U256, u32); + from_wrapper!($FieldName, U256, u64); + from_wrapper!($FieldName, U256, u128); + + impl Neg for $FieldName { + type Output = $FieldName; + fn neg(self) -> $FieldName { + Self(self.0.neg()) + } + } + + impl<'a> Neg for &'a $FieldName { + type Output = $FieldName; + fn neg(self) -> Self::Output { + (*self).neg() + } + } + + impl $FieldName { + /// Perform an exponentation. + pub fn pow(&self, other: $FieldName) -> $FieldName { + let mut table = [Self(Residue::ONE); 16]; + table[1] = *self; + for i in 2 .. 16 { + table[i] = table[i - 1] * self; + } + + let mut res = Self(Residue::ONE); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 4 { + res *= res; + } + } + + let mut factor = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + factor = Self::conditional_select(&factor, &candidate, usize::from(bits).ct_eq(&j)); + } + res *= factor; + bits = 0; + } + } + res + } + } + + impl Field for $FieldName { + const ZERO: Self = Self(Residue::ZERO); + const ONE: Self = Self(Residue::ONE); + + fn random(mut rng: impl RngCore) -> Self { + let mut bytes = [0; 64]; + rng.fill_bytes(&mut bytes); + $FieldName(Residue::new(&reduce(U512::from_be_slice(bytes.as_ref())))) + } + + fn square(&self) -> Self { + Self(self.0.square()) + } + fn double(&self) -> Self { + *self + self + } + + fn invert(&self) -> CtOption { + let res = self.0.invert(); + CtOption::new(Self(res.0), res.1.into()) + } + + fn sqrt(&self) -> CtOption { + // (p + 1) // 4, as valid since p % 4 == 3 + let mod_plus_one_div_four = $MODULUS.saturating_add(&U256::ONE).wrapping_div(&(4u8.into())); + let res = self.pow(Self($ResidueType::new_checked(&mod_plus_one_div_four).unwrap())); + CtOption::new(res, res.square().ct_eq(self)) + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + sqrt_ratio_generic(num, div) + } + } + + impl PrimeField for $FieldName { + type Repr = [u8; 32]; + + const MODULUS: &'static str = $MODULUS_STR; + + const NUM_BITS: u32 = $NUM_BITS; + const CAPACITY: u32 = $NUM_BITS - 1; + + const TWO_INV: Self = $FieldName($ResidueType::new(&U256::from_u8(2)).invert().0); + + const MULTIPLICATIVE_GENERATOR: Self = + Self(Residue::new(&U256::from_u8($MULTIPLICATIVE_GENERATOR))); + const S: u32 = $S; + + const ROOT_OF_UNITY: Self = $FieldName(Residue::new(&U256::from_be_hex($ROOT_OF_UNITY))); + const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0); + + const DELTA: Self = $FieldName(Residue::new(&U256::from_be_hex($DELTA))); + + fn from_repr(bytes: Self::Repr) -> CtOption { + let res = U256::from_be_slice(&bytes); + 
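      // Editor's note: the `ct_lt` check below rejects any 32-byte value
      // greater than or equal to the modulus, so `from_repr` accepts exactly
      // one (big-endian) encoding per field element.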
CtOption::new($FieldName(Residue::new(&res)), res.ct_lt(&$MODULUS)) + } + fn to_repr(&self) -> Self::Repr { + let mut repr = [0; 32]; + repr.copy_from_slice(&self.0.retrieve().to_be_bytes()); + repr + } + + fn is_odd(&self) -> Choice { + self.0.retrieve().is_odd() + } + } + + impl PrimeFieldBits for $FieldName { + type ReprBits = [u8; 32]; + + fn to_le_bits(&self) -> FieldBits { + let mut repr = [0; 32]; + repr.copy_from_slice(&self.0.retrieve().to_le_bytes()); + repr.into() + } + + fn char_le_bits() -> FieldBits { + let mut repr = [0; 32]; + repr.copy_from_slice(&MODULUS.to_le_bytes()); + repr.into() + } + } + + impl Sum<$FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + let mut res = $FieldName::ZERO; + for item in iter { + res += item; + } + res + } + } + + impl<'a> Sum<&'a $FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + iter.cloned().sum() + } + } + + impl Product<$FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + let mut res = $FieldName::ONE; + for item in iter { + res *= item; + } + res + } + } + + impl<'a> Product<&'a $FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + iter.cloned().product() + } + } + }; +} diff --git a/crypto/evrf/secq256k1/src/lib.rs b/crypto/evrf/secq256k1/src/lib.rs new file mode 100644 index 000000000..b59078afb --- /dev/null +++ b/crypto/evrf/secq256k1/src/lib.rs @@ -0,0 +1,47 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] + +use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2}; +use ciphersuite::group::{ff::PrimeField, Group}; + +#[macro_use] +mod backend; + +mod scalar; +pub use scalar::Scalar; + +pub use k256::Scalar as FieldElement; + +mod point; +pub use point::Point; + +/// Ciphersuite for Secq256k1. +/// +/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition +/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as +/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. 
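// Editor's note: the caveat above, made concrete as a test-style sketch (not
// part of this diff). Since dst and data are naively concatenated before
// hashing, moving bytes across the boundary yields the same scalar:

use ciphersuite::Ciphersuite;

#[test]
fn hash_to_f_transposes() {
  assert_eq!(
    secq256k1::Secq256k1::hash_to_F(b"abc", b"def"),
    secq256k1::Secq256k1::hash_to_F(b"abcdef", b""),
  );
}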
+#[derive(Clone, Copy, PartialEq, Eq, Debug, zeroize::Zeroize)] +pub struct Secq256k1; +impl ciphersuite::Ciphersuite for Secq256k1 { + type F = Scalar; + type G = Point; + type H = blake2::Blake2b512; + + const ID: &'static [u8] = b"secq256k1"; + + fn generator() -> Self::G { + Point::generator() + } + + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { + use blake2::Digest; + Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_slice().try_into().unwrap()) + } +} + +impl generalized_bulletproofs_ec_gadgets::DiscreteLogParameters for Secq256k1 { + type ScalarBits = U<{ Scalar::NUM_BITS as usize }>; + type XCoefficients = Quot, U2>; + type XCoefficientsMinusOne = Diff; + type YxCoefficients = Diff, U1>, U2>, U2>; +} diff --git a/crypto/evrf/secq256k1/src/point.rs b/crypto/evrf/secq256k1/src/point.rs new file mode 100644 index 000000000..384b68c93 --- /dev/null +++ b/crypto/evrf/secq256k1/src/point.rs @@ -0,0 +1,414 @@ +use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::Sum, +}; + +use rand_core::RngCore; + +use zeroize::Zeroize; +use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable, ConditionallyNegatable}; + +use generic_array::{typenum::U33, GenericArray}; + +use ciphersuite::group::{ + ff::{Field, PrimeField, PrimeFieldBits}, + Group, GroupEncoding, + prime::PrimeGroup, +}; + +use crate::{backend::u8_from_bool, Scalar, FieldElement}; + +fn recover_y(x: FieldElement) -> CtOption { + // x**3 + B since a = 0 + ((x.square() * x) + FieldElement::from(7u64)).sqrt() +} + +/// Point. +#[derive(Clone, Copy, Debug, Zeroize)] +#[repr(C)] +pub struct Point { + x: FieldElement, // / Z + y: FieldElement, // / Z + z: FieldElement, +} + +impl ConstantTimeEq for Point { + fn ct_eq(&self, other: &Self) -> Choice { + let x1 = self.x * other.z; + let x2 = other.x * self.z; + + let y1 = self.y * other.z; + let y2 = other.y * self.z; + + (self.x.is_zero() & other.x.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2)) + } +} + +impl PartialEq for Point { + fn eq(&self, other: &Point) -> bool { + self.ct_eq(other).into() + } +} + +impl Eq for Point {} + +impl ConditionallySelectable for Point { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Point { + x: FieldElement::conditional_select(&a.x, &b.x, choice), + y: FieldElement::conditional_select(&a.y, &b.y, choice), + z: FieldElement::conditional_select(&a.z, &b.z, choice), + } + } +} + +impl Add for Point { + type Output = Point; + #[allow(non_snake_case)] + fn add(self, other: Self) -> Self { + // add-2015-rcb + + let a = FieldElement::ZERO; + let B = FieldElement::from(7u64); + let b3 = B + B + B; + + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + let X2 = other.x; + let Y2 = other.y; + let Z2 = other.z; + + let t0 = X1 * X2; + let t1 = Y1 * Y2; + let t2 = Z1 * Z2; + let t3 = X1 + Y1; + let t4 = X2 + Y2; + let t3 = t3 * t4; + let t4 = t0 + t1; + let t3 = t3 - t4; + let t4 = X1 + Z1; + let t5 = X2 + Z2; + let t4 = t4 * t5; + let t5 = t0 + t2; + let t4 = t4 - t5; + let t5 = Y1 + Z1; + let X3 = Y2 + Z2; + let t5 = t5 * X3; + let X3 = t1 + t2; + let t5 = t5 - X3; + let Z3 = a * t4; + let X3 = b3 * t2; + let Z3 = X3 + Z3; + let X3 = t1 - Z3; + let Z3 = t1 + Z3; + let Y3 = X3 * Z3; + let t1 = t0 + t0; + let t1 = t1 + t0; + let t2 = a * t2; + let t4 = b3 * t4; + let t1 = t1 + t2; + let t2 = t0 - t2; + let t2 = a * t2; + let t4 = t4 + t2; + let t0 = t1 * t4; + let Y3 = Y3 + t0; + let t0 = t5 * t4; + let X3 = t3 * X3; + let X3 = X3 - t0; + let t0 = t3 * t1; + let Z3 = t5 * 
Z3; + let Z3 = Z3 + t0; + Point { x: X3, y: Y3, z: Z3 } + } +} + +impl AddAssign for Point { + fn add_assign(&mut self, other: Point) { + *self = *self + other; + } +} + +impl Add<&Point> for Point { + type Output = Point; + fn add(self, other: &Point) -> Point { + self + *other + } +} + +impl AddAssign<&Point> for Point { + fn add_assign(&mut self, other: &Point) { + *self += *other; + } +} + +impl Neg for Point { + type Output = Point; + fn neg(self) -> Self { + Point { x: self.x, y: -self.y, z: self.z } + } +} + +impl Sub for Point { + type Output = Point; + #[allow(clippy::suspicious_arithmetic_impl)] + fn sub(self, other: Self) -> Self { + self + other.neg() + } +} + +impl SubAssign for Point { + fn sub_assign(&mut self, other: Point) { + *self = *self - other; + } +} + +impl Sub<&Point> for Point { + type Output = Point; + fn sub(self, other: &Point) -> Point { + self - *other + } +} + +impl SubAssign<&Point> for Point { + fn sub_assign(&mut self, other: &Point) { + *self -= *other; + } +} + +impl Group for Point { + type Scalar = Scalar; + fn random(mut rng: impl RngCore) -> Self { + loop { + let mut bytes = GenericArray::default(); + rng.fill_bytes(bytes.as_mut()); + let opt = Self::from_bytes(&bytes); + if opt.is_some().into() { + return opt.unwrap(); + } + } + } + fn identity() -> Self { + Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ZERO } + } + fn generator() -> Self { + Point { + x: FieldElement::from_repr( + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000001") + .into(), + ) + .unwrap(), + y: FieldElement::from_repr( + hex_literal::hex!("0C7C97045A2074634909ABDF82C9BD0248916189041F2AF0C1B800D1FFC278C0") + .into(), + ) + .unwrap(), + z: FieldElement::ONE, + } + } + fn is_identity(&self) -> Choice { + self.z.ct_eq(&FieldElement::ZERO) + } + #[allow(non_snake_case)] + fn double(&self) -> Self { + // dbl-2007-bl + + let a = FieldElement::ZERO; + + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + + let XX = X1 * X1; + let ZZ = Z1 * Z1; + let w = (a * ZZ) + XX.double() + XX; + let s = (Y1 * Z1).double(); + let ss = s * s; + let sss = s * ss; + let R = Y1 * s; + let RR = R * R; + let B = X1 + R; + let B = (B * B) - XX - RR; + let h = (w * w) - B.double(); + let X3 = h * s; + let Y3 = w * (B - h) - RR.double(); + let Z3 = sss; + + let res = Self { x: X3, y: Y3, z: Z3 }; + // If self is identity, res will not be well-formed + // Accordingly, we return self if self was the identity + Self::conditional_select(&res, self, self.is_identity()) + } +} + +impl Sum for Point { + fn sum>(iter: I) -> Point { + let mut res = Self::identity(); + for i in iter { + res += i; + } + res + } +} + +impl<'a> Sum<&'a Point> for Point { + fn sum>(iter: I) -> Point { + Point::sum(iter.cloned()) + } +} + +impl Mul for Point { + type Output = Point; + fn mul(self, mut other: Scalar) -> Point { + // Precompute the optimal amount that's a multiple of 2 + let mut table = [Point::identity(); 16]; + table[1] = self; + for i in 2 .. 16 { + table[i] = table[i - 1] + self; + } + + let mut res = Self::identity(); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 
4 { + res = res.double(); + } + } + + let mut term = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + term = Self::conditional_select(&term, candidate, usize::from(bits).ct_eq(&j)); + } + res += term; + bits = 0; + } + } + other.zeroize(); + res + } +} + +impl MulAssign for Point { + fn mul_assign(&mut self, other: Scalar) { + *self = *self * other; + } +} + +impl Mul<&Scalar> for Point { + type Output = Point; + fn mul(self, other: &Scalar) -> Point { + self * *other + } +} + +impl MulAssign<&Scalar> for Point { + fn mul_assign(&mut self, other: &Scalar) { + *self *= *other; + } +} + +impl GroupEncoding for Point { + type Repr = GenericArray; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + // Extract and clear the sign bit + let sign = Choice::from(bytes[0] & 1); + + // Parse x, recover y + FieldElement::from_repr(*GenericArray::from_slice(&bytes[1 ..])).and_then(|x| { + let is_identity = x.is_zero(); + + let y = recover_y(x).map(|mut y| { + y.conditional_negate(y.is_odd().ct_eq(&!sign)); + y + }); + + // If this the identity, set y to 1 + let y = + CtOption::conditional_select(&y, &CtOption::new(FieldElement::ONE, 1.into()), is_identity); + // Create the point if we have a y solution + let point = y.map(|y| Point { x, y, z: FieldElement::ONE }); + + let not_negative_zero = !(is_identity & sign); + // Only return the point if it isn't -0 and the sign byte wasn't malleated + CtOption::conditional_select( + &CtOption::new(Point::identity(), 0.into()), + &point, + not_negative_zero & ((bytes[0] & 1).ct_eq(&bytes[0])), + ) + }) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + Point::from_bytes(bytes) + } + + fn to_bytes(&self) -> Self::Repr { + let Some(z) = Option::::from(self.z.invert()) else { + return *GenericArray::from_slice(&[0; 33]); + }; + let x = self.x * z; + let y = self.y * z; + + let mut res = *GenericArray::from_slice(&[0; 33]); + res[1 ..].as_mut().copy_from_slice(&x.to_repr()); + + // The following conditional select normalizes the sign to 0 when x is 0 + let y_sign = u8::conditional_select(&y.is_odd().unwrap_u8(), &0, x.ct_eq(&FieldElement::ZERO)); + res[0] |= y_sign; + res + } +} + +impl PrimeGroup for Point {} + +impl ec_divisors::DivisorCurve for Point { + type FieldElement = FieldElement; + + fn a() -> Self::FieldElement { + FieldElement::from(0u64) + } + fn b() -> Self::FieldElement { + FieldElement::from(7u64) + } + + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + let z: Self::FieldElement = Option::from(point.z.invert())?; + Some((point.x * z, point.y * z)) + } +} + +#[test] +fn test_curve() { + ff_group_tests::group::test_prime_group_bits::<_, Point>(&mut rand_core::OsRng); +} + +#[test] +fn generator() { + assert_eq!( + Point::generator(), + Point::from_bytes(GenericArray::from_slice(&hex_literal::hex!( + "000000000000000000000000000000000000000000000000000000000000000001" + ))) + .unwrap() + ); +} + +#[test] +fn zero_x_is_invalid() { + assert!(Option::::from(recover_y(FieldElement::ZERO)).is_none()); +} + +// Checks random won't infinitely loop +#[test] +fn random() { + Point::random(&mut rand_core::OsRng); +} diff --git a/crypto/evrf/secq256k1/src/scalar.rs b/crypto/evrf/secq256k1/src/scalar.rs new file mode 100644 index 000000000..1bc930a29 --- /dev/null +++ b/crypto/evrf/secq256k1/src/scalar.rs @@ -0,0 +1,52 @@ +use zeroize::{DefaultIsZeroes, Zeroize}; + +use crypto_bigint::{ + U256, U512, + modular::constant_mod::{ResidueParams, Residue}, +}; + +const MODULUS_STR: &str 
= "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F"; + +impl_modulus!(SecQ, U256, MODULUS_STR); +type ResidueType = Residue; + +/// The Scalar field of secq256k1. +/// +/// This is equivalent to the field secp256k1 is defined over. +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +#[repr(C)] +pub struct Scalar(pub(crate) ResidueType); + +impl DefaultIsZeroes for Scalar {} + +pub(crate) const MODULUS: U256 = U256::from_be_hex(MODULUS_STR); + +const WIDE_MODULUS: U512 = U512::from_be_hex(concat!( + "0000000000000000000000000000000000000000000000000000000000000000", + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", +)); + +field!( + Scalar, + ResidueType, + MODULUS_STR, + MODULUS, + WIDE_MODULUS, + 256, + 3, + 1, + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + "0000000000000000000000000000000000000000000000000000000000000009", +); + +impl Scalar { + /// Perform a wide reduction, presumably to obtain a non-biased Scalar field element. + pub fn wide_reduce(bytes: [u8; 64]) -> Scalar { + Scalar(Residue::new(&reduce(U512::from_le_slice(bytes.as_ref())))) + } +} + +#[test] +fn test_scalar_field() { + ff_group_tests::prime_field::test_prime_field_bits::<_, Scalar>(&mut rand_core::OsRng); +} diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 7be6478af..dc0453a1c 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -122,6 +122,7 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap for ClsagMultisig { .append_message(b"key_image_share", addendum.key_image_share.compress().to_bytes()); // Accumulate the interpolated share - let interpolated_key_image_share = - addendum.key_image_share * lagrange::(l, view.included()); + let interpolated_key_image_share = addendum.key_image_share * + view + .interpolation_factor(l) + .ok_or(FrostError::InternalError("processing addendum of non-participant"))?; *self.image.as_mut().unwrap() += interpolated_key_image_share; self diff --git a/networks/monero/wallet/src/send/multisig.rs b/networks/monero/wallet/src/send/multisig.rs index b3d58ba5f..d60c5a33f 100644 --- a/networks/monero/wallet/src/send/multisig.rs +++ b/networks/monero/wallet/src/send/multisig.rs @@ -14,7 +14,6 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{ curve::Ed25519, Participant, FrostError, ThresholdKeys, - dkg::lagrange, sign::{ Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, @@ -34,7 +33,7 @@ use crate::send::{SendError, SignableTransaction, key_image_sort}; pub struct TransactionMachine { signable: SignableTransaction, - i: Participant, + keys: ThresholdKeys, // The key image generator, and the scalar offset from the spend key key_image_generators_and_offsets: Vec<(EdwardsPoint, Scalar)>, @@ -45,7 +44,7 @@ pub struct TransactionMachine { pub struct TransactionSignMachine { signable: SignableTransaction, - i: Participant, + keys: ThresholdKeys, key_image_generators_and_offsets: Vec<(EdwardsPoint, Scalar)>, clsags: Vec<(ClsagMultisigMaskSender, AlgorithmSignMachine)>, @@ -61,7 +60,7 @@ pub struct TransactionSignatureMachine { impl SignableTransaction { /// Create a FROST signing machine out of this signable transaction. 
- pub fn multisig(self, keys: &ThresholdKeys) -> Result { + pub fn multisig(self, keys: ThresholdKeys) -> Result { let mut clsags = vec![]; let mut key_image_generators_and_offsets = vec![]; @@ -85,12 +84,7 @@ impl SignableTransaction { clsags.push((clsag_mask_send, AlgorithmMachine::new(clsag, offset))); } - Ok(TransactionMachine { - signable: self, - i: keys.params().i(), - key_image_generators_and_offsets, - clsags, - }) + Ok(TransactionMachine { signable: self, keys, key_image_generators_and_offsets, clsags }) } } @@ -120,7 +114,7 @@ impl PreprocessMachine for TransactionMachine { TransactionSignMachine { signable: self.signable, - i: self.i, + keys: self.keys, key_image_generators_and_offsets: self.key_image_generators_and_offsets, clsags, @@ -173,12 +167,12 @@ impl SignMachine for TransactionSignMachine { // We do not need to be included here, yet this set of signers has yet to be validated // We explicitly remove ourselves to ensure we aren't included twice, if we were redundantly // included - commitments.remove(&self.i); + commitments.remove(&self.keys.params().i()); // Find out who's included let mut included = commitments.keys().copied().collect::>(); // This push won't duplicate due to the above removal - included.push(self.i); + included.push(self.keys.params().i()); // unstable sort may reorder elements of equal order // Given our lack of duplicates, we should have no elements of equal order included.sort_unstable(); @@ -192,12 +186,15 @@ impl SignMachine for TransactionSignMachine { } // Convert the serialized nonces commitments to a parallelized Vec + let view = self.keys.view(included.clone()).map_err(|_| { + FrostError::InvalidSigningSet("couldn't form an interpolated view of the key") + })?; let mut commitments = (0 .. self.clsags.len()) .map(|c| { included .iter() .map(|l| { - let preprocess = if *l == self.i { + let preprocess = if *l == self.keys.params().i() { self.our_preprocess[c].clone() } else { commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?[c].clone() @@ -206,7 +203,7 @@ impl SignMachine for TransactionSignMachine { // While here, calculate the key image as needed to call sign // The CLSAG algorithm will independently calculate the key image/verify these shares key_images[c] += - preprocess.addendum.key_image_share().0 * lagrange::(*l, &included).0; + preprocess.addendum.key_image_share().0 * view.interpolation_factor(*l).unwrap().0; Ok((*l, preprocess)) }) @@ -217,7 +214,7 @@ impl SignMachine for TransactionSignMachine { // The above inserted our own preprocess into these maps (which is unnecessary) // Remove it now for map in &mut commitments { - map.remove(&self.i); + map.remove(&self.keys.params().i()); } // The actual TX will have sorted its inputs by key image diff --git a/networks/monero/wallet/tests/runner/mod.rs b/networks/monero/wallet/tests/runner/mod.rs index b83f939ac..5678ba1be 100644 --- a/networks/monero/wallet/tests/runner/mod.rs +++ b/networks/monero/wallet/tests/runner/mod.rs @@ -285,7 +285,7 @@ macro_rules! 
test { { let mut machines = HashMap::new(); for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) { - machines.insert(i, tx.clone().multisig(&keys[&i]).unwrap()); + machines.insert(i, tx.clone().multisig(keys[&i].clone()).unwrap()); } frost::tests::sign_without_caching(&mut OsRng, machines, &[]) diff --git a/orchestration/Cargo.toml b/orchestration/Cargo.toml index fca380661..a70e9936f 100644 --- a/orchestration/Cargo.toml +++ b/orchestration/Cargo.toml @@ -24,6 +24,8 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } +embedwards25519 = { path = "../crypto/evrf/embedwards25519" } +secq256k1 = { path = "../crypto/evrf/secq256k1" } zalloc = { path = "../common/zalloc" } diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 4655be011..7afec67db 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -25,6 +25,8 @@ use ciphersuite::{ }, Ciphersuite, Ristretto, }; +use embedwards25519::Embedwards25519; +use secq256k1::Secq256k1; mod mimalloc; use mimalloc::mimalloc; @@ -267,6 +269,55 @@ fn infrastructure_keys(network: Network) -> InfrastructureKeys { ]) } +struct EmbeddedCurveKeys { + embedwards25519: (Zeroizing>, Vec), + secq256k1: (Zeroizing>, Vec), +} + +fn embedded_curve_keys(network: Network) -> EmbeddedCurveKeys { + // Generate entropy for the embedded curve keys + + let entropy = { + let path = home::home_dir() + .unwrap() + .join(".serai") + .join(network.label()) + .join("embedded_curve_keys_entropy"); + // Check if there's existing entropy + if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) { + assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes"); + let mut res = Zeroizing::new([0; 32]); + res.copy_from_slice(entropy.as_ref()); + res + } else { + // If there isn't, generate fresh entropy + let mut res = Zeroizing::new([0; 32]); + OsRng.fill_bytes(res.as_mut()); + fs::write(&path, &res).unwrap(); + res + } + }; + + let mut transcript = + RecommendedTranscript::new(b"Serai Orchestrator Embedded Curve Keys Transcript"); + transcript.append_message(b"network", network.label().as_bytes()); + transcript.append_message(b"entropy", entropy); + let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"embedded_curve_keys")); + + EmbeddedCurveKeys { + embedwards25519: { + let key = Zeroizing::new(::F::random(&mut rng)); + let pub_key = Embedwards25519::generator() * key.deref(); + (Zeroizing::new(key.to_repr().as_slice().to_vec()), pub_key.to_bytes().to_vec()) + }, + secq256k1: { + let key = Zeroizing::new(::F::random(&mut rng)); + let pub_key = Secq256k1::generator() * key.deref(); + (Zeroizing::new(key.to_repr().as_slice().to_vec()), pub_key.to_bytes().to_vec()) + }, + } +} + fn dockerfiles(network: Network) { let orchestration_path = orchestration_path(network); @@ -294,18 +345,15 @@ fn dockerfiles(network: Network) { monero_key.1, ); - let new_entropy = || { - let mut res = Zeroizing::new([0; 32]); - OsRng.fill_bytes(res.as_mut()); - res - }; + let embedded_curve_keys = embedded_curve_keys(network); processor( &orchestration_path, network, "bitcoin", coordinator_key.1, bitcoin_key.0, - new_entropy(), + embedded_curve_keys.embedwards25519.0.clone(), + embedded_curve_keys.secq256k1.0.clone(), ); processor( &orchestration_path, @@ -313,9 +361,18 
@@ fn dockerfiles(network: Network) { "ethereum", coordinator_key.1, ethereum_key.0, - new_entropy(), + embedded_curve_keys.embedwards25519.0.clone(), + embedded_curve_keys.secq256k1.0.clone(), + ); + processor( + &orchestration_path, + network, + "monero", + coordinator_key.1, + monero_key.0, + embedded_curve_keys.embedwards25519.0.clone(), + embedded_curve_keys.embedwards25519.0.clone(), ); - processor(&orchestration_path, network, "monero", coordinator_key.1, monero_key.0, new_entropy()); let serai_key = { let serai_key = Zeroizing::new( @@ -346,6 +403,7 @@ fn key_gen(network: Network) { let _ = fs::create_dir_all(&serai_dir); fs::write(key_file, key.to_repr()).expect("couldn't write key"); + // TODO: Move embedded curve key gen here, and print them println!( "Public Key: {}", hex::encode((::generator() * key).to_bytes()) diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index cefe6455b..3387c4ede 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -12,8 +12,9 @@ pub fn processor( network: Network, coin: &'static str, _coordinator_key: ::G, - coin_key: Zeroizing<::F>, - entropy: Zeroizing<[u8; 32]>, + processor_key: Zeroizing<::F>, + substrate_evrf_key: Zeroizing>, + network_evrf_key: Zeroizing>, ) { let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( @@ -53,8 +54,9 @@ RUN apt install -y ca-certificates let mut env_vars = vec![ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), - ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())), - ("ENTROPY", hex::encode(entropy.as_ref())), + ("MESSAGE_QUEUE_KEY", hex::encode(processor_key.to_repr())), + ("SUBSTRATE_EVRF_KEY", hex::encode(substrate_evrf_key)), + ("NETWORK_EVRF_KEY", hex::encode(network_evrf_key)), ("NETWORK", coin.to_string()), ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_HOSTNAME", hostname), diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 9d29bc7c8..fa2f643c3 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -36,7 +36,10 @@ serde_json = { version = "1", default-features = false, features = ["std"] } # Cryptography ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } +blake2 = { version = "0.10", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } +ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false } +dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } @@ -81,12 +84,12 @@ dockertest = "0.5" serai-docker-tests = { path = "../tests/docker" } [features] -secp256k1 = ["k256", "frost/secp256k1"] +secp256k1 = ["k256", "dkg/evrf-secp256k1", "frost/secp256k1"] bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] ethereum = ["secp256k1", "ethereum-serai/tests"] -ed25519 = ["dalek-ff-group", "frost/ed25519"] +ed25519 = ["dalek-ff-group", "dkg/evrf-ed25519", "frost/ed25519"] monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] binaries = ["env_logger", "serai-env", "message-queue"] diff --git a/processor/messages/src/lib.rs 
b/processor/messages/src/lib.rs index 22360a1a5..98af97ce7 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; -use dkg::{Participant, ThresholdParams}; +use dkg::Participant; use serai_primitives::BlockHash; use in_instructions_primitives::{Batch, SignedBatch}; @@ -19,41 +19,31 @@ pub struct SubstrateContext { pub mod key_gen { use super::*; - #[derive( - Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, - )] - pub struct KeyGenId { - pub session: Session, - pub attempt: u32, - } - - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { // Instructs the Processor to begin the key generation process. // TODO: Should this be moved under Substrate? - GenerateKey { - id: KeyGenId, - params: ThresholdParams, - shares: u16, - }, - // Received commitments for the specified key generation protocol. - Commitments { - id: KeyGenId, - commitments: HashMap>, - }, - // Received shares for the specified key generation protocol. - Shares { - id: KeyGenId, - shares: Vec>>, - }, - /// Instruction to verify a blame accusation. - VerifyBlame { - id: KeyGenId, - accuser: Participant, - accused: Participant, - share: Vec, - blame: Option>, - }, + GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec)> }, + // Received participations for the specified key generation protocol. + Participation { session: Session, participant: Participant, participation: Vec }, + } + + impl core::fmt::Debug for CoordinatorMessage { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + match self { + CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => fmt + .debug_struct("CoordinatorMessage::GenerateKey") + .field("session", &session) + .field("threshold", &threshold) + .field("evrf_public_keys.len()", &evrf_public_keys.len()) + .finish_non_exhaustive(), + CoordinatorMessage::Participation { session, participant, .. } => fmt + .debug_struct("CoordinatorMessage::Participation") + .field("session", &session) + .field("participant", &participant) + .finish_non_exhaustive(), + } + } } impl CoordinatorMessage { @@ -62,42 +52,34 @@ pub mod key_gen { } } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { - // Created commitments for the specified key generation protocol. - Commitments { - id: KeyGenId, - commitments: Vec>, - }, - // Participant published invalid commitments. - InvalidCommitments { - id: KeyGenId, - faulty: Participant, - }, - // Created shares for the specified key generation protocol. - Shares { - id: KeyGenId, - shares: Vec>>, - }, - // Participant published an invalid share. - #[rustfmt::skip] - InvalidShare { - id: KeyGenId, - accuser: Participant, - faulty: Participant, - blame: Option>, - }, + // Participated in the specified key generation protocol. + Participation { session: Session, participation: Vec }, // Resulting keys from the specified key generation protocol. - GeneratedKeyPair { - id: KeyGenId, - substrate_key: [u8; 32], - network_key: Vec, - }, + GeneratedKeyPair { session: Session, substrate_key: [u8; 32], network_key: Vec }, // Blame this participant. 
- Blame { - id: KeyGenId, - participant: Participant, - }, + Blame { session: Session, participant: Participant }, + } + + impl core::fmt::Debug for ProcessorMessage { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + match self { + ProcessorMessage::Participation { session, .. } => fmt + .debug_struct("ProcessorMessage::Participation") + .field("session", &session) + .finish_non_exhaustive(), + ProcessorMessage::GeneratedKeyPair { session, .. } => fmt + .debug_struct("ProcessorMessage::GeneratedKeyPair") + .field("session", &session) + .finish_non_exhaustive(), + ProcessorMessage::Blame { session, participant } => fmt + .debug_struct("ProcessorMessage::Blame") + .field("session", &session) + .field("participant", &participant) + .finish_non_exhaustive(), + } + } } } @@ -328,16 +310,19 @@ impl CoordinatorMessage { pub fn intent(&self) -> Vec { match self { CoordinatorMessage::KeyGen(msg) => { - // Unique since key gen ID embeds the session and attempt let (sub, id) = match msg { - key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id), - key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id), - key_gen::CoordinatorMessage::Shares { id, .. } => (2, id), - key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id), + // Unique since we only have one attempt per session + key_gen::CoordinatorMessage::GenerateKey { session, .. } => { + (0, borsh::to_vec(session).unwrap()) + } + // Unique since one participation per participant per session + key_gen::CoordinatorMessage::Participation { session, participant, .. } => { + (1, borsh::to_vec(&(session, participant)).unwrap()) + } }; let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub]; - res.extend(&id.encode()); + res.extend(&id); res } CoordinatorMessage::Sign(msg) => { @@ -400,17 +385,21 @@ impl ProcessorMessage { match self { ProcessorMessage::KeyGen(msg) => { let (sub, id) = match msg { - // Unique since KeyGenId - key_gen::ProcessorMessage::Commitments { id, .. } => (0, id), - key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id), - key_gen::ProcessorMessage::Shares { id, .. } => (2, id), - key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id), - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id), - key_gen::ProcessorMessage::Blame { id, .. } => (5, id), + // Unique since we only have one participation per session (due to no re-attempts) + key_gen::ProcessorMessage::Participation { session, .. } => { + (0, borsh::to_vec(session).unwrap()) + } + key_gen::ProcessorMessage::GeneratedKeyPair { session, .. 
} => { + (1, borsh::to_vec(session).unwrap()) + } + // Unique since we only blame a participant once (as this is fatal) + key_gen::ProcessorMessage::Blame { session, participant } => { + (2, borsh::to_vec(&(session, participant)).unwrap()) + } }; let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub]; - res.extend(&id.encode()); + res.extend(&id); res } ProcessorMessage::Sign(msg) => { diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs index 297db1948..a059c350f 100644 --- a/processor/src/key_gen.rs +++ b/processor/src/key_gen.rs @@ -1,18 +1,20 @@ -use std::collections::HashMap; +use std::{ + io, + collections::{HashSet, HashMap}, +}; use zeroize::Zeroizing; -use rand_core::SeedableRng; +use rand_core::{RngCore, SeedableRng, OsRng}; use rand_chacha::ChaCha20Rng; +use blake2::{Digest, Blake2s256}; use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::group::GroupEncoding; -use frost::{ - curve::{Ciphersuite, Ristretto}, - dkg::{ - DkgError, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, pedpop::*, - }, +use ciphersuite::{ + group::{Group, GroupEncoding}, + Ciphersuite, Ristretto, }; +use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::*}; use log::info; @@ -21,6 +23,48 @@ use messages::key_gen::*; use crate::{Get, DbTxn, Db, create_db, networks::Network}; +mod generators { + use core::any::{TypeId, Any}; + use std::{ + sync::{LazyLock, Mutex}, + collections::HashMap, + }; + + use frost::dkg::evrf::*; + + use serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET; + + /// A cache of the generators used by the eVRF DKG. + /// + /// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a + /// generic, this takes advantage of `Any`. This static is isolated in a module to ensure + /// correctness can be evaluated solely by reviewing these few lines of code. + /// + /// This is arguably over-engineered as of right now, as we only need generators for Ristretto + /// and N::Curve. By having this HashMap, we enable de-duplication of the Ristretto == N::Curve + /// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve + /// case). 
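// Editor's note: the `Any`-keyed cache documented above, in miniature
// (illustrative names, not the crate's code). The real code keys on the
// Ciphersuite's TypeId and leaks the constructed generators to obtain a
// &'static reference, which also de-duplicates the Ristretto == N::Curve
// case automatically.

use core::any::{Any, TypeId};
use std::{
  collections::HashMap,
  sync::{LazyLock, Mutex},
};

static CACHE: LazyLock<Mutex<HashMap<TypeId, &'static (dyn Any + Send + Sync)>>> =
  LazyLock::new(|| Mutex::new(HashMap::new()));

fn cached<T: Any + Send + Sync>(make: impl FnOnce() -> T) -> &'static T {
  CACHE
    .lock()
    .unwrap()
    .entry(TypeId::of::<T>())
    // The first caller for this T constructs and leaks; later callers reuse
    .or_insert_with(|| Box::leak(Box::new(make())))
    .downcast_ref::<T>()
    .unwrap()
}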
+ static GENERATORS: LazyLock>> = + LazyLock::new(|| Mutex::new(HashMap::new())); + + pub(crate) fn generators() -> &'static EvrfGenerators { + GENERATORS + .lock() + .unwrap() + .entry(TypeId::of::()) + .or_insert_with(|| { + // If we haven't prior needed generators for this Ciphersuite, generate new ones + Box::leak(Box::new(EvrfGenerators::::new( + ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(), + MAX_KEY_SHARES_PER_SET.try_into().unwrap(), + ))) + }) + .downcast_ref() + .unwrap() + } +} +use generators::generators; + #[derive(Debug)] pub struct KeyConfirmed { pub substrate_keys: Vec>, @@ -29,15 +73,18 @@ pub struct KeyConfirmed { create_db!( KeyGenDb { - ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16), - // Not scoped to the set since that'd have latter attempts overwrite former - // A former attempt may become the finalized attempt, even if it doesn't in a timely manner - // Overwriting its commitments would be accordingly poor - CommitmentsDb: (key: &KeyGenId) -> HashMap>, - GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec, - // These do assume a key is only used once across sets, which holds true so long as a single - // participant is honest in their execution of the protocol - KeysDb: (network_key: &[u8]) -> Vec, + ParamsDb: (session: &Session) -> (u16, Vec<[u8; 32]>, Vec>), + ParticipationDb: (session: &Session) -> ( + HashMap>, + HashMap>, + ), + // GeneratedKeysDb, KeysDb use `()` for their value as we manually serialize their values + // TODO: Don't do that + GeneratedKeysDb: (session: &Session) -> (), + // These do assume a key is only used once across sets, which holds true if the threshold is + // honest + // TODO: Remove this assumption + KeysDb: (network_key: &[u8]) -> (), SessionDb: (network_key: &[u8]) -> Session, NetworkKeyDb: (session: Session) -> Vec, } @@ -65,8 +112,8 @@ impl GeneratedKeysDb { fn save_keys( txn: &mut impl DbTxn, - id: &KeyGenId, - substrate_keys: &[ThresholdCore], + session: &Session, + substrate_keys: &[ThresholdKeys], network_keys: &[ThresholdKeys], ) { let mut keys = Zeroizing::new(vec![]); @@ -74,14 +121,7 @@ impl GeneratedKeysDb { keys.extend(substrate_keys.serialize().as_slice()); keys.extend(network_keys.serialize().as_slice()); } - txn.put( - Self::key( - &id.session, - &substrate_keys[0].group_key().to_bytes(), - network_keys[0].group_key().to_bytes().as_ref(), - ), - keys, - ); + txn.put(Self::key(session), keys); } } @@ -91,11 +131,8 @@ impl KeysDb { session: Session, key_pair: &KeyPair, ) -> (Vec>, Vec>) { - let (keys_vec, keys) = GeneratedKeysDb::read_keys::( - txn, - &GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()), - ) - .unwrap(); + let (keys_vec, keys) = + GeneratedKeysDb::read_keys::(txn, &GeneratedKeysDb::key(&session)).unwrap(); assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes()); assert_eq!( { @@ -130,32 +167,105 @@ impl KeysDb { } } -type SecretShareMachines = - Vec<(SecretShareMachine, SecretShareMachine<::Curve>)>; -type KeyMachines = Vec<(KeyMachine, KeyMachine<::Curve>)>; +/* + On the Serai blockchain, users specify their public keys on the embedded curves. Substrate does + not have the libraries for the embedded curves and is unable to evaluate if the keys are valid + or not. + + We could add the libraries for the embedded curves to the blockchain, yet this would be a + non-trivial scope for what's effectively an embedded context. It'd also permanently bind our + consensus to these arbitrary curves. 
We would have the benefit of being able to also require PoKs + for the keys, ensuring no one uses someone else's key (creating oddities there). Since someone + who uses someone else's key can't actually participate, all it does in effect is give more key + shares to the holder of the private key, and make us unable to rely on eVRF keys as a secure way + to index validators (hence the usage of `Participant` throughout the messages here). + + We could remove invalid keys from the DKG, yet this would create a view of the DKG only the + processor (which does have the embedded curves) has. We'd need to reconcile it with the view of + the DKG which does include all keys (even the invalid keys). + + The easiest solution is to keep the views consistent by replacing invalid keys with valid keys + (which no one has the private key for). This keeps the view consistent. This does prevent those + who posted invalid keys from participating, and receiving their keys, which is the understood and + declared effect of them posting invalid keys. Since at least `t` people must honestly participate + for the DKG to complete, and since their honest participation means they had valid keys, we do + ensure at least `t` people participated and the DKG result can be reconstructed. + + We do lose fault tolerance, yet only by losing those faulty. Accordingly, this is accepted. + + Returns the coerced keys and faulty participants. +*/ +fn coerce_keys( + key_bytes: &[impl AsRef<[u8]>], +) -> (Vec<::G>, Vec) { + fn evrf_key(key: &[u8]) -> Option<::G> { + let mut repr = <::G as GroupEncoding>::Repr::default(); + if repr.as_ref().len() != key.len() { + None?; + } + repr.as_mut().copy_from_slice(key); + let point = Option::<::G>::from(<_>::from_bytes(&repr))?; + if bool::from(point.is_identity()) { + None?; + } + Some(point) + } + + let mut keys = Vec::with_capacity(key_bytes.len()); + let mut faulty = vec![]; + for (i, key) in key_bytes.iter().enumerate() { + let i = Participant::new( + 1 + u16::try_from(i).expect("performing a key gen with more than u16::MAX participants"), + ) + .unwrap(); + keys.push(match evrf_key::(key.as_ref()) { + Some(key) => key, + None => { + // Mark this participant faulty + faulty.push(i); + + // Generate a random key + let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(key).into()); + loop { + let mut repr = <::G as GroupEncoding>::Repr::default(); + rng.fill_bytes(repr.as_mut()); + if let Some(key) = + Option::<::G>::from(<_>::from_bytes(&repr)) + { + break key; + } + } + } + }); + } + + (keys, faulty) +} #[derive(Debug)] pub struct KeyGen { db: D, - entropy: Zeroizing<[u8; 32]>, - - active_commit: HashMap, Vec>)>, - #[allow(clippy::type_complexity)] - active_share: HashMap, Vec>>)>, + substrate_evrf_private_key: + Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, } impl KeyGen { #[allow(clippy::new_ret_no_self)] - pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen { - KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() } + pub fn new( + db: D, + substrate_evrf_private_key: Zeroizing< + <::EmbeddedCurve as Ciphersuite>::F, + >, + network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + ) -> KeyGen { + KeyGen { db, substrate_evrf_private_key, network_evrf_private_key } } pub fn in_set(&self, session: &Session) -> bool { // We determine if we're in set using if we have the parameters for a session's key generation - // The usage of 0 for the attempt is valid so 
long as we aren't malicious and accordingly - // aren't fatally slashed - // TODO: Revisit once we do DKG removals for being offline - ParamsDb::get(&self.db, session, 0).is_some() + // We only have these if we were told to generate a key for this session + ParamsDb::get(&self.db, session).is_some() } #[allow(clippy::type_complexity)] @@ -179,406 +289,351 @@ impl KeyGen { &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, - ) -> ProcessorMessage { - const SUBSTRATE_KEY_CONTEXT: &str = "substrate"; - const NETWORK_KEY_CONTEXT: &str = "network"; - let context = |id: &KeyGenId, key| { + ) -> Vec { + const SUBSTRATE_KEY_CONTEXT: &[u8] = b"substrate"; + const NETWORK_KEY_CONTEXT: &[u8] = b"network"; + fn context(session: Session, key_context: &[u8]) -> [u8; 32] { // TODO2: Also embed the chain ID/genesis block - format!( - "Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}", - id.session, - N::NETWORK, - id.attempt, - key, - ) - }; - - let rng = |label, id: KeyGenId| { - let mut transcript = RecommendedTranscript::new(label); - transcript.append_message(b"entropy", &self.entropy); - transcript.append_message(b"context", context(&id, "rng")); - ChaCha20Rng::from_seed(transcript.rng_seed(b"rng")) - }; - let coefficients_rng = |id| rng(b"Key Gen Coefficients", id); - let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id); - let share_rng = |id| rng(b"Key Gen Share", id); - - let key_gen_machines = |id, params: ThresholdParams, shares| { - let mut rng = coefficients_rng(id); - let mut machines = vec![]; - let mut commitments = vec![]; - for s in 0 .. shares { - let params = ThresholdParams::new( - params.t(), - params.n(), - Participant::new(u16::from(params.i()) + s).unwrap(), - ) - .unwrap(); - let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT)) - .generate_coefficients(&mut rng); - let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT)) - .generate_coefficients(&mut rng); - machines.push((substrate.0, network.0)); - let mut serialized = vec![]; - substrate.1.write(&mut serialized).unwrap(); - network.1.write(&mut serialized).unwrap(); - commitments.push(serialized); - } - (machines, commitments) - }; - - let secret_share_machines = |id, - params: ThresholdParams, - machines: SecretShareMachines, - commitments: HashMap>| - -> Result<_, ProcessorMessage> { - let mut rng = secret_shares_rng(id); - - #[allow(clippy::type_complexity)] - fn handle_machine( - rng: &mut ChaCha20Rng, - id: KeyGenId, - machine: SecretShareMachine, - commitments: HashMap>>, - ) -> Result< - (KeyMachine, HashMap>>), - ProcessorMessage, - > { - match machine.generate_secret_shares(rng, commitments) { - Ok(res) => Ok(res), - Err(e) => match e { - DkgError::ZeroParameter(_, _) | - DkgError::InvalidThreshold(_, _) | - DkgError::InvalidParticipant(_, _) | - DkgError::InvalidSigningSet | - DkgError::InvalidShare { .. } => unreachable!("{e:?}"), - DkgError::InvalidParticipantQuantity(_, _) | - DkgError::DuplicatedParticipant(_) | - DkgError::MissingParticipant(_) => { - panic!("coordinator sent invalid DKG commitments: {e:?}") - } - DkgError::InvalidCommitments(i) => { - Err(ProcessorMessage::InvalidCommitments { id, faulty: i })? - } - }, - } - } + let mut transcript = RecommendedTranscript::new(b"Serai eVRF Key Gen"); + transcript.append_message(b"network", N::ID); + transcript.append_message(b"session", session.0.to_le_bytes()); + transcript.append_message(b"key", key_context); + (&(&transcript.challenge(b"context"))[.. 
32]).try_into().unwrap() + } - let mut substrate_commitments = HashMap::new(); - let mut network_commitments = HashMap::new(); - for i in 1 ..= params.n() { - let i = Participant::new(i).unwrap(); - let mut commitments = commitments[&i].as_slice(); - substrate_commitments.insert( - i, - EncryptionKeyMessage::>::read(&mut commitments, params) - .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?, + match msg { + CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => { + info!("Generating new key. Session: {session:?}"); + + // Unzip the vector of eVRF keys + let substrate_evrf_public_keys = + evrf_public_keys.iter().map(|(key, _)| *key).collect::>(); + let network_evrf_public_keys = + evrf_public_keys.into_iter().map(|(_, key)| key).collect::>(); + + let mut participation = Vec::with_capacity(2048); + let mut faulty = HashSet::new(); + + // Participate for both Substrate and the network + fn participate( + context: [u8; 32], + threshold: u16, + evrf_public_keys: &[impl AsRef<[u8]>], + evrf_private_key: &Zeroizing<::F>, + faulty: &mut HashSet, + output: &mut impl io::Write, + ) { + let (coerced_keys, faulty_is) = coerce_keys::(evrf_public_keys); + for faulty_i in faulty_is { + faulty.insert(faulty_i); + } + let participation = EvrfDkg::::participate( + &mut OsRng, + generators(), + context, + threshold, + &coerced_keys, + evrf_private_key, + ); + participation.unwrap().write(output).unwrap(); + } + participate::( + context::(session, SUBSTRATE_KEY_CONTEXT), + threshold, + &substrate_evrf_public_keys, + &self.substrate_evrf_private_key, + &mut faulty, + &mut participation, ); - network_commitments.insert( - i, - EncryptionKeyMessage::>::read(&mut commitments, params) - .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?, + participate::( + context::(session, NETWORK_KEY_CONTEXT), + threshold, + &network_evrf_public_keys, + &self.network_evrf_private_key, + &mut faulty, + &mut participation, ); - if !commitments.is_empty() { - // Malicious Participant included extra bytes in their commitments - // (a potential DoS attack) - Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?; - } - } - - let mut key_machines = vec![]; - let mut shares = vec![]; - for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() { - let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(); - - let mut substrate_commitments = substrate_commitments.clone(); - substrate_commitments.remove(&actual_i); - let (substrate_machine, mut substrate_shares) = - handle_machine::(&mut rng, id, substrate_machine, substrate_commitments)?; - let mut network_commitments = network_commitments.clone(); - network_commitments.remove(&actual_i); - let (network_machine, network_shares) = - handle_machine(&mut rng, id, network_machine, network_commitments.clone())?; + // Save the params + ParamsDb::set( + txn, + &session, + &(threshold, substrate_evrf_public_keys, network_evrf_public_keys), + ); - key_machines.push((substrate_machine, network_machine)); + // Send back our Participation and all faulty parties + let mut faulty = faulty.into_iter().collect::>(); + faulty.sort(); - let mut these_shares: HashMap<_, _> = - substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect(); - for (i, share) in &mut these_shares { - share.extend(network_shares[i].serialize()); + let mut res = Vec::with_capacity(faulty.len() + 1); + for faulty in faulty { + res.push(ProcessorMessage::Blame { session, participant: faulty 
}); } - shares.push(these_shares); - } - Ok((key_machines, shares)) - }; - - match msg { - CoordinatorMessage::GenerateKey { id, params, shares } => { - info!("Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}"); + res.push(ProcessorMessage::Participation { session, participation }); - // Remove old attempts - if self.active_commit.remove(&id.session).is_none() && - self.active_share.remove(&id.session).is_none() - { - // If we haven't handled this session before, save the params - ParamsDb::set(txn, &id.session, id.attempt, &(params, shares)); - } - - let (machines, commitments) = key_gen_machines(id, params, shares); - self.active_commit.insert(id.session, (machines, commitments.clone())); - - ProcessorMessage::Commitments { id, commitments } + res } - CoordinatorMessage::Commitments { id, mut commitments } => { - info!("Received commitments for {:?}", id); - - if self.active_share.contains_key(&id.session) { - // We should've been told of a new attempt before receiving commitments again - // The coordinator is either missing messages or repeating itself - // Either way, it's faulty - panic!("commitments when already handled commitments"); - } - - let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); - - // Unwrap the machines, rebuilding them if we didn't have them in our cache - // We won't if the processor rebooted - // This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for - // attempt y - // The coordinator is trusted to be proper in this regard - let (prior, our_commitments) = self - .active_commit - .remove(&id.session) - .unwrap_or_else(|| key_gen_machines(id, params, share_quantity)); - - for (i, our_commitments) in our_commitments.into_iter().enumerate() { - assert!(commitments - .insert( - Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(), - our_commitments, - ) - .is_none()); - } - - CommitmentsDb::set(txn, &id, &commitments); - - match secret_share_machines(id, params, prior, commitments) { - Ok((machines, shares)) => { - self.active_share.insert(id.session, (machines, shares.clone())); - ProcessorMessage::Shares { id, shares } - } - Err(e) => e, - } - } + CoordinatorMessage::Participation { session, participant, participation } => { + info!("received participation from {:?} for {:?}", participant, session); + + let (threshold, substrate_evrf_public_keys, network_evrf_public_keys) = + ParamsDb::get(txn, &session).unwrap(); + + let n = substrate_evrf_public_keys + .len() + .try_into() + .expect("performing a key gen with more than u16::MAX participants"); + + // Read these `Participation`s + // If they fail basic sanity checks, fail fast + let (substrate_participation, network_participation) = { + let network_participation_start_pos = { + let mut participation = participation.as_slice(); + let start_len = participation.len(); + + let blame = vec![ProcessorMessage::Blame { session, participant }]; + let Ok(substrate_participation) = + Participation::::read(&mut participation, n) + else { + return blame; + }; + let len_at_network_participation_start_pos = participation.len(); + let Ok(network_participation) = Participation::::read(&mut participation, n) + else { + return blame; + }; - CoordinatorMessage::Shares { id, shares } => { - info!("Received shares for {:?}", id); - - let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); - - // Same commentary on inconsistency as above exists - let (machines, our_shares) = 
self.active_share.remove(&id.session).unwrap_or_else(|| { - let prior = key_gen_machines(id, params, share_quantity).0; - let (machines, shares) = - secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap()) - .expect("got Shares for a key gen which faulted"); - (machines, shares) - }); - - let mut rng = share_rng(id); - - fn handle_machine( - rng: &mut ChaCha20Rng, - id: KeyGenId, - // These are the params of our first share, not this machine's shares - params: ThresholdParams, - m: usize, - machine: KeyMachine, - shares_ref: &mut HashMap, - ) -> Result, ProcessorMessage> { - let params = ThresholdParams::new( - params.t(), - params.n(), - Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(), - ) - .unwrap(); - - // Parse the shares - let mut shares = HashMap::new(); - for i in 1 ..= params.n() { - let i = Participant::new(i).unwrap(); - let Some(share) = shares_ref.get_mut(&i) else { continue }; - shares.insert( - i, - EncryptedMessage::>::read(share, params).map_err(|_| { - ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None } - })?, - ); - } + // If they added random noise after their participations, they're faulty + // This prevents DoS by causing a slash upon such spam + if !participation.is_empty() { + return blame; + } - Ok( - (match machine.calculate_share(rng, shares) { - Ok(res) => res, - Err(e) => match e { - DkgError::ZeroParameter(_, _) | - DkgError::InvalidThreshold(_, _) | - DkgError::InvalidParticipant(_, _) | - DkgError::InvalidSigningSet | - DkgError::InvalidCommitments(_) => unreachable!("{e:?}"), - DkgError::InvalidParticipantQuantity(_, _) | - DkgError::DuplicatedParticipant(_) | - DkgError::MissingParticipant(_) => { - panic!("coordinator sent invalid DKG shares: {e:?}") + // If we've already generated these keys, we don't actually need to save these + // participations and continue. We solely have to verify them, as to identify malicious + // participants and prevent DoSs, before returning + if txn.get(GeneratedKeysDb::key(&session)).is_some() { + info!("already finished generating a key for {:?}", session); + + match EvrfDkg::::verify( + &mut OsRng, + generators(), + context::(session, SUBSTRATE_KEY_CONTEXT), + threshold, + // Ignores the list of participants who were faulty, as they were prior blamed + &coerce_keys::(&substrate_evrf_public_keys).0, + &HashMap::from([(participant, substrate_participation)]), + ) + .unwrap() + { + VerifyResult::Valid(_) | VerifyResult::NotEnoughParticipants => {} + VerifyResult::Invalid(faulty) => { + assert_eq!(faulty, vec![participant]); + return vec![ProcessorMessage::Blame { session, participant }]; } - DkgError::InvalidShare { participant, blame } => { - Err(ProcessorMessage::InvalidShare { - id, - accuser: params.i(), - faulty: participant, - blame: Some(blame.map(|blame| blame.serialize())).flatten(), - })? 
+ } + + match EvrfDkg::::verify( + &mut OsRng, + generators(), + context::(session, NETWORK_KEY_CONTEXT), + threshold, + // Ignores the list of participants who were faulty, as they were prior blamed + &coerce_keys::(&network_evrf_public_keys).0, + &HashMap::from([(participant, network_participation)]), + ) + .unwrap() + { + VerifyResult::Valid(_) | VerifyResult::NotEnoughParticipants => return vec![], + VerifyResult::Invalid(faulty) => { + assert_eq!(faulty, vec![participant]); + return vec![ProcessorMessage::Blame { session, participant }]; } - }, - }) - .complete(), - ) - } - - let mut substrate_keys = vec![]; - let mut network_keys = vec![]; - for (m, machines) in machines.into_iter().enumerate() { - let mut shares_ref: HashMap = - shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect(); - for (i, our_shares) in our_shares.iter().enumerate() { - if m != i { - assert!(shares_ref - .insert( - Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(), - our_shares - [&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()] - .as_ref(), - ) - .is_none()); + } } - } - let these_substrate_keys = - match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) { - Ok(keys) => keys, - Err(msg) => return msg, - }; - let these_network_keys = - match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) { - Ok(keys) => keys, - Err(msg) => return msg, - }; + // Return the position the network participation starts at + start_len - len_at_network_participation_start_pos + }; - for i in 1 ..= params.n() { - let i = Participant::new(i).unwrap(); - let Some(shares) = shares_ref.get(&i) else { continue }; - if !shares.is_empty() { - return ProcessorMessage::InvalidShare { - id, - accuser: these_substrate_keys.params().i(), - faulty: i, - blame: None, - }; - } - } + // Instead of re-serializing the `Participation`s we read, we just use the relevant + // sections of the existing byte buffer + ( + participation[.. network_participation_start_pos].to_vec(), + participation[network_participation_start_pos ..].to_vec(), + ) + }; - let mut these_network_keys = ThresholdKeys::new(these_network_keys); - N::tweak_keys(&mut these_network_keys); + // Since these are valid `Participation`s, save them + let (mut substrate_participations, mut network_participations) = + ParticipationDb::get(txn, &session) + .unwrap_or((HashMap::with_capacity(1), HashMap::with_capacity(1))); + assert!( + substrate_participations.insert(participant, substrate_participation).is_none() && + network_participations.insert(participant, network_participation).is_none(), + "received participation for someone multiple times" + ); + ParticipationDb::set( + txn, + &session, + &(substrate_participations.clone(), network_participations.clone()), + ); - substrate_keys.push(these_substrate_keys); - network_keys.push(these_network_keys); + // This block is taken from the eVRF DKG itself to evaluate the amount participating + { + let mut participating_weight = 0; + // This uses the Substrate maps as the maps are kept in synchrony + let mut evrf_public_keys_mut = substrate_evrf_public_keys.clone(); + for i in substrate_participations.keys() { + let evrf_public_key = substrate_evrf_public_keys[usize::from(u16::from(*i)) - 1]; + + // Remove this key from the Vec to prevent double-counting + /* + Double-counting would be a risk if multiple participants shared an eVRF public key + and participated. 
This code does still allow such participants (in order to let + participants be weighted), and any one of them participating will count as all + participating. This is fine as any one such participant will be able to decrypt + the shares for themselves and all other participants, so this is still a key + generated by an amount of participants who could simply reconstruct the key. + */ + let start_len = evrf_public_keys_mut.len(); + evrf_public_keys_mut.retain(|key| *key != evrf_public_key); + let end_len = evrf_public_keys_mut.len(); + let count = start_len - end_len; + + participating_weight += count; + } + if participating_weight < usize::from(threshold) { + return vec![]; + } } - let mut generated_substrate_key = None; - let mut generated_network_key = None; - for keys in substrate_keys.iter().zip(&network_keys) { - if generated_substrate_key.is_none() { - generated_substrate_key = Some(keys.0.group_key()); - generated_network_key = Some(keys.1.group_key()); + // If we now have the threshold participating, verify their `Participation`s + fn verify_dkg( + txn: &mut impl DbTxn, + session: Session, + true_if_substrate_false_if_network: bool, + threshold: u16, + evrf_public_keys: &[impl AsRef<[u8]>], + substrate_participations: &mut HashMap>, + network_participations: &mut HashMap>, + ) -> Result, Vec> { + // Parse the `Participation`s + let participations = (if true_if_substrate_false_if_network { + &*substrate_participations } else { - assert_eq!(generated_substrate_key, Some(keys.0.group_key())); - assert_eq!(generated_network_key, Some(keys.1.group_key())); + &*network_participations + }) + .iter() + .map(|(key, participation)| { + ( + *key, + Participation::read( + &mut participation.as_slice(), + evrf_public_keys.len().try_into().unwrap(), + ) + .expect("prior read participation was invalid"), + ) + }) + .collect(); + + // Actually call verify on the DKG + match EvrfDkg::::verify( + &mut OsRng, + generators(), + context::( + session, + if true_if_substrate_false_if_network { + SUBSTRATE_KEY_CONTEXT + } else { + NETWORK_KEY_CONTEXT + }, + ), + threshold, + // Ignores the list of participants who were faulty, as they were prior blamed + &coerce_keys::(evrf_public_keys).0, + &participations, + ) + .unwrap() + { + // If the DKG was valid, return it + VerifyResult::Valid(dkg) => Ok(dkg), + // This DKG had faulty participants, so create blame messages for them + VerifyResult::Invalid(faulty) => { + let mut blames = vec![]; + for participant in faulty { + // Remove from both maps for simplicity's sake + // There's no point in having one DKG complete yet not the other + assert!(substrate_participations.remove(&participant).is_some()); + assert!(network_participations.remove(&participant).is_some()); + blames.push(ProcessorMessage::Blame { session, participant }); + } + // Since we removed `Participation`s, write the updated versions to the database + ParticipationDb::set( + txn, + &session, + &(substrate_participations.clone(), network_participations.clone()), + ); + Err(blames)? 
+ } + VerifyResult::NotEnoughParticipants => { + // This is the first DKG, and we checked we were at the threshold OR + // This is the second DKG, as the first had no invalid participants, so we're still + // at the threshold + panic!("not enough participants despite checking we were at the threshold") + } } } - GeneratedKeysDb::save_keys::(txn, &id, &substrate_keys, &network_keys); - - ProcessorMessage::GeneratedKeyPair { - id, - substrate_key: generated_substrate_key.unwrap().to_bytes(), - // TODO: This can be made more efficient since tweaked keys may be a subset of keys - network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(), - } - } - - CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => { - let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0; - - let mut share_ref = share.as_slice(); - let Ok(substrate_share) = EncryptedMessage::< - Ristretto, - SecretShare<::F>, - >::read(&mut share_ref, params) else { - return ProcessorMessage::Blame { id, participant: accused }; + let substrate_dkg = match verify_dkg::( + txn, + session, + true, + threshold, + &substrate_evrf_public_keys, + &mut substrate_participations, + &mut network_participations, + ) { + Ok(dkg) => dkg, + // If we had any blames, immediately return them as necessary for the safety of + // `verify_dkg` (it assumes we don't call it again upon prior errors) + Err(blames) => return blames, }; - let Ok(network_share) = EncryptedMessage::< - N::Curve, - SecretShare<::F>, - >::read(&mut share_ref, params) else { - return ProcessorMessage::Blame { id, participant: accused }; - }; - if !share_ref.is_empty() { - return ProcessorMessage::Blame { id, participant: accused }; - } - let mut substrate_commitment_msgs = HashMap::new(); - let mut network_commitment_msgs = HashMap::new(); - let commitments = CommitmentsDb::get(txn, &id).unwrap(); - for (i, commitments) in commitments { - let mut commitments = commitments.as_slice(); - substrate_commitment_msgs - .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap()); - network_commitment_msgs - .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap()); - } + let network_dkg = match verify_dkg::( + txn, + session, + false, + threshold, + &network_evrf_public_keys, + &mut substrate_participations, + &mut network_participations, + ) { + Ok(dkg) => dkg, + Err(blames) => return blames, + }; - // There is a mild DoS here where someone with a valid blame bloats it to the maximum size - // Given the ambiguity, and limited potential to DoS (this being called means *someone* is - // getting fatally slashed) voids the need to ensure blame is minimal - let substrate_blame = - blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok()); - let network_blame = - blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok()); - - let substrate_blame = AdditionalBlameMachine::new( - &mut rand_core::OsRng, - context(&id, SUBSTRATE_KEY_CONTEXT), - params.n(), - substrate_commitment_msgs, - ) - .unwrap() - .blame(accuser, accused, substrate_share, substrate_blame); - let network_blame = AdditionalBlameMachine::new( - &mut rand_core::OsRng, - context(&id, NETWORK_KEY_CONTEXT), - params.n(), - network_commitment_msgs, - ) - .unwrap() - .blame(accuser, accused, network_share, network_blame); - - // If the accused was blamed for either, mark them as at fault - if (substrate_blame == accused) || (network_blame == accused) { - return ProcessorMessage::Blame { id, 
participant: accused }; + // Get our keys from each DKG + // TODO: Some of these keys may be decrypted by us, yet not actually meant for us, if + // another validator set our eVRF public key as their eVRF public key. We either need to + // ensure the coordinator tracks amount of shares we're supposed to have by the eVRF public + // keys OR explicitly reduce to the keys we're supposed to have based on our `i` index. + let substrate_keys = substrate_dkg.keys(&self.substrate_evrf_private_key); + let mut network_keys = network_dkg.keys(&self.network_evrf_private_key); + // Tweak the keys for the network + for network_keys in &mut network_keys { + N::tweak_keys(network_keys); } + GeneratedKeysDb::save_keys::(txn, &session, &substrate_keys, &network_keys); - ProcessorMessage::Blame { id, participant: accuser } + // Since no one we verified was invalid, and we had the threshold, yield the new keys + vec![ProcessorMessage::GeneratedKeyPair { + session, + substrate_key: substrate_keys[0].group_key().to_bytes(), + // TODO: This can be made more efficient since tweaked keys may be a subset of keys + network_key: network_keys[0].group_key().to_bytes().as_ref().to_vec(), + }] } } } diff --git a/processor/src/main.rs b/processor/src/main.rs index e0d97aa68..2d05ad4dc 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -2,8 +2,11 @@ use std::{time::Duration, collections::HashMap}; use zeroize::{Zeroize, Zeroizing}; -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{group::GroupEncoding, Ciphersuite}; +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::evrf::EvrfCurve; use log::{info, warn}; use tokio::time::sleep; @@ -128,7 +131,7 @@ struct TributaryMutable { `Burn`s. Substrate also decides when to move to a new multisig, hence why this entire object is - Substate-mutable. + Substrate-mutable. Since MultisigManager should always be verifiable, and the Tributary is temporal, MultisigManager being entirely SubstrateMutable shows proper data pipe-lining. 
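// Editorial aside, not part of this diff: the processor now boots with two eVRF private
// keys rather than one entropy blob. A minimal sketch of deriving the matching public
// keys, as registered on-chain and later supplied via `GenerateKey`'s `evrf_public_keys`
// (this mirrors the key_gen tests later in this diff; `evrf_public_key` is an
// illustrative helper, not a function in this PR):
//
//   use ciphersuite::{group::GroupEncoding, Ciphersuite};
//   use dkg::evrf::EvrfCurve;
//   use zeroize::Zeroizing;
//
//   fn evrf_public_key<C: EvrfCurve>(
//     private_key: &Zeroizing<<C::EmbeddedCurve as Ciphersuite>::F>,
//   ) -> Vec<u8> {
//     // The public key is the embedded curve's generator times the private scalar
//     (<C::EmbeddedCurve as Ciphersuite>::generator() * **private_key)
//       .to_bytes()
//       .as_ref()
//       .to_vec()
//   }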
@@ -224,7 +227,9 @@ async fn handle_coordinator_msg(
   match msg.msg.clone() {
     CoordinatorMessage::KeyGen(msg) => {
-      coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await;
+      for msg in tributary_mutable.key_gen.handle(txn, msg) {
+        coordinator.send(msg).await;
+      }
     }

     CoordinatorMessage::Sign(msg) => {
@@ -485,41 +490,31 @@ async fn boot(
   network: &N,
   coordinator: &mut Co,
 ) -> (D, TributaryMutable<N, D>, SubstrateMutable<N, D>) {
-  let mut entropy_transcript = {
-    let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified"));
-    if entropy.len() != 64 {
-      panic!("entropy isn't the right length");
+  fn read_key_from_env<C: Ciphersuite>(label: &'static str) -> Zeroizing<C::F> {
+    let key_hex =
+      Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided")));
+    let bytes = Zeroizing::new(
+      hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")),
+    );
+
+    let mut repr = <C::F as PrimeField>::Repr::default();
+    if repr.as_ref().len() != bytes.len() {
+      panic!("{label} wasn't the correct length");
     }
-    let mut bytes =
-      Zeroizing::new(hex::decode(entropy).map_err(|_| ()).expect("entropy wasn't hex-formatted"));
-    if bytes.len() != 32 {
-      bytes.zeroize();
-      panic!("entropy wasn't 32 bytes");
-    }
-    let mut entropy = Zeroizing::new([0; 32]);
-    let entropy_mut: &mut [u8] = entropy.as_mut();
-    entropy_mut.copy_from_slice(bytes.as_ref());
-
-    let mut transcript = RecommendedTranscript::new(b"Serai Processor Entropy");
-    transcript.append_message(b"entropy", entropy);
-    transcript
-  };
-
-  // TODO: Save a hash of the entropy to the DB and make sure the entropy didn't change
-
-  let mut entropy = |label| {
-    let mut challenge = entropy_transcript.challenge(label);
-    let mut res = Zeroizing::new([0; 32]);
-    let res_mut: &mut [u8] = res.as_mut();
-    res_mut.copy_from_slice(&challenge[.. 32]);
-    challenge.zeroize();
+    repr.as_mut().copy_from_slice(bytes.as_slice());
+    let res = Zeroizing::new(
+      Option::from(<C::F as PrimeField>::from_repr(repr))
+        .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")),
+    );
+    repr.as_mut().zeroize();
     res
-  };
+  }

-  // We don't need to re-issue GenerateKey orders because the coordinator is expected to
-  // schedule/notify us of new attempts
-  // TODO: Is this above comment still true? Not at all due to the planned lack of DKG timeouts?
-  let key_gen = KeyGen::<N, _>::new(raw_db.clone(), entropy(b"key-gen_entropy"));
+  let key_gen = KeyGen::<N, _>::new(
+    raw_db.clone(),
+    read_key_from_env::<<Ristretto as EvrfCurve>::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"),
+    read_key_from_env::<<N::Curve as EvrfCurve>::EmbeddedCurve>("NETWORK_EVRF_KEY"),
+  );

   let (multisig_manager, current_keys, actively_signing) =
     MultisigManager::new(raw_db, network).await;
diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs
index ee3cd24af..81838ae12 100644
--- a/processor/src/networks/mod.rs
+++ b/processor/src/networks/mod.rs
@@ -5,6 +5,7 @@ use async_trait::async_trait;
 use thiserror::Error;

 use frost::{
+  dkg::evrf::EvrfCurve,
   curve::{Ciphersuite, Curve},
   ThresholdKeys,
   sign::PreprocessMachine,
@@ -240,9 +241,11 @@ pub struct PreparedSend {
 }

 #[async_trait]
+#[rustfmt::skip]
 pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug {
   /// The elliptic curve used for this network.
-  type Curve: Curve;
+  type Curve: Curve +
+    EvrfCurve<EmbeddedCurve: Ciphersuite<G: ec_divisors::DivisorCurve<FieldElement = <Self::Curve as Ciphersuite>::F>>>;

   /// The type representing the transaction for this network.
type Transaction: Transaction; // TODO: Review use of diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs index 154702fef..6ffa29df2 100644 --- a/processor/src/networks/monero.rs +++ b/processor/src/networks/monero.rs @@ -663,7 +663,7 @@ impl Network for Monero { keys: ThresholdKeys, transaction: SignableTransaction, ) -> Result { - match transaction.0.clone().multisig(&keys) { + match transaction.0.clone().multisig(keys) { Ok(machine) => Ok(machine), Err(e) => panic!("failed to create a multisig machine for TX: {e}"), } diff --git a/processor/src/tests/key_gen.rs b/processor/src/tests/key_gen.rs index 047e006ac..43f0de058 100644 --- a/processor/src/tests/key_gen.rs +++ b/processor/src/tests/key_gen.rs @@ -2,10 +2,13 @@ use std::collections::HashMap; use zeroize::Zeroizing; -use rand_core::{RngCore, OsRng}; +use rand_core::OsRng; -use ciphersuite::group::GroupEncoding; -use frost::{Participant, ThresholdParams, tests::clone_without}; +use ciphersuite::{ + group::{ff::Field, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::{Participant, ThresholdParams, evrf::*}; use serai_db::{DbTxn, Db, MemDb}; @@ -18,113 +21,102 @@ use crate::{ key_gen::{KeyConfirmed, KeyGen}, }; -const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 }; +const SESSION: Session = Session(1); pub fn test_key_gen() { - let mut entropies = HashMap::new(); let mut dbs = HashMap::new(); + let mut substrate_evrf_keys = HashMap::new(); + let mut network_evrf_keys = HashMap::new(); + let mut evrf_public_keys = vec![]; let mut key_gens = HashMap::new(); for i in 1 ..= 5 { - let mut entropy = Zeroizing::new([0; 32]); - OsRng.fill_bytes(entropy.as_mut()); - entropies.insert(i, entropy); let db = MemDb::new(); dbs.insert(i, db.clone()); - key_gens.insert(i, KeyGen::::new(db, entropies[&i].clone())); + + let substrate_evrf_key = Zeroizing::new( + <::EmbeddedCurve as Ciphersuite>::F::random(&mut OsRng), + ); + substrate_evrf_keys.insert(i, substrate_evrf_key.clone()); + let network_evrf_key = Zeroizing::new( + <::EmbeddedCurve as Ciphersuite>::F::random(&mut OsRng), + ); + network_evrf_keys.insert(i, network_evrf_key.clone()); + + evrf_public_keys.push(( + (<::EmbeddedCurve as Ciphersuite>::generator() * *substrate_evrf_key) + .to_bytes(), + (<::EmbeddedCurve as Ciphersuite>::generator() * *network_evrf_key) + .to_bytes() + .as_ref() + .to_vec(), + )); + key_gens + .insert(i, KeyGen::::new(db, substrate_evrf_key.clone(), network_evrf_key.clone())); } - let mut all_commitments = HashMap::new(); + let mut participations = HashMap::new(); for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle( + let mut msgs = key_gen.handle( &mut txn, CoordinatorMessage::GenerateKey { - id: ID, - params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) - .unwrap(), - shares: 1, + session: SESSION, + threshold: 3, + evrf_public_keys: evrf_public_keys.clone(), }, - ) { - assert_eq!(id, ID); - assert_eq!(commitments.len(), 1); - all_commitments - .insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments.swap_remove(0)); - } else { - panic!("didn't get commitments back"); - } - txn.commit(); - } - - // 1 is rebuilt on every step - // 2 is rebuilt here - // 3 ... 
are rebuilt once, one at each of the following steps - let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| { - key_gens.remove(&i); - key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); - }; - rebuild(&mut key_gens, &dbs, 1); - rebuild(&mut key_gens, &dbs, 2); - - let mut all_shares = HashMap::new(); - for i in 1 ..= 5 { - let key_gen = key_gens.get_mut(&i).unwrap(); - let mut txn = dbs.get_mut(&i).unwrap().txn(); - let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle( - &mut txn, - CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) }, - ) { - assert_eq!(id, ID); - assert_eq!(shares.len(), 1); - all_shares.insert(i, shares.swap_remove(0)); - } else { - panic!("didn't get shares back"); - } + ); + assert_eq!(msgs.len(), 1); + let ProcessorMessage::Participation { session, participation } = msgs.swap_remove(0) else { + panic!("didn't get a participation") + }; + assert_eq!(session, SESSION); + participations.insert(i, participation); txn.commit(); } - // Rebuild 1 and 3 - rebuild(&mut key_gens, &dbs, 1); - rebuild(&mut key_gens, &dbs, 3); - let mut res = None; for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle( - &mut txn, - CoordinatorMessage::Shares { - id: ID, - shares: vec![all_shares - .iter() - .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) - .collect()], - }, - ) { - assert_eq!(id, ID); - if res.is_none() { - res = Some((substrate_key, network_key.clone())); + for j in 1 ..= 5 { + let mut msgs = key_gen.handle( + &mut txn, + CoordinatorMessage::Participation { + session: SESSION, + participant: Participant::new(u16::try_from(j).unwrap()).unwrap(), + participation: participations[&j].clone(), + }, + ); + if j != 3 { + assert!(msgs.is_empty()); + } + if j == 3 { + assert_eq!(msgs.len(), 1); + let ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } = + msgs.swap_remove(0) + else { + panic!("didn't get a generated key pair") + }; + assert_eq!(session, SESSION); + + if res.is_none() { + res = Some((substrate_key, network_key.clone())); + } + assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key)); } - assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key)); - } else { - panic!("didn't get key back"); } + txn.commit(); } let res = res.unwrap(); - // Rebuild 1 and 4 - rebuild(&mut key_gens, &dbs, 1); - rebuild(&mut key_gens, &dbs, 4); - for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm( &mut txn, - ID.session, + SESSION, &KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), ); txn.commit(); diff --git a/spec/DKG Exclusions.md b/spec/DKG Exclusions.md deleted file mode 100644 index 1677da8a0..000000000 --- a/spec/DKG Exclusions.md +++ /dev/null @@ -1,23 +0,0 @@ -Upon an issue with the DKG, the honest validators must remove the malicious -validators. Ideally, a threshold signature would be used, yet that would require -a threshold key (which would require authentication by a MuSig signature). 
A -MuSig signature which specifies the signing set (or rather, the excluded -signers) achieves the most efficiency. - -While that resolves the on-chain behavior, the Tributary also has to perform -exclusion. This has the following forms: - -1) Rejecting further transactions (required) -2) Rejecting further participation in Tendermint - -With regards to rejecting further participation in Tendermint, it's *ideal* to -remove the validator from the list of validators. Each validator removed from -participation, yet not from the list of validators, increases the likelihood of -the network failing to form consensus. - -With regards to the economic security, an honest 67% may remove a faulty -(explicitly or simply offline) 33%, letting 67% of the remaining 67% (4/9ths) -take control of the associated private keys. In such a case, the malicious -parties are defined as the 4/9ths of validators with access to the private key -and the 33% removed (who together form >67% of the originally intended -validator set and have presumably provided enough stake to cover losses). diff --git a/spec/cryptography/Distributed Key Generation.md b/spec/cryptography/Distributed Key Generation.md index fae5ff905..d0f209c19 100644 --- a/spec/cryptography/Distributed Key Generation.md +++ b/spec/cryptography/Distributed Key Generation.md @@ -1,35 +1,7 @@ # Distributed Key Generation -Serai uses a modification of Pedersen's Distributed Key Generation, which is -actually Feldman's Verifiable Secret Sharing Scheme run by every participant, as -described in the FROST paper. The modification included in FROST was to include -a Schnorr Proof of Knowledge for coefficient zero, preventing rogue key attacks. -This results in a two-round protocol. - -### Encryption - -In order to protect the secret shares during communication, the `dkg` library -establishes a public key for encryption at the start of a given protocol. -Every encrypted message (such as the secret shares) then includes a per-message -encryption key. These two keys are used in an Elliptic-curve Diffie-Hellman -handshake to derive a shared key. This shared key is then hashed to obtain a key -and IV for use in a ChaCha20 stream cipher instance, which is xor'd against a -message to encrypt it. - -### Blame - -Since each message has a distinct key attached, and accordingly a distinct -shared key, it's possible to reveal the shared key for a specific message -without revealing any other message's decryption keys. This is utilized when a -participant misbehaves. A participant who receives an invalid encrypted message -publishes its key, able to without concern for side effects, With the key -published, all participants can decrypt the message in order to decide blame. - -While key reuse by a participant is considered as them revealing the messages -themselves, and therefore out of scope, there is an attack where a malicious -adversary claims another participant's encryption key. They'll fail to encrypt -their message, and the recipient will issue a blame statement. This blame -statement, intended to reveal the malicious adversary, also reveals the message -by the participant whose keys were co-opted. To resolve this, a -proof-of-possession is also included with encrypted messages, ensuring only -those actually with per-message keys can claim to use them. +Serai uses a modification of the one-round Distributed Key Generation described +in the [eVRF](https://eprint.iacr.org/2024/397) paper. 
We only require a
+threshold to participate, sacrificing unbiasedness for robustness, and implement a
+verifiable encryption scheme such that anyone can verify a ciphertext
+encrypts the expected secret share.
diff --git a/spec/processor/Processor.md b/spec/processor/Processor.md
index ca8cf4282..55d3baf37 100644
--- a/spec/processor/Processor.md
+++ b/spec/processor/Processor.md
@@ -9,29 +9,23 @@ This document primarily discusses its flow with regards to the coordinator.

 ### Generate Key

 On `key_gen::CoordinatorMessage::GenerateKey`, the processor begins a pair of
-instances of the distributed key generation protocol specified in the FROST
-paper.
+instances of the distributed key generation protocol.

-The first instance is for a key to use on the external network. The second
-instance is for a Ristretto public key used to publish data to the Serai
-blockchain. This pair of FROST DKG instances is considered a single instance of
-Serai's overall key generation protocol.
+The first instance is for a Ristretto public key used to publish data to the
+Serai blockchain. The second instance is for a key to use on the external
+network. This pair of DKG instances is considered a single instance of Serai's
+overall DKG protocol.

-The commitments for both protocols are sent to the coordinator in a single
-`key_gen::ProcessorMessage::Commitments`.
+The participations in both protocols are sent to the coordinator in
+`key_gen::ProcessorMessage::Participation` messages, individually, as they come
+in.

-### Key Gen Commitments
+### Key Gen Participations

-On `key_gen::CoordinatorMessage::Commitments`, the processor continues the
-specified key generation instance. The secret shares for each fellow
-participant are sent to the coordinator in a
-`key_gen::ProcessorMessage::Shares`.
-
-#### Key Gen Shares
-
-On `key_gen::CoordinatorMessage::Shares`, the processor completes the specified
-key generation instance. The generated key pair is sent to the coordinator in a
-`key_gen::ProcessorMessage::GeneratedKeyPair`.
+On `key_gen::CoordinatorMessage::Participation`, the processor stores the
+contained participation, verifying the participations are sane. Once it
+receives `t` honest participations, the processor completes the DKG and sends
+the generated key pair to the coordinator in a
+`key_gen::ProcessorMessage::GeneratedKeyPair`.
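As a sketch of this flow (mirroring `processor/src/tests/key_gen.rs`; the message
and type names are the processor's, while the driver itself is illustrative):

```rust
// On GenerateKey, the processor emits its own Participation, plus a Blame for
// every supplied eVRF public key which was invalid
for msg in key_gen.handle(
  &mut txn,
  CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys },
) {
  // broadcast via the coordinator
}

// Every validator's Participation is relayed as it arrives. Most yield no
// response; once `t` honest participations have been received, the generated
// key pair is returned
for msg in key_gen.handle(
  &mut txn,
  CoordinatorMessage::Participation { session, participant, participation },
) {
  if let ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } = msg {
    // forward to the coordinator so the key pair may be confirmed on Serai
  }
}
```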
### Confirm Key Pair diff --git a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml index 072f7460e..ea26485fd 100644 --- a/substrate/abi/Cargo.toml +++ b/substrate/abi/Cargo.toml @@ -16,8 +16,10 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } -scale-info = { version = "2", default-features = false, features = ["derive"] } +bitvec = { version = "1", default-features = false, features = ["alloc", "serde"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "bit-vec"] } +scale-info = { version = "2", default-features = false, features = ["derive", "bit-vec"] } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } @@ -40,6 +42,8 @@ serai-signals-primitives = { path = "../signals/primitives", version = "0.1", de [features] std = [ + "bitvec/std", + "scale/std", "scale-info/std", diff --git a/substrate/abi/src/validator_sets.rs b/substrate/abi/src/validator_sets.rs index 1e1e33591..7a7bdc006 100644 --- a/substrate/abi/src/validator_sets.rs +++ b/substrate/abi/src/validator_sets.rs @@ -11,10 +11,14 @@ use serai_validator_sets_primitives::*; pub enum Call { set_keys { network: NetworkId, - removed_participants: BoundedVec>, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, }, + set_embedded_elliptic_curve_key { + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + }, report_slashes { network: NetworkId, slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>, diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index 629312c01..e653c9af6 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -20,6 +20,8 @@ workspace = true zeroize = "^1.5" thiserror = { version = "1", optional = true } +bitvec = { version = "1", default-features = false, features = ["alloc", "serde"] } + hex = "0.4" scale = { package = "parity-scale-codec", version = "3" } serde = { version = "1", features = ["derive"], optional = true } diff --git a/substrate/client/src/serai/validator_sets.rs b/substrate/client/src/serai/validator_sets.rs index ec67bae0f..899904066 100644 --- a/substrate/client/src/serai/validator_sets.rs +++ b/substrate/client/src/serai/validator_sets.rs @@ -1,13 +1,14 @@ use scale::Encode; use sp_core::sr25519::{Public, Signature}; +use sp_runtime::BoundedVec; use serai_abi::primitives::Amount; pub use serai_abi::validator_sets::primitives; -use primitives::{Session, ValidatorSet, KeyPair}; +use primitives::{MAX_KEY_LEN, Session, ValidatorSet, KeyPair}; use crate::{ - primitives::{NetworkId, SeraiAddress}, + primitives::{EmbeddedEllipticCurve, NetworkId, SeraiAddress}, Transaction, Serai, TemporalSerai, SeraiError, }; @@ -107,6 +108,21 @@ impl<'a> SeraiValidatorSets<'a> { self.0.storage(PALLET, "CurrentSession", network).await } + pub async fn embedded_elliptic_curve_key( + &self, + validator: Public, + embedded_elliptic_curve: EmbeddedEllipticCurve, + ) -> Result>, SeraiError> { + self + .0 + .storage( + PALLET, + "EmbeddedEllipticCurveKeys", + (sp_core::hashing::blake2_128(&validator.encode()), validator, embedded_elliptic_curve), + ) + .await + } + pub async fn participants( &self, network: NetworkId, @@ -188,21 +204,30 @@ impl<'a> 
SeraiValidatorSets<'a> { pub fn set_keys( network: NetworkId, - removed_participants: sp_runtime::BoundedVec< - SeraiAddress, - sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>, - >, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) -> Transaction { Serai::unsigned(serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::set_keys { network, - removed_participants, key_pair, + signature_participants, signature, })) } + pub fn set_embedded_elliptic_curve_key( + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + ) -> serai_abi::Call { + serai_abi::Call::ValidatorSets( + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + }, + ) + } + pub fn allocate(network: NetworkId, amount: Amount) -> serai_abi::Call { serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::allocate { network, amount }) } diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index 3238501a3..c3b66c0dd 100644 --- a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -5,6 +5,8 @@ use zeroize::Zeroizing; use rand_core::OsRng; use sp_core::{ + ConstU32, + bounded_vec::BoundedVec, sr25519::{Pair, Signature}, Pair as PairTrait, }; @@ -14,8 +16,9 @@ use frost::dkg::musig::musig; use schnorrkel::Schnorrkel; use serai_client::{ + primitives::EmbeddedEllipticCurve, validator_sets::{ - primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message}, + primitives::{MAX_KEY_LEN, ValidatorSet, KeyPair, musig_context, set_keys_message}, ValidatorSetsEvent, }, Amount, Serai, SeraiValidatorSets, @@ -58,7 +61,7 @@ pub async fn set_keys( let sig = frost::tests::sign_without_caching( &mut OsRng, frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b"substrate"), &musig_keys), - &set_keys_message(&set, &[], &key_pair), + &set_keys_message(&set, &key_pair), ); // Set the key pair @@ -66,8 +69,8 @@ pub async fn set_keys( serai, &SeraiValidatorSets::set_keys( set.network, - vec![].try_into().unwrap(), key_pair.clone(), + bitvec::bitvec!(u8, bitvec::prelude::Lsb0; 1; musig_keys.len()), Signature(sig.to_bytes()), ), ) @@ -82,6 +85,24 @@ pub async fn set_keys( block } +#[allow(dead_code)] +pub async fn set_embedded_elliptic_curve_key( + serai: &Serai, + pair: &Pair, + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + nonce: u32, +) -> [u8; 32] { + // get the call + let tx = serai.sign( + pair, + SeraiValidatorSets::set_embedded_elliptic_curve_key(embedded_elliptic_curve, key), + nonce, + 0, + ); + publish_tx(serai, &tx).await +} + #[allow(dead_code)] pub async fn allocate_stake( serai: &Serai, diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index c2c6c509d..a2ccf22b9 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -7,7 +7,8 @@ use sp_core::{ use serai_client::{ primitives::{ - NETWORKS, NetworkId, BlockHash, insecure_pair_from_name, FAST_EPOCH_DURATION, TARGET_BLOCK_TIME, + FAST_EPOCH_DURATION, TARGET_BLOCK_TIME, NETWORKS, EmbeddedEllipticCurve, NetworkId, BlockHash, + insecure_pair_from_name, }, validator_sets::{ primitives::{Session, ValidatorSet, KeyPair}, @@ -23,7 +24,7 @@ use serai_client::{ mod common; use common::{ tx::publish_tx, - validator_sets::{allocate_stake, deallocate_stake, set_keys}, + validator_sets::{set_embedded_elliptic_curve_key, allocate_stake, 
deallocate_stake, set_keys}, }; fn get_random_key_pair() -> KeyPair { @@ -223,12 +224,39 @@ async fn validator_set_rotation() { // add 1 participant let last_participant = accounts[4].clone(); + + // If this is the first iteration, set embedded elliptic curve keys + if i == 0 { + for (i, embedded_elliptic_curve) in + [EmbeddedEllipticCurve::Embedwards25519, EmbeddedEllipticCurve::Secq256k1] + .into_iter() + .enumerate() + { + set_embedded_elliptic_curve_key( + &serai, + &last_participant, + embedded_elliptic_curve, + vec![ + 0; + match embedded_elliptic_curve { + EmbeddedEllipticCurve::Embedwards25519 => 32, + EmbeddedEllipticCurve::Secq256k1 => 33, + } + ] + .try_into() + .unwrap(), + i.try_into().unwrap(), + ) + .await; + } + } + let hash = allocate_stake( &serai, network, key_shares[&network], &last_participant, - i.try_into().unwrap(), + (2 + i).try_into().unwrap(), ) .await; participants.push(last_participant.public()); diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index 0e551c72b..5da8ce85b 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -27,6 +27,10 @@ log = "0.4" schnorrkel = "0.11" +ciphersuite = { path = "../../crypto/ciphersuite" } +embedwards25519 = { path = "../../crypto/evrf/embedwards25519" } +secq256k1 = { path = "../../crypto/evrf/secq256k1" } + libp2p = "0.52" sp-core = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index e67674cc5..ddc501b9b 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -1,13 +1,17 @@ use core::marker::PhantomData; -use std::collections::HashSet; -use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; +use sp_core::Pair as PairTrait; use sc_service::ChainType; +use ciphersuite::{group::GroupEncoding, Ciphersuite}; +use embedwards25519::Embedwards25519; +use secq256k1::Secq256k1; + use serai_runtime::{ - primitives::*, WASM_BINARY, BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig, - CoinsConfig, ValidatorSetsConfig, SignalsConfig, BabeConfig, GrandpaConfig, EmissionsConfig, + primitives::*, validator_sets::AllEmbeddedEllipticCurveKeysAtGenesis, WASM_BINARY, + BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig, CoinsConfig, ValidatorSetsConfig, + SignalsConfig, BabeConfig, GrandpaConfig, EmissionsConfig, }; pub type ChainSpec = sc_service::GenericChainSpec; @@ -16,6 +20,11 @@ fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } +fn insecure_arbitrary_public_key_from_name(name: &'static str) -> Vec { + let key = insecure_arbitrary_key_from_name::(name); + (C::generator() * key).to_bytes().as_ref().to_vec() +} + fn wasm_binary() -> Vec { // TODO: Accept a config of runtime path const WASM_PATH: &str = "/runtime/serai.wasm"; @@ -32,7 +41,21 @@ fn devnet_genesis( validators: &[&'static str], endowed_accounts: Vec, ) -> RuntimeGenesisConfig { - let validators = validators.iter().map(|name| account_from_name(name)).collect::>(); + let validators = validators + .iter() + .map(|name| { + ( + account_from_name(name), + AllEmbeddedEllipticCurveKeysAtGenesis { + embedwards25519: insecure_arbitrary_public_key_from_name::(name) + .try_into() + .unwrap(), + secq256k1: insecure_arbitrary_public_key_from_name::(name).try_into().unwrap(), + }, + ) + }) + .collect::>(); + RuntimeGenesisConfig { system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData }, @@ -68,21 +91,22 @@ fn devnet_genesis( NetworkId::Monero => 
(NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))),
         })
         .collect(),
-      participants: validators.clone(),
+      participants: validators.iter().map(|(validator, _)| *validator).collect(),
     },
     signals: SignalsConfig::default(),
     babe: BabeConfig {
-      authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(),
+      authorities: validators.iter().map(|validator| (validator.0.into(), 1)).collect(),
       epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG),
       _config: PhantomData,
     },
     grandpa: GrandpaConfig {
-      authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(),
+      authorities: validators.into_iter().map(|validator| (validator.0.into(), 1)).collect(),
       _config: PhantomData,
     },
   }
 }

+/*
 fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig {
   let validators = validators
     .into_iter()
@@ -140,6 +164,7 @@ fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> Runtime
     },
   }
 }
+*/

 pub fn development_config() -> ChainSpec {
   let wasm_binary = wasm_binary();
@@ -218,7 +243,7 @@ pub fn local_config() -> ChainSpec {
 }

 pub fn testnet_config() -> ChainSpec {
-  let wasm_binary = wasm_binary();
+  // let wasm_binary = wasm_binary();

   ChainSpec::from_genesis(
     // Name
@@ -227,7 +252,7 @@
     "testnet-2",
     ChainType::Live,
     move || {
-      let _ = testnet_genesis(&wasm_binary, vec![]);
+      // let _ = testnet_genesis(&wasm_binary, vec![])
       todo!()
     },
     // Bootnodes
diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml
index 0e1e8f387..4a495b53b 100644
--- a/substrate/primitives/Cargo.toml
+++ b/substrate/primitives/Cargo.toml
@@ -18,6 +18,8 @@ workspace = true
 [dependencies]
 zeroize = { version = "^1.5", features = ["derive"], optional = true }

+ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, optional = true }
+
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }
 scale-info = { version = "2", default-features = false, features = ["derive"] }

@@ -35,7 +37,7 @@ frame-support = { git = "https://github.com/serai-dex/substrate", default-featur
 rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }

 [features]
-std = ["zeroize", "scale/std", "borsh?/std", "serde?/std", "scale-info/std", "sp-core/std", "sp-runtime/std", "frame-support/std"]
+std = ["zeroize", "ciphersuite/std", "scale/std", "borsh?/std", "serde?/std", "scale-info/std", "sp-core/std", "sp-runtime/std", "frame-support/std"]
 borsh = ["dep:borsh"]
 serde = ["dep:serde"]
 default = ["std"]
diff --git a/substrate/primitives/src/account.rs b/substrate/primitives/src/account.rs
index 77877a149..5c77c28f3 100644
--- a/substrate/primitives/src/account.rs
+++ b/substrate/primitives/src/account.rs
@@ -90,11 +90,22 @@ impl std::fmt::Display for SeraiAddress {
   }
 }

+/// Create a Substrate key pair by a name.
+///
+/// This should never be considered to have a secure private key. It has effectively no entropy.
 #[cfg(feature = "std")]
 pub fn insecure_pair_from_name(name: &str) -> Pair {
   Pair::from_string(&format!("//{name}"), None).unwrap()
 }

+/// Create a private key for an arbitrary ciphersuite by a name.
+///
+/// This key should never be considered a secure private key. It has effectively no entropy.
+#[cfg(feature = "std")] +pub fn insecure_arbitrary_key_from_name(name: &str) -> C::F { + C::hash_to_F(b"insecure arbitrary key", name.as_bytes()) +} + pub struct AccountLookup; impl Lookup for AccountLookup { type Source = SeraiAddress; diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs index 1213378c4..db396fb51 100644 --- a/substrate/primitives/src/networks.rs +++ b/substrate/primitives/src/networks.rs @@ -14,6 +14,16 @@ use sp_core::{ConstU32, bounded::BoundedVec}; #[cfg(feature = "borsh")] use crate::{borsh_serialize_bounded_vec, borsh_deserialize_bounded_vec}; +/// Identifier for an embedded elliptic curve. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[cfg_attr(feature = "std", derive(Zeroize))] +#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum EmbeddedEllipticCurve { + Embedwards25519, + Secq256k1, +} + /// The type used to identify networks. #[derive( Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, PartialOrd, Ord, MaxEncodedLen, TypeInfo, @@ -28,6 +38,23 @@ pub enum NetworkId { Monero, } impl NetworkId { + /// The embedded elliptic curve actively used for this network. + /// + /// This is guaranteed to return `[]`, `[Embedwards25519]`, or + /// `[Embedwards25519, *network specific curve*]`. + pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] { + match self { + // We don't use any embedded elliptic curves for Serai as we don't perform a DKG for Serai + Self::Serai => &[], + // We need to generate a Ristretto key for oraclizing and a Secp256k1 key for the network + Self::Bitcoin | Self::Ethereum => { + &[EmbeddedEllipticCurve::Embedwards25519, EmbeddedEllipticCurve::Secq256k1] + } + // Since the oraclizing key curve is the same as the network's curve, we only need it + Self::Monero => &[EmbeddedEllipticCurve::Embedwards25519], + } + } + pub fn coins(&self) -> &'static [Coin] { match self { Self::Serai => &[Coin::Serai], diff --git a/substrate/runtime/src/abi.rs b/substrate/runtime/src/abi.rs index 48b4a6c74..107389c1e 100644 --- a/substrate/runtime/src/abi.rs +++ b/substrate/runtime/src/abi.rs @@ -92,18 +92,22 @@ impl From for RuntimeCall { Call::ValidatorSets(vs) => match vs { serai_abi::validator_sets::Call::set_keys { network, - removed_participants, key_pair, + signature_participants, signature, } => RuntimeCall::ValidatorSets(validator_sets::Call::set_keys { network, - removed_participants: <_>::try_from( - removed_participants.into_iter().map(PublicKey::from).collect::>(), - ) - .unwrap(), key_pair, + signature_participants, signature, }), + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + } => RuntimeCall::ValidatorSets(validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + }), serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } => { RuntimeCall::ValidatorSets(validator_sets::Call::report_slashes { network, @@ -282,17 +286,20 @@ impl TryInto for RuntimeCall { _ => Err(())?, }), RuntimeCall::ValidatorSets(call) => Call::ValidatorSets(match call { - validator_sets::Call::set_keys { network, removed_participants, key_pair, signature } => { + validator_sets::Call::set_keys { network, key_pair, signature_participants, signature } => { serai_abi::validator_sets::Call::set_keys { network, - removed_participants: <_>::try_from( - 
diff --git a/substrate/runtime/src/abi.rs b/substrate/runtime/src/abi.rs index 48b4a6c74..107389c1e 100644 --- a/substrate/runtime/src/abi.rs +++ b/substrate/runtime/src/abi.rs @@ -92,18 +92,22 @@ impl From<Call> for RuntimeCall { Call::ValidatorSets(vs) => match vs { serai_abi::validator_sets::Call::set_keys { network, - removed_participants, key_pair, + signature_participants, signature, } => RuntimeCall::ValidatorSets(validator_sets::Call::set_keys { network, - removed_participants: <_>::try_from( - removed_participants.into_iter().map(PublicKey::from).collect::<Vec<_>>(), - ) - .unwrap(), key_pair, + signature_participants, signature, }), + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + } => RuntimeCall::ValidatorSets(validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + }), serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } => { RuntimeCall::ValidatorSets(validator_sets::Call::report_slashes { network, @@ -282,17 +286,20 @@ impl TryInto<Call> for RuntimeCall { _ => Err(())?, }), RuntimeCall::ValidatorSets(call) => Call::ValidatorSets(match call { - validator_sets::Call::set_keys { network, removed_participants, key_pair, signature } => { + validator_sets::Call::set_keys { network, key_pair, signature_participants, signature } => { serai_abi::validator_sets::Call::set_keys { network, - removed_participants: <_>::try_from( - removed_participants.into_iter().map(SeraiAddress::from).collect::<Vec<_>>(), - ) - .unwrap(), key_pair, + signature_participants, signature, } } + validator_sets::Call::set_embedded_elliptic_curve_key { embedded_elliptic_curve, key } => { + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + } + } validator_sets::Call::report_slashes { network, slashes, signature } => { serai_abi::validator_sets::Call::report_slashes { network, diff --git a/substrate/validator-sets/pallet/Cargo.toml b/substrate/validator-sets/pallet/Cargo.toml index dd67d1bc3..e6f559e18 100644 --- a/substrate/validator-sets/pallet/Cargo.toml +++ b/substrate/validator-sets/pallet/Cargo.toml @@ -12,17 +12,16 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] -[package.metadata.cargo-machete] -ignored = ["scale", "scale-info"] - [lints] workspace = true [dependencies] -hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } +bitvec = { version = "1", default-features = false, features = ["alloc", "serde"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "bit-vec"] } +scale-info = { version = "2", default-features = false, features = ["derive", "bit-vec"] } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } -scale-info = { version = "2", default-features = false, features = ["derive"] } +serde = { version = "1", default-features = false, features = ["derive", "alloc"] } sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false } sp-io = { git = "https://github.com/serai-dex/substrate", default-features = false } @@ -46,6 +45,8 @@ dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default- [features] std = [ + "bitvec/std", + "scale/std", "scale-info/std", diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index c2ba80a96..655c67220 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -83,6 +83,12 @@ pub mod pallet { type ShouldEndSession: ShouldEndSession<BlockNumberFor<Self>>; } + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, serde::Serialize, serde::Deserialize)] + pub struct AllEmbeddedEllipticCurveKeysAtGenesis { + pub embedwards25519: BoundedVec>, + pub secq256k1: BoundedVec>, + } + #[pallet::genesis_config] #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct GenesisConfig<T: Config> { @@ -92,7 +98,7 @@ pub mod pallet { /// This stake cannot be withdrawn however as there's no actual stake behind it. pub networks: Vec<(NetworkId, Amount)>, /// List of participants to place in the initial validator sets. - pub participants: Vec<T::AccountId>, + pub participants: Vec<(T::AccountId, AllEmbeddedEllipticCurveKeysAtGenesis)>, } impl<T: Config> Default for GenesisConfig<T> { @@ -191,6 +197,18 @@ pub mod pallet { } } + /// A key on an embedded elliptic curve. + #[pallet::storage] + pub type EmbeddedEllipticCurveKeys<T: Config> = StorageDoubleMap< + _, + Blake2_128Concat, + Public, + Identity, + EmbeddedEllipticCurve, + BoundedVec>, + OptionQuery, + >; + /// The total stake allocated to this network by the active set of validators. #[pallet::storage] #[pallet::getter(fn total_allocated_stake)] @@ -426,6 +444,14 @@ pub mod pallet { pub enum Error<T> { /// Validator Set doesn't exist. NonExistentValidatorSet, + /// An invalid embedded elliptic curve key was specified.
+ /// + /// This error not being raised does not mean the key was valid, solely that this pallet + /// didn't detect it as invalid. + InvalidEmbeddedEllipticCurveKey, + /// Trying to perform an operation requiring an embedded elliptic curve key, without an + /// embedded elliptic curve key. + MissingEmbeddedEllipticCurveKey, /// Not enough allocation to obtain a key share in the set. InsufficientAllocation, /// Trying to deallocate more than allocated. @@ -469,10 +495,20 @@ pub mod pallet { fn build(&self) { for (id, stake) in self.networks.clone() { AllocationPerKeyShare::<T>::set(id, Some(stake)); - for participant in self.participants.clone() { - if Pallet::<T>::set_allocation(id, participant, stake) { + for participant in &self.participants { + if Pallet::<T>::set_allocation(id, participant.0, stake) { panic!("participants contained duplicates"); } + EmbeddedEllipticCurveKeys::<T>::set( + participant.0, + EmbeddedEllipticCurve::Embedwards25519, + Some(participant.1.embedwards25519.clone()), + ); + EmbeddedEllipticCurveKeys::<T>::set( + participant.0, + EmbeddedEllipticCurve::Secq256k1, + Some(participant.1.secq256k1.clone()), + ); } Pallet::<T>::new_set(id); } @@ -941,14 +977,15 @@ pub mod pallet { pub fn set_keys( origin: OriginFor<T>, network: NetworkId, - removed_participants: BoundedVec>, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) -> DispatchResult { ensure_none(origin)?; // signature isn't checked as this is an unsigned transaction, and validate_unsigned // (called by pre_dispatch) checks it + let _ = signature_participants; let _ = signature; let session = Self::session(network).unwrap(); @@ -963,15 +1000,6 @@ pub mod pallet { Self::set_total_allocated_stake(network); } - // This does not remove from TotalAllocatedStake or InSet in order to: - // 1) Not decrease the stake present in this set. This means removed participants are - // still liable for the economic security of the external network. This prevents - // a decided set, which is economically secure, from falling below the threshold. - // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation - // scheduling (https://github.com/serai-dex/serai/issues/394). - for removed in removed_participants { - Self::deposit_event(Event::ParticipantRemoved { set, removed }); - } Self::deposit_event(Event::KeyGen { set, key_pair }); Ok(()) @@ -1004,8 +1032,42 @@ pub mod pallet { #[pallet::call_index(2)] #[pallet::weight(0)] // TODO + pub fn set_embedded_elliptic_curve_key( + origin: OriginFor<T>, + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + ) -> DispatchResult { + let validator = ensure_signed(origin)?; + + // We don't have the curve formulas, nor the BigInt arithmetic, necessary here to validate + // these keys. Instead, we solely check the key lengths. Validators are responsible for not + // providing invalid keys. + let expected_len = match embedded_elliptic_curve { + EmbeddedEllipticCurve::Embedwards25519 => 32, + EmbeddedEllipticCurve::Secq256k1 => 33, + }; + if key.len() != expected_len { + Err(Error::<T>::InvalidEmbeddedEllipticCurveKey)?; + } + + // This does allow overwriting an existing key which... is unlikely to be done?
+ // Yet it isn't an issue, as we bind to the key as of any set's declaration (regardless of + // whether it's distinct at the latest block) + EmbeddedEllipticCurveKeys::<T>::set(validator, embedded_elliptic_curve, Some(key)); + Ok(()) + } + + #[pallet::call_index(3)] + #[pallet::weight(0)] // TODO pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult { let validator = ensure_signed(origin)?; + // If this network utilizes embedded elliptic curve(s), require the validator to have set the + // appropriate key(s) + for embedded_elliptic_curve in network.embedded_elliptic_curves() { + if !EmbeddedEllipticCurveKeys::<T>::contains_key(validator, *embedded_elliptic_curve) { + Err(Error::<T>::MissingEmbeddedEllipticCurveKey)?; + } + } Coins::<T>::transfer_internal( validator, Self::account(), @@ -1014,7 +1076,7 @@ Self::increase_allocation(network, validator, amount, false) } - #[pallet::call_index(3)] + #[pallet::call_index(4)] #[pallet::weight(0)] // TODO pub fn deallocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult { let account = ensure_signed(origin)?; @@ -1031,7 +1093,7 @@ Ok(()) } - #[pallet::call_index(4)] + #[pallet::call_index(5)] #[pallet::weight((0, DispatchClass::Operational))] // TODO pub fn claim_deallocation( origin: OriginFor<T>, @@ -1059,7 +1121,7 @@ fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity { // Match to be exhaustive match call { - Call::set_keys { network, ref removed_participants, ref key_pair, ref signature } => { + Call::set_keys { network, ref key_pair, ref signature_participants, ref signature } => { let network = *network; // Don't allow the Serai set to set_keys, as they have no reason to do so @@ -1083,30 +1145,24 @@ // session on this assumption assert_eq!(Pallet::<T>::latest_decided_session(network), Some(current_session)); - // This does not slash the removed participants as that'll be done at the end of the - // set's lifetime - let mut removed = hashbrown::HashSet::new(); - for participant in removed_participants { - // Confirm this wasn't duplicated - if removed.contains(&participant.0) { - Err(InvalidTransaction::Custom(2))?; - } - removed.insert(participant.0); - } let participants = Participants::<T>::get(network).expect("session existed without participants"); + // Check the bitvec is of the proper length + if participants.len() != signature_participants.len() { + Err(InvalidTransaction::Custom(2))?; + } + let mut all_key_shares = 0; let mut signers = vec![]; let mut signing_key_shares = 0; - for participant in participants { + for (participant, in_use) in participants.into_iter().zip(signature_participants) { let participant = participant.0; let shares = InSet::<T>::get(network, participant) .expect("participant from Participants wasn't InSet"); all_key_shares += shares; - if removed.contains(&participant.0) { + if !in_use { continue; } @@ -1124,9 +1180,7 @@ // Verify the signature with the MuSig key of the signers // We theoretically don't need set_keys_message to bind to removed_participants, as the // key we're signing with effectively already does so, yet there's no reason not to - if !musig_key(set, &signers) - .verify(&set_keys_message(&set, removed_participants, key_pair), signature) - { + if !musig_key(set, &signers).verify(&set_keys_message(&set, key_pair), signature) { Err(InvalidTransaction::BadProof)?; } @@ -1159,9 +1213,10 @@ .propagate(true) .build() } - 
Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => { - Err(InvalidTransaction::Call)? - } + Call::set_embedded_elliptic_curve_key { .. } | + Call::allocate { .. } | + Call::deallocate { .. } | + Call::claim_deallocation { .. } => Err(InvalidTransaction::Call)?, Call::__Ignore(_, _) => unreachable!(), } } diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index c900b0a99..90d58c37c 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -99,12 +99,8 @@ pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public { } /// The message for the set_keys signature. -pub fn set_keys_message( - set: &ValidatorSet, - removed_participants: &[Public], - key_pair: &KeyPair, -) -> Vec<u8> { - (b"ValidatorSets-set_keys", set, removed_participants, key_pair).encode() +pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec<u8> { + (b"ValidatorSets-set_keys", set, key_pair).encode() } pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) -> Vec<u8> {
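To make the new signing payload concrete, the pallet's check reduces to the following shape (a sketch; the crate and `Public`/`Signature` import paths are assumptions, and the `.verify` call mirrors the pallet code above):

```rust
use serai_validator_sets_primitives::{musig_key, set_keys_message, KeyPair, ValidatorSet};
// `Public`/`Signature`: the sr25519 types the pallet uses (path assumed).
use sp_core::sr25519::{Public, Signature};

// The MuSig key commits to exactly the signers flagged by the bitvec, so the
// message itself no longer needs to encode removed participants.
fn set_keys_signature_valid(
  set: ValidatorSet,
  key_pair: &KeyPair,
  signers: &[Public],
  signature: &Signature,
) -> bool {
  musig_key(set, signers).verify(&set_keys_message(&set, key_pair), signature)
}
```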
diff --git a/tests/coordinator/Cargo.toml b/tests/coordinator/Cargo.toml index 89b168c03..ca7a10d6c 100644 --- a/tests/coordinator/Cargo.toml +++ b/tests/coordinator/Cargo.toml @@ -24,7 +24,11 @@ zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } blake2 = "0.10" + ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["ristretto", "secp256k1"] } +embedwards25519 = { path = "../../crypto/evrf/embedwards25519" } +secq256k1 = { path = "../../crypto/evrf/secq256k1" } + schnorrkel = "0.11" dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index c364128ca..fe2a0a4fb 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -18,6 +18,8 @@ use ciphersuite::{ group::{ff::PrimeField, GroupEncoding}, Ciphersuite, Ristretto, }; +use embedwards25519::Embedwards25519; +use secq256k1::Secq256k1; use serai_client::primitives::NetworkId; @@ -118,6 +120,8 @@ pub struct Processor { queue_for_sending: MessageQueue, abort_handle: Option>, + evrf_public_keys: ([u8; 32], Vec<u8>), + substrate_key: Arc<AsyncMutex<Option<Zeroizing<<Ristretto as Ciphersuite>::F>>>>, } @@ -131,7 +135,7 @@ impl Drop for Processor { impl Processor { pub async fn new( - raw_i: u8, + name: &'static str, network: NetworkId, ops: &DockerOperations, handles: Handles, @@ -168,7 +172,11 @@ impl Processor { let (msg_send, msg_recv) = mpsc::unbounded_channel(); + use serai_client::primitives::insecure_arbitrary_key_from_name; let substrate_key = Arc::new(AsyncMutex::new(None)); + let embedwards25519_evrf_key = (Embedwards25519::generator() * + insecure_arbitrary_key_from_name::<Embedwards25519>(name)) + .to_bytes(); let mut res = Processor { network, @@ -183,6 +191,21 @@ impl Processor { msgs: msg_recv, abort_handle: None, + evrf_public_keys: ( + embedwards25519_evrf_key, + match network { + NetworkId::Serai => panic!("mock processor for the serai network"), + NetworkId::Bitcoin | NetworkId::Ethereum => { + let key = (Secq256k1::generator() * + insecure_arbitrary_key_from_name::<Secq256k1>(name)) + .to_bytes(); + let key: &[u8] = key.as_ref(); + key.to_vec() + } + NetworkId::Monero => embedwards25519_evrf_key.to_vec(), + }, + ), + substrate_key: substrate_key.clone(), }; @@ -256,10 +279,12 @@ impl Processor { if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { *current_cosign = Some(new_cosign); } + let mut preprocess = [0; 64]; + preprocess[.. name.len()].copy_from_slice(name.as_ref()); send_message( messages::coordinator::ProcessorMessage::CosignPreprocess { id: id.clone(), - preprocesses: vec![[raw_i; 64]], + preprocesses: vec![preprocess], } .into(), ) @@ -270,12 +295,11 @@ ) => { // TODO: Assert the ID matches CURRENT_COSIGN // TODO: Verify the received preprocesses + let mut share = [0; 32]; + share[.. name.len()].copy_from_slice(name.as_bytes()); send_message( - messages::coordinator::ProcessorMessage::SubstrateShare { - id, - shares: vec![[raw_i; 32]], - } - .into(), + messages::coordinator::ProcessorMessage::SubstrateShare { id, shares: vec![share] } + .into(), ) .await; } @@ -327,6 +351,14 @@ impl Processor { res } + pub fn network(&self) -> NetworkId { + self.network + } + + pub fn evrf_public_keys(&self) -> ([u8; 32], Vec<u8>) { + self.evrf_public_keys.clone() + } + pub async fn serai(&self) -> Serai { Serai::new(self.serai_rpc.clone()).await.unwrap() }
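Since the mock processors are now keyed by name rather than index, their fixed-width test payloads are built by zero-padding the name, exactly as the cosign handlers above do; extracted here for clarity:

```rust
// Zero-pad a processor's name into the fixed-width buffers used for mock
// preprocesses (64 bytes) and shares (32 bytes); test names are short enough
// that the slice copy never overruns.
fn mock_preprocess(name: &str) -> [u8; 64] {
  let mut preprocess = [0; 64];
  preprocess[.. name.len()].copy_from_slice(name.as_bytes());
  preprocess
}

fn mock_share(name: &str) -> [u8; 32] {
  let mut share = [0; 32];
  share[.. name.len()].copy_from_slice(name.as_bytes());
  share
}
```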
diff --git a/tests/coordinator/src/tests/key_gen.rs b/tests/coordinator/src/tests/key_gen.rs index 8ea14cbc0..1ec31776f 100644 --- a/tests/coordinator/src/tests/key_gen.rs +++ b/tests/coordinator/src/tests/key_gen.rs @@ -1,7 +1,4 @@ -use std::{ - time::{Duration, SystemTime}, - collections::HashMap, -}; +use std::time::{Duration, SystemTime}; use zeroize::Zeroizing; use rand_core::OsRng; @@ -10,14 +7,14 @@ use ciphersuite::{ group::{ff::Field, GroupEncoding}, Ciphersuite, Ristretto, Secp256k1, }; -use dkg::ThresholdParams; +use dkg::Participant; use serai_client::{ primitives::NetworkId, Public, validator_sets::primitives::{Session, ValidatorSet, KeyPair}, }; -use messages::{key_gen::KeyGenId, CoordinatorMessage}; +use messages::CoordinatorMessage; use crate::tests::*; @@ -29,16 +26,28 @@ pub async fn key_gen( let mut participant_is = vec![]; let set = ValidatorSet { session, network: NetworkId::Bitcoin }; - let id = KeyGenId { session: set.session, attempt: 0 }; - for (i, processor) in processors.iter_mut().enumerate() { + // This is distinct from the result of evrf_public_keys for each processor, as there's an + // ordering algorithm on-chain which won't match our ordering + let mut evrf_public_keys_as_on_chain = None; + for processor in processors.iter_mut() { + // Receive GenerateKey let msg = processor.recv_message().await; match &msg { CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { - params, + evrf_public_keys, .. }) => { - participant_is.push(params.i()); + if evrf_public_keys_as_on_chain.is_none() { + evrf_public_keys_as_on_chain = Some(evrf_public_keys.clone()); + } + assert_eq!(evrf_public_keys_as_on_chain.as_ref().unwrap(), evrf_public_keys); + let i = evrf_public_keys + .iter() + .position(|public_keys| *public_keys == processor.evrf_public_keys()) + .unwrap(); + let i = Participant::new(1 + u16::try_from(i).unwrap()).unwrap(); + participant_is.push(i); } _ => panic!("unexpected message: {msg:?}"), } @@ -46,63 +55,43 @@ assert_eq!( msg, CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { - id, - params: ThresholdParams::new( - u16::try_from(((coordinators * 2) / 3) + 1).unwrap(), - u16::try_from(coordinators).unwrap(), - participant_is[i], - ) - .unwrap(), - shares: 1, + session, + threshold: u16::try_from(((coordinators * 2) / 3) + 1).unwrap(), + evrf_public_keys: evrf_public_keys_as_on_chain.clone().unwrap(), }) ); + } - processor - .send_message(messages::key_gen::ProcessorMessage::Commitments { - id, - commitments: vec![vec![u8::try_from(u16::from(participant_is[i])).unwrap()]], + for i in 0 .. coordinators { + // Send Participation + processors[i] + .send_message(messages::key_gen::ProcessorMessage::Participation { + session, + participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()], }) .await; - } - wait_for_tributary().await; - for (i, processor) in processors.iter_mut().enumerate() { - let mut commitments = (0 .. u8::try_from(coordinators).unwrap()) - .map(|l| { - ( - participant_is[usize::from(l)], - vec![u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap()], - ) - }) - .collect::<HashMap<_, _>>(); - commitments.remove(&participant_is[i]); - assert_eq!( - processor.recv_message().await, - CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Commitments { - id, - commitments, - }) - ); + // Sleep so this participation gets included + for _ in 0 .. 2 { + wait_for_tributary().await; + } - // Recipient it's for -> (Sender i, Recipient i) - let mut shares = (0 ..
u8::try_from(coordinators).unwrap()) - .map(|l| { - ( - participant_is[usize::from(l)], - vec![ - u8::try_from(u16::from(participant_is[i])).unwrap(), - u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(), - ], + // Have every other processor recv this message too + for processor in processors.iter_mut() { + assert_eq!( + processor.recv_message().await, + messages::CoordinatorMessage::KeyGen( + messages::key_gen::CoordinatorMessage::Participation { + session, + participant: participant_is[i], + participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()], + } ) - }) - .collect::<HashMap<_, _>>(); - - shares.remove(&participant_is[i]); - processor - .send_message(messages::key_gen::ProcessorMessage::Shares { id, shares: vec![shares] }) - .await; + ); + } } + // Now that we've received all participations, publish the key pair let substrate_priv_key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)); let substrate_key = (<Ristretto as Ciphersuite>::generator() * *substrate_priv_key).to_bytes(); @@ -112,40 +101,24 @@ pub async fn key_gen( let serai = processors[0].serai().await; let mut last_serai_block = serai.latest_finalized_block().await.unwrap().number(); - wait_for_tributary().await; - for (i, processor) in processors.iter_mut().enumerate() { - let i = participant_is[i]; - assert_eq!( - processor.recv_message().await, - CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Shares { - id, - shares: { - let mut shares = (0 .. u8::try_from(coordinators).unwrap()) - .map(|l| { - ( - participant_is[usize::from(l)], - vec![ - u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(), - u8::try_from(u16::from(i)).unwrap(), - ], - ) - }) - .collect::<HashMap<_, _>>(); - shares.remove(&i); - vec![shares] - }, - }) - ); + for processor in processors.iter_mut() { processor .send_message(messages::key_gen::ProcessorMessage::GeneratedKeyPair { - id, + session, substrate_key, network_key: network_key.clone(), }) .await; } - // Sleeps for longer since we need to wait for a Substrate block as well + // Wait for the Nonces TXs to go around + wait_for_tributary().await; + // Wait for the Share TXs to go around + wait_for_tributary().await; + + // And now we're waiting for the TX to be published onto Serai + + // We need to wait for a finalized Substrate block as well, so this waits for up to 20 blocks 'outer: for _ in 0 .. 20 { tokio::time::sleep(Duration::from_secs(6)).await; if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
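The index derivation used above, pulled out as a standalone sketch (the on-chain ordering is authoritative, and `Participant` is 1-indexed, hence the `1 +`):

```rust
use dkg::Participant;

// Locate our (substrate, network) eVRF public keys within the on-chain
// ordering and convert the position into a 1-indexed Participant.
fn participant_index(
  on_chain: &[([u8; 32], Vec<u8>)],
  ours: &([u8; 32], Vec<u8>),
) -> Participant {
  let i = on_chain.iter().position(|keys| keys == ours).unwrap();
  Participant::new(1 + u16::try_from(i).unwrap()).unwrap()
}
```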
diff --git a/tests/coordinator/src/tests/mod.rs b/tests/coordinator/src/tests/mod.rs index ef67b0ac5..0b46cd818 100644 --- a/tests/coordinator/src/tests/mod.rs +++ b/tests/coordinator/src/tests/mod.rs @@ -41,6 +41,18 @@ impl) -> F> Test } } +fn name(i: usize) -> &'static str { + match i { + 0 => "Alice", + 1 => "Bob", + 2 => "Charlie", + 3 => "Dave", + 4 => "Eve", + 5 => "Ferdie", + _ => panic!("needed a 7th name for a serai node"), + } +} + pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await; @@ -50,15 +62,7 @@ pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { // Spawn one extra coordinator which isn't in-set #[allow(clippy::range_plus_one)] for i in 0 .. (COORDINATORS + 1) { - let name = match i { - 0 => "Alice", - 1 => "Bob", - 2 => "Charlie", - 3 => "Dave", - 4 => "Eve", - 5 => "Ferdie", - _ => panic!("needed a 7th name for a serai node"), - }; + let name = name(i); let serai_composition = serai_composition(name, fast_epoch); let (processor_key, message_queue_keys, message_queue_composition) = @@ -196,14 +200,7 @@ pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { let mut processors: Vec<Processor> = vec![]; for (i, (handles, key)) in coordinators.iter().enumerate() { processors.push( - Processor::new( - i.try_into().unwrap(), - NetworkId::Bitcoin, - &outer_ops, - handles.clone(), - *key, - ) - .await, + Processor::new(name(i), NetworkId::Bitcoin, &outer_ops, handles.clone(), *key).await, ); } diff --git a/tests/coordinator/src/tests/rotation.rs b/tests/coordinator/src/tests/rotation.rs index 1ebeec16a..507b05363 100644 --- a/tests/coordinator/src/tests/rotation.rs +++ b/tests/coordinator/src/tests/rotation.rs @@ -3,7 +3,7 @@ use tokio::time::{sleep, Duration}; use ciphersuite::Secp256k1; use serai_client::{ - primitives::{insecure_pair_from_name, NetworkId}, + primitives::{EmbeddedEllipticCurve, NetworkId, insecure_pair_from_name}, validator_sets::{ self, primitives::{Session, ValidatorSet}, @@ -55,6 +55,27 @@ async fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] { } } +#[allow(dead_code)] +async fn set_embedded_elliptic_curve_key( + serai: &Serai, + curve: EmbeddedEllipticCurve, + key: Vec<u8>, + pair: &Pair, + nonce: u32, +) -> [u8; 32] { + // get the call + let tx = serai.sign( + pair, + validator_sets::SeraiValidatorSets::set_embedded_elliptic_curve_key( + curve, + key.try_into().unwrap(), + ), + nonce, + 0, + ); + publish_tx(serai, &tx).await } +#[allow(dead_code)] async fn allocate_stake( serai: &Serai, @@ -132,13 +153,29 @@ async fn set_rotation_test() { // excluded participant let pair5 = insecure_pair_from_name("Eve"); - let network = NetworkId::Bitcoin; + let network = excluded.network(); let amount = Amount(1_000_000 * 10_u64.pow(8)); let serai = processors[0].serai().await; // allocate now for the last participant so that it is guaranteed to be included into session // 1 set. This doesn't affect the genesis set at all since that is a predetermined set.
- allocate_stake(&serai, network, amount, &pair5, 0).await; + set_embedded_elliptic_curve_key( + &serai, + EmbeddedEllipticCurve::Embedwards25519, + excluded.evrf_public_keys().0.to_vec(), + &pair5, + 0, + ) + .await; + set_embedded_elliptic_curve_key( + &serai, + *excluded.network().embedded_elliptic_curves().last().unwrap(), + excluded.evrf_public_keys().1.clone(), + &pair5, + 1, + ) + .await; + allocate_stake(&serai, network, amount, &pair5, 2).await; // genesis keygen let _ = key_gen::<Secp256k1>(&mut processors, Session(0)).await; diff --git a/tests/full-stack/src/tests/mod.rs b/tests/full-stack/src/tests/mod.rs index 7d92070ef..a288ff055 100644 --- a/tests/full-stack/src/tests/mod.rs +++ b/tests/full-stack/src/tests/mod.rs @@ -57,14 +57,24 @@ pub(crate) async fn new_test(test_body: impl TestBody) { let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance(); let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin); - let mut bitcoin_processor_composition = - processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]); + let mut bitcoin_processor_composition = processor_instance( + name, + NetworkId::Bitcoin, + bitcoin_port, + message_queue_keys[&NetworkId::Bitcoin], + ) + .0; assert_eq!(bitcoin_processor_composition.len(), 1); let bitcoin_processor_composition = bitcoin_processor_composition.swap_remove(0); let (monero_composition, monero_port) = network_instance(NetworkId::Monero); - let mut monero_processor_composition = - processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]); + let mut monero_processor_composition = processor_instance( + name, + NetworkId::Monero, + monero_port, + message_queue_keys[&NetworkId::Monero], + ) + .0; assert_eq!(monero_processor_composition.len(), 1); let monero_processor_composition = monero_processor_composition.swap_remove(0); diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index 8817b0c96..f06e47419 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -23,8 +23,8 @@ zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } curve25519-dalek = "4" -ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["secp256k1", "ristretto"] } -dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["secp256k1", "ed25519", "ristretto"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] } bitcoin-serai = { path = "../../networks/bitcoin" }
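The tests/processor changes below replace the single ENTROPY seed with per-curve eVRF keys passed via environment variables. A rough sketch of the Substrate-side derivation they perform (`EvrfCurve` and its `EmbeddedCurve` associated type are assumptions about `dkg::evrf`'s API):

```rust
use ciphersuite::{
  group::{ff::PrimeField, GroupEncoding},
  Ciphersuite, Ristretto,
};
use dkg::evrf::*;
use serai_client::primitives::insecure_arbitrary_key_from_name;

// Derive the processor's Substrate eVRF key from its name, returning the
// hex-encoded private scalar (for the SUBSTRATE_EVRF_KEY env var) alongside
// the public key bytes the coordinator publishes on-chain.
fn substrate_evrf_key(name: &str) -> (String, Vec<u8>) {
  let key = insecure_arbitrary_key_from_name::<<Ristretto as EvrfCurve>::EmbeddedCurve>(name);
  let pub_key = (<Ristretto as EvrfCurve>::EmbeddedCurve::generator() * key).to_bytes();
  (hex::encode(key.to_repr()), pub_key.as_ref().to_vec())
}
```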
diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index ec607a551..d57ec290c 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -3,11 +3,14 @@ use std::sync::{OnceLock, Mutex}; use zeroize::Zeroizing; -use rand_core::{RngCore, OsRng}; -use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Secp256k1, Ed25519, Ristretto, +}; +use dkg::evrf::*; -use serai_client::primitives::NetworkId; +use serai_client::primitives::{NetworkId, insecure_arbitrary_key_from_name}; use messages::{ProcessorMessage, CoordinatorMessage}; use serai_message_queue::{Service, Metadata, client::MessageQueue}; @@ -24,13 +27,42 @@ mod tests; static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new(); +#[allow(dead_code)] +#[derive(Clone)] +pub struct EvrfPublicKeys { + substrate: [u8; 32], + network: Vec<u8>, +} + pub fn processor_instance( + name: &str, network: NetworkId, port: u32, message_queue_key: <Ristretto as Ciphersuite>::F, -) -> Vec { - let mut entropy = [0; 32]; - OsRng.fill_bytes(&mut entropy); +) -> (Vec, EvrfPublicKeys) { + let substrate_evrf_key = + insecure_arbitrary_key_from_name::<<Ristretto as EvrfCurve>::EmbeddedCurve>(name); + let substrate_evrf_pub_key = + (<Ristretto as EvrfCurve>::EmbeddedCurve::generator() * substrate_evrf_key).to_bytes(); + let substrate_evrf_key = substrate_evrf_key.to_repr(); + + let (network_evrf_key, network_evrf_pub_key) = match network { + NetworkId::Serai => panic!("starting a processor for Serai"), + NetworkId::Bitcoin | NetworkId::Ethereum => { + let evrf_key = + insecure_arbitrary_key_from_name::<<Secp256k1 as EvrfCurve>::EmbeddedCurve>(name); + let pub_key = + (<Secp256k1 as EvrfCurve>::EmbeddedCurve::generator() * evrf_key).to_bytes().to_vec(); + (evrf_key.to_repr(), pub_key) + } + NetworkId::Monero => { + let evrf_key = + insecure_arbitrary_key_from_name::<<Ed25519 as EvrfCurve>::EmbeddedCurve>(name); + let pub_key = + (<Ed25519 as EvrfCurve>::EmbeddedCurve::generator() * evrf_key).to_bytes().to_vec(); + (evrf_key.to_repr(), pub_key) + } + }; let network_str = match network { NetworkId::Serai => panic!("starting a processor for Serai"), @@ -47,7 +79,8 @@ pub fn processor_instance( .replace_env( [ ("MESSAGE_QUEUE_KEY".to_string(), hex::encode(message_queue_key.to_repr())), - ("ENTROPY".to_string(), hex::encode(entropy)), + ("SUBSTRATE_EVRF_KEY".to_string(), hex::encode(substrate_evrf_key)), + ("NETWORK_EVRF_KEY".to_string(), hex::encode(network_evrf_key)), ("NETWORK".to_string(), network_str.to_string()), ("NETWORK_RPC_LOGIN".to_string(), format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_PORT".to_string(), port.to_string()), @@ -75,21 +108,27 @@ pub fn processor_instance( ); } - res + (res, EvrfPublicKeys { substrate: substrate_evrf_pub_key, network: network_evrf_pub_key }) +} + +pub struct ProcessorKeys { + coordinator: <Ristretto as Ciphersuite>::F, + evrf: EvrfPublicKeys, } pub type Handles = (String, String, String, String); pub fn processor_stack( + name: &str, network: NetworkId, network_hostname_override: Option<String>, -) -> (Handles, <Ristretto as Ciphersuite>::F, Vec) { +) -> (Handles, ProcessorKeys, Vec) { let (network_composition, network_rpc_port) = network_instance(network); let (coord_key, message_queue_keys, message_queue_composition) = serai_message_queue_tests::instance(); - let mut processor_compositions = - processor_instance(network, network_rpc_port, message_queue_keys[&network]); + let (mut processor_compositions, evrf_keys) = + processor_instance(name, network, network_rpc_port, message_queue_keys[&network]); // Give every item in this stack a unique ID // Uses a Mutex as we can't generate an 8-byte random ID without hitting hostname length limits @@ -155,7 +194,7 @@ pub fn processor_stack( handles[2].clone(), handles.get(3).cloned().unwrap_or(String::new()), ), - coord_key, + ProcessorKeys { coordinator: coord_key, evrf: evrf_keys }, compositions, ) } @@ -170,6 +209,8 @@ pub struct Coordinator { processor_handle: String, relayer_handle: String, + evrf_keys: EvrfPublicKeys, + next_send_id: u64, next_recv_id: u64, queue: MessageQueue, @@ -180,7 +221,7 @@ impl Coordinator { network: NetworkId, ops: &DockerOperations, handles: Handles, - coord_key: <Ristretto as Ciphersuite>::F, + keys: ProcessorKeys, ) -> Coordinator { let rpc = ops.handle(&handles.1).host_port(2287).unwrap(); let rpc = rpc.0.to_string() + ":" + &rpc.1.to_string(); @@ -193,9 +234,11 @@ impl Coordinator { processor_handle: handles.2, relayer_handle: handles.3, + evrf_keys: keys.evrf, + next_send_id: 0, next_recv_id:
0, - queue: MessageQueue::new(Service::Coordinator, rpc, Zeroizing::new(coord_key)), + queue: MessageQueue::new(Service::Coordinator, rpc, Zeroizing::new(keys.coordinator)), }; // Sleep for up to a minute in case the external network's RPC has yet to start @@ -302,6 +345,11 @@ impl Coordinator { res } + /// Get the eVRF keys for the associated processor. + pub fn evrf_keys(&self) -> EvrfPublicKeys { + self.evrf_keys.clone() + } + /// Send a message to a processor as its coordinator. pub async fn send_message(&mut self, msg: impl Into<CoordinatorMessage>) { let msg: CoordinatorMessage = msg.into(); diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 32563c9fe..bed741e56 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -451,7 +451,7 @@ impl Wallet { ); } - let to_spend_key = decompress_point(<[u8; 32]>::try_from(to.as_ref()).unwrap()).unwrap(); + let to_spend_key = decompress_point(<[u8; 32]>::try_from(to.as_slice()).unwrap()).unwrap(); let to_view_key = additional_key::(0); let to_addr = Address::new( Network::Mainnet, diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 6170270ac..b85f43cfe 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -3,6 +3,8 @@ use std::{ time::{SystemTime, Duration}, }; +use rand_core::{RngCore, OsRng}; + use dkg::{Participant, tests::clone_without}; use messages::{coordinator::*, SubstrateContext}; diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs index 7dea0bfd5..ee69086b7 100644 --- a/tests/processor/src/tests/key_gen.rs +++ b/tests/processor/src/tests/key_gen.rs @@ -1,30 +1,24 @@ -use std::{collections::HashMap, time::SystemTime}; +use std::time::SystemTime; -use dkg::{Participant, ThresholdParams, tests::clone_without}; +use dkg::Participant; use serai_client::{ primitives::{NetworkId, BlockHash, PublicKey}, validator_sets::primitives::{Session, KeyPair}, }; -use messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage, ProcessorMessage}; +use messages::{SubstrateContext, CoordinatorMessage, ProcessorMessage}; use crate::{*, tests::*}; pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { // Perform an interaction with all processors via their coordinators - async fn interact_with_all< - FS: Fn(Participant) -> messages::key_gen::CoordinatorMessage, - FR: FnMut(Participant, messages::key_gen::ProcessorMessage), - >( + async fn interact_with_all<FR: FnMut(Participant, messages::key_gen::ProcessorMessage)>( coordinators: &mut [Coordinator], - message: FS, mut recv: FR, ) { for (i, coordinator) in coordinators.iter_mut().enumerate() { let participant = Participant::new(u16::try_from(i + 1).unwrap()).unwrap(); - coordinator.send_message(CoordinatorMessage::KeyGen(message(participant))).await; - match coordinator.recv_message().await { ProcessorMessage::KeyGen(msg) => recv(participant, msg), _ => panic!("processor didn't return KeyGen message"), @@ -33,85 +27,69 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { } // Order a key gen - let id = KeyGenId { session: Session(0), attempt: 0 }; - - let mut commitments = HashMap::new(); - interact_with_all( - coordinators, - |participant| messages::key_gen::CoordinatorMessage::GenerateKey { - id, - params: ThresholdParams::new( - u16::try_from(THRESHOLD).unwrap(), - u16::try_from(COORDINATORS).unwrap(), - participant, - ) - .unwrap(), - shares: 1, - }, - |participant, msg| match msg { - messages::key_gen::ProcessorMessage::Commitments { - id: this_id,
commitments: mut these_commitments, } => { - assert_eq!(this_id, id); - assert_eq!(these_commitments.len(), 1); - commitments.insert(participant, these_commitments.swap_remove(0)); - } - _ => panic!("processor didn't return Commitments in response to GenerateKey"), - }, - ) - .await; + let session = Session(0); - // Send the commitments to all parties - let mut shares = HashMap::new(); - interact_with_all( - coordinators, - |participant| messages::key_gen::CoordinatorMessage::Commitments { - id, - commitments: clone_without(&commitments, &participant), - }, - |participant, msg| match msg { - messages::key_gen::ProcessorMessage::Shares { id: this_id, shares: mut these_shares } => { - assert_eq!(this_id, id); - assert_eq!(these_shares.len(), 1); - shares.insert(participant, these_shares.swap_remove(0)); - } - _ => panic!("processor didn't return Shares in response to GenerateKey"), - }, - ) + let mut evrf_public_keys = vec![]; + for coordinator in &*coordinators { + let keys = coordinator.evrf_keys(); + evrf_public_keys.push((keys.substrate, keys.network)); + } + + let mut participations = vec![]; + for coordinator in &mut *coordinators { + coordinator + .send_message(CoordinatorMessage::KeyGen( + messages::key_gen::CoordinatorMessage::GenerateKey { + session, + threshold: u16::try_from(THRESHOLD).unwrap(), + evrf_public_keys: evrf_public_keys.clone(), + }, + )) + .await; + } + // This takes forever on debug, which we use in these tests + let ci_scaling_factor = + 1 + u64::from(u8::from(std::env::var("GITHUB_CI") == Ok("true".to_string()))); + tokio::time::sleep(core::time::Duration::from_secs(600 * ci_scaling_factor)).await; + interact_with_all(coordinators, |participant, msg| match msg { + messages::key_gen::ProcessorMessage::Participation { session: this_session, participation } => { + assert_eq!(this_session, session); + participations.push(messages::key_gen::CoordinatorMessage::Participation { + session, + participant, + participation, + }); + } + _ => panic!("processor didn't return Participation in response to GenerateKey"), }) .await; - // Send the shares + // Send the participations let mut substrate_key = None; let mut network_key = None; - interact_with_all( - coordinators, - |participant| messages::key_gen::CoordinatorMessage::Shares { - id, - shares: vec![shares - .iter() - .filter_map(|(this_participant, shares)| { - shares.get(&participant).cloned().map(|share| (*this_participant, share)) - }) - .collect()], - }, - |_, msg| match msg { - messages::key_gen::ProcessorMessage::GeneratedKeyPair { - id: this_id, - substrate_key: this_substrate_key, - network_key: this_network_key, - } => { - assert_eq!(this_id, id); - if substrate_key.is_none() { - substrate_key = Some(this_substrate_key); - network_key = Some(this_network_key.clone()); - } - assert_eq!(substrate_key.unwrap(), this_substrate_key); - assert_eq!(network_key.as_ref().unwrap(), &this_network_key); + for participation in participations { + for coordinator in &mut *coordinators { + coordinator.send_message(participation.clone()).await; + } + } + // This also takes a while on debug + tokio::time::sleep(core::time::Duration::from_secs(240 * ci_scaling_factor)).await; + interact_with_all(coordinators, |_, msg| match msg { + messages::key_gen::ProcessorMessage::GeneratedKeyPair { + session: this_session, + substrate_key: this_substrate_key, + network_key: this_network_key, + } => { + assert_eq!(this_session, session); + if substrate_key.is_none() { + substrate_key = Some(this_substrate_key); + network_key =
Some(this_network_key.clone()); } - _ => panic!("processor didn't return GeneratedKeyPair in response to GenerateKey"), - }, - ) + assert_eq!(substrate_key.unwrap(), this_substrate_key); + assert_eq!(network_key.as_ref().unwrap(), &this_network_key); + } + _ => panic!("processor didn't return GeneratedKeyPair in response to all Participations"), }) .await; // Confirm the key pair @@ -132,7 +110,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { .send_message(CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, - session: id.session, + session, key_pair: key_pair.clone(), }, )) diff --git a/tests/processor/src/tests/mod.rs b/tests/processor/src/tests/mod.rs index afda97d5e..668506b17 100644 --- a/tests/processor/src/tests/mod.rs +++ b/tests/processor/src/tests/mod.rs @@ -1,5 +1,3 @@ -use ciphersuite::{Ciphersuite, Ristretto}; - use serai_client::primitives::NetworkId; use dockertest::DockerTest; @@ -17,18 +15,21 @@ mod send; pub(crate) const COORDINATORS: usize = 4; pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1; -fn new_test(network: NetworkId) -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, DockerTest) { +fn new_test(network: NetworkId) -> (Vec<(Handles, ProcessorKeys)>, DockerTest) { let mut coordinators = vec![]; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); let mut eth_handle = None; - for _ in 0 .. COORDINATORS { - let (handles, coord_key, compositions) = processor_stack(network, eth_handle.clone()); + for i in 0 .. COORDINATORS { + // Uses the counter `i` as this has no relation to any other system, and while Substrate has + // hard-coded names for itself, these tests don't spawn any Substrate node + let (handles, keys, compositions) = + processor_stack(&i.to_string(), network, eth_handle.clone()); // TODO: Remove this once https://github.com/foundry-rs/foundry/issues/7955 // This has all processors share an Ethereum node until we can sync controlled nodes if network == NetworkId::Ethereum { eth_handle = eth_handle.or_else(|| Some(handles.0.clone())); } - coordinators.push((handles, coord_key)); + coordinators.push((handles, keys)); for composition in compositions { test.provide_container(composition); } diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 62e80c095..8dfb53535 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -3,6 +3,8 @@ use std::{ time::{SystemTime, Duration}, }; +use rand_core::{RngCore, OsRng}; + use dkg::{Participant, tests::clone_without}; use messages::{sign::SignId, SubstrateContext};