From 1b762b286721504e1fe4742edda2e09ddd1b4373 Mon Sep 17 00:00:00 2001 From: Artyom Pavlov Date: Thu, 9 May 2024 09:50:08 +0300 Subject: [PATCH] Apply rustfmt and fix Clippy warnings (#1448) --- .github/workflows/benches.yml | 23 + .github/workflows/test.yml | 2 - .github/workflows/workspace.yml | 33 ++ Cargo.toml | 2 +- benches/benches/generators.rs | 3 - rand_chacha/src/chacha.rs | 26 +- rand_chacha/src/guts.rs | 12 +- rand_core/src/blanket_impls.rs | 3 +- rand_core/src/block.rs | 3 +- rand_core/src/impls.rs | 2 +- rand_core/src/lib.rs | 25 +- rand_distr/src/binomial.rs | 21 +- rand_distr/src/cauchy.rs | 35 +- rand_distr/src/dirichlet.rs | 3 +- rand_distr/src/exponential.rs | 24 +- rand_distr/src/gamma.rs | 56 ++- rand_distr/src/geometric.rs | 56 ++- rand_distr/src/hypergeometric.rs | 204 +++++--- rand_distr/src/inverse_gaussian.rs | 13 +- rand_distr/src/lib.rs | 9 +- rand_distr/src/normal.rs | 34 +- rand_distr/src/normal_inverse_gaussian.rs | 19 +- rand_distr/src/pareto.rs | 44 +- rand_distr/src/pert.rs | 16 +- rand_distr/src/poisson.rs | 27 +- rand_distr/src/skew_normal.rs | 16 +- rand_distr/src/triangular.rs | 29 +- rand_distr/src/unit_ball.rs | 2 +- rand_distr/src/unit_circle.rs | 2 +- rand_distr/src/unit_disc.rs | 2 +- rand_distr/src/unit_sphere.rs | 8 +- rand_distr/src/utils.rs | 6 +- rand_distr/src/weibull.rs | 47 +- rand_distr/src/weighted_alias.rs | 58 ++- rand_distr/src/weighted_tree.rs | 24 +- rand_distr/src/zipf.rs | 59 ++- rand_distr/tests/pdf.rs | 4 +- rand_distr/tests/sparkline.rs | 18 +- rand_distr/tests/value_stability.rs | 569 ++++++++++++++-------- rand_pcg/src/pcg128.rs | 4 +- rand_pcg/src/pcg128cm.rs | 3 +- rand_pcg/src/pcg64.rs | 3 +- rustfmt.toml | 32 -- src/distributions/bernoulli.rs | 12 +- src/distributions/distribution.rs | 3 +- src/distributions/float.rs | 95 ++-- src/distributions/integer.rs | 143 +++--- src/distributions/mod.rs | 2 +- src/distributions/other.rs | 122 ++--- src/distributions/slice.rs | 6 +- src/distributions/uniform.rs | 251 ++++++---- src/distributions/utils.rs | 17 +- src/distributions/weighted_index.rs | 72 +-- src/lib.rs | 11 +- src/prelude.rs | 6 +- src/rng.rs | 14 +- src/rngs/mock.rs | 6 +- src/rngs/mod.rs | 12 +- src/rngs/thread.rs | 1 - src/rngs/xoshiro128plusplus.rs | 3 +- src/rngs/xoshiro256plusplus.rs | 3 +- src/seq/coin_flipper.rs | 4 +- src/seq/index.rs | 99 ++-- src/seq/mod.rs | 30 +- 64 files changed, 1537 insertions(+), 956 deletions(-) create mode 100644 .github/workflows/benches.yml create mode 100644 .github/workflows/workspace.yml delete mode 100644 rustfmt.toml diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml new file mode 100644 index 00000000000..118a1765406 --- /dev/null +++ b/.github/workflows/benches.yml @@ -0,0 +1,23 @@ +name: Benches + +on: + pull_request: + paths: + - ".github/workflows/benches.yml" + - "benches/**" + +jobs: + benches: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: clippy, rustfmt + - name: Rustfmt + run: cargo fmt --all -- --check + - name: Clippy + run: cargo clippy --all --all-targets -- -D warnings + - name: Build + run: RUSTFLAGS=-Dwarnings cargo build --all-targets diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 495571d2620..de886e48fb2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -72,8 +72,6 @@ jobs: if: ${{ matrix.variant == 'minimal_versions' }} run: | cargo generate-lockfile -Z minimal-versions - # 
Overrides for dependencies with incorrect requirements (may need periodic updating) - cargo update -p regex --precise 1.5.1 - name: Maybe nightly if: ${{ matrix.toolchain == 'nightly' }} run: | diff --git a/.github/workflows/workspace.yml b/.github/workflows/workspace.yml new file mode 100644 index 00000000000..ef92b7f479c --- /dev/null +++ b/.github/workflows/workspace.yml @@ -0,0 +1,33 @@ +name: Workspace + +on: + pull_request: + paths-ignore: + - README.md + - "benches/**" + push: + branches: master + paths-ignore: + - README.md + - "benches/**" + +jobs: + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: 1.78.0 + components: clippy + - run: cargo clippy --all --all-targets -- -D warnings + + rustfmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + components: rustfmt + - run: cargo fmt --all -- --check diff --git a/Cargo.toml b/Cargo.toml index 8ffdff78190..a2ea562a28c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,12 +58,12 @@ unbiased = [] [workspace] members = [ - "benches", "rand_core", "rand_distr", "rand_chacha", "rand_pcg", ] +exclude = ["benches"] [dependencies] rand_core = { path = "rand_core", version = "=0.9.0-alpha.1", default-features = false } diff --git a/benches/benches/generators.rs b/benches/benches/generators.rs index 05090c24498..4019ec087ec 100644 --- a/benches/benches/generators.rs +++ b/benches/benches/generators.rs @@ -50,7 +50,6 @@ gen_bytes!(gen_bytes_chacha8, ChaCha8Rng::from_os_rng()); gen_bytes!(gen_bytes_chacha12, ChaCha12Rng::from_os_rng()); gen_bytes!(gen_bytes_chacha20, ChaCha20Rng::from_os_rng()); gen_bytes!(gen_bytes_std, StdRng::from_os_rng()); -#[cfg(feature = "small_rng")] gen_bytes!(gen_bytes_small, SmallRng::from_thread_rng()); gen_bytes!(gen_bytes_os, UnwrapErr(OsRng)); gen_bytes!(gen_bytes_thread, thread_rng()); @@ -81,7 +80,6 @@ gen_uint!(gen_u32_chacha8, u32, ChaCha8Rng::from_os_rng()); gen_uint!(gen_u32_chacha12, u32, ChaCha12Rng::from_os_rng()); gen_uint!(gen_u32_chacha20, u32, ChaCha20Rng::from_os_rng()); gen_uint!(gen_u32_std, u32, StdRng::from_os_rng()); -#[cfg(feature = "small_rng")] gen_uint!(gen_u32_small, u32, SmallRng::from_thread_rng()); gen_uint!(gen_u32_os, u32, UnwrapErr(OsRng)); gen_uint!(gen_u32_thread, u32, thread_rng()); @@ -95,7 +93,6 @@ gen_uint!(gen_u64_chacha8, u64, ChaCha8Rng::from_os_rng()); gen_uint!(gen_u64_chacha12, u64, ChaCha12Rng::from_os_rng()); gen_uint!(gen_u64_chacha20, u64, ChaCha20Rng::from_os_rng()); gen_uint!(gen_u64_std, u64, StdRng::from_os_rng()); -#[cfg(feature = "small_rng")] gen_uint!(gen_u64_small, u64, SmallRng::from_thread_rng()); gen_uint!(gen_u64_os, u64, UnwrapErr(OsRng)); gen_uint!(gen_u64_thread, u64, thread_rng()); diff --git a/rand_chacha/src/chacha.rs b/rand_chacha/src/chacha.rs index 14be765a18b..3da47e45d76 100644 --- a/rand_chacha/src/chacha.rs +++ b/rand_chacha/src/chacha.rs @@ -8,8 +8,10 @@ //! The ChaCha random number generator. 
-#[cfg(not(feature = "std"))] use core; -#[cfg(feature = "std")] use std as core; +#[cfg(not(feature = "std"))] +use core; +#[cfg(feature = "std")] +use std as core; use self::core::fmt; use crate::guts::ChaCha; @@ -27,7 +29,8 @@ const BLOCK_WORDS: u8 = 16; #[repr(transparent)] pub struct Array64([T; 64]); impl Default for Array64 -where T: Default +where + T: Default, { #[rustfmt::skip] fn default() -> Self { @@ -54,7 +57,8 @@ impl AsMut<[T]> for Array64 { } } impl Clone for Array64 -where T: Copy + Default +where + T: Copy + Default, { fn clone(&self) -> Self { let mut new = Self::default(); @@ -275,20 +279,25 @@ macro_rules! chacha_impl { #[cfg(feature = "serde1")] impl Serialize for $ChaChaXRng { fn serialize(&self, s: S) -> Result - where S: Serializer { + where + S: Serializer, + { $abst::$ChaChaXRng::from(self).serialize(s) } } #[cfg(feature = "serde1")] impl<'de> Deserialize<'de> for $ChaChaXRng { fn deserialize(d: D) -> Result - where D: Deserializer<'de> { + where + D: Deserializer<'de>, + { $abst::$ChaChaXRng::deserialize(d).map(|x| Self::from(&x)) } } mod $abst { - #[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; + #[cfg(feature = "serde1")] + use serde::{Deserialize, Serialize}; // The abstract state of a ChaCha stream, independent of implementation choices. The // comparison and serialization of this object is considered a semver-covered part of @@ -353,7 +362,8 @@ chacha_impl!( mod test { use rand_core::{RngCore, SeedableRng}; - #[cfg(feature = "serde1")] use super::{ChaCha12Rng, ChaCha20Rng, ChaCha8Rng}; + #[cfg(feature = "serde1")] + use super::{ChaCha12Rng, ChaCha20Rng, ChaCha8Rng}; type ChaChaRng = super::ChaCha20Rng; diff --git a/rand_chacha/src/guts.rs b/rand_chacha/src/guts.rs index 797ded6fa73..d077225c625 100644 --- a/rand_chacha/src/guts.rs +++ b/rand_chacha/src/guts.rs @@ -12,7 +12,9 @@ use ppv_lite86::{dispatch, dispatch_light128}; pub use ppv_lite86::Machine; -use ppv_lite86::{vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4, Vec4Ext, Vector}; +use ppv_lite86::{ + vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4, Vec4Ext, Vector, +}; pub(crate) const BLOCK: usize = 16; pub(crate) const BLOCK64: u64 = BLOCK as u64; @@ -140,14 +142,18 @@ fn add_pos(m: Mach, d: Mach::u32x4, i: u64) -> Mach::u32x4 { #[cfg(target_endian = "little")] fn d0123(m: Mach, d: vec128_storage) -> Mach::u32x4x4 { let d0: Mach::u64x2 = m.unpack(d); - let incr = Mach::u64x2x4::from_lanes([m.vec([0, 0]), m.vec([1, 0]), m.vec([2, 0]), m.vec([3, 0])]); + let incr = + Mach::u64x2x4::from_lanes([m.vec([0, 0]), m.vec([1, 0]), m.vec([2, 0]), m.vec([3, 0])]); m.unpack((Mach::u64x2x4::from_lanes([d0, d0, d0, d0]) + incr).into()) } #[allow(clippy::many_single_char_names)] #[inline(always)] fn refill_wide_impl( - m: Mach, state: &mut ChaCha, drounds: u32, out: &mut [u32; BUFSZ], + m: Mach, + state: &mut ChaCha, + drounds: u32, + out: &mut [u32; BUFSZ], ) { let k = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let b = m.unpack(state.b); diff --git a/rand_core/src/blanket_impls.rs b/rand_core/src/blanket_impls.rs index cadd456ca5c..e3b54f894d1 100644 --- a/rand_core/src/blanket_impls.rs +++ b/rand_core/src/blanket_impls.rs @@ -1,4 +1,5 @@ -#[cfg(feature = "alloc")] use alloc::boxed::Box; +#[cfg(feature = "alloc")] +use alloc::boxed::Box; use crate::{CryptoRng, RngCore, TryCryptoRng, TryRngCore}; diff --git a/rand_core/src/block.rs b/rand_core/src/block.rs index b5cc42bdbef..6872af432f5 100644 --- a/rand_core/src/block.rs +++ 
b/rand_core/src/block.rs @@ -56,7 +56,8 @@ use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks}; use crate::{CryptoRng, RngCore, SeedableRng, TryRngCore}; use core::fmt; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; /// A trait for RNGs which do not generate random numbers individually, but in /// blocks (typically `[u32; N]`). This technique is commonly used by diff --git a/rand_core/src/impls.rs b/rand_core/src/impls.rs index f9152fb2734..ff07d7834c1 100644 --- a/rand_core/src/impls.rs +++ b/rand_core/src/impls.rs @@ -199,7 +199,7 @@ macro_rules! impl_try_rng_from_rng_core { macro_rules! impl_try_crypto_rng_from_crypto_rng { ($t:ty) => { $crate::impl_try_rng_from_rng_core!($t); - + impl $crate::TryCryptoRng for $t {} /// Check at compile time that `$t` implements `CryptoRng` diff --git a/rand_core/src/lib.rs b/rand_core/src/lib.rs index d0c8af065c8..48c9c528dfc 100644 --- a/rand_core/src/lib.rs +++ b/rand_core/src/lib.rs @@ -32,11 +32,14 @@ #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![doc(test(attr(allow(unused_variables), deny(warnings))))] +#![allow(unexpected_cfgs)] #![cfg_attr(doc_cfg, feature(doc_cfg))] #![no_std] -#[cfg(feature = "alloc")] extern crate alloc; -#[cfg(feature = "std")] extern crate std; +#[cfg(feature = "alloc")] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; use core::fmt; @@ -44,11 +47,13 @@ mod blanket_impls; pub mod block; pub mod impls; pub mod le; -#[cfg(feature = "getrandom")] mod os; - -#[cfg(feature = "getrandom")] pub use getrandom; -#[cfg(feature = "getrandom")] pub use os::OsRng; +#[cfg(feature = "getrandom")] +mod os; +#[cfg(feature = "getrandom")] +pub use getrandom; +#[cfg(feature = "getrandom")] +pub use os::OsRng; /// The core of a random number generator. /// @@ -213,14 +218,18 @@ pub trait TryRngCore { /// Wrap RNG with the [`UnwrapErr`] wrapper. fn unwrap_err(self) -> UnwrapErr - where Self: Sized { + where + Self: Sized, + { UnwrapErr(self) } /// Convert an [`RngCore`] to a [`RngReadAdapter`]. #[cfg(feature = "std")] fn read_adapter(&mut self) -> RngReadAdapter<'_, Self> - where Self: Sized { + where + Self: Sized, + { RngReadAdapter { inner: self } } } diff --git a/rand_distr/src/binomial.rs b/rand_distr/src/binomial.rs index 1e8e5e190a3..a9e6a708427 100644 --- a/rand_distr/src/binomial.rs +++ b/rand_distr/src/binomial.rs @@ -10,11 +10,11 @@ //! The binomial distribution. use crate::{Distribution, Uniform}; -use rand::Rng; -use core::fmt; use core::cmp::Ordering; +use core::fmt; #[allow(unused_imports)] use num_traits::Float; +use rand::Rng; /// The binomial distribution `Binomial(n, p)`. /// @@ -110,21 +110,21 @@ impl Distribution for Binomial { // Threshold for preferring the BINV algorithm. The paper suggests 10, // Ranlib uses 30, and GSL uses 14. const BINV_THRESHOLD: f64 = 10.; - + // Same value as in GSL. // It is possible for BINV to get stuck, so we break if x > BINV_MAX_X and try again. // It would be safer to set BINV_MAX_X to self.n, but it is extremely unlikely to be relevant. // When n*p < 10, so is n*p*q which is the variance, so a result > 110 would be 100 / sqrt(10) = 31 standard deviations away. - const BINV_MAX_X : u64 = 110; + const BINV_MAX_X: u64 = 110; if (self.n as f64) * p < BINV_THRESHOLD && self.n <= (i32::MAX as u64) { // Use the BINV algorithm. 
let s = p / q; let a = ((self.n + 1) as f64) * s; - + result = 'outer: loop { let mut r = q.powi(self.n as i32); - let mut u: f64 = rng.gen(); + let mut u: f64 = rng.random(); let mut x = 0; while u > r { @@ -136,7 +136,6 @@ impl Distribution for Binomial { r *= a / (x as f64) - s; } break x; - } } else { // Use the BTPE algorithm. @@ -238,7 +237,7 @@ impl Distribution for Binomial { break; } } - }, + } Ordering::Greater => { let mut i = y; loop { @@ -248,8 +247,8 @@ impl Distribution for Binomial { break; } } - }, - Ordering::Equal => {}, + } + Ordering::Equal => {} } if v > f { continue; @@ -366,7 +365,7 @@ mod test { fn binomial_distributions_can_be_compared() { assert_eq!(Binomial::new(1, 1.0), Binomial::new(1, 1.0)); } - + #[test] fn binomial_avoid_infinite_loop() { let dist = Binomial::new(16000000, 3.1444753148558566e-10).unwrap(); diff --git a/rand_distr/src/cauchy.rs b/rand_distr/src/cauchy.rs index fefaa737daf..5a445ff849e 100644 --- a/rand_distr/src/cauchy.rs +++ b/rand_distr/src/cauchy.rs @@ -9,10 +9,10 @@ //! The Cauchy distribution. -use num_traits::{Float, FloatConst}; use crate::{Distribution, Standard}; -use rand::Rng; use core::fmt; +use num_traits::{Float, FloatConst}; +use rand::Rng; /// The Cauchy distribution `Cauchy(median, scale)`. /// @@ -34,7 +34,9 @@ use core::fmt; #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Cauchy -where F: Float + FloatConst, Standard: Distribution +where + F: Float + FloatConst, + Standard: Distribution, { median: F, scale: F, @@ -60,7 +62,9 @@ impl fmt::Display for Error { impl std::error::Error for Error {} impl Cauchy -where F: Float + FloatConst, Standard: Distribution +where + F: Float + FloatConst, + Standard: Distribution, { /// Construct a new `Cauchy` with the given shape parameters /// `median` the peak location and `scale` the scale factor. @@ -73,7 +77,9 @@ where F: Float + FloatConst, Standard: Distribution } impl Distribution for Cauchy -where F: Float + FloatConst, Standard: Distribution +where + F: Float + FloatConst, + Standard: Distribution, { fn sample(&self, rng: &mut R) -> F { // sample from [0, 1) @@ -138,7 +144,9 @@ mod test { #[test] fn value_stability() { fn gen_samples(m: F, s: F, buf: &mut [F]) - where Standard: Distribution { + where + Standard: Distribution, + { let distr = Cauchy::new(m, s).unwrap(); let mut rng = crate::test::rng(353); for x in buf { @@ -148,12 +156,15 @@ mod test { let mut buf = [0.0; 4]; gen_samples(100f64, 10.0, &mut buf); - assert_eq!(&buf, &[ - 77.93369152808678, - 90.1606912098641, - 125.31516221323625, - 86.10217834773925 - ]); + assert_eq!( + &buf, + &[ + 77.93369152808678, + 90.1606912098641, + 125.31516221323625, + 86.10217834773925 + ] + ); // Unfortunately this test is not fully portable due to reliance on the // system's implementation of tanf (see doc on Cauchy struct). 
diff --git a/rand_distr/src/dirichlet.rs b/rand_distr/src/dirichlet.rs index 413c00476ab..a7e11482a34 100644 --- a/rand_distr/src/dirichlet.rs +++ b/rand_distr/src/dirichlet.rs @@ -13,7 +13,8 @@ use crate::{Beta, Distribution, Exp1, Gamma, Open01, StandardNormal}; use core::fmt; use num_traits::{Float, NumCast}; use rand::Rng; -#[cfg(feature = "serde_with")] use serde_with::serde_as; +#[cfg(feature = "serde_with")] +use serde_with::serde_as; use alloc::{boxed::Box, vec, vec::Vec}; diff --git a/rand_distr/src/exponential.rs b/rand_distr/src/exponential.rs index e3d2a8d1cf6..1fa56a95b15 100644 --- a/rand_distr/src/exponential.rs +++ b/rand_distr/src/exponential.rs @@ -10,10 +10,10 @@ //! The exponential distribution. use crate::utils::ziggurat; -use num_traits::Float; use crate::{ziggurat_tables, Distribution}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; /// Samples floating-point numbers according to the exponential distribution, /// with rate parameter `λ = 1`. This is equivalent to `Exp::new(1.0)` or @@ -61,7 +61,7 @@ impl Distribution for Exp1 { } #[inline] fn zero_case(rng: &mut R, _u: f64) -> f64 { - ziggurat_tables::ZIG_EXP_R - rng.gen::().ln() + ziggurat_tables::ZIG_EXP_R - rng.random::().ln() } ziggurat( @@ -94,7 +94,9 @@ impl Distribution for Exp1 { #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Exp -where F: Float, Exp1: Distribution +where + F: Float, + Exp1: Distribution, { /// `lambda` stored as `1/lambda`, since this is what we scale by. lambda_inverse: F, @@ -120,16 +122,18 @@ impl fmt::Display for Error { impl std::error::Error for Error {} impl Exp -where F: Float, Exp1: Distribution +where + F: Float, + Exp1: Distribution, { /// Construct a new `Exp` with the given shape parameter /// `lambda`. - /// + /// /// # Remarks - /// + /// /// For custom types `N` implementing the [`Float`] trait, /// the case `lambda = 0` is handled as follows: each sample corresponds - /// to a sample from an `Exp1` multiplied by `1 / 0`. Primitive types + /// to a sample from an `Exp1` multiplied by `1 / 0`. Primitive types /// yield infinity, since `1 / 0 = infinity`. #[inline] pub fn new(lambda: F) -> Result, Error> { @@ -143,7 +147,9 @@ where F: Float, Exp1: Distribution } impl Distribution for Exp -where F: Float, Exp1: Distribution +where + F: Float, + Exp1: Distribution, { fn sample(&self, rng: &mut R) -> F { rng.sample(Exp1) * self.lambda_inverse diff --git a/rand_distr/src/gamma.rs b/rand_distr/src/gamma.rs index 1a575bd6a9f..fbafd26824c 100644 --- a/rand_distr/src/gamma.rs +++ b/rand_distr/src/gamma.rs @@ -17,12 +17,12 @@ use self::ChiSquaredRepr::*; use self::GammaRepr::*; use crate::normal::StandardNormal; -use num_traits::Float; use crate::{Distribution, Exp, Exp1, Open01}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; #[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// The Gamma distribution `Gamma(shape, scale)` distribution. 
/// @@ -566,7 +566,9 @@ where F: Float, Open01: Distribution, { - a: F, b: F, switched_params: bool, + a: F, + b: F, + switched_params: bool, algorithm: BetaAlgorithm, } @@ -618,15 +620,19 @@ where if a > F::one() { // Algorithm BB let alpha = a + b; - let beta = ((alpha - F::from(2.).unwrap()) - / (F::from(2.).unwrap()*a*b - alpha)).sqrt(); + + let two = F::from(2.).unwrap(); + let beta_numer = alpha - two; + let beta_denom = two * a * b - alpha; + let beta = (beta_numer / beta_denom).sqrt(); + let gamma = a + F::one() / beta; Ok(Beta { - a, b, switched_params, - algorithm: BetaAlgorithm::BB(BB { - alpha, beta, gamma, - }) + a, + b, + switched_params, + algorithm: BetaAlgorithm::BB(BB { alpha, beta, gamma }), }) } else { // Algorithm BC @@ -637,16 +643,21 @@ where let beta = F::one() / b; let delta = F::one() + a - b; let kappa1 = delta - * (F::from(1. / 18. / 4.).unwrap() + F::from(3. / 18. / 4.).unwrap()*b) - / (a*beta - F::from(14. / 18.).unwrap()); + * (F::from(1. / 18. / 4.).unwrap() + F::from(3. / 18. / 4.).unwrap() * b) + / (a * beta - F::from(14. / 18.).unwrap()); let kappa2 = F::from(0.25).unwrap() - + (F::from(0.5).unwrap() + F::from(0.25).unwrap()/delta)*b; + + (F::from(0.5).unwrap() + F::from(0.25).unwrap() / delta) * b; Ok(Beta { - a, b, switched_params, + a, + b, + switched_params, algorithm: BetaAlgorithm::BC(BC { - alpha, beta, kappa1, kappa2, - }) + alpha, + beta, + kappa1, + kappa2, + }), }) } } @@ -667,12 +678,11 @@ where let u2 = rng.sample(Open01); let v = algo.beta * (u1 / (F::one() - u1)).ln(); w = self.a * v.exp(); - let z = u1*u1 * u2; + let z = u1 * u1 * u2; let r = algo.gamma * v - F::from(4.).unwrap().ln(); let s = self.a + r - w; // 2. - if s + F::one() + F::from(5.).unwrap().ln() - >= F::from(5.).unwrap() * z { + if s + F::one() + F::from(5.).unwrap().ln() >= F::from(5.).unwrap() * z { break; } // 3. @@ -685,7 +695,7 @@ where break; } } - }, + } BetaAlgorithm::BC(algo) => { loop { let z; @@ -716,11 +726,13 @@ where let v = algo.beta * (u1 / (F::one() - u1)).ln(); w = self.a * v.exp(); if !(algo.alpha * ((algo.alpha / (self.b + w)).ln() + v) - - F::from(4.).unwrap().ln() < z.ln()) { + - F::from(4.).unwrap().ln() + < z.ln()) + { break; }; } - }, + } }; // 5. for BB, 6. for BC if !self.switched_params { diff --git a/rand_distr/src/geometric.rs b/rand_distr/src/geometric.rs index 5204013a5bd..0f14d4227d9 100644 --- a/rand_distr/src/geometric.rs +++ b/rand_distr/src/geometric.rs @@ -1,20 +1,20 @@ //! The geometric distribution. use crate::Distribution; -use rand::Rng; use core::fmt; #[allow(unused_imports)] use num_traits::Float; +use rand::Rng; /// The geometric distribution `Geometric(p)` bounded to `[0, u64::MAX]`. -/// +/// /// This is the probability distribution of the number of failures before the /// first success in a series of Bernoulli trials. It has the density function /// `f(k) = (1 - p)^k p` for `k >= 0`, where `p` is the probability of success /// on each trial. -/// +/// /// This is the discrete analogue of the [exponential distribution](crate::Exp). -/// +/// /// Note that [`StandardGeometric`](crate::StandardGeometric) is an optimised /// implementation for `p = 0.5`. /// @@ -29,11 +29,10 @@ use num_traits::Float; /// ``` #[derive(Copy, Clone, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct Geometric -{ +pub struct Geometric { p: f64, pi: f64, - k: u64 + k: u64, } /// Error type returned from `Geometric::new`. 
@@ -46,7 +45,9 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { - Error::InvalidProbability => "p is NaN or outside the interval [0, 1] in geometric distribution", + Error::InvalidProbability => { + "p is NaN or outside the interval [0, 1] in geometric distribution" + } }) } } @@ -80,21 +81,24 @@ impl Geometric { } } -impl Distribution for Geometric -{ +impl Distribution for Geometric { fn sample(&self, rng: &mut R) -> u64 { if self.p >= 2.0 / 3.0 { // use the trivial algorithm: let mut failures = 0; loop { - let u = rng.gen::(); - if u <= self.p { break; } + let u = rng.random::(); + if u <= self.p { + break; + } failures += 1; } return failures; } - - if self.p == 0.0 { return u64::MAX; } + + if self.p == 0.0 { + return u64::MAX; + } let Geometric { p, pi, k } = *self; @@ -108,7 +112,7 @@ impl Distribution for Geometric // Use the trivial algorithm to sample D from Geo(pi) = Geo(p) / 2^k: let d = { let mut failures = 0; - while rng.gen::() < pi { + while rng.random::() < pi { failures += 1; } failures @@ -116,18 +120,18 @@ impl Distribution for Geometric // Use rejection sampling for the remainder M from Geo(p) % 2^k: // choose M uniformly from [0, 2^k), but reject with probability (1 - p)^M - // NOTE: The paper suggests using bitwise sampling here, which is + // NOTE: The paper suggests using bitwise sampling here, which is // currently unsupported, but should improve performance by requiring // fewer iterations on average. ~ October 28, 2020 let m = loop { - let m = rng.gen::() & ((1 << k) - 1); + let m = rng.random::() & ((1 << k) - 1); let p_reject = if m <= i32::MAX as u64 { (1.0 - p).powi(m as i32) } else { (1.0 - p).powf(m as f64) }; - - let u = rng.gen::(); + + let u = rng.random::(); if u < p_reject { break m; } @@ -140,17 +144,17 @@ impl Distribution for Geometric /// Samples integers according to the geometric distribution with success /// probability `p = 0.5`. This is equivalent to `Geometeric::new(0.5)`, /// but faster. -/// +/// /// See [`Geometric`](crate::Geometric) for the general geometric distribution. -/// +/// /// Implemented via iterated /// [`Rng::gen::().leading_zeros()`](Rng::gen::().leading_zeros()). -/// +/// /// # Example /// ``` /// use rand::prelude::*; /// use rand_distr::StandardGeometric; -/// +/// /// let v = StandardGeometric.sample(&mut thread_rng()); /// println!("{} is from a Geometric(0.5) distribution", v); /// ``` @@ -162,9 +166,11 @@ impl Distribution for StandardGeometric { fn sample(&self, rng: &mut R) -> u64 { let mut result = 0; loop { - let x = rng.gen::().leading_zeros() as u64; + let x = rng.random::().leading_zeros() as u64; result += x; - if x < 64 { break; } + if x < 64 { + break; + } } result } diff --git a/rand_distr/src/hypergeometric.rs b/rand_distr/src/hypergeometric.rs index 73a8e91c75e..84fde5437d8 100644 --- a/rand_distr/src/hypergeometric.rs +++ b/rand_distr/src/hypergeometric.rs @@ -1,17 +1,20 @@ //! The hypergeometric distribution. 
use crate::Distribution; -use rand::Rng; -use rand::distributions::uniform::Uniform; use core::fmt; #[allow(unused_imports)] use num_traits::Float; +use rand::distributions::uniform::Uniform; +use rand::Rng; #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] enum SamplingMethod { - InverseTransform{ initial_p: f64, initial_x: i64 }, - RejectionAcceptance{ + InverseTransform { + initial_p: f64, + initial_x: i64, + }, + RejectionAcceptance { m: f64, a: f64, lambda_l: f64, @@ -20,24 +23,24 @@ enum SamplingMethod { x_r: f64, p1: f64, p2: f64, - p3: f64 + p3: f64, }, } /// The hypergeometric distribution `Hypergeometric(N, K, n)`. -/// +/// /// This is the distribution of successes in samples of size `n` drawn without /// replacement from a population of size `N` containing `K` success states. /// It has the density function: /// `f(k) = binomial(K, k) * binomial(N-K, n-k) / binomial(N, n)`, /// where `binomial(a, b) = a! / (b! * (a - b)!)`. -/// +/// /// The [binomial distribution](crate::Binomial) is the analogous distribution /// for sampling with replacement. It is a good approximation when the population /// size is much larger than the sample size. -/// +/// /// # Example -/// +/// /// ``` /// use rand_distr::{Distribution, Hypergeometric}; /// @@ -70,9 +73,15 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { - Error::PopulationTooLarge => "total_population_size is too large causing underflow in geometric distribution", - Error::ProbabilityTooLarge => "population_with_feature > total_population_size in geometric distribution", - Error::SampleSizeTooLarge => "sample_size > total_population_size in geometric distribution", + Error::PopulationTooLarge => { + "total_population_size is too large causing underflow in geometric distribution" + } + Error::ProbabilityTooLarge => { + "population_with_feature > total_population_size in geometric distribution" + } + Error::SampleSizeTooLarge => { + "sample_size > total_population_size in geometric distribution" + } }) } } @@ -97,20 +106,20 @@ fn fraction_of_products_of_factorials(numerator: (u64, u64), denominator: (u64, if i <= min_top { result *= i as f64; } - + if i <= min_bottom { result /= i as f64; } - + if i <= max_top { result *= i as f64; } - + if i <= max_bottom { result /= i as f64; } } - + result } @@ -126,7 +135,11 @@ impl Hypergeometric { /// `K = population_with_feature`, /// `n = sample_size`. #[allow(clippy::many_single_char_names)] // Same names as in the reference. - pub fn new(total_population_size: u64, population_with_feature: u64, sample_size: u64) -> Result { + pub fn new( + total_population_size: u64, + population_with_feature: u64, + sample_size: u64, + ) -> Result { if population_with_feature > total_population_size { return Err(Error::ProbabilityTooLarge); } @@ -151,7 +164,7 @@ impl Hypergeometric { }; // when sampling more than half the total population, take the smaller // group as sampled instead (we can then return n1-x instead). - // + // // Note: the boundary condition given in the paper is `sample_size < n / 2`; // we're deviating here, because when n is even, it doesn't matter whether // we switch here or not, but when n is odd `n/2 < n - n/2`, so switching @@ -167,7 +180,7 @@ impl Hypergeometric { // Algorithm H2PE has bounded runtime only if `M - max(0, k-n2) >= 10`, // where `M` is the mode of the distribution. 
// Use algorithm HIN for the remaining parameter space. - // + // // Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1985. Computer // generation of hypergeometric random variates. // J. Statist. Comput. Simul. Vol.22 (August 1985), 127-145 @@ -176,21 +189,30 @@ impl Hypergeometric { let m = ((k + 1) as f64 * (n1 + 1) as f64 / (n + 2) as f64).floor(); let sampling_method = if m - f64::max(0.0, k as f64 - n2 as f64) < HIN_THRESHOLD { let (initial_p, initial_x) = if k < n2 { - (fraction_of_products_of_factorials((n2, n - k), (n, n2 - k)), 0) + ( + fraction_of_products_of_factorials((n2, n - k), (n, n2 - k)), + 0, + ) } else { - (fraction_of_products_of_factorials((n1, k), (n, k - n2)), (k - n2) as i64) + ( + fraction_of_products_of_factorials((n1, k), (n, k - n2)), + (k - n2) as i64, + ) }; if initial_p <= 0.0 || !initial_p.is_finite() { return Err(Error::PopulationTooLarge); } - SamplingMethod::InverseTransform { initial_p, initial_x } + SamplingMethod::InverseTransform { + initial_p, + initial_x, + } } else { - let a = ln_of_factorial(m) + - ln_of_factorial(n1 as f64 - m) + - ln_of_factorial(k as f64 - m) + - ln_of_factorial((n2 - k) as f64 + m); + let a = ln_of_factorial(m) + + ln_of_factorial(n1 as f64 - m) + + ln_of_factorial(k as f64 - m) + + ln_of_factorial((n2 - k) as f64 + m); let numerator = (n - k) as f64 * k as f64 * n1 as f64 * n2 as f64; let denominator = (n - 1) as f64 * n as f64 * n as f64; @@ -199,17 +221,19 @@ impl Hypergeometric { let x_l = m - d + 0.5; let x_r = m + d + 0.5; - let k_l = f64::exp(a - - ln_of_factorial(x_l) - - ln_of_factorial(n1 as f64 - x_l) - - ln_of_factorial(k as f64 - x_l) - - ln_of_factorial((n2 - k) as f64 + x_l)); - let k_r = f64::exp(a - - ln_of_factorial(x_r - 1.0) - - ln_of_factorial(n1 as f64 - x_r + 1.0) - - ln_of_factorial(k as f64 - x_r + 1.0) - - ln_of_factorial((n2 - k) as f64 + x_r - 1.0)); - + let k_l = f64::exp( + a - ln_of_factorial(x_l) + - ln_of_factorial(n1 as f64 - x_l) + - ln_of_factorial(k as f64 - x_l) + - ln_of_factorial((n2 - k) as f64 + x_l), + ); + let k_r = f64::exp( + a - ln_of_factorial(x_r - 1.0) + - ln_of_factorial(n1 as f64 - x_r + 1.0) + - ln_of_factorial(k as f64 - x_r + 1.0) + - ln_of_factorial((n2 - k) as f64 + x_r - 1.0), + ); + let numerator = x_l * ((n2 - k) as f64 + x_l); let denominator = (n1 as f64 - x_l + 1.0) * (k as f64 - x_l + 1.0); let lambda_l = -((numerator / denominator).ln()); @@ -225,11 +249,26 @@ impl Hypergeometric { let p3 = p2 + k_r / lambda_r; SamplingMethod::RejectionAcceptance { - m, a, lambda_l, lambda_r, x_l, x_r, p1, p2, p3 + m, + a, + lambda_l, + lambda_r, + x_l, + x_r, + p1, + p2, + p3, } }; - Ok(Hypergeometric { n1, n2, k, offset_x, sign_x, sampling_method }) + Ok(Hypergeometric { + n1, + n2, + k, + offset_x, + sign_x, + sampling_method, + }) } } @@ -238,25 +277,47 @@ impl Distribution for Hypergeometric { fn sample(&self, rng: &mut R) -> u64 { use SamplingMethod::*; - let Hypergeometric { n1, n2, k, sign_x, offset_x, sampling_method } = *self; + let Hypergeometric { + n1, + n2, + k, + sign_x, + offset_x, + sampling_method, + } = *self; let x = match sampling_method { - InverseTransform { initial_p: mut p, initial_x: mut x } => { - let mut u = rng.gen::(); - while u > p && x < k as i64 { // the paper erroneously uses `until n < p`, which doesn't make any sense + InverseTransform { + initial_p: mut p, + initial_x: mut x, + } => { + let mut u = rng.random::(); + + // the paper erroneously uses `until n < p`, which doesn't make any sense + while u > p && x < k as i64 { u -= p; p *= ((n1 
as i64 - x) * (k as i64 - x)) as f64; p /= ((x + 1) * (n2 as i64 - k as i64 + 1 + x)) as f64; x += 1; } x - }, - RejectionAcceptance { m, a, lambda_l, lambda_r, x_l, x_r, p1, p2, p3 } => { + } + RejectionAcceptance { + m, + a, + lambda_l, + lambda_r, + x_l, + x_r, + p1, + p2, + p3, + } => { let distr_region_select = Uniform::new(0.0, p3).unwrap(); loop { let (y, v) = loop { let u = distr_region_select.sample(rng); - let v = rng.gen::(); // for the accept/reject decision - + let v = rng.random::(); // for the accept/reject decision + if u <= p1 { // Region 1, central bell let y = (x_l + u).floor(); @@ -277,7 +338,7 @@ impl Distribution for Hypergeometric { } } }; - + // Step 4: Acceptance/Rejection Comparison if m < 100.0 || y <= 50.0 { // Step 4.1: evaluate f(y) via recursive relationship @@ -293,8 +354,10 @@ impl Distribution for Hypergeometric { f /= (n1 - i) as f64 * (k - i) as f64; } } - - if v <= f { break y as i64; } + + if v <= f { + break y as i64; + } } else { // Step 4.2: Squeezing let y1 = y + 1.0; @@ -307,24 +370,24 @@ impl Distribution for Hypergeometric { let t = ym / yk; let e = -ym / nk; let g = yn * yk / (y1 * nk) - 1.0; - let dg = if g < 0.0 { - 1.0 + g - } else { - 1.0 - }; + let dg = if g < 0.0 { 1.0 + g } else { 1.0 }; let gu = g * (1.0 + g * (-0.5 + g / 3.0)); let gl = gu - g.powi(4) / (4.0 * dg); let xm = m + 0.5; let xn = n1 as f64 - m + 0.5; let xk = k as f64 - m + 0.5; let nm = n2 as f64 - k as f64 + xm; - let ub = xm * r * (1.0 + r * (-0.5 + r / 3.0)) + - xn * s * (1.0 + s * (-0.5 + s / 3.0)) + - xk * t * (1.0 + t * (-0.5 + t / 3.0)) + - nm * e * (1.0 + e * (-0.5 + e / 3.0)) + - y * gu - m * gl + 0.0034; + let ub = xm * r * (1.0 + r * (-0.5 + r / 3.0)) + + xn * s * (1.0 + s * (-0.5 + s / 3.0)) + + xk * t * (1.0 + t * (-0.5 + t / 3.0)) + + nm * e * (1.0 + e * (-0.5 + e / 3.0)) + + y * gu + - m * gl + + 0.0034; let av = v.ln(); - if av > ub { continue; } + if av > ub { + continue; + } let dr = if r < 0.0 { xm * r.powi(4) / (1.0 + r) } else { @@ -345,17 +408,17 @@ impl Distribution for Hypergeometric { } else { nm * e.powi(4) }; - - if av < ub - 0.25*(dr + ds + dt + de) + (y + m)*(gl - gu) - 0.0078 { + + if av < ub - 0.25 * (dr + ds + dt + de) + (y + m) * (gl - gu) - 0.0078 { break y as i64; } - + // Step 4.3: Final Acceptance/Rejection Test - let av_critical = a - - ln_of_factorial(y) - - ln_of_factorial(n1 as f64 - y) - - ln_of_factorial(k as f64 - y) - - ln_of_factorial((n2 - k) as f64 + y); + let av_critical = a + - ln_of_factorial(y) + - ln_of_factorial(n1 as f64 - y) + - ln_of_factorial(k as f64 - y) + - ln_of_factorial((n2 - k) as f64 + y); if v.ln() <= av_critical { break y as i64; } @@ -380,8 +443,7 @@ mod test { assert!(Hypergeometric::new(100, 10, 5).is_ok()); } - fn test_hypergeometric_mean_and_variance(n: u64, k: u64, s: u64, rng: &mut R) - { + fn test_hypergeometric_mean_and_variance(n: u64, k: u64, s: u64, rng: &mut R) { let distr = Hypergeometric::new(n, k, s).unwrap(); let expected_mean = s as f64 * k as f64 / n as f64; diff --git a/rand_distr/src/inverse_gaussian.rs b/rand_distr/src/inverse_gaussian.rs index ba845fd1505..6518ee9e958 100644 --- a/rand_distr/src/inverse_gaussian.rs +++ b/rand_distr/src/inverse_gaussian.rs @@ -1,7 +1,7 @@ use crate::{Distribution, Standard, StandardNormal}; +use core::fmt; use num_traits::Float; use rand::Rng; -use core::fmt; /// Error type returned from `InverseGaussian::new` #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -68,7 +68,9 @@ where { #[allow(clippy::many_single_char_names)] fn sample(&self, rng: &mut R) 
-> F - where R: Rng + ?Sized { + where + R: Rng + ?Sized, + { let mu = self.mean; let l = self.shape; @@ -79,7 +81,7 @@ where let x = mu + mu_2l * (y - (F::from(4.).unwrap() * l * y + y * y).sqrt()); - let u: F = rng.gen(); + let u: F = rng.random(); if u <= mu / (mu + x) { return x; @@ -112,6 +114,9 @@ mod tests { #[test] fn inverse_gaussian_distributions_can_be_compared() { - assert_eq!(InverseGaussian::new(1.0, 2.0), InverseGaussian::new(1.0, 2.0)); + assert_eq!( + InverseGaussian::new(1.0, 2.0), + InverseGaussian::new(1.0, 2.0) + ); } } diff --git a/rand_distr/src/lib.rs b/rand_distr/src/lib.rs index dc155bb5d5d..ea57dd0f742 100644 --- a/rand_distr/src/lib.rs +++ b/rand_distr/src/lib.rs @@ -21,6 +21,7 @@ )] #![allow(clippy::neg_cmp_op_on_partial_ord)] // suggested fix too verbose #![no_std] +#![allow(unexpected_cfgs)] #![cfg_attr(doc_cfg, feature(doc_cfg))] //! Generating random samples from probability distributions. @@ -178,10 +179,14 @@ mod test { macro_rules! assert_almost_eq { ($a:expr, $b:expr, $prec:expr) => { let diff = ($a - $b).abs(); - assert!(diff <= $prec, + assert!( + diff <= $prec, "assertion failed: `abs(left - right) = {:.1e} < {:e}`, \ (left: `{}`, right: `{}`)", - diff, $prec, $a, $b + diff, + $prec, + $a, + $b ); }; } diff --git a/rand_distr/src/normal.rs b/rand_distr/src/normal.rs index 635f26f1d43..8ef231b0f32 100644 --- a/rand_distr/src/normal.rs +++ b/rand_distr/src/normal.rs @@ -10,10 +10,10 @@ //! The normal and derived distributions. use crate::utils::ziggurat; -use num_traits::Float; use crate::{ziggurat_tables, Distribution, Open01}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; /// Samples floating-point numbers according to the normal distribution /// `N(0, 1)` (a.k.a. a standard normal, or Gaussian). This is equivalent to @@ -115,7 +115,9 @@ impl Distribution for StandardNormal { #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Normal -where F: Float, StandardNormal: Distribution +where + F: Float, + StandardNormal: Distribution, { mean: F, std_dev: F, @@ -144,7 +146,9 @@ impl fmt::Display for Error { impl std::error::Error for Error {} impl Normal -where F: Float, StandardNormal: Distribution +where + F: Float, + StandardNormal: Distribution, { /// Construct, from mean and standard deviation /// @@ -204,14 +208,15 @@ where F: Float, StandardNormal: Distribution } impl Distribution for Normal -where F: Float, StandardNormal: Distribution +where + F: Float, + StandardNormal: Distribution, { fn sample(&self, rng: &mut R) -> F { self.from_zscore(rng.sample(StandardNormal)) } } - /// The log-normal distribution `ln N(mean, std_dev**2)`. 
/// /// If `X` is log-normal distributed, then `ln(X)` is `N(mean, std_dev**2)` @@ -230,13 +235,17 @@ where F: Float, StandardNormal: Distribution #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct LogNormal -where F: Float, StandardNormal: Distribution +where + F: Float, + StandardNormal: Distribution, { norm: Normal, } impl LogNormal -where F: Float, StandardNormal: Distribution +where + F: Float, + StandardNormal: Distribution, { /// Construct, from (log-space) mean and standard deviation /// @@ -307,7 +316,9 @@ where F: Float, StandardNormal: Distribution } impl Distribution for LogNormal -where F: Float, StandardNormal: Distribution +where + F: Float, + StandardNormal: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> F { @@ -348,7 +359,10 @@ mod tests { #[test] fn test_log_normal_cv() { let lnorm = LogNormal::from_mean_cv(0.0, 0.0).unwrap(); - assert_eq!((lnorm.norm.mean, lnorm.norm.std_dev), (f64::NEG_INFINITY, 0.0)); + assert_eq!( + (lnorm.norm.mean, lnorm.norm.std_dev), + (f64::NEG_INFINITY, 0.0) + ); let lnorm = LogNormal::from_mean_cv(1.0, 0.0).unwrap(); assert_eq!((lnorm.norm.mean, lnorm.norm.std_dev), (0.0, 0.0)); diff --git a/rand_distr/src/normal_inverse_gaussian.rs b/rand_distr/src/normal_inverse_gaussian.rs index 7c5ad971710..b7988f40bb3 100644 --- a/rand_distr/src/normal_inverse_gaussian.rs +++ b/rand_distr/src/normal_inverse_gaussian.rs @@ -1,7 +1,7 @@ use crate::{Distribution, InverseGaussian, Standard, StandardNormal}; +use core::fmt; use num_traits::Float; use rand::Rng; -use core::fmt; /// Error type returned from `NormalInverseGaussian::new` #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -15,8 +15,12 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { - Error::AlphaNegativeOrNull => "alpha <= 0 or is NaN in normal inverse Gaussian distribution", - Error::AbsoluteBetaNotLessThanAlpha => "|beta| >= alpha or is NaN in normal inverse Gaussian distribution", + Error::AlphaNegativeOrNull => { + "alpha <= 0 or is NaN in normal inverse Gaussian distribution" + } + Error::AbsoluteBetaNotLessThanAlpha => { + "|beta| >= alpha or is NaN in normal inverse Gaussian distribution" + } }) } } @@ -75,7 +79,9 @@ where Standard: Distribution, { fn sample(&self, rng: &mut R) -> F - where R: Rng + ?Sized { + where + R: Rng + ?Sized, + { let inv_gauss = rng.sample(self.inverse_gaussian); self.beta * inv_gauss + inv_gauss.sqrt() * rng.sample(StandardNormal) @@ -105,6 +111,9 @@ mod tests { #[test] fn normal_inverse_gaussian_distributions_can_be_compared() { - assert_eq!(NormalInverseGaussian::new(1.0, 2.0), NormalInverseGaussian::new(1.0, 2.0)); + assert_eq!( + NormalInverseGaussian::new(1.0, 2.0), + NormalInverseGaussian::new(1.0, 2.0) + ); } } diff --git a/rand_distr/src/pareto.rs b/rand_distr/src/pareto.rs index 25c8e0537dd..952afec4bc5 100644 --- a/rand_distr/src/pareto.rs +++ b/rand_distr/src/pareto.rs @@ -8,10 +8,10 @@ //! The Pareto distribution. 
-use num_traits::Float; use crate::{Distribution, OpenClosed01}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; /// Samples floating-point numbers according to the Pareto distribution /// @@ -26,7 +26,9 @@ use core::fmt; #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Pareto -where F: Float, OpenClosed01: Distribution +where + F: Float, + OpenClosed01: Distribution, { scale: F, inv_neg_shape: F, @@ -55,7 +57,9 @@ impl fmt::Display for Error { impl std::error::Error for Error {} impl Pareto -where F: Float, OpenClosed01: Distribution +where + F: Float, + OpenClosed01: Distribution, { /// Construct a new Pareto distribution with given `scale` and `shape`. /// @@ -78,7 +82,9 @@ where F: Float, OpenClosed01: Distribution } impl Distribution for Pareto -where F: Float, OpenClosed01: Distribution +where + F: Float, + OpenClosed01: Distribution, { fn sample(&self, rng: &mut R) -> F { let u: F = OpenClosed01.sample(rng); @@ -112,7 +118,9 @@ mod tests { #[test] fn value_stability() { fn test_samples>( - distr: D, thresh: F, expected: &[F], + distr: D, + thresh: F, + expected: &[F], ) { let mut rng = crate::test::rng(213); for v in expected { @@ -121,15 +129,21 @@ mod tests { } } - test_samples(Pareto::new(1f32, 1.0).unwrap(), 1e-6, &[ - 1.0423688, 2.1235929, 4.132709, 1.4679428, - ]); - test_samples(Pareto::new(2.0, 0.5).unwrap(), 1e-14, &[ - 9.019295276219136, - 4.3097126018270595, - 6.837815045397157, - 105.8826669383772, - ]); + test_samples( + Pareto::new(1f32, 1.0).unwrap(), + 1e-6, + &[1.0423688, 2.1235929, 4.132709, 1.4679428], + ); + test_samples( + Pareto::new(2.0, 0.5).unwrap(), + 1e-14, + &[ + 9.019295276219136, + 4.3097126018270595, + 6.837815045397157, + 105.8826669383772, + ], + ); } #[test] diff --git a/rand_distr/src/pert.rs b/rand_distr/src/pert.rs index 9ed79bf28ff..48114d97be2 100644 --- a/rand_distr/src/pert.rs +++ b/rand_distr/src/pert.rs @@ -7,10 +7,10 @@ // except according to those terms. //! The PERT distribution. -use num_traits::Float; use crate::{Beta, Distribution, Exp1, Open01, StandardNormal}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; /// The PERT distribution. /// @@ -129,20 +129,12 @@ mod test { #[test] fn test_pert() { - for &(min, max, mode) in &[ - (-1., 1., 0.), - (1., 2., 1.), - (5., 25., 25.), - ] { + for &(min, max, mode) in &[(-1., 1., 0.), (1., 2., 1.), (5., 25., 25.)] { let _distr = Pert::new(min, max, mode).unwrap(); // TODO: test correctness } - for &(min, max, mode) in &[ - (-1., 1., 2.), - (-1., 1., -2.), - (2., 1., 1.), - ] { + for &(min, max, mode) in &[(-1., 1., 2.), (-1., 1., -2.), (2., 1., 1.)] { assert!(Pert::new(min, max, mode).is_err()); } } diff --git a/rand_distr/src/poisson.rs b/rand_distr/src/poisson.rs index 50d74298356..5de3113de00 100644 --- a/rand_distr/src/poisson.rs +++ b/rand_distr/src/poisson.rs @@ -9,10 +9,10 @@ //! The Poisson distribution. -use num_traits::{Float, FloatConst}; use crate::{Cauchy, Distribution, Standard}; -use rand::Rng; use core::fmt; +use num_traits::{Float, FloatConst}; +use rand::Rng; /// The Poisson distribution `Poisson(lambda)`. 
/// @@ -31,7 +31,9 @@ use core::fmt; #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Poisson -where F: Float + FloatConst, Standard: Distribution +where + F: Float + FloatConst, + Standard: Distribution, { lambda: F, // precalculated values @@ -64,7 +66,9 @@ impl fmt::Display for Error { impl std::error::Error for Error {} impl Poisson -where F: Float + FloatConst, Standard: Distribution +where + F: Float + FloatConst, + Standard: Distribution, { /// Construct a new `Poisson` with the given shape parameter /// `lambda`. @@ -87,7 +91,9 @@ where F: Float + FloatConst, Standard: Distribution } impl Distribution for Poisson -where F: Float + FloatConst, Standard: Distribution +where + F: Float + FloatConst, + Standard: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> F { @@ -96,9 +102,9 @@ where F: Float + FloatConst, Standard: Distribution // for low expected values use the Knuth method if self.lambda < F::from(12.0).unwrap() { let mut result = F::one(); - let mut p = rng.gen::(); + let mut p = rng.random::(); while p > self.exp_lambda { - p = p*rng.gen::(); + p = p * rng.random::(); result = result + F::one(); } result - F::one() @@ -139,7 +145,7 @@ where F: Float + FloatConst, Standard: Distribution .exp(); // check with uniform random value - if below the threshold, we are within the target distribution - if rng.gen::() <= check { + if rng.random::() <= check { break; } } @@ -153,7 +159,8 @@ mod test { use super::*; fn test_poisson_avg_gen(lambda: F, tol: F) - where Standard: Distribution + where + Standard: Distribution, { let poisson = Poisson::new(lambda).unwrap(); let mut rng = crate::test::rng(123); @@ -173,7 +180,7 @@ mod test { test_poisson_avg_gen::(10.0, 0.1); test_poisson_avg_gen::(15.0, 0.1); - //Small lambda will use Knuth's method with exp_lambda == 1.0 + // Small lambda will use Knuth's method with exp_lambda == 1.0 test_poisson_avg_gen::(0.00000000000000005, 0.1); test_poisson_avg_gen::(0.00000000000000005, 0.1); } diff --git a/rand_distr/src/skew_normal.rs b/rand_distr/src/skew_normal.rs index 3577147f863..ad7dd2b5635 100644 --- a/rand_distr/src/skew_normal.rs +++ b/rand_distr/src/skew_normal.rs @@ -150,9 +150,7 @@ where mod tests { use super::*; - fn test_samples>( - distr: D, zero: F, expected: &[F], - ) { + fn test_samples>(distr: D, zero: F, expected: &[F]) { let mut rng = crate::test::rng(213); let mut buf = [zero; 4]; for x in &mut buf { @@ -222,12 +220,7 @@ mod tests { test_samples( SkewNormal::new(f64::INFINITY, 1.0, 0.0).unwrap(), 0f64, - &[ - f64::INFINITY, - f64::INFINITY, - f64::INFINITY, - f64::INFINITY, - ], + &[f64::INFINITY, f64::INFINITY, f64::INFINITY, f64::INFINITY], ); test_samples( SkewNormal::new(f64::NEG_INFINITY, 1.0, 0.0).unwrap(), @@ -256,6 +249,9 @@ mod tests { #[test] fn skew_normal_distributions_can_be_compared() { - assert_eq!(SkewNormal::new(1.0, 2.0, 3.0), SkewNormal::new(1.0, 2.0, 3.0)); + assert_eq!( + SkewNormal::new(1.0, 2.0, 3.0), + SkewNormal::new(1.0, 2.0, 3.0) + ); } } diff --git a/rand_distr/src/triangular.rs b/rand_distr/src/triangular.rs index eef7d190133..083725f78c7 100644 --- a/rand_distr/src/triangular.rs +++ b/rand_distr/src/triangular.rs @@ -7,10 +7,10 @@ // except according to those terms. //! The triangular distribution. -use num_traits::Float; use crate::{Distribution, Standard}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; /// The triangular distribution. 
/// @@ -34,7 +34,9 @@ use core::fmt; #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Triangular -where F: Float, Standard: Distribution +where + F: Float, + Standard: Distribution, { min: F, max: F, @@ -66,7 +68,9 @@ impl fmt::Display for TriangularError { impl std::error::Error for TriangularError {} impl Triangular -where F: Float, Standard: Distribution +where + F: Float, + Standard: Distribution, { /// Set up the Triangular distribution with defined `min`, `max` and `mode`. #[inline] @@ -82,7 +86,9 @@ where F: Float, Standard: Distribution } impl Distribution for Triangular -where F: Float, Standard: Distribution +where + F: Float, + Standard: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> F { @@ -106,7 +112,7 @@ mod test { #[test] fn test_triangular() { let mut half_rng = mock::StepRng::new(0x8000_0000_0000_0000, 0); - assert_eq!(half_rng.gen::(), 0.5); + assert_eq!(half_rng.random::(), 0.5); for &(min, max, mode, median) in &[ (-1., 1., 0., 0.), (1., 2., 1., 2. - 0.5f64.sqrt()), @@ -122,17 +128,16 @@ mod test { assert_eq!(distr.sample(&mut half_rng), median); } - for &(min, max, mode) in &[ - (-1., 1., 2.), - (-1., 1., -2.), - (2., 1., 1.), - ] { + for &(min, max, mode) in &[(-1., 1., 2.), (-1., 1., -2.), (2., 1., 1.)] { assert!(Triangular::new(min, max, mode).is_err()); } } #[test] fn triangular_distributions_can_be_compared() { - assert_eq!(Triangular::new(1.0, 3.0, 2.0), Triangular::new(1.0, 3.0, 2.0)); + assert_eq!( + Triangular::new(1.0, 3.0, 2.0), + Triangular::new(1.0, 3.0, 2.0) + ); } } diff --git a/rand_distr/src/unit_ball.rs b/rand_distr/src/unit_ball.rs index 4d29612597f..84ba6909abf 100644 --- a/rand_distr/src/unit_ball.rs +++ b/rand_distr/src/unit_ball.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use num_traits::Float; use crate::{uniform::SampleUniform, Distribution, Uniform}; +use num_traits::Float; use rand::Rng; /// Samples uniformly from the unit ball (surface and interior) in three diff --git a/rand_distr/src/unit_circle.rs b/rand_distr/src/unit_circle.rs index f3dbe757aa9..8b67545300a 100644 --- a/rand_distr/src/unit_circle.rs +++ b/rand_distr/src/unit_circle.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use num_traits::Float; use crate::{uniform::SampleUniform, Distribution, Uniform}; +use num_traits::Float; use rand::Rng; /// Samples uniformly from the edge of the unit circle in two dimensions. diff --git a/rand_distr/src/unit_disc.rs b/rand_distr/src/unit_disc.rs index 5004217d5b7..bcf33c7924f 100644 --- a/rand_distr/src/unit_disc.rs +++ b/rand_distr/src/unit_disc.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use num_traits::Float; use crate::{uniform::SampleUniform, Distribution, Uniform}; +use num_traits::Float; use rand::Rng; /// Samples uniformly from the unit disc in two dimensions. diff --git a/rand_distr/src/unit_sphere.rs b/rand_distr/src/unit_sphere.rs index 632275e3327..5e7f8fe7712 100644 --- a/rand_distr/src/unit_sphere.rs +++ b/rand_distr/src/unit_sphere.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use num_traits::Float; use crate::{uniform::SampleUniform, Distribution, Uniform}; +use num_traits::Float; use rand::Rng; /// Samples uniformly from the surface of the unit sphere in three dimensions. @@ -42,7 +42,11 @@ impl Distribution<[F; 3]> for UnitSphere { continue; } let factor = F::from(2.).unwrap() * (F::one() - sum).sqrt(); - return [x1 * factor, x2 * factor, F::from(1.).unwrap() - F::from(2.).unwrap() * sum]; + return [ + x1 * factor, + x2 * factor, + F::from(1.).unwrap() - F::from(2.).unwrap() * sum, + ]; } } } diff --git a/rand_distr/src/utils.rs b/rand_distr/src/utils.rs index 052bfc49991..fb49ab85762 100644 --- a/rand_distr/src/utils.rs +++ b/rand_distr/src/utils.rs @@ -9,9 +9,9 @@ //! Math helper functions use crate::ziggurat_tables; +use num_traits::Float; use rand::distributions::hidden_export::IntoFloat; use rand::Rng; -use num_traits::Float; /// Calculates ln(gamma(x)) (natural logarithm of the gamma /// function) using the Lanczos approximation. @@ -77,7 +77,7 @@ pub(crate) fn ziggurat( x_tab: ziggurat_tables::ZigTable, f_tab: ziggurat_tables::ZigTable, mut pdf: P, - mut zero_case: Z + mut zero_case: Z, ) -> f64 where P: FnMut(f64) -> f64, @@ -114,7 +114,7 @@ where return zero_case(rng, u); } // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1 - if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::() < pdf(x) { + if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.random::() < pdf(x) { return x; } } diff --git a/rand_distr/src/weibull.rs b/rand_distr/src/weibull.rs index 2ab74edde2c..2fba2a87079 100644 --- a/rand_distr/src/weibull.rs +++ b/rand_distr/src/weibull.rs @@ -8,10 +8,10 @@ //! The Weibull distribution. -use num_traits::Float; use crate::{Distribution, OpenClosed01}; -use rand::Rng; use core::fmt; +use num_traits::Float; +use rand::Rng; /// Samples floating-point numbers according to the Weibull distribution /// @@ -26,7 +26,9 @@ use core::fmt; #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Weibull -where F: Float, OpenClosed01: Distribution +where + F: Float, + OpenClosed01: Distribution, { inv_shape: F, scale: F, @@ -55,7 +57,9 @@ impl fmt::Display for Error { impl std::error::Error for Error {} impl Weibull -where F: Float, OpenClosed01: Distribution +where + F: Float, + OpenClosed01: Distribution, { /// Construct a new `Weibull` distribution with given `scale` and `shape`. 
pub fn new(scale: F, shape: F) -> Result, Error> { @@ -73,7 +77,9 @@ where F: Float, OpenClosed01: Distribution } impl Distribution for Weibull -where F: Float, OpenClosed01: Distribution +where + F: Float, + OpenClosed01: Distribution, { fn sample(&self, rng: &mut R) -> F { let x: F = rng.sample(OpenClosed01); @@ -106,7 +112,9 @@ mod tests { #[test] fn value_stability() { fn test_samples>( - distr: D, zero: F, expected: &[F], + distr: D, + zero: F, + expected: &[F], ) { let mut rng = crate::test::rng(213); let mut buf = [zero; 4]; @@ -116,18 +124,21 @@ mod tests { assert_eq!(buf, expected); } - test_samples(Weibull::new(1.0, 1.0).unwrap(), 0f32, &[ - 0.041495778, - 0.7531094, - 1.4189332, - 0.38386202, - ]); - test_samples(Weibull::new(2.0, 0.5).unwrap(), 0f64, &[ - 1.1343478702739669, - 0.29470010050655226, - 0.7556151370284702, - 7.877212340241561, - ]); + test_samples( + Weibull::new(1.0, 1.0).unwrap(), + 0f32, + &[0.041495778, 0.7531094, 1.4189332, 0.38386202], + ); + test_samples( + Weibull::new(2.0, 0.5).unwrap(), + 0f64, + &[ + 1.1343478702739669, + 0.29470010050655226, + 0.7556151370284702, + 7.877212340241561, + ], + ); } #[test] diff --git a/rand_distr/src/weighted_alias.rs b/rand_distr/src/weighted_alias.rs index 0c07ead0df9..beb31f0733b 100644 --- a/rand_distr/src/weighted_alias.rs +++ b/rand_distr/src/weighted_alias.rs @@ -11,13 +11,13 @@ use super::WeightError; use crate::{uniform::SampleUniform, Distribution, Uniform}; +use alloc::{boxed::Box, vec, vec::Vec}; use core::fmt; use core::iter::Sum; use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign}; use rand::Rng; -use alloc::{boxed::Box, vec, vec::Vec}; #[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// A distribution using weighted sampling to pick a discretely selected item. /// @@ -67,8 +67,14 @@ use serde::{Serialize, Deserialize}; /// [`Uniform::sample`]: Distribution::sample #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde1", serde(bound(serialize = "W: Serialize, W::Sampler: Serialize")))] -#[cfg_attr(feature = "serde1", serde(bound(deserialize = "W: Deserialize<'de>, W::Sampler: Deserialize<'de>")))] +#[cfg_attr( + feature = "serde1", + serde(bound(serialize = "W: Serialize, W::Sampler: Serialize")) +)] +#[cfg_attr( + feature = "serde1", + serde(bound(deserialize = "W: Deserialize<'de>, W::Sampler: Deserialize<'de>")) +)] pub struct WeightedAliasIndex { aliases: Box<[u32]>, no_alias_odds: Box<[W]>, @@ -257,7 +263,8 @@ where } impl Clone for WeightedAliasIndex -where Uniform: Clone +where + Uniform: Clone, { fn clone(&self) -> Self { Self { @@ -308,7 +315,7 @@ pub trait AliasableWeight: macro_rules! impl_weight_for_float { ($T: ident) => { impl AliasableWeight for $T { - const MAX: Self = ::core::$T::MAX; + const MAX: Self = $T::MAX; const ZERO: Self = 0.0; fn try_from_u32_lossy(n: u32) -> Option { @@ -337,7 +344,7 @@ fn pairwise_sum(values: &[T]) -> T { macro_rules! 
impl_weight_for_int { ($T: ident) => { impl AliasableWeight for $T { - const MAX: Self = ::core::$T::MAX; + const MAX: Self = $T::MAX; const ZERO: Self = 0; fn try_from_u32_lossy(n: u32) -> Option { @@ -444,7 +451,9 @@ mod test { } fn test_weighted_index f64>(w_to_f64: F) - where WeightedAliasIndex: fmt::Debug { + where + WeightedAliasIndex: fmt::Debug, + { const NUM_WEIGHTS: u32 = 10; const ZERO_WEIGHT_INDEX: u32 = 3; const NUM_SAMPLES: u32 = 15000; @@ -455,7 +464,8 @@ mod test { let random_weight_distribution = Uniform::new_inclusive( W::ZERO, W::MAX / W::try_from_u32_lossy(NUM_WEIGHTS).unwrap(), - ).unwrap(); + ) + .unwrap(); for _ in 0..NUM_WEIGHTS { weights.push(rng.sample(&random_weight_distribution)); } @@ -497,7 +507,11 @@ mod test { #[test] fn value_stability() { - fn test_samples(weights: Vec, buf: &mut [usize], expected: &[usize]) { + fn test_samples( + weights: Vec, + buf: &mut [usize], + expected: &[usize], + ) { assert_eq!(buf.len(), expected.len()); let distr = WeightedAliasIndex::new(weights).unwrap(); let mut rng = crate::test::rng(0x9c9fa0b0580a7031); @@ -508,14 +522,20 @@ mod test { } let mut buf = [0; 10]; - test_samples(vec![1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[ - 6, 5, 7, 5, 8, 7, 6, 2, 3, 7, - ]); - test_samples(vec![0.7f32, 0.1, 0.1, 0.1], &mut buf, &[ - 2, 0, 0, 0, 0, 0, 0, 0, 1, 3, - ]); - test_samples(vec![1.0f64, 0.999, 0.998, 0.997], &mut buf, &[ - 2, 1, 2, 3, 2, 1, 3, 2, 1, 1, - ]); + test_samples( + vec![1i32, 1, 1, 1, 1, 1, 1, 1, 1], + &mut buf, + &[6, 5, 7, 5, 8, 7, 6, 2, 3, 7], + ); + test_samples( + vec![0.7f32, 0.1, 0.1, 0.1], + &mut buf, + &[2, 0, 0, 0, 0, 0, 0, 0, 1, 3], + ); + test_samples( + vec![1.0f64, 0.999, 0.998, 0.997], + &mut buf, + &[2, 1, 2, 3, 2, 1, 3, 2, 1, 1], + ); } } diff --git a/rand_distr/src/weighted_tree.rs b/rand_distr/src/weighted_tree.rs index c292578bf4e..67ce48bad80 100644 --- a/rand_distr/src/weighted_tree.rs +++ b/rand_distr/src/weighted_tree.rs @@ -303,6 +303,7 @@ mod test { #[test] fn test_no_item_error() { let mut rng = crate::test::rng(0x9c9fa0b0580a7031); + #[allow(clippy::needless_borrows_for_generic_args)] let tree = WeightedTreeIndex::::new(&[]).unwrap(); assert_eq!( tree.try_sample(&mut rng).unwrap_err(), @@ -313,10 +314,10 @@ mod test { #[test] fn test_overflow_error() { assert_eq!( - WeightedTreeIndex::new(&[i32::MAX, 2]), + WeightedTreeIndex::new([i32::MAX, 2]), Err(WeightError::Overflow) ); - let mut tree = WeightedTreeIndex::new(&[i32::MAX - 2, 1]).unwrap(); + let mut tree = WeightedTreeIndex::new([i32::MAX - 2, 1]).unwrap(); assert_eq!(tree.push(3), Err(WeightError::Overflow)); assert_eq!(tree.update(1, 4), Err(WeightError::Overflow)); tree.update(1, 2).unwrap(); @@ -324,7 +325,7 @@ mod test { #[test] fn test_all_weights_zero_error() { - let tree = WeightedTreeIndex::::new(&[0.0, 0.0]).unwrap(); + let tree = WeightedTreeIndex::::new([0.0, 0.0]).unwrap(); let mut rng = crate::test::rng(0x9c9fa0b0580a7031); assert_eq!( tree.try_sample(&mut rng).unwrap_err(), @@ -335,37 +336,36 @@ mod test { #[test] fn test_invalid_weight_error() { assert_eq!( - WeightedTreeIndex::::new(&[1, -1]).unwrap_err(), + WeightedTreeIndex::::new([1, -1]).unwrap_err(), WeightError::InvalidWeight ); + #[allow(clippy::needless_borrows_for_generic_args)] let mut tree = WeightedTreeIndex::::new(&[]).unwrap(); assert_eq!(tree.push(-1).unwrap_err(), WeightError::InvalidWeight); tree.push(1).unwrap(); - assert_eq!( - tree.update(0, -1).unwrap_err(), - WeightError::InvalidWeight - ); + assert_eq!(tree.update(0, -1).unwrap_err(), 
WeightError::InvalidWeight); } #[test] fn test_tree_modifications() { - let mut tree = WeightedTreeIndex::new(&[9, 1, 2]).unwrap(); + let mut tree = WeightedTreeIndex::new([9, 1, 2]).unwrap(); tree.push(3).unwrap(); tree.push(5).unwrap(); tree.update(0, 0).unwrap(); assert_eq!(tree.pop(), Some(5)); - let expected = WeightedTreeIndex::new(&[0, 1, 2, 3]).unwrap(); + let expected = WeightedTreeIndex::new([0, 1, 2, 3]).unwrap(); assert_eq!(tree, expected); } #[test] + #[allow(clippy::needless_range_loop)] fn test_sample_counts_match_probabilities() { let start = 1; let end = 3; let samples = 20; let mut rng = crate::test::rng(0x9c9fa0b0580a7031); - let weights: Vec<_> = (0..end).map(|_| rng.gen()).collect(); - let mut tree = WeightedTreeIndex::new(&weights).unwrap(); + let weights: Vec = (0..end).map(|_| rng.random()).collect(); + let mut tree = WeightedTreeIndex::new(weights).unwrap(); let mut total_weight = 0.0; let mut weights = alloc::vec![0.0; end]; for i in 0..end { diff --git a/rand_distr/src/zipf.rs b/rand_distr/src/zipf.rs index d0813ef9066..7b207a46a78 100644 --- a/rand_distr/src/zipf.rs +++ b/rand_distr/src/zipf.rs @@ -8,10 +8,10 @@ //! The Zeta and related distributions. -use num_traits::Float; use crate::{Distribution, Standard}; -use rand::{Rng, distributions::OpenClosed01}; use core::fmt; +use num_traits::Float; +use rand::{distributions::OpenClosed01, Rng}; /// Samples integers according to the [zeta distribution]. /// @@ -48,7 +48,10 @@ use core::fmt; /// [Non-Uniform Random Variate Generation]: https://doi.org/10.1007/978-1-4613-8643-8 #[derive(Clone, Copy, Debug, PartialEq)] pub struct Zeta -where F: Float, Standard: Distribution, OpenClosed01: Distribution +where + F: Float, + Standard: Distribution, + OpenClosed01: Distribution, { a_minus_1: F, b: F, @@ -74,7 +77,10 @@ impl fmt::Display for ZetaError { impl std::error::Error for ZetaError {} impl Zeta -where F: Float, Standard: Distribution, OpenClosed01: Distribution +where + F: Float, + Standard: Distribution, + OpenClosed01: Distribution, { /// Construct a new `Zeta` distribution with given `a` parameter. #[inline] @@ -92,7 +98,10 @@ where F: Float, Standard: Distribution, OpenClosed01: Distribution } impl Distribution for Zeta -where F: Float, Standard: Distribution, OpenClosed01: Distribution +where + F: Float, + Standard: Distribution, + OpenClosed01: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> F { @@ -144,7 +153,10 @@ where F: Float, Standard: Distribution, OpenClosed01: Distribution /// [1]: https://jasoncrease.medium.com/rejection-sampling-the-zipf-distribution-6b359792cffa #[derive(Clone, Copy, Debug, PartialEq)] pub struct Zipf -where F: Float, Standard: Distribution { +where + F: Float, + Standard: Distribution, +{ s: F, t: F, q: F, @@ -173,7 +185,10 @@ impl fmt::Display for ZipfError { impl std::error::Error for ZipfError {} impl Zipf -where F: Float, Standard: Distribution { +where + F: Float, + Standard: Distribution, +{ /// Construct a new `Zipf` distribution for a set with `n` elements and a /// frequency rank exponent `s`. /// @@ -186,7 +201,7 @@ where F: Float, Standard: Distribution { if n < 1 { return Err(ZipfError::NTooSmall); } - let n = F::from(n).unwrap(); // This does not fail. + let n = F::from(n).unwrap(); // This does not fail. let q = if s != F::one() { // Make sure to calculate the division only once. 
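// A usage sketch for the `Zeta` and `Zipf` samplers documented above, assuming the
// `rand_distr` constructors `Zeta::new(a)` and `Zipf::new(n, s)` together with
// `Distribution::sample`. Both samplers return floating-point values even though
// their support is integer-valued; the parameter values below are arbitrary.
use rand_distr::{Distribution, Zeta, Zipf};

fn zeta_zipf_sketch() {
    let mut rng = rand::thread_rng();

    // Zeta with exponent a = 2.0: unbounded support {1, 2, 3, ...}.
    let zeta = Zeta::new(2.0_f64).expect("valid exponent");
    let z: f64 = zeta.sample(&mut rng);

    // Zipf over n = 1000 ranked items with frequency-rank exponent s = 1.1.
    let zipf = Zipf::new(1000, 1.1_f64).expect("valid parameters");
    let rank: f64 = zipf.sample(&mut rng);
    println!("zeta draw: {z}, zipf rank: {rank}");
}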
F::one() / (F::one() - s) @@ -200,9 +215,7 @@ where F: Float, Standard: Distribution { F::one() + n.ln() }; debug_assert!(t > F::zero()); - Ok(Zipf { - s, t, q - }) + Ok(Zipf { s, t, q }) } /// Inverse cumulative density function @@ -221,7 +234,9 @@ where F: Float, Standard: Distribution { } impl Distribution for Zipf -where F: Float, Standard: Distribution +where + F: Float, + Standard: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> F { @@ -246,9 +261,7 @@ where F: Float, Standard: Distribution mod tests { use super::*; - fn test_samples>( - distr: D, zero: F, expected: &[F], - ) { + fn test_samples>(distr: D, zero: F, expected: &[F]) { let mut rng = crate::test::rng(213); let mut buf = [zero; 4]; for x in &mut buf { @@ -293,12 +306,8 @@ mod tests { #[test] fn zeta_value_stability() { - test_samples(Zeta::new(1.5).unwrap(), 0f32, &[ - 1.0, 2.0, 1.0, 1.0, - ]); - test_samples(Zeta::new(2.0).unwrap(), 0f64, &[ - 2.0, 1.0, 1.0, 1.0, - ]); + test_samples(Zeta::new(1.5).unwrap(), 0f32, &[1.0, 2.0, 1.0, 1.0]); + test_samples(Zeta::new(2.0).unwrap(), 0f64, &[2.0, 1.0, 1.0, 1.0]); } #[test] @@ -363,12 +372,8 @@ mod tests { #[test] fn zipf_value_stability() { - test_samples(Zipf::new(10, 0.5).unwrap(), 0f32, &[ - 10.0, 2.0, 6.0, 7.0 - ]); - test_samples(Zipf::new(10, 2.0).unwrap(), 0f64, &[ - 1.0, 2.0, 3.0, 2.0 - ]); + test_samples(Zipf::new(10, 0.5).unwrap(), 0f32, &[10.0, 2.0, 6.0, 7.0]); + test_samples(Zipf::new(10, 2.0).unwrap(), 0f64, &[1.0, 2.0, 3.0, 2.0]); } #[test] diff --git a/rand_distr/tests/pdf.rs b/rand_distr/tests/pdf.rs index be5ee0e2595..47b00ef7391 100644 --- a/rand_distr/tests/pdf.rs +++ b/rand_distr/tests/pdf.rs @@ -57,7 +57,7 @@ fn normal() { let mut diff = [0.; HIST_LEN]; for (i, n) in hist.normalized_bins().enumerate() { - let bin = (n as f64) / (N_SAMPLES as f64); + let bin = n / (N_SAMPLES as f64); diff[i] = (bin - expected[i]).abs(); } @@ -140,7 +140,7 @@ fn skew_normal() { let mut diff = [0.; HIST_LEN]; for (i, n) in hist.normalized_bins().enumerate() { - let bin = (n as f64) / (N_SAMPLES as f64); + let bin = n / (N_SAMPLES as f64); diff[i] = (bin - expected[i]).abs(); } diff --git a/rand_distr/tests/sparkline.rs b/rand_distr/tests/sparkline.rs index ee6c9788d9c..ec0ee98de98 100644 --- a/rand_distr/tests/sparkline.rs +++ b/rand_distr/tests/sparkline.rs @@ -16,7 +16,7 @@ pub fn render_u64(data: &[u64], buffer: &mut String) { match data.len() { 0 => { return; - }, + } 1 => { if data[0] == 0 { buffer.push(TICKS[0]); @@ -24,8 +24,8 @@ pub fn render_u64(data: &[u64], buffer: &mut String) { buffer.push(TICKS[N - 1]); } return; - }, - _ => {}, + } + _ => {} } let max = data.iter().max().unwrap(); let min = data.iter().min().unwrap(); @@ -56,7 +56,7 @@ pub fn render_f64(data: &[f64], buffer: &mut String) { match data.len() { 0 => { return; - }, + } 1 => { if data[0] == 0. 
{ buffer.push(TICKS[0]); @@ -64,16 +64,14 @@ pub fn render_f64(data: &[f64], buffer: &mut String) { buffer.push(TICKS[N - 1]); } return; - }, - _ => {}, + } + _ => {} } for x in data { assert!(x.is_finite(), "can only render finite values"); } - let max = data.iter().fold( - f64::NEG_INFINITY, |a, &b| a.max(b)); - let min = data.iter().fold( - f64::INFINITY, |a, &b| a.min(b)); + let max = data.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b)); + let min = data.iter().fold(f64::INFINITY, |a, &b| a.min(b)); let scale = ((N - 1) as f64) / (max - min); for x in data { let tick = ((x - min) * scale) as usize; diff --git a/rand_distr/tests/value_stability.rs b/rand_distr/tests/value_stability.rs index 7006dd0e816..31bfce52e3e 100644 --- a/rand_distr/tests/value_stability.rs +++ b/rand_distr/tests/value_stability.rs @@ -53,9 +53,7 @@ impl ApproxEq for [T; 3] { } } -fn test_samples>( - seed: u64, distr: D, expected: &[F], -) { +fn test_samples>(seed: u64, distr: D, expected: &[F]) { let mut rng = get_rng(seed); for val in expected { let x = rng.sample(&distr); @@ -68,16 +66,28 @@ fn binomial_stability() { // We have multiple code paths: np < 10, p > 0.5 test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]); test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]); - test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]); + test_samples( + 353, + Binomial::new(2000, 0.6).unwrap(), + &[1194, 1208, 1192, 1210], + ); } #[test] fn geometric_stability() { test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]); - + test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]); - test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]); - test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]); + test_samples( + 464, + Geometric::new(0.05).unwrap(), + &[24, 51, 81, 67, 27, 11, 7, 6], + ); + test_samples( + 464, + Geometric::new(0.95).unwrap(), + &[0, 0, 0, 0, 1, 0, 0, 0], + ); // expect non-random behaviour for series of pre-determined trials test_samples(464, Geometric::new(0.0).unwrap(), &[u64::MAX; 100][..]); @@ -87,260 +97,404 @@ fn geometric_stability() { #[test] fn hypergeometric_stability() { // We have multiple code paths based on the distribution's mode and sample_size - test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN - test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE + test_samples( + 7221, + Hypergeometric::new(99, 33, 8).unwrap(), + &[4, 3, 2, 2, 3, 2, 3, 1], + ); // Algorithm HIN + test_samples( + 7221, + Hypergeometric::new(100, 50, 50).unwrap(), + &[23, 27, 26, 27, 22, 24, 31, 22], + ); // Algorithm H2PE } #[test] fn unit_ball_stability() { - test_samples(2, UnitBall, &[ - [0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706], - [0.10588569388223945, -0.4734350111375454, -0.7392104908825501], - [0.11060237642041049, -0.16065642822852677, -0.8444043930440075] - ]); + test_samples( + 2, + UnitBall, + &[ + [ + 0.018035709265959987f64, + -0.4348771383120438, + -0.07982762085055706, + ], + [ + 0.10588569388223945, + -0.4734350111375454, + -0.7392104908825501, + ], + [ + 0.11060237642041049, + -0.16065642822852677, + -0.8444043930440075, + ], + ], + ); } #[test] fn unit_circle_stability() { - test_samples(2, UnitCircle, &[ - [-0.9965658683520504f64, -0.08280380447614634], - [-0.9790853270389644, -0.20345004884984505], - [-0.8449189758898707, 
0.5348943112253227], - ]); + test_samples( + 2, + UnitCircle, + &[ + [-0.9965658683520504f64, -0.08280380447614634], + [-0.9790853270389644, -0.20345004884984505], + [-0.8449189758898707, 0.5348943112253227], + ], + ); } #[test] fn unit_sphere_stability() { - test_samples(2, UnitSphere, &[ - [0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027], - [-0.09978440840914075, 0.9706650829833128, -0.21875184231323952], - [0.2735582468624679, 0.9435374242279655, -0.1868234852870203], - ]); + test_samples( + 2, + UnitSphere, + &[ + [ + 0.03247542860231647f64, + -0.7830477442152738, + 0.6211131755296027, + ], + [ + -0.09978440840914075, + 0.9706650829833128, + -0.21875184231323952, + ], + [0.2735582468624679, 0.9435374242279655, -0.1868234852870203], + ], + ); } #[test] fn unit_disc_stability() { - test_samples(2, UnitDisc, &[ - [0.018035709265959987f64, -0.4348771383120438], - [-0.07982762085055706, 0.7765329819820659], - [0.21450745997299503, 0.7398636984333291], - ]); + test_samples( + 2, + UnitDisc, + &[ + [0.018035709265959987f64, -0.4348771383120438], + [-0.07982762085055706, 0.7765329819820659], + [0.21450745997299503, 0.7398636984333291], + ], + ); } #[test] fn pareto_stability() { - test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[ - 1.0423688f32, 2.1235929, 4.132709, 1.4679428, - ]); - test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[ - 9.019295276219136f64, - 4.3097126018270595, - 6.837815045397157, - 105.8826669383772, - ]); + test_samples( + 213, + Pareto::new(1.0, 1.0).unwrap(), + &[1.0423688f32, 2.1235929, 4.132709, 1.4679428], + ); + test_samples( + 213, + Pareto::new(2.0, 0.5).unwrap(), + &[ + 9.019295276219136f64, + 4.3097126018270595, + 6.837815045397157, + 105.8826669383772, + ], + ); } #[test] fn poisson_stability() { test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]); test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]); - test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]); + test_samples( + 223, + Poisson::new(27.0).unwrap(), + &[28.0f32, 32.0, 36.0, 36.0], + ); } - #[test] fn triangular_stability() { - test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[ - 5.74373257511361f64, - 7.890059162791258f64, - 4.7256280652553455f64, - 2.9474808121184077f64, - 3.058301946314053f64, - ]); + test_samples( + 860, + Triangular::new(2., 10., 3.).unwrap(), + &[ + 5.74373257511361f64, + 7.890059162791258f64, + 4.7256280652553455f64, + 2.9474808121184077f64, + 3.058301946314053f64, + ], + ); } - #[test] fn normal_inverse_gaussian_stability() { - test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[ - 0.6568966f32, 1.3744819, 2.216063, 0.11488572, - ]); - test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[ - 0.6838707059642927f64, - 2.4447306460569784, - 0.2361045023235968, - 1.7774534624785319, - ]); + test_samples( + 213, + NormalInverseGaussian::new(2.0, 1.0).unwrap(), + &[0.6568966f32, 1.3744819, 2.216063, 0.11488572], + ); + test_samples( + 213, + NormalInverseGaussian::new(2.0, 1.0).unwrap(), + &[ + 0.6838707059642927f64, + 2.4447306460569784, + 0.2361045023235968, + 1.7774534624785319, + ], + ); } #[test] fn pert_stability() { // mean = 4, var = 12/7 - test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[ - 4.908681667460367, - 4.014196196158352, - 2.6489397149197234, - 3.4569780580044727, - 4.242864311947118, - ]); + test_samples( + 860, + Pert::new(2., 10., 3.).unwrap(), + &[ + 4.908681667460367, + 4.014196196158352, + 2.6489397149197234, + 3.4569780580044727, + 
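// The value-stability tests above pin exact sample values for a fixed seed. The
// `get_rng(seed)` helper they call is presumably defined earlier in this test file
// (outside these hunks); the sketch below illustrates the same idea with an
// explicitly seeded PCG from `rand_pcg`, which is an assumption rather than the
// harness's actual RNG.
use rand::SeedableRng;
use rand_distr::{Distribution, Pareto};
use rand_pcg::Pcg64Mcg;

fn reproducible_samples_sketch() {
    let distr = Pareto::new(1.0_f64, 1.0).unwrap();
    let mut a = Pcg64Mcg::seed_from_u64(213);
    let mut b = Pcg64Mcg::seed_from_u64(213);
    // Identical seeds yield identical streams, which is what makes
    // "expected value" assertions like those above possible.
    for _ in 0..4 {
        assert_eq!(distr.sample(&mut a).to_bits(), distr.sample(&mut b).to_bits());
    }
}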
4.242864311947118, + ], + ); } #[test] fn inverse_gaussian_stability() { - test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[ - 0.9339157f32, 1.108113, 0.50864697, 0.39849377, - ]); - test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[ - 1.0707604954722476f64, - 0.9628140605340697, - 0.4069687656468226, - 0.660283852985818, - ]); + test_samples( + 213, + InverseGaussian::new(1.0, 3.0).unwrap(), + &[0.9339157f32, 1.108113, 0.50864697, 0.39849377], + ); + test_samples( + 213, + InverseGaussian::new(1.0, 3.0).unwrap(), + &[ + 1.0707604954722476f64, + 0.9628140605340697, + 0.4069687656468226, + 0.660283852985818, + ], + ); } #[test] fn gamma_stability() { // Gamma has 3 cases: shape == 1, shape < 1, shape > 1 - test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[ - 5.398085f32, 9.162783, 0.2300583, 1.7235851, - ]); - test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[ - 0.5051203f32, 0.9048302, 3.095812, 1.8566116, - ]); - test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[ - 7.783878094584059f64, - 1.4939528171618057, - 8.638017638857592, - 3.0949337228829004, - ]); + test_samples( + 223, + Gamma::new(1.0, 5.0).unwrap(), + &[5.398085f32, 9.162783, 0.2300583, 1.7235851], + ); + test_samples( + 223, + Gamma::new(0.8, 5.0).unwrap(), + &[0.5051203f32, 0.9048302, 3.095812, 1.8566116], + ); + test_samples( + 223, + Gamma::new(1.1, 5.0).unwrap(), + &[ + 7.783878094584059f64, + 1.4939528171618057, + 8.638017638857592, + 3.0949337228829004, + ], + ); // ChiSquared has 2 cases: k == 1, k != 1 - test_samples(223, ChiSquared::new(1.0).unwrap(), &[ - 0.4893526200348249f64, - 1.635249736808788, - 0.5013580219361969, - 0.1457735613733489, - ]); - test_samples(223, ChiSquared::new(0.1).unwrap(), &[ - 0.014824404726978617f64, - 0.021602123937134326, - 0.0000003431429746851693, - 0.00000002291755769542258, - ]); - test_samples(223, ChiSquared::new(10.0).unwrap(), &[ - 12.693656f32, 6.812016, 11.082001, 12.436167, - ]); + test_samples( + 223, + ChiSquared::new(1.0).unwrap(), + &[ + 0.4893526200348249f64, + 1.635249736808788, + 0.5013580219361969, + 0.1457735613733489, + ], + ); + test_samples( + 223, + ChiSquared::new(0.1).unwrap(), + &[ + 0.014824404726978617f64, + 0.021602123937134326, + 0.0000003431429746851693, + 0.00000002291755769542258, + ], + ); + test_samples( + 223, + ChiSquared::new(10.0).unwrap(), + &[12.693656f32, 6.812016, 11.082001, 12.436167], + ); // FisherF has same special cases as ChiSquared on each param - test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[ - 0.32283646f32, 0.048049655, 0.0788893, 1.817178, - ]); - test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[ - 0.29925257f32, 3.4392934, 9.567652, 0.020074, - ]); - test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[ - 3.3196593155045124f64, - 0.3409169916262829, - 0.03377989856426519, - 0.00004041672861036937, - ]); + test_samples( + 223, + FisherF::new(1.0, 13.5).unwrap(), + &[0.32283646f32, 0.048049655, 0.0788893, 1.817178], + ); + test_samples( + 223, + FisherF::new(1.0, 1.0).unwrap(), + &[0.29925257f32, 3.4392934, 9.567652, 0.020074], + ); + test_samples( + 223, + FisherF::new(0.7, 13.5).unwrap(), + &[ + 3.3196593155045124f64, + 0.3409169916262829, + 0.03377989856426519, + 0.00004041672861036937, + ], + ); // StudentT has same special cases as ChiSquared - test_samples(223, StudentT::new(1.0).unwrap(), &[ - 0.54703987f32, -1.8545331, 3.093162, -0.14168274, - ]); - test_samples(223, StudentT::new(1.1).unwrap(), &[ - 0.7729195887949754f64, - 1.2606210611616204, - -1.7553606501113175, - -2.377641221169782, 
- ]); + test_samples( + 223, + StudentT::new(1.0).unwrap(), + &[0.54703987f32, -1.8545331, 3.093162, -0.14168274], + ); + test_samples( + 223, + StudentT::new(1.1).unwrap(), + &[ + 0.7729195887949754f64, + 1.2606210611616204, + -1.7553606501113175, + -2.377641221169782, + ], + ); // Beta has two special cases: // // 1. min(alpha, beta) <= 1 // 2. min(alpha, beta) > 1 - test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[ - 0.8300703726659456, - 0.8134131062097899, - 0.47912589330631555, - 0.25323238071138526, - ]); - test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[ - 0.49563509121756827, - 0.9551305482256759, - 0.5151181353461637, - 0.7551732971235077, - ]); + test_samples( + 223, + Beta::new(1.0, 0.8).unwrap(), + &[ + 0.8300703726659456, + 0.8134131062097899, + 0.47912589330631555, + 0.25323238071138526, + ], + ); + test_samples( + 223, + Beta::new(3.0, 1.2).unwrap(), + &[ + 0.49563509121756827, + 0.9551305482256759, + 0.5151181353461637, + 0.7551732971235077, + ], + ); } #[test] fn exponential_stability() { - test_samples(223, Exp1, &[ - 1.079617f32, 1.8325565, 0.04601166, 0.34471703, - ]); - test_samples(223, Exp1, &[ - 1.0796170642388276f64, - 1.8325565304274, - 0.04601166186842716, - 0.3447170217100157, - ]); - - test_samples(223, Exp::new(2.0).unwrap(), &[ - 0.5398085f32, 0.91627824, 0.02300583, 0.17235851, - ]); - test_samples(223, Exp::new(1.0).unwrap(), &[ - 1.0796170642388276f64, - 1.8325565304274, - 0.04601166186842716, - 0.3447170217100157, - ]); + test_samples(223, Exp1, &[1.079617f32, 1.8325565, 0.04601166, 0.34471703]); + test_samples( + 223, + Exp1, + &[ + 1.0796170642388276f64, + 1.8325565304274, + 0.04601166186842716, + 0.3447170217100157, + ], + ); + + test_samples( + 223, + Exp::new(2.0).unwrap(), + &[0.5398085f32, 0.91627824, 0.02300583, 0.17235851], + ); + test_samples( + 223, + Exp::new(1.0).unwrap(), + &[ + 1.0796170642388276f64, + 1.8325565304274, + 0.04601166186842716, + 0.3447170217100157, + ], + ); } #[test] fn normal_stability() { - test_samples(213, StandardNormal, &[ - -0.11844189f32, 0.781378, 0.06563994, -1.1932899, - ]); - test_samples(213, StandardNormal, &[ - -0.11844188827977231f64, - 0.7813779637772346, - 0.06563993969580051, - -1.1932899004186373, - ]); - - test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[ - -0.11844189f32, 0.781378, 0.06563994, -1.1932899, - ]); - test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[ - 1.940779055860114f64, - 2.3906889818886174, - 2.0328199698479, - 1.4033550497906813, - ]); - - test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[ - 0.88830346f32, 2.1844804, 1.0678421, 0.30322206, - ]); - test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[ - 6.964174338639032f64, - 10.921015733601452, - 7.6355881556915906, - 4.068828213584092, - ]); + test_samples( + 213, + StandardNormal, + &[-0.11844189f32, 0.781378, 0.06563994, -1.1932899], + ); + test_samples( + 213, + StandardNormal, + &[ + -0.11844188827977231f64, + 0.7813779637772346, + 0.06563993969580051, + -1.1932899004186373, + ], + ); + + test_samples( + 213, + Normal::new(0.0, 1.0).unwrap(), + &[-0.11844189f32, 0.781378, 0.06563994, -1.1932899], + ); + test_samples( + 213, + Normal::new(2.0, 0.5).unwrap(), + &[ + 1.940779055860114f64, + 2.3906889818886174, + 2.0328199698479, + 1.4033550497906813, + ], + ); + + test_samples( + 213, + LogNormal::new(0.0, 1.0).unwrap(), + &[0.88830346f32, 2.1844804, 1.0678421, 0.30322206], + ); + test_samples( + 213, + LogNormal::new(2.0, 0.5).unwrap(), + &[ + 6.964174338639032f64, + 10.921015733601452, + 7.6355881556915906, + 
4.068828213584092, + ], + ); } #[test] fn weibull_stability() { - test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[ - 0.041495778f32, 0.7531094, 1.4189332, 0.38386202, - ]); - test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[ - 1.1343478702739669f64, - 0.29470010050655226, - 0.7556151370284702, - 7.877212340241561, - ]); + test_samples( + 213, + Weibull::new(1.0, 1.0).unwrap(), + &[0.041495778f32, 0.7531094, 1.4189332, 0.38386202], + ); + test_samples( + 213, + Weibull::new(2.0, 0.5).unwrap(), + &[ + 1.1343478702739669f64, + 0.29470010050655226, + 0.7556151370284702, + 7.877212340241561, + ], + ); } #[cfg(feature = "alloc")] @@ -351,13 +505,16 @@ fn dirichlet_stability() { rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()), [0.12941567177708177, 0.4702121891675036, 0.4003721390554146] ); - assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [ - 0.17684200044809556, - 0.29915953935953055, - 0.1832858056608014, - 0.1425623503573967, - 0.19815030417417595 - ]); + assert_eq!( + rng.sample(Dirichlet::new([8.0; 5]).unwrap()), + [ + 0.17684200044809556, + 0.29915953935953055, + 0.1832858056608014, + 0.1425623503573967, + 0.19815030417417595 + ] + ); // Test stability for the case where all alphas are less than 0.1. assert_eq!( rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()), @@ -372,12 +529,16 @@ fn dirichlet_stability() { #[test] fn cauchy_stability() { - test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[ - 77.93369152808678f64, - 90.1606912098641, - 125.31516221323625, - 86.10217834773925, - ]); + test_samples( + 353, + Cauchy::new(100f64, 10.0).unwrap(), + &[ + 77.93369152808678f64, + 90.1606912098641, + 125.31516221323625, + 86.10217834773925, + ], + ); // Unfortunately this test is not fully portable due to reliance on the // system's implementation of tanf (see doc on Cauchy struct). @@ -386,7 +547,7 @@ fn cauchy_stability() { let mut rng = get_rng(353); let expected = [15.023088, -5.446413, 3.7092876, 3.112482]; for &a in expected.iter() { - let b = rng.sample(&distr); + let b = rng.sample(distr); assert_almost_eq!(a, b, 1e-5); } } diff --git a/rand_pcg/src/pcg128.rs b/rand_pcg/src/pcg128.rs index ecb0e56bc47..61f54c4a85d 100644 --- a/rand_pcg/src/pcg128.rs +++ b/rand_pcg/src/pcg128.rs @@ -15,7 +15,8 @@ const MULTIPLIER: u128 = 0x2360_ED05_1FC6_5DA4_4385_DF64_9FCC_F645; use core::fmt; use rand_core::{impls, le, RngCore, SeedableRng}; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; /// A PCG random number generator (XSL RR 128/64 (LCG) variant). /// @@ -153,7 +154,6 @@ impl RngCore for Lcg128Xsl64 { } } - /// A PCG random number generator (XSL 128/64 (MCG) variant). /// /// Permuted Congruential Generator with 128-bit state, internal Multiplicative diff --git a/rand_pcg/src/pcg128cm.rs b/rand_pcg/src/pcg128cm.rs index 29be17a904d..6910f3458e1 100644 --- a/rand_pcg/src/pcg128cm.rs +++ b/rand_pcg/src/pcg128cm.rs @@ -15,7 +15,8 @@ const MULTIPLIER: u64 = 15750249268501108917; use core::fmt; use rand_core::{impls, le, RngCore, SeedableRng}; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; /// A PCG random number generator (CM DXSM 128/64 (LCG) variant). 
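// A brief sketch of how the PCG generators in this diff are typically driven
// through the `rand_core` traits they implement (`SeedableRng` + `RngCore`).
// Type names follow the `rand_pcg` crate, where `Pcg64` is an alias for
// `Lcg128Xsl64`; the seed value here is arbitrary.
use rand_core::{RngCore, SeedableRng};
use rand_pcg::Pcg64;

fn pcg_sketch() {
    let mut rng = Pcg64::seed_from_u64(0xcafe_f00d);
    let word = rng.next_u64();
    let mut buf = [0u8; 16];
    rng.fill_bytes(&mut buf);
    println!("u64: {word:#x}, bytes: {buf:?}");
}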
/// diff --git a/rand_pcg/src/pcg64.rs b/rand_pcg/src/pcg64.rs index 0b6864a42f3..8b2df6aa618 100644 --- a/rand_pcg/src/pcg64.rs +++ b/rand_pcg/src/pcg64.rs @@ -12,7 +12,8 @@ use core::fmt; use rand_core::{impls, le, RngCore, SeedableRng}; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; // This is the default multiplier used by PCG for 64-bit state. const MULTIPLIER: u64 = 6364136223846793005; diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index ded1e7812fb..00000000000 --- a/rustfmt.toml +++ /dev/null @@ -1,32 +0,0 @@ -# This rustfmt file is added for configuration, but in practice much of our -# code is hand-formatted, frequently with more readable results. - -# Comments: -normalize_comments = true -wrap_comments = false -comment_width = 90 # small excess is okay but prefer 80 - -# Arguments: -use_small_heuristics = "Default" -# TODO: single line functions only where short, please? -# https://github.com/rust-lang/rustfmt/issues/3358 -fn_single_line = false -fn_args_layout = "Compressed" -overflow_delimited_expr = true -where_single_line = true - -# enum_discrim_align_threshold = 20 -# struct_field_align_threshold = 20 - -# Compatibility: -edition = "2021" - -# Misc: -inline_attribute_width = 80 -blank_lines_upper_bound = 2 -reorder_impl_items = true -# report_todo = "Unnumbered" -# report_fixme = "Unnumbered" - -# Ignored files: -ignore = [] diff --git a/src/distributions/bernoulli.rs b/src/distributions/bernoulli.rs index a68a82965ce..a8a46b0e3cc 100644 --- a/src/distributions/bernoulli.rs +++ b/src/distributions/bernoulli.rs @@ -13,7 +13,7 @@ use crate::Rng; use core::fmt; #[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// The Bernoulli distribution. /// @@ -151,7 +151,8 @@ mod test { #[cfg(feature = "serde1")] fn test_serializing_deserializing_bernoulli() { let coin_flip = Bernoulli::new(0.5).unwrap(); - let de_coin_flip: Bernoulli = bincode::deserialize(&bincode::serialize(&coin_flip).unwrap()).unwrap(); + let de_coin_flip: Bernoulli = + bincode::deserialize(&bincode::serialize(&coin_flip).unwrap()).unwrap(); assert_eq!(coin_flip.p_int, de_coin_flip.p_int); } @@ -208,9 +209,10 @@ mod test { for x in &mut buf { *x = rng.sample(distr); } - assert_eq!(buf, [ - true, false, false, true, false, false, true, true, true, true - ]); + assert_eq!( + buf, + [true, false, false, true, false, false, true, true, true, true] + ); } #[test] diff --git a/src/distributions/distribution.rs b/src/distributions/distribution.rs index 0eabfb80594..2bd6a6a4044 100644 --- a/src/distributions/distribution.rs +++ b/src/distributions/distribution.rs @@ -10,7 +10,8 @@ //! Distribution trait and associates use crate::Rng; -#[cfg(feature = "alloc")] use alloc::string::String; +#[cfg(feature = "alloc")] +use alloc::string::String; use core::iter; /// Types (distributions) that can be used to create a random instance of `T`. diff --git a/src/distributions/float.rs b/src/distributions/float.rs index ace6fe66119..84022685817 100644 --- a/src/distributions/float.rs +++ b/src/distributions/float.rs @@ -8,14 +8,15 @@ //! 
Basic floating-point number distributions -use crate::distributions::utils::{IntAsSIMD, FloatAsSIMD, FloatSIMDUtils}; +use crate::distributions::utils::{FloatAsSIMD, FloatSIMDUtils, IntAsSIMD}; use crate::distributions::{Distribution, Standard}; use crate::Rng; use core::mem; -#[cfg(feature = "simd_support")] use core::simd::prelude::*; +#[cfg(feature = "simd_support")] +use core::simd::prelude::*; #[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// A distribution to sample floating point numbers uniformly in the half-open /// interval `(0, 1]`, i.e. including 1 but not 0. @@ -72,7 +73,6 @@ pub struct OpenClosed01; #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] pub struct Open01; - // This trait is needed by both this lib and rand_distr hence is a hidden export #[doc(hidden)] pub trait IntoFloat { @@ -146,12 +146,11 @@ macro_rules! float_impls { // Transmute-based method; 23/52 random bits; (0, 1) interval. // We use the most significant bits because for simple RNGs // those are usually more random. - use core::$f_scalar::EPSILON; let float_size = mem::size_of::<$f_scalar>() as $u_scalar * 8; let value: $uty = rng.random(); let fraction = value >> $uty::splat(float_size - $fraction_bits); - fraction.into_float_with_exponent(0) - $ty::splat(1.0 - EPSILON / 2.0) + fraction.into_float_with_exponent(0) - $ty::splat(1.0 - $f_scalar::EPSILON / 2.0) } } } @@ -210,9 +209,15 @@ mod tests { let mut zeros = StepRng::new(0, 0); assert_eq!(zeros.sample::<$ty, _>(Open01), $ZERO + $EPSILON / two); let mut one = StepRng::new(1 << 9 | 1 << (9 + 32), 0); - assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / two * $ty::splat(3.0)); + assert_eq!( + one.sample::<$ty, _>(Open01), + $EPSILON / two * $ty::splat(3.0) + ); let mut max = StepRng::new(!0, 0); - assert_eq!(max.sample::<$ty, _>(Open01), $ty::splat(1.0) - $EPSILON / two); + assert_eq!( + max.sample::<$ty, _>(Open01), + $ty::splat(1.0) - $EPSILON / two + ); } }; } @@ -252,9 +257,15 @@ mod tests { let mut zeros = StepRng::new(0, 0); assert_eq!(zeros.sample::<$ty, _>(Open01), $ZERO + $EPSILON / two); let mut one = StepRng::new(1 << 12, 0); - assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / two * $ty::splat(3.0)); + assert_eq!( + one.sample::<$ty, _>(Open01), + $EPSILON / two * $ty::splat(3.0) + ); let mut max = StepRng::new(!0, 0); - assert_eq!(max.sample::<$ty, _>(Open01), $ty::splat(1.0) - $EPSILON / two); + assert_eq!( + max.sample::<$ty, _>(Open01), + $ty::splat(1.0) - $EPSILON / two + ); } }; } @@ -269,7 +280,9 @@ mod tests { #[test] fn value_stability() { fn test_samples>( - distr: &D, zero: T, expected: &[T], + distr: &D, + zero: T, + expected: &[T], ) { let mut rng = crate::test::rng(0x6f44f5646c2a7334); let mut buf = [zero; 3]; @@ -280,25 +293,25 @@ mod tests { } test_samples(&Standard, 0f32, &[0.0035963655, 0.7346052, 0.09778172]); - test_samples(&Standard, 0f64, &[ - 0.7346051961657583, - 0.20298547462974248, - 0.8166436635290655, - ]); + test_samples( + &Standard, + 0f64, + &[0.7346051961657583, 0.20298547462974248, 0.8166436635290655], + ); test_samples(&OpenClosed01, 0f32, &[0.003596425, 0.73460525, 0.09778178]); - test_samples(&OpenClosed01, 0f64, &[ - 0.7346051961657584, - 0.2029854746297426, - 0.8166436635290656, - ]); + test_samples( + &OpenClosed01, + 0f64, + &[0.7346051961657584, 0.2029854746297426, 0.8166436635290656], + ); test_samples(&Open01, 0f32, &[0.0035963655, 0.73460525, 0.09778172]); - test_samples(&Open01, 0f64, &[ - 0.7346051961657584, - 
0.20298547462974248, - 0.8166436635290656, - ]); + test_samples( + &Open01, + 0f64, + &[0.7346051961657584, 0.20298547462974248, 0.8166436635290656], + ); #[cfg(feature = "simd_support")] { @@ -306,17 +319,25 @@ mod tests { // non-SIMD types; we assume this pattern continues across all // SIMD types. - test_samples(&Standard, f32x2::from([0.0, 0.0]), &[ - f32x2::from([0.0035963655, 0.7346052]), - f32x2::from([0.09778172, 0.20298547]), - f32x2::from([0.34296435, 0.81664366]), - ]); - - test_samples(&Standard, f64x2::from([0.0, 0.0]), &[ - f64x2::from([0.7346051961657583, 0.20298547462974248]), - f64x2::from([0.8166436635290655, 0.7423708925400552]), - f64x2::from([0.16387782224016323, 0.9087068770169618]), - ]); + test_samples( + &Standard, + f32x2::from([0.0, 0.0]), + &[ + f32x2::from([0.0035963655, 0.7346052]), + f32x2::from([0.09778172, 0.20298547]), + f32x2::from([0.34296435, 0.81664366]), + ], + ); + + test_samples( + &Standard, + f64x2::from([0.0, 0.0]), + &[ + f64x2::from([0.7346051961657583, 0.20298547462974248]), + f64x2::from([0.8166436635290655, 0.7423708925400552]), + f64x2::from([0.16387782224016323, 0.9087068770169618]), + ], + ); } } } diff --git a/src/distributions/integer.rs b/src/distributions/integer.rs index ca27c6331d8..60bcbf9565f 100644 --- a/src/distributions/integer.rs +++ b/src/distributions/integer.rs @@ -19,10 +19,11 @@ use core::arch::x86_64::__m512i; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::{__m128i, __m256i}; use core::num::{ - NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,NonZeroU128, - NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize,NonZeroI128 + NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, + NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, }; -#[cfg(feature = "simd_support")] use core::simd::*; +#[cfg(feature = "simd_support")] +use core::simd::*; impl Distribution for Standard { #[inline] @@ -211,7 +212,9 @@ mod tests { #[test] fn value_stability() { fn test_samples(zero: T, expected: &[T]) - where Standard: Distribution { + where + Standard: Distribution, + { let mut rng = crate::test::rng(807); let mut buf = [zero; 3]; for x in &mut buf { @@ -223,24 +226,33 @@ mod tests { test_samples(0u8, &[9, 247, 111]); test_samples(0u16, &[32265, 42999, 38255]); test_samples(0u32, &[2220326409, 2575017975, 2018088303]); - test_samples(0u64, &[ - 11059617991457472009, - 16096616328739788143, - 1487364411147516184, - ]); - test_samples(0u128, &[ - 296930161868957086625409848350820761097, - 145644820879247630242265036535529306392, - 111087889832015897993126088499035356354, - ]); + test_samples( + 0u64, + &[ + 11059617991457472009, + 16096616328739788143, + 1487364411147516184, + ], + ); + test_samples( + 0u128, + &[ + 296930161868957086625409848350820761097, + 145644820879247630242265036535529306392, + 111087889832015897993126088499035356354, + ], + ); #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))] test_samples(0usize, &[2220326409, 2575017975, 2018088303]); #[cfg(target_pointer_width = "64")] - test_samples(0usize, &[ - 11059617991457472009, - 16096616328739788143, - 1487364411147516184, - ]); + test_samples( + 0usize, + &[ + 11059617991457472009, + 16096616328739788143, + 1487364411147516184, + ], + ); test_samples(0i8, &[9, -9, 111]); // Skip further i* types: they are simple reinterpretation of u* samples @@ -249,49 +261,58 @@ mod tests { { // We only test a sub-set of types here and make assumptions about the rest. 
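// A short sketch of drawing integers through the `Standard` distribution whose
// implementations appear above. It uses `Rng::sample(Standard)`, which is stable
// across the `gen`/`random` renaming visible elsewhere in this diff; the exact
// convenience-method name on `Rng` depends on the rand version in use.
use rand::distributions::Standard;
use rand::Rng;

fn integer_sketch() {
    let mut rng = rand::thread_rng();
    let a: u8 = rng.sample(Standard);
    let b: u64 = rng.sample(Standard);
    let c: i128 = rng.sample(Standard);
    println!("u8 {a}, u64 {b}, i128 {c}");
}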
- test_samples(u8x4::default(), &[ - u8x4::from([9, 126, 87, 132]), - u8x4::from([247, 167, 123, 153]), - u8x4::from([111, 149, 73, 120]), - ]); - test_samples(u8x8::default(), &[ - u8x8::from([9, 126, 87, 132, 247, 167, 123, 153]), - u8x8::from([111, 149, 73, 120, 68, 171, 98, 223]), - u8x8::from([24, 121, 1, 50, 13, 46, 164, 20]), - ]); + test_samples( + u8x4::default(), + &[ + u8x4::from([9, 126, 87, 132]), + u8x4::from([247, 167, 123, 153]), + u8x4::from([111, 149, 73, 120]), + ], + ); + test_samples( + u8x8::default(), + &[ + u8x8::from([9, 126, 87, 132, 247, 167, 123, 153]), + u8x8::from([111, 149, 73, 120, 68, 171, 98, 223]), + u8x8::from([24, 121, 1, 50, 13, 46, 164, 20]), + ], + ); - test_samples(i64x8::default(), &[ - i64x8::from([ - -7387126082252079607, - -2350127744969763473, - 1487364411147516184, - 7895421560427121838, - 602190064936008898, - 6022086574635100741, - -5080089175222015595, - -4066367846667249123, - ]), - i64x8::from([ - 9180885022207963908, - 3095981199532211089, - 6586075293021332726, - 419343203796414657, - 3186951873057035255, - 5287129228749947252, - 444726432079249540, - -1587028029513790706, - ]), - i64x8::from([ - 6075236523189346388, - 1351763722368165432, - -6192309979959753740, - -7697775502176768592, - -4482022114172078123, - 7522501477800909500, - -1837258847956201231, - -586926753024886735, - ]), - ]); + test_samples( + i64x8::default(), + &[ + i64x8::from([ + -7387126082252079607, + -2350127744969763473, + 1487364411147516184, + 7895421560427121838, + 602190064936008898, + 6022086574635100741, + -5080089175222015595, + -4066367846667249123, + ]), + i64x8::from([ + 9180885022207963908, + 3095981199532211089, + 6586075293021332726, + 419343203796414657, + 3186951873057035255, + 5287129228749947252, + 444726432079249540, + -1587028029513790706, + ]), + i64x8::from([ + 6075236523189346388, + 1351763722368165432, + -6192309979959753740, + -7697775502176768592, + -4482022114172078123, + 7522501477800909500, + -1837258847956201231, + -586926753024886735, + ]), + ], + ); } } } diff --git a/src/distributions/mod.rs b/src/distributions/mod.rs index 91303428875..e5973297dd9 100644 --- a/src/distributions/mod.rs +++ b/src/distributions/mod.rs @@ -110,10 +110,10 @@ pub mod hidden_export { pub mod uniform; pub use self::bernoulli::{Bernoulli, BernoulliError}; -pub use self::distribution::{Distribution, DistIter, DistMap}; #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] pub use self::distribution::DistString; +pub use self::distribution::{DistIter, DistMap, Distribution}; pub use self::float::{Open01, OpenClosed01}; pub use self::other::Alphanumeric; pub use self::slice::Slice; diff --git a/src/distributions/other.rs b/src/distributions/other.rs index 7cb63cc8365..15ccf30a8c9 100644 --- a/src/distributions/other.rs +++ b/src/distributions/other.rs @@ -8,24 +8,23 @@ //! The implementations of the `Standard` distribution for other built-in types. 
-use core::char; -use core::num::Wrapping; #[cfg(feature = "alloc")] use alloc::string::String; +use core::char; +use core::num::Wrapping; -use crate::distributions::{Distribution, Standard, Uniform}; #[cfg(feature = "alloc")] use crate::distributions::DistString; +use crate::distributions::{Distribution, Standard, Uniform}; use crate::Rng; -#[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; use core::mem::{self, MaybeUninit}; #[cfg(feature = "simd_support")] use core::simd::prelude::*; #[cfg(feature = "simd_support")] use core::simd::{LaneCount, MaskElement, SupportedLaneCount}; - +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; // ----- Sampling distributions ----- @@ -71,7 +70,6 @@ use core::simd::{LaneCount, MaskElement, SupportedLaneCount}; #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] pub struct Alphanumeric; - // ----- Implementations of distributions ----- impl Distribution for Standard { @@ -240,7 +238,8 @@ macro_rules! tuple_impls { tuple_impls! {A B C D E F G H I J K L} impl Distribution<[T; N]> for Standard -where Standard: Distribution +where + Standard: Distribution, { #[inline] fn sample(&self, _rng: &mut R) -> [T; N] { @@ -255,7 +254,8 @@ where Standard: Distribution } impl Distribution> for Standard -where Standard: Distribution +where + Standard: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> Option { @@ -269,7 +269,8 @@ where Standard: Distribution } impl Distribution> for Standard -where Standard: Distribution +where + Standard: Distribution, { #[inline] fn sample(&self, rng: &mut R) -> Wrapping { @@ -277,7 +278,6 @@ where Standard: Distribution } } - #[cfg(test)] mod tests { use super::*; @@ -315,9 +315,7 @@ mod tests { let mut incorrect = false; for _ in 0..100 { let c: char = rng.sample(Alphanumeric).into(); - incorrect |= !(('0'..='9').contains(&c) || - ('A'..='Z').contains(&c) || - ('a'..='z').contains(&c) ); + incorrect |= !c.is_ascii_alphanumeric(); } assert!(!incorrect); } @@ -325,7 +323,9 @@ mod tests { #[test] fn value_stability() { fn test_samples>( - distr: &D, zero: T, expected: &[T], + distr: &D, + zero: T, + expected: &[T], ) { let mut rng = crate::test::rng(807); let mut buf = [zero; 5]; @@ -335,54 +335,66 @@ mod tests { assert_eq!(&buf, expected); } - test_samples(&Standard, 'a', &[ - '\u{8cdac}', - '\u{a346a}', - '\u{80120}', - '\u{ed692}', - '\u{35888}', - ]); + test_samples( + &Standard, + 'a', + &[ + '\u{8cdac}', + '\u{a346a}', + '\u{80120}', + '\u{ed692}', + '\u{35888}', + ], + ); test_samples(&Alphanumeric, 0, &[104, 109, 101, 51, 77]); test_samples(&Standard, false, &[true, true, false, true, false]); - test_samples(&Standard, None as Option, &[ - Some(true), - None, - Some(false), - None, - Some(false), - ]); - test_samples(&Standard, Wrapping(0i32), &[ - Wrapping(-2074640887), - Wrapping(-1719949321), - Wrapping(2018088303), - Wrapping(-547181756), - Wrapping(838957336), - ]); + test_samples( + &Standard, + None as Option, + &[Some(true), None, Some(false), None, Some(false)], + ); + test_samples( + &Standard, + Wrapping(0i32), + &[ + Wrapping(-2074640887), + Wrapping(-1719949321), + Wrapping(2018088303), + Wrapping(-547181756), + Wrapping(838957336), + ], + ); // We test only sub-sets of tuple and array impls test_samples(&Standard, (), &[(), (), (), (), ()]); - test_samples(&Standard, (false,), &[ - (true,), - (true,), + test_samples( + &Standard, (false,), - (true,), - (false,), - ]); - test_samples(&Standard, (false, false), &[ - (true, true), - (false, true), - (false, false), - 
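// A sketch of the `Alphanumeric` distribution shown above: it yields the byte
// values of 0-9, A-Z and a-z, so mapping through `char::from` produces an
// ASCII-alphanumeric string. The iterator form below uses the published
// `Rng::sample_iter` API; the length 16 is arbitrary.
use rand::distributions::Alphanumeric;
use rand::Rng;

fn alphanumeric_sketch() -> String {
    rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(16)
        .map(char::from)
        .collect()
}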
(true, false), + &[(true,), (true,), (false,), (true,), (false,)], + ); + test_samples( + &Standard, (false, false), - ]); + &[ + (true, true), + (false, true), + (false, false), + (true, false), + (false, false), + ], + ); test_samples(&Standard, [0u8; 0], &[[], [], [], [], []]); - test_samples(&Standard, [0u8; 3], &[ - [9, 247, 111], - [68, 24, 13], - [174, 19, 194], - [172, 69, 213], - [149, 207, 29], - ]); + test_samples( + &Standard, + [0u8; 3], + &[ + [9, 247, 111], + [68, 24, 13], + [174, 19, 194], + [172, 69, 213], + [149, 207, 29], + ], + ); } } diff --git a/src/distributions/slice.rs b/src/distributions/slice.rs index d49f45ccebc..88cff8897bb 100644 --- a/src/distributions/slice.rs +++ b/src/distributions/slice.rs @@ -148,7 +148,11 @@ impl<'a> super::DistString for Slice<'a, char> { // Split the extension of string to reuse the unused capacities. // Skip the split for small length or only ascii slice. - let mut extend_len = if max_char_len == 1 || len < 100 { len } else { len / 4 }; + let mut extend_len = if max_char_len == 1 || len < 100 { + len + } else { + len / 4 + }; let mut remain_len = len; while extend_len > 0 { string.reserve(max_char_len * extend_len); diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs index 7fbf0fced25..fde8d6dbbec 100644 --- a/src/distributions/uniform.rs +++ b/src/distributions/uniform.rs @@ -104,18 +104,22 @@ //! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow use core::fmt; -use core::time::Duration; use core::ops::{Range, RangeInclusive}; +use core::time::Duration; use crate::distributions::float::IntoFloat; -use crate::distributions::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, IntAsSIMD, WideningMultiply}; +use crate::distributions::utils::{ + BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, IntAsSIMD, WideningMultiply, +}; use crate::distributions::Distribution; #[cfg(feature = "simd_support")] use crate::distributions::Standard; use crate::{Rng, RngCore}; -#[cfg(feature = "simd_support")] use core::simd::prelude::*; -#[cfg(feature = "simd_support")] use core::simd::{LaneCount, SupportedLaneCount}; +#[cfg(feature = "simd_support")] +use core::simd::prelude::*; +#[cfg(feature = "simd_support")] +use core::simd::{LaneCount, SupportedLaneCount}; /// Error type returned from [`Uniform::new`] and `new_inclusive`. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -140,7 +144,7 @@ impl fmt::Display for Error { impl std::error::Error for Error {} #[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// Sample values uniformly between two bounds. 
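// A usage sketch for the `Uniform` sampler introduced above. In the rand version
// targeted by this diff, `Uniform::new`/`new_inclusive` return a `Result` (hence
// the `unwrap`); older releases returned the sampler directly. Constructing the
// distribution once amortizes the set-up cost across samples, whereas
// `Rng::gen_range` rebuilds it on every call.
use rand::distributions::{Distribution, Uniform};
use rand::Rng;

fn uniform_sketch() {
    let mut rng = rand::thread_rng();
    let die = Uniform::new_inclusive(1u8, 6).unwrap();
    let rolls: Vec<u8> = (0..10).map(|_| die.sample(&mut rng)).collect();
    let one_off = rng.gen_range(0.0_f64..1.0);
    println!("rolls: {rolls:?}, one-off float: {one_off}");
}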
/// @@ -194,7 +198,10 @@ use serde::{Serialize, Deserialize}; #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde1", serde(bound(serialize = "X::Sampler: Serialize")))] -#[cfg_attr(feature = "serde1", serde(bound(deserialize = "X::Sampler: Deserialize<'de>")))] +#[cfg_attr( + feature = "serde1", + serde(bound(deserialize = "X::Sampler: Deserialize<'de>")) +)] pub struct Uniform(X::Sampler); impl Uniform { @@ -297,7 +304,11 @@ pub trait UniformSampler: Sized { /// ::Sampler::sample_single(lb, ub, &mut rng).unwrap() /// } /// ``` - fn sample_single(low: B1, high: B2, rng: &mut R) -> Result + fn sample_single( + low: B1, + high: B2, + rng: &mut R, + ) -> Result where B1: SampleBorrow + Sized, B2: SampleBorrow + Sized, @@ -314,10 +325,14 @@ pub trait UniformSampler: Sized { /// some types more optimal implementations for single usage may be provided /// via this method. /// Results may not be identical. - fn sample_single_inclusive(low: B1, high: B2, rng: &mut R) - -> Result - where B1: SampleBorrow + Sized, - B2: SampleBorrow + Sized + fn sample_single_inclusive( + low: B1, + high: B2, + rng: &mut R, + ) -> Result + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, { let uniform: Self = UniformSampler::new_inclusive(low, high)?; Ok(uniform.sample(rng)) @@ -340,7 +355,6 @@ impl TryFrom> for Uniform { } } - /// Helper trait similar to [`Borrow`] but implemented /// only for SampleUniform and references to SampleUniform in /// order to resolve ambiguity issues. @@ -353,7 +367,8 @@ pub trait SampleBorrow { fn borrow(&self) -> &Borrowed; } impl SampleBorrow for Borrowed -where Borrowed: SampleUniform +where + Borrowed: SampleUniform, { #[inline(always)] fn borrow(&self) -> &Borrowed { @@ -361,7 +376,8 @@ where Borrowed: SampleUniform } } impl<'a, Borrowed> SampleBorrow for &'a Borrowed -where Borrowed: SampleUniform +where + Borrowed: SampleUniform, { #[inline(always)] fn borrow(&self) -> &Borrowed { @@ -405,12 +421,10 @@ impl SampleRange for RangeInclusive { } } - //////////////////////////////////////////////////////////////////////////////// // What follows are all back-ends. - /// The back-end implementing [`UniformSampler`] for integer types. /// /// Unless you are implementing [`UniformSampler`] for your own type, this type @@ -505,7 +519,7 @@ macro_rules! uniform_int_impl { Ok(UniformInt { low, - range: range as $ty, // type: $uty + range: range as $ty, // type: $uty thresh: thresh as $uty as $ty, // type: $sample_ty }) } @@ -529,7 +543,11 @@ macro_rules! uniform_int_impl { } #[inline] - fn sample_single(low_b: B1, high_b: B2, rng: &mut R) -> Result + fn sample_single( + low_b: B1, + high_b: B2, + rng: &mut R, + ) -> Result where B1: SampleBorrow + Sized, B2: SampleBorrow + Sized, @@ -549,7 +567,9 @@ macro_rules! uniform_int_impl { #[cfg(not(feature = "unbiased"))] #[inline] fn sample_single_inclusive( - low_b: B1, high_b: B2, rng: &mut R, + low_b: B1, + high_b: B2, + rng: &mut R, ) -> Result where B1: SampleBorrow + Sized, @@ -585,7 +605,9 @@ macro_rules! uniform_int_impl { #[cfg(feature = "unbiased")] #[inline] fn sample_single_inclusive( - low_b: B1, high_b: B2, rng: &mut R, + low_b: B1, + high_b: B2, + rng: &mut R, ) -> Result where B1: SampleBorrow<$ty> + Sized, @@ -599,7 +621,7 @@ macro_rules! 
uniform_int_impl { let range = high.wrapping_sub(low).wrapping_add(1) as $uty as $sample_ty; if range == 0 { // Range is MAX+1 (unrepresentable), so we need a special case - return Ok(rng.gen()); + return Ok(rng.random()); } let (mut result, mut lo) = rng.random::<$sample_ty>().wmul(range); @@ -844,7 +866,12 @@ impl UniformSampler for UniformChar { #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] impl super::DistString for Uniform { - fn append_string(&self, rng: &mut R, string: &mut alloc::string::String, len: usize) { + fn append_string( + &self, + rng: &mut R, + string: &mut alloc::string::String, + len: usize, + ) { // Getting the hi value to assume the required length to reserve in string. let mut hi = self.0.sampler.low + self.0.sampler.range - 1; if hi >= CHAR_SURROGATE_START { @@ -911,7 +938,7 @@ macro_rules! uniform_float_impl { return Err(Error::EmptyRange); } let max_rand = <$ty>::splat( - (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0, + ($u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0, ); let mut scale = high - low; @@ -947,7 +974,7 @@ macro_rules! uniform_float_impl { return Err(Error::EmptyRange); } let max_rand = <$ty>::splat( - (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0, + ($u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0, ); let mut scale = (high - low) / max_rand; @@ -1111,7 +1138,6 @@ uniform_float_impl! { feature = "simd_support", f64x4, u64x4, f64, u64, 64 - 52 #[cfg(feature = "simd_support")] uniform_float_impl! { feature = "simd_support", f64x8, u64x8, f64, u64, 64 - 52 } - /// The back-end implementing [`UniformSampler`] for `Duration`. /// /// Unless you are implementing [`UniformSampler`] for your own types, this type @@ -1248,26 +1274,29 @@ impl UniformSampler for UniformDuration { #[cfg(test)] mod tests { use super::*; - use crate::rngs::mock::StepRng; use crate::distributions::utils::FloatSIMDScalarUtils; + use crate::rngs::mock::StepRng; #[test] #[cfg(feature = "serde1")] fn test_serialization_uniform_duration() { let distr = UniformDuration::new(Duration::from_secs(10), Duration::from_secs(60)).unwrap(); - let de_distr: UniformDuration = bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap(); + let de_distr: UniformDuration = + bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap(); assert_eq!(distr, de_distr); } #[test] #[cfg(feature = "serde1")] fn test_uniform_serialization() { - let unit_box: Uniform = Uniform::new(-1, 1).unwrap(); - let de_unit_box: Uniform = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap(); + let unit_box: Uniform = Uniform::new(-1, 1).unwrap(); + let de_unit_box: Uniform = + bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap(); assert_eq!(unit_box.0, de_unit_box.0); let unit_box: Uniform = Uniform::new(-1., 1.).unwrap(); - let de_unit_box: Uniform = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap(); + let de_unit_box: Uniform = + bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap(); assert_eq!(unit_box.0, de_unit_box.0); } @@ -1293,10 +1322,6 @@ mod tests { #[test] #[cfg_attr(miri, ignore)] // Miri is too slow fn test_integers() { - use core::{i128, u128}; - use core::{i16, i32, i64, i8, isize}; - use core::{u16, u32, u64, u8, usize}; - let mut rng = crate::test::rng(251); macro_rules! 
t { ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{ @@ -1383,14 +1408,15 @@ mod tests { let mut max = core::char::from_u32(0).unwrap(); for _ in 0..100 { let c = rng.gen_range('A'..='Z'); - assert!(('A'..='Z').contains(&c)); + assert!(c.is_ascii_uppercase()); max = max.max(c); } assert_eq!(max, 'Z'); let d = Uniform::new( core::char::from_u32(0xD7F0).unwrap(), core::char::from_u32(0xE010).unwrap(), - ).unwrap(); + ) + .unwrap(); for _ in 0..100 { let c = d.sample(&mut rng); assert!((c as u32) < 0xD800 || (c as u32) > 0xDFFF); @@ -1403,12 +1429,16 @@ mod tests { let string2 = Uniform::new( core::char::from_u32(0x0000).unwrap(), core::char::from_u32(0x0080).unwrap(), - ).unwrap().sample_string(&mut rng, 100); + ) + .unwrap() + .sample_string(&mut rng, 100); assert_eq!(string2.capacity(), 100); let string3 = Uniform::new_inclusive( core::char::from_u32(0x0000).unwrap(), core::char::from_u32(0x0080).unwrap(), - ).unwrap().sample_string(&mut rng, 100); + ) + .unwrap() + .sample_string(&mut rng, 100); assert_eq!(string3.capacity(), 200); } } @@ -1430,8 +1460,8 @@ mod tests { (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)), (-<$f_scalar>::from_bits(5), 0.0), (-<$f_scalar>::from_bits(7), -0.0), - (0.1 * ::core::$f_scalar::MAX, ::core::$f_scalar::MAX), - (-::core::$f_scalar::MAX * 0.2, ::core::$f_scalar::MAX * 0.7), + (0.1 * $f_scalar::MAX, $f_scalar::MAX), + (-$f_scalar::MAX * 0.2, $f_scalar::MAX * 0.7), ]; for &(low_scalar, high_scalar) in v.iter() { for lane in 0..<$ty>::LEN { @@ -1444,27 +1474,47 @@ mod tests { assert!(low_scalar <= v && v < high_scalar); let v = rng.sample(my_incl_uniform).extract(lane); assert!(low_scalar <= v && v <= high_scalar); - let v = <$ty as SampleUniform>::Sampler - ::sample_single(low, high, &mut rng).unwrap().extract(lane); + let v = + <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng) + .unwrap() + .extract(lane); assert!(low_scalar <= v && v < high_scalar); - let v = <$ty as SampleUniform>::Sampler - ::sample_single_inclusive(low, high, &mut rng).unwrap().extract(lane); + let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive( + low, high, &mut rng, + ) + .unwrap() + .extract(lane); assert!(low_scalar <= v && v <= high_scalar); } assert_eq!( - rng.sample(Uniform::new_inclusive(low, low).unwrap()).extract(lane), + rng.sample(Uniform::new_inclusive(low, low).unwrap()) + .extract(lane), low_scalar ); assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar); assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar); - assert_eq!(<$ty as SampleUniform>::Sampler - ::sample_single(low, high, &mut zero_rng).unwrap() - .extract(lane), low_scalar); - assert_eq!(<$ty as SampleUniform>::Sampler - ::sample_single_inclusive(low, high, &mut zero_rng).unwrap() - .extract(lane), low_scalar); + assert_eq!( + <$ty as SampleUniform>::Sampler::sample_single( + low, + high, + &mut zero_rng + ) + .unwrap() + .extract(lane), + low_scalar + ); + assert_eq!( + <$ty as SampleUniform>::Sampler::sample_single_inclusive( + low, + high, + &mut zero_rng + ) + .unwrap() + .extract(lane), + low_scalar + ); assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar); assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar); @@ -1472,9 +1522,16 @@ mod tests { // assert!(<$ty as SampleUniform>::Sampler // ::sample_single(low, high, &mut max_rng).unwrap() // .extract(lane) < high_scalar); - assert!(<$ty as SampleUniform>::Sampler - ::sample_single_inclusive(low, high, &mut max_rng).unwrap() - .extract(lane) <= high_scalar); + 
assert!( + <$ty as SampleUniform>::Sampler::sample_single_inclusive( + low, + high, + &mut max_rng + ) + .unwrap() + .extract(lane) + <= high_scalar + ); // Don't run this test for really tiny differences between high and low // since for those rounding might result in selecting high for a very @@ -1485,27 +1542,26 @@ mod tests { (-1i64 << $bits_shifted) as u64, ); assert!( - <$ty as SampleUniform>::Sampler - ::sample_single(low, high, &mut lowering_max_rng).unwrap() - .extract(lane) < high_scalar + <$ty as SampleUniform>::Sampler::sample_single( + low, + high, + &mut lowering_max_rng + ) + .unwrap() + .extract(lane) + < high_scalar ); } } } assert_eq!( - rng.sample(Uniform::new_inclusive( - ::core::$f_scalar::MAX, - ::core::$f_scalar::MAX - ).unwrap()), - ::core::$f_scalar::MAX + rng.sample(Uniform::new_inclusive($f_scalar::MAX, $f_scalar::MAX).unwrap()), + $f_scalar::MAX ); assert_eq!( - rng.sample(Uniform::new_inclusive( - -::core::$f_scalar::MAX, - -::core::$f_scalar::MAX - ).unwrap()), - -::core::$f_scalar::MAX + rng.sample(Uniform::new_inclusive(-$f_scalar::MAX, -$f_scalar::MAX).unwrap()), + -$f_scalar::MAX ); }}; } @@ -1549,21 +1605,18 @@ mod tests { macro_rules! t { ($ty:ident, $f_scalar:ident) => {{ let v: &[($f_scalar, $f_scalar)] = &[ - (::std::$f_scalar::NAN, 0.0), - (1.0, ::std::$f_scalar::NAN), - (::std::$f_scalar::NAN, ::std::$f_scalar::NAN), + ($f_scalar::NAN, 0.0), + (1.0, $f_scalar::NAN), + ($f_scalar::NAN, $f_scalar::NAN), (1.0, 0.5), - (::std::$f_scalar::MAX, -::std::$f_scalar::MAX), - (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY), - ( - ::std::$f_scalar::NEG_INFINITY, - ::std::$f_scalar::NEG_INFINITY, - ), - (::std::$f_scalar::NEG_INFINITY, 5.0), - (5.0, ::std::$f_scalar::INFINITY), - (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY), - (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN), - (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY), + ($f_scalar::MAX, -$f_scalar::MAX), + ($f_scalar::INFINITY, $f_scalar::INFINITY), + ($f_scalar::NEG_INFINITY, $f_scalar::NEG_INFINITY), + ($f_scalar::NEG_INFINITY, 5.0), + (5.0, $f_scalar::INFINITY), + ($f_scalar::NAN, $f_scalar::INFINITY), + ($f_scalar::NEG_INFINITY, $f_scalar::NAN), + ($f_scalar::NEG_INFINITY, $f_scalar::INFINITY), ]; for &(low_scalar, high_scalar) in v.iter() { for lane in 0..<$ty>::LEN { @@ -1593,7 +1646,6 @@ mod tests { } } - #[test] #[cfg_attr(miri, ignore)] // Miri is too slow fn test_durations() { @@ -1602,10 +1654,7 @@ mod tests { let v = &[ (Duration::new(10, 50000), Duration::new(100, 1234)), (Duration::new(0, 100), Duration::new(1, 50)), - ( - Duration::new(0, 0), - Duration::new(u64::MAX, 999_999_999), - ), + (Duration::new(0, 0), Duration::new(u64::MAX, 999_999_999)), ]; for &(low, high) in v.iter() { let my_uniform = Uniform::new(low, high).unwrap(); @@ -1707,8 +1756,13 @@ mod tests { #[test] fn value_stability() { fn test_samples( - lb: T, ub: T, expected_single: &[T], expected_multiple: &[T], - ) where Uniform: Distribution { + lb: T, + ub: T, + expected_single: &[T], + expected_multiple: &[T], + ) where + Uniform: Distribution, + { let mut rng = crate::test::rng(897); let mut buf = [lb; 3]; @@ -1730,11 +1784,12 @@ mod tests { test_samples(11u8, 219, &[17, 66, 214], &[181, 93, 165]); test_samples(11u32, 219, &[17, 66, 214], &[181, 93, 165]); - test_samples(0f32, 1e-2f32, &[0.0003070104, 0.0026630748, 0.00979833], &[ - 0.008194133, - 0.00398172, - 0.007428536, - ]); + test_samples( + 0f32, + 1e-2f32, + &[0.0003070104, 0.0026630748, 0.00979833], + &[0.008194133, 
0.00398172, 0.007428536], + ); test_samples( -1e10f64, 1e10f64, @@ -1760,9 +1815,15 @@ mod tests { #[test] fn uniform_distributions_can_be_compared() { - assert_eq!(Uniform::new(1.0, 2.0).unwrap(), Uniform::new(1.0, 2.0).unwrap()); + assert_eq!( + Uniform::new(1.0, 2.0).unwrap(), + Uniform::new(1.0, 2.0).unwrap() + ); // To cover UniformInt - assert_eq!(Uniform::new(1_u32, 2_u32).unwrap(), Uniform::new(1_u32, 2_u32).unwrap()); + assert_eq!( + Uniform::new(1_u32, 2_u32).unwrap(), + Uniform::new(1_u32, 2_u32).unwrap() + ); } } diff --git a/src/distributions/utils.rs b/src/distributions/utils.rs index e3ef5bcdb8b..aee92b67902 100644 --- a/src/distributions/utils.rs +++ b/src/distributions/utils.rs @@ -8,9 +8,10 @@ //! Math helper functions -#[cfg(feature = "simd_support")] use core::simd::prelude::*; -#[cfg(feature = "simd_support")] use core::simd::{LaneCount, SimdElement, SupportedLaneCount}; - +#[cfg(feature = "simd_support")] +use core::simd::prelude::*; +#[cfg(feature = "simd_support")] +use core::simd::{LaneCount, SimdElement, SupportedLaneCount}; pub(crate) trait WideningMultiply { type Output; @@ -146,8 +147,10 @@ wmul_impl_usize! { u64 } #[cfg(feature = "simd_support")] mod simd_wmul { use super::*; - #[cfg(target_arch = "x86")] use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; wmul_impl! { (u8x4, u16x4), @@ -340,12 +343,12 @@ macro_rules! scalar_float_impl { scalar_float_impl!(f32, u32); scalar_float_impl!(f64, u64); - #[cfg(feature = "simd_support")] macro_rules! simd_impl { ($fty:ident, $uty:ident) => { impl FloatSIMDUtils for Simd<$fty, LANES> - where LaneCount: SupportedLaneCount + where + LaneCount: SupportedLaneCount, { type Mask = Mask<<$fty as SimdElement>::Mask, LANES>; type UInt = Simd<$uty, LANES>; diff --git a/src/distributions/weighted_index.rs b/src/distributions/weighted_index.rs index cec292dd8a8..a021376639f 100644 --- a/src/distributions/weighted_index.rs +++ b/src/distributions/weighted_index.rs @@ -108,7 +108,11 @@ impl WeightedIndex { X: Weight, { let mut iter = weights.into_iter(); - let mut total_weight: X = iter.next().ok_or(WeightError::InvalidInput)?.borrow().clone(); + let mut total_weight: X = iter + .next() + .ok_or(WeightError::InvalidInput)? 
+ .borrow() + .clone(); let zero = X::ZERO; if !(total_weight >= zero) { @@ -252,9 +256,9 @@ pub struct WeightedIndexIter<'a, X: SampleUniform + PartialOrd> { } impl<'a, X> Debug for WeightedIndexIter<'a, X> - where - X: SampleUniform + PartialOrd + Debug, - X::Sampler: Debug, +where + X: SampleUniform + PartialOrd + Debug, + X::Sampler: Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("WeightedIndexIter") @@ -278,10 +282,7 @@ where impl<'a, X> Iterator for WeightedIndexIter<'a, X> where - X: for<'b> core::ops::SubAssign<&'b X> - + SampleUniform - + PartialOrd - + Clone, + X: for<'b> core::ops::SubAssign<&'b X> + SampleUniform + PartialOrd + Clone, { type Item = X; @@ -315,15 +316,16 @@ impl WeightedIndex { /// ``` pub fn weight(&self, index: usize) -> Option where - X: for<'a> core::ops::SubAssign<&'a X> + X: for<'a> core::ops::SubAssign<&'a X>, { - let mut weight = if index < self.cumulative_weights.len() { - self.cumulative_weights[index].clone() - } else if index == self.cumulative_weights.len() { - self.total_weight.clone() - } else { - return None; + use core::cmp::Ordering::*; + + let mut weight = match index.cmp(&self.cumulative_weights.len()) { + Less => self.cumulative_weights[index].clone(), + Equal => self.total_weight.clone(), + Greater => return None, }; + if index > 0 { weight -= &self.cumulative_weights[index - 1]; } @@ -348,7 +350,7 @@ impl WeightedIndex { /// ``` pub fn weights(&self) -> WeightedIndexIter<'_, X> where - X: for<'a> core::ops::SubAssign<&'a X> + X: for<'a> core::ops::SubAssign<&'a X>, { WeightedIndexIter { weighted_index: self, @@ -387,6 +389,7 @@ pub trait Weight: Clone { /// - `Result::Err`: Returns an error when `Self` cannot represent the /// result of `self + v` (i.e. overflow). The value of `self` should be /// discarded. + #[allow(clippy::result_unit_err)] fn checked_add_assign(&mut self, v: &Self) -> Result<(), ()>; } @@ -417,6 +420,7 @@ macro_rules! 
impl_weight_float { ($t:ty) => { impl Weight for $t { const ZERO: Self = 0.0; + fn checked_add_assign(&mut self, v: &Self) -> Result<(), ()> { // Floats have an explicit representation for overflow *self += *v; @@ -435,7 +439,7 @@ mod test { #[cfg(feature = "serde1")] #[test] fn test_weightedindex_serde1() { - let weighted_index = WeightedIndex::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unwrap(); + let weighted_index = WeightedIndex::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unwrap(); let ser_weighted_index = bincode::serialize(&weighted_index).unwrap(); let de_weighted_index: WeightedIndex = @@ -451,20 +455,20 @@ mod test { #[test] fn test_accepting_nan() { assert_eq!( - WeightedIndex::new(&[f32::NAN, 0.5]).unwrap_err(), + WeightedIndex::new([f32::NAN, 0.5]).unwrap_err(), WeightError::InvalidWeight, ); assert_eq!( - WeightedIndex::new(&[f32::NAN]).unwrap_err(), + WeightedIndex::new([f32::NAN]).unwrap_err(), WeightError::InvalidWeight, ); assert_eq!( - WeightedIndex::new(&[0.5, f32::NAN]).unwrap_err(), + WeightedIndex::new([0.5, f32::NAN]).unwrap_err(), WeightError::InvalidWeight, ); assert_eq!( - WeightedIndex::new(&[0.5, 7.0]) + WeightedIndex::new([0.5, 7.0]) .unwrap() .update_weights(&[(0, &f32::NAN)]) .unwrap_err(), @@ -516,10 +520,10 @@ mod test { verify(chosen); for _ in 0..5 { - assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1); - assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0); + assert_eq!(WeightedIndex::new([0, 1]).unwrap().sample(&mut r), 1); + assert_eq!(WeightedIndex::new([1, 0]).unwrap().sample(&mut r), 0); assert_eq!( - WeightedIndex::new(&[0, 0, 0, 0, 10, 0]) + WeightedIndex::new([0, 0, 0, 0, 10, 0]) .unwrap() .sample(&mut r), 4 @@ -531,19 +535,19 @@ mod test { WeightError::InvalidInput ); assert_eq!( - WeightedIndex::new(&[0]).unwrap_err(), + WeightedIndex::new([0]).unwrap_err(), WeightError::InsufficientNonZero ); assert_eq!( - WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), + WeightedIndex::new([10, 20, -1, 30]).unwrap_err(), WeightError::InvalidWeight ); assert_eq!( - WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), + WeightedIndex::new([-10, 20, 1, 30]).unwrap_err(), WeightError::InvalidWeight ); assert_eq!( - WeightedIndex::new(&[-10]).unwrap_err(), + WeightedIndex::new([-10]).unwrap_err(), WeightError::InvalidWeight ); } @@ -649,7 +653,9 @@ mod test { #[test] fn value_stability() { fn test_samples( - weights: I, buf: &mut [usize], expected: &[usize], + weights: I, + buf: &mut [usize], + expected: &[usize], ) where I: IntoIterator, I::Item: SampleBorrow, @@ -665,17 +671,17 @@ mod test { let mut buf = [0; 10]; test_samples( - &[1i32, 1, 1, 1, 1, 1, 1, 1, 1], + [1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[0, 6, 2, 6, 3, 4, 7, 8, 2, 5], ); test_samples( - &[0.7f32, 0.1, 0.1, 0.1], + [0.7f32, 0.1, 0.1, 0.1], &mut buf, &[0, 0, 0, 1, 0, 0, 2, 3, 0, 0], ); test_samples( - &[1.0f64, 0.999, 0.998, 0.997], + [1.0f64, 0.999, 0.998, 0.997], &mut buf, &[2, 2, 1, 3, 2, 1, 3, 3, 2, 1], ); @@ -683,7 +689,7 @@ mod test { #[test] fn weighted_index_distributions_can_be_compared() { - assert_eq!(WeightedIndex::new(&[1, 2]), WeightedIndex::new(&[1, 2])); + assert_eq!(WeightedIndex::new([1, 2]), WeightedIndex::new([1, 2])); } #[test] diff --git a/src/lib.rs b/src/lib.rs index af51387441a..3e8ab5c7c77 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -50,6 +50,7 @@ #![doc(test(attr(allow(unused_variables), deny(warnings))))] #![no_std] #![cfg_attr(feature = "simd_support", feature(portable_simd))] +#![allow(unexpected_cfgs)] #![cfg_attr(doc_cfg, feature(doc_cfg))] 
#![allow( clippy::float_cmp, @@ -57,8 +58,10 @@ clippy::nonminimal_bool )] -#[cfg(feature = "alloc")] extern crate alloc; -#[cfg(feature = "std")] extern crate std; +#[cfg(feature = "alloc")] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; #[allow(unused)] macro_rules! trace { ($($x:tt)*) => ( @@ -160,7 +163,9 @@ use crate::distributions::{Distribution, Standard}; )] #[inline] pub fn random() -> T -where Standard: Distribution { +where + Standard: Distribution, +{ thread_rng().random() } diff --git a/src/prelude.rs b/src/prelude.rs index 87613532f78..2605bca91f4 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -18,7 +18,8 @@ //! # let _: f32 = r.random(); //! ``` -#[doc(no_inline)] pub use crate::distributions::Distribution; +#[doc(no_inline)] +pub use crate::distributions::Distribution; #[cfg(feature = "small_rng")] #[doc(no_inline)] pub use crate::rngs::SmallRng; @@ -33,4 +34,5 @@ pub use crate::seq::{IndexedMutRandom, IndexedRandom, IteratorRandom, SliceRando #[doc(no_inline)] #[cfg(all(feature = "std", feature = "std_rng", feature = "getrandom"))] pub use crate::{random, thread_rng}; -#[doc(no_inline)] pub use crate::{CryptoRng, Rng, RngCore, SeedableRng}; +#[doc(no_inline)] +pub use crate::{CryptoRng, Rng, RngCore, SeedableRng}; diff --git a/src/rng.rs b/src/rng.rs index 129c141796e..06fc2bb741e 100644 --- a/src/rng.rs +++ b/src/rng.rs @@ -89,7 +89,9 @@ pub trait Rng: RngCore { /// [`Standard`]: distributions::Standard #[inline] fn random(&mut self) -> T - where Standard: Distribution { + where + Standard: Distribution, + { Standard.sample(self) } @@ -309,7 +311,9 @@ pub trait Rng: RngCore { note = "Renamed to `random` to avoid conflict with the new `gen` keyword in Rust 2024." )] fn gen(&mut self) -> T - where Standard: Distribution { + where + Standard: Distribution, + { self.random() } } @@ -402,7 +406,8 @@ impl_fill!(u16, u32, u64, usize, u128,); impl_fill!(i8, i16, i32, i64, isize, i128,); impl Fill for [T; N] -where [T]: Fill +where + [T]: Fill, { fn fill(&mut self, rng: &mut R) { <[T] as Fill>::fill(self, rng) @@ -414,7 +419,8 @@ mod test { use super::*; use crate::rngs::mock::StepRng; use crate::test::rng; - #[cfg(feature = "alloc")] use alloc::boxed::Box; + #[cfg(feature = "alloc")] + use alloc::boxed::Box; #[test] fn test_fill_bytes_default() { diff --git a/src/rngs/mock.rs b/src/rngs/mock.rs index e186fb7f062..a01a6bd7b4e 100644 --- a/src/rngs/mock.rs +++ b/src/rngs/mock.rs @@ -10,7 +10,8 @@ use rand_core::{impls, RngCore}; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; /// A mock generator yielding very predictable output /// @@ -78,7 +79,8 @@ rand_core::impl_try_rng_from_rng_core!(StepRng); #[cfg(test)] mod tests { - #[cfg(any(feature = "alloc", feature = "serde1"))] use super::StepRng; + #[cfg(any(feature = "alloc", feature = "serde1"))] + use super::StepRng; #[test] #[cfg(feature = "serde1")] diff --git a/src/rngs/mod.rs b/src/rngs/mod.rs index d357a715669..0aa6c427d85 100644 --- a/src/rngs/mod.rs +++ b/src/rngs/mod.rs @@ -102,18 +102,22 @@ pub use reseeding::ReseedingRng; pub mod mock; // Public so we don't export `StepRng` directly, making it a bit // more clear it is intended for testing. 
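The hunks above only reflow the `StepRng` mock and the module comment around it; its behaviour is unchanged. For readers unfamiliar with the mock, the following is a minimal usage sketch (illustrative only, not part of this diff), assuming the `StepRng::new(initial, increment)` constructor and the `Rng::random` method shown in earlier hunks:

```rust
use rand::rngs::mock::StepRng;
use rand::Rng;

#[test]
fn deterministic_with_step_rng() {
    // StepRng::new(initial, increment) yields `initial`, then
    // `initial + increment`, then `initial + 2 * increment`, ...
    let mut rng = StepRng::new(0, 1);
    assert_eq!(rng.random::<u64>(), 0);
    assert_eq!(rng.random::<u64>(), 1);
    assert_eq!(rng.random::<u64>(), 2);
}
```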
-#[cfg(feature = "small_rng")] mod small; +#[cfg(feature = "small_rng")] +mod small; #[cfg(all(feature = "small_rng", not(target_pointer_width = "64")))] mod xoshiro128plusplus; #[cfg(all(feature = "small_rng", target_pointer_width = "64"))] mod xoshiro256plusplus; -#[cfg(feature = "std_rng")] mod std; +#[cfg(feature = "std_rng")] +mod std; #[cfg(all(feature = "std", feature = "std_rng", feature = "getrandom"))] pub(crate) mod thread; -#[cfg(feature = "small_rng")] pub use self::small::SmallRng; -#[cfg(feature = "std_rng")] pub use self::std::StdRng; +#[cfg(feature = "small_rng")] +pub use self::small::SmallRng; +#[cfg(feature = "std_rng")] +pub use self::std::StdRng; #[cfg(all(feature = "std", feature = "std_rng", feature = "getrandom"))] pub use self::thread::ThreadRng; diff --git a/src/rngs/thread.rs b/src/rngs/thread.rs index 08654149670..f9f1a0af676 100644 --- a/src/rngs/thread.rs +++ b/src/rngs/thread.rs @@ -33,7 +33,6 @@ use crate::rngs::ReseedingRng; // `ThreadRng` internally, which is nonsensical anyway. We should also never run // `ThreadRng` in destructors of its implementation, which is also nonsensical. - // Number of generated bytes after which to reseed `ThreadRng`. // According to benchmarks, reseeding has a noticeable impact with thresholds // of 32 kB and less. We choose 64 kB to avoid significant overhead. diff --git a/src/rngs/xoshiro128plusplus.rs b/src/rngs/xoshiro128plusplus.rs index 416ea91a9fa..aa621950164 100644 --- a/src/rngs/xoshiro128plusplus.rs +++ b/src/rngs/xoshiro128plusplus.rs @@ -9,7 +9,8 @@ use rand_core::impls::{fill_bytes_via_next, next_u64_via_u32}; use rand_core::le::read_u32_into; use rand_core::{RngCore, SeedableRng}; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; /// A xoshiro128++ random number generator. /// diff --git a/src/rngs/xoshiro256plusplus.rs b/src/rngs/xoshiro256plusplus.rs index 0fdc66df7c8..d6210df6b39 100644 --- a/src/rngs/xoshiro256plusplus.rs +++ b/src/rngs/xoshiro256plusplus.rs @@ -9,7 +9,8 @@ use rand_core::impls::fill_bytes_via_next; use rand_core::le::read_u64_into; use rand_core::{RngCore, SeedableRng}; -#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] +use serde::{Deserialize, Serialize}; /// A xoshiro256++ random number generator. /// diff --git a/src/seq/coin_flipper.rs b/src/seq/coin_flipper.rs index 7a97fa8aaf0..4c41c07da44 100644 --- a/src/seq/coin_flipper.rs +++ b/src/seq/coin_flipper.rs @@ -10,7 +10,7 @@ use crate::RngCore; pub(crate) struct CoinFlipper { pub rng: R, - chunk: u32, //TODO(opt): this should depend on RNG word size + chunk: u32, // TODO(opt): this should depend on RNG word size chunk_remaining: u32, } @@ -92,7 +92,7 @@ impl CoinFlipper { // If n * 2^c > `usize::MAX` we always return `true` anyway n = n.saturating_mul(2_usize.pow(c)); } else { - //At least one tail + // At least one tail if c == 1 { // Calculate 2n - d. // We need to use wrapping as 2n might be greater than `usize::MAX` diff --git a/src/seq/index.rs b/src/seq/index.rs index e34b1c2ca62..9ef6bf89a45 100644 --- a/src/seq/index.rs +++ b/src/seq/index.rs @@ -7,23 +7,30 @@ // except according to those terms. //! 
Low-level API for sampling indices +use core::{cmp::Ordering, hash::Hash, ops::AddAssign}; -#[cfg(feature = "alloc")] use core::slice; +#[cfg(feature = "alloc")] +use core::slice; -#[cfg(feature = "alloc")] use alloc::vec::{self, Vec}; +#[cfg(feature = "alloc")] +use alloc::vec::{self, Vec}; // BTreeMap is not as fast in tests, but better than nothing. #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::collections::BTreeSet; -#[cfg(feature = "std")] use std::collections::HashSet; +#[cfg(feature = "std")] +use std::collections::HashSet; #[cfg(feature = "std")] use super::WeightError; #[cfg(feature = "alloc")] -use crate::{Rng, distributions::{uniform::SampleUniform, Distribution, Uniform}}; +use crate::{ + distributions::{uniform::SampleUniform, Distribution, Uniform}, + Rng, +}; #[cfg(feature = "serde1")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// A vector of indices. /// @@ -88,8 +95,8 @@ impl IndexVec { } impl IntoIterator for IndexVec { - type Item = usize; type IntoIter = IndexVecIntoIter; + type Item = usize; /// Convert into an iterator over the indices as a sequence of `usize` values #[inline] @@ -196,7 +203,6 @@ impl Iterator for IndexVecIntoIter { impl ExactSizeIterator for IndexVecIntoIter {} - /// Randomly sample exactly `amount` distinct indices from `0..length`, and /// return them in random order (fully shuffled). /// @@ -221,7 +227,9 @@ impl ExactSizeIterator for IndexVecIntoIter {} /// Panics if `amount > length`. #[track_caller] pub fn sample(rng: &mut R, length: usize, amount: usize) -> IndexVec -where R: Rng + ?Sized { +where + R: Rng + ?Sized, +{ if amount > length { panic!("`amount` of samples must be less than or equal to `length`"); } @@ -276,7 +284,10 @@ where R: Rng + ?Sized { #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub fn sample_weighted( - rng: &mut R, length: usize, weight: F, amount: usize, + rng: &mut R, + length: usize, + weight: F, + amount: usize, ) -> Result where R: Rng + ?Sized, @@ -293,7 +304,6 @@ where } } - /// Randomly sample exactly `amount` distinct indices from `0..length`, and /// return them in an arbitrary order (there is no guarantee of shuffling or /// ordering). The weights are to be provided by the input function `weights`, @@ -308,7 +318,10 @@ where /// - [`WeightError::InsufficientNonZero`] when fewer than `amount` weights are positive. #[cfg(feature = "std")] fn sample_efraimidis_spirakis( - rng: &mut R, length: N, weight: F, amount: N, + rng: &mut R, + length: N, + weight: F, + amount: N, ) -> Result where R: Rng + ?Sized, @@ -325,23 +338,27 @@ where index: N, key: f64, } + impl PartialOrd for Element { - fn partial_cmp(&self, other: &Self) -> Option { - self.key.partial_cmp(&other.key) + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) } } + impl Ord for Element { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - // partial_cmp will always produce a value, - // because we check that the weights are not nan - self.partial_cmp(other).unwrap() + fn cmp(&self, other: &Self) -> Ordering { + // partial_cmp will always produce a value, + // because we check that the weights are not nan + self.key.partial_cmp(&other.key).unwrap() } } + impl PartialEq for Element { fn eq(&self, other: &Self) -> bool { self.key == other.key } } + impl Eq for Element {} let mut candidates = Vec::with_capacity(length.as_usize()); @@ -367,8 +384,7 @@ where // keys. 
Do this by using `select_nth_unstable` to put the elements with // the *smallest* keys at the beginning of the list in `O(n)` time, which // provides equivalent information about the elements with the *greatest* keys. - let (_, mid, greater) - = candidates.select_nth_unstable(avail - amount.as_usize()); + let (_, mid, greater) = candidates.select_nth_unstable(avail - amount.as_usize()); let mut result: Vec = Vec::with_capacity(amount.as_usize()); result.push(mid.index); @@ -385,7 +401,9 @@ where /// /// This implementation uses `O(amount)` memory and `O(amount^2)` time. fn sample_floyd(rng: &mut R, length: u32, amount: u32) -> IndexVec -where R: Rng + ?Sized { +where + R: Rng + ?Sized, +{ // Note that the values returned by `rng.gen_range()` can be // inferred from the returned vector by working backwards from // the last entry. This bijection proves the algorithm fair. @@ -414,7 +432,9 @@ where R: Rng + ?Sized { /// /// Set-up is `O(length)` time and memory and shuffling is `O(amount)` time. fn sample_inplace(rng: &mut R, length: u32, amount: u32) -> IndexVec -where R: Rng + ?Sized { +where + R: Rng + ?Sized, +{ debug_assert!(amount <= length); let mut indices: Vec = Vec::with_capacity(length as usize); indices.extend(0..length); @@ -427,12 +447,12 @@ where R: Rng + ?Sized { IndexVec::from(indices) } -trait UInt: Copy + PartialOrd + Ord + PartialEq + Eq + SampleUniform - + core::hash::Hash + core::ops::AddAssign { +trait UInt: Copy + PartialOrd + Ord + PartialEq + Eq + SampleUniform + Hash + AddAssign { fn zero() -> Self; fn one() -> Self; fn as_usize(self) -> usize; } + impl UInt for u32 { #[inline] fn zero() -> Self { @@ -449,6 +469,7 @@ impl UInt for u32 { self as usize } } + impl UInt for usize { #[inline] fn zero() -> Self { @@ -507,19 +528,23 @@ mod test { #[cfg(feature = "serde1")] fn test_serialization_index_vec() { let some_index_vec = IndexVec::from(vec![254_usize, 234, 2, 1]); - let de_some_index_vec: IndexVec = bincode::deserialize(&bincode::serialize(&some_index_vec).unwrap()).unwrap(); + let de_some_index_vec: IndexVec = + bincode::deserialize(&bincode::serialize(&some_index_vec).unwrap()).unwrap(); match (some_index_vec, de_some_index_vec) { (IndexVec::U32(a), IndexVec::U32(b)) => { assert_eq!(a, b); - }, + } (IndexVec::USize(a), IndexVec::USize(b)) => { assert_eq!(a, b); - }, - _ => {panic!("failed to seralize/deserialize `IndexVec`")} + } + _ => { + panic!("failed to seralize/deserialize `IndexVec`") + } } } - #[cfg(feature = "alloc")] use alloc::vec; + #[cfg(feature = "alloc")] + use alloc::vec; #[test] fn test_sample_boundaries() { @@ -593,7 +618,7 @@ mod test { for &i in &indices { assert!((i as usize) < len); } - }, + } IndexVec::USize(_) => panic!("expected `IndexVec::U32`"), } } @@ -628,11 +653,15 @@ mod test { do_test(300, 80, &[31, 289, 248, 154, 221, 243, 7, 192]); // inplace do_test(300, 180, &[31, 289, 248, 154, 221, 243, 7, 192]); // inplace - do_test(1_000_000, 8, &[ - 103717, 963485, 826422, 509101, 736394, 807035, 5327, 632573, - ]); // floyd - do_test(1_000_000, 180, &[ - 103718, 963490, 826426, 509103, 736396, 807036, 5327, 632573, - ]); // rejection + do_test( + 1_000_000, + 8, + &[103717, 963485, 826422, 509101, 736394, 807035, 5327, 632573], + ); // floyd + do_test( + 1_000_000, + 180, + &[103718, 963490, 826426, 509103, 736396, 807036, 5327, 632573], + ); // rejection } } diff --git a/src/seq/mod.rs b/src/seq/mod.rs index fc1cc993113..1df250d04af 100644 --- a/src/seq/mod.rs +++ b/src/seq/mod.rs @@ -44,7 +44,8 @@ use alloc::vec::Vec; #[cfg(feature = 
"alloc")] use crate::distributions::uniform::{SampleBorrow, SampleUniform}; -#[cfg(feature = "alloc")] use crate::distributions::Weight; +#[cfg(feature = "alloc")] +use crate::distributions::Weight; use crate::Rng; use self::coin_flipper::CoinFlipper; @@ -167,7 +168,9 @@ pub trait IndexedRandom: Index { #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] fn choose_weighted( - &self, rng: &mut R, weight: F, + &self, + rng: &mut R, + weight: F, ) -> Result<&Self::Output, WeightError> where R: Rng + ?Sized, @@ -212,13 +215,15 @@ pub trait IndexedRandom: Index { /// println!("{:?}", choices.choose_multiple_weighted(&mut rng, 2, |item| item.1).unwrap().collect::>()); /// ``` /// [`choose_multiple`]: IndexedRandom::choose_multiple - // // Note: this is feature-gated on std due to usage of f64::powf. // If necessary, we may use alloc+libm as an alternative (see PR #1089). #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] fn choose_multiple_weighted( - &self, rng: &mut R, amount: usize, weight: F, + &self, + rng: &mut R, + amount: usize, + weight: F, ) -> Result, WeightError> where Self::Output: Sized, @@ -285,7 +290,9 @@ pub trait IndexedMutRandom: IndexedRandom + IndexMut { #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] fn choose_weighted_mut( - &mut self, rng: &mut R, weight: F, + &mut self, + rng: &mut R, + weight: F, ) -> Result<&mut Self::Output, WeightError> where R: Rng + ?Sized, @@ -358,7 +365,9 @@ pub trait SliceRandom: IndexedMutRandom { /// /// For slices, complexity is `O(m)` where `m = amount`. fn partial_shuffle( - &mut self, rng: &mut R, amount: usize, + &mut self, + rng: &mut R, + amount: usize, ) -> (&mut [Self::Output], &mut [Self::Output]) where Self::Output: Sized, @@ -624,9 +633,7 @@ impl SliceRandom for [T] { self.partial_shuffle(rng, self.len()); } - fn partial_shuffle( - &mut self, rng: &mut R, amount: usize, - ) -> (&mut [T], &mut [T]) + fn partial_shuffle(&mut self, rng: &mut R, amount: usize) -> (&mut [T], &mut [T]) where R: Rng + ?Sized, { @@ -1294,7 +1301,10 @@ mod test { fn do_test>(iter: I, v: &[u32]) { let mut rng = crate::test::rng(412); let mut buf = [0u32; 8]; - assert_eq!(iter.clone().choose_multiple_fill(&mut rng, &mut buf), v.len()); + assert_eq!( + iter.clone().choose_multiple_fill(&mut rng, &mut buf), + v.len() + ); assert_eq!(&buf[0..v.len()], v); #[cfg(feature = "alloc")]