From 9e7fc365937792cc40b661fa5599ca16231db274 Mon Sep 17 00:00:00 2001 From: Hacl Bot Date: Mon, 16 Dec 2024 10:52:41 +0000 Subject: [PATCH] [CI] update code --- include/Hacl_Bignum32.h | 382 ++++-- include/Hacl_HMAC.h | 92 ++ include/Hacl_Hash_Blake2b.h | 45 +- include/Hacl_Hash_Blake2b_Simd256.h | 104 +- include/Hacl_Hash_Blake2s.h | 108 +- include/Hacl_Hash_Blake2s_Simd128.h | 104 +- include/Hacl_Hash_SHA3.h | 6 +- include/Hacl_Hash_SHA3_Simd256.h | 12 +- include/internal/Hacl_Bignum_Base.h | 50 +- include/internal/Hacl_Bignum_K256.h | 84 +- include/internal/Hacl_Frodo_KEM.h | 2 +- include/internal/Hacl_HMAC.h | 2 + include/internal/Hacl_Hash_Blake2b.h | 16 +- include/internal/Hacl_Hash_Blake2b_Simd256.h | 1 + include/internal/Hacl_Hash_Blake2s.h | 1 + include/internal/Hacl_Hash_Blake2s_Simd128.h | 1 + include/internal/Hacl_Hash_SHA2.h | 2 + include/libintvector.h | 2 +- include/msvc/Hacl_Bignum32.h | 382 ++++-- include/msvc/Hacl_HMAC.h | 92 ++ include/msvc/Hacl_Hash_Blake2b.h | 43 +- include/msvc/Hacl_Hash_Blake2b_Simd256.h | 98 +- include/msvc/Hacl_Hash_Blake2s.h | 102 +- include/msvc/Hacl_Hash_Blake2s_Simd128.h | 98 +- include/msvc/Hacl_Hash_SHA3.h | 6 +- include/msvc/Hacl_Hash_SHA3_Simd256.h | 12 +- include/msvc/internal/Hacl_Bignum_K256.h | 6 +- include/msvc/internal/Hacl_HMAC.h | 2 + include/msvc/internal/Hacl_Hash_Blake2b.h | 16 +- .../msvc/internal/Hacl_Hash_Blake2b_Simd256.h | 1 + include/msvc/internal/Hacl_Hash_Blake2s.h | 1 + .../msvc/internal/Hacl_Hash_Blake2s_Simd128.h | 1 + include/msvc/internal/Hacl_Hash_SHA2.h | 2 + include/msvc/libintvector.h | 2 +- karamel/include/krml/c_endianness.h | 2 +- karamel/include/krml/internal/builtin.h | 2 +- karamel/include/krml/internal/callconv.h | 2 +- karamel/include/krml/internal/compat.h | 2 +- karamel/include/krml/internal/debug.h | 2 +- karamel/include/krml/internal/target.h | 28 +- karamel/include/krml/internal/types.h | 2 +- karamel/include/krml/internal/wasmsupport.h | 2 +- karamel/include/krml/lowstar_endianness.h | 2 +- karamel/krmllib/dist/minimal/FStar_UInt128.h | 2 +- .../dist/minimal/FStar_UInt128_Verified.h | 2 +- .../dist/minimal/FStar_UInt_8_16_32_64.h | 2 +- .../krmllib/dist/minimal/LowStar_Endianness.h | 2 +- .../dist/minimal/fstar_uint128_gcc64.h | 2 +- .../krmllib/dist/minimal/fstar_uint128_msvc.h | 2 +- .../minimal/fstar_uint128_struct_endianness.h | 2 +- ocaml/ctypes.depend | 34 +- ocaml/lib/Hacl_HMAC_bindings.ml | 30 + .../lib/Hacl_Hash_Blake2b_Simd256_bindings.ml | 4 +- ocaml/lib/Hacl_Hash_Blake2b_bindings.ml | 41 +- .../lib/Hacl_Hash_Blake2s_Simd128_bindings.ml | 4 +- ocaml/lib/Hacl_Hash_Blake2s_bindings.ml | 33 +- ocaml/lib/Hacl_Hash_SHA2_bindings.ml | 3 + src/EverCrypt_AEAD.c | 88 +- src/EverCrypt_HKDF.c | 90 +- src/EverCrypt_HMAC.c | 152 +-- src/EverCrypt_Hash.c | 210 ++- src/Hacl_AEAD_Chacha20Poly1305.c | 6 +- src/Hacl_AEAD_Chacha20Poly1305_Simd128.c | 6 +- src/Hacl_AEAD_Chacha20Poly1305_Simd256.c | 6 +- src/Hacl_Bignum.c | 412 ++++-- src/Hacl_Bignum256.c | 174 ++- src/Hacl_Bignum256_32.c | 174 ++- src/Hacl_Bignum32.c | 468 ++++--- src/Hacl_Bignum4096.c | 158 ++- src/Hacl_Bignum4096_32.c | 158 ++- src/Hacl_Bignum64.c | 86 +- src/Hacl_Chacha20.c | 22 +- src/Hacl_Chacha20_Vec128.c | 12 +- src/Hacl_Chacha20_Vec256.c | 12 +- src/Hacl_Chacha20_Vec32.c | 12 +- src/Hacl_Curve25519_51.c | 192 ++- src/Hacl_Curve25519_64.c | 190 ++- src/Hacl_EC_K256.c | 2 +- src/Hacl_Ed25519.c | 247 ++-- src/Hacl_FFDHE.c | 16 +- src/Hacl_Frodo1344.c | 2 +- src/Hacl_Frodo64.c | 2 +- src/Hacl_Frodo640.c | 2 +- src/Hacl_Frodo976.c | 2 +- 
src/Hacl_Frodo_KEM.c | 3 +- src/Hacl_GenericField32.c | 134 +- src/Hacl_GenericField64.c | 134 +- src/Hacl_HKDF.c | 75 +- src/Hacl_HKDF_Blake2b_256.c | 15 +- src/Hacl_HKDF_Blake2s_128.c | 15 +- src/Hacl_HMAC.c | 1162 +++++++++++++---- src/Hacl_HMAC_Blake2b_256.c | 25 +- src/Hacl_HMAC_Blake2s_128.c | 26 +- src/Hacl_Hash_Blake2b.c | 747 ++++++----- src/Hacl_Hash_Blake2b_Simd256.c | 725 +++++----- src/Hacl_Hash_Blake2s.c | 809 +++++++----- src/Hacl_Hash_Blake2s_Simd128.c | 707 +++++----- src/Hacl_Hash_MD5.c | 107 +- src/Hacl_Hash_SHA1.c | 107 +- src/Hacl_Hash_SHA2.c | 272 ++-- src/Hacl_Hash_SHA3.c | 152 +-- src/Hacl_Hash_SHA3_Simd256.c | 12 +- src/Hacl_K256_ECDSA.c | 573 +++++--- src/Hacl_MAC_Poly1305.c | 121 +- src/Hacl_MAC_Poly1305_Simd128.c | 121 +- src/Hacl_MAC_Poly1305_Simd256.c | 121 +- src/Hacl_NaCl.c | 4 +- src/Hacl_P256.c | 1006 ++++++++++---- src/Hacl_RSAPSS.c | 31 +- src/Hacl_SHA2_Vec128.c | 8 +- src/Hacl_SHA2_Vec256.c | 16 +- src/Hacl_Salsa20.c | 34 +- src/Lib_Memzero0.c | 4 +- src/msvc/EverCrypt_AEAD.c | 88 +- src/msvc/EverCrypt_HMAC.c | 134 +- src/msvc/EverCrypt_Hash.c | 6 +- src/msvc/Hacl_Bignum.c | 16 +- src/msvc/Hacl_Bignum256.c | 14 +- src/msvc/Hacl_Bignum256_32.c | 14 +- src/msvc/Hacl_Bignum32.c | 390 ++++-- src/msvc/Hacl_Bignum4096.c | 14 +- src/msvc/Hacl_Bignum4096_32.c | 14 +- src/msvc/Hacl_Bignum64.c | 8 +- src/msvc/Hacl_Ed25519.c | 32 +- src/msvc/Hacl_FFDHE.c | 12 +- src/msvc/Hacl_Frodo_KEM.c | 3 +- src/msvc/Hacl_HMAC.c | 1002 ++++++++++++-- src/msvc/Hacl_HMAC_Blake2b_256.c | 23 +- src/msvc/Hacl_HMAC_Blake2s_128.c | 24 +- src/msvc/Hacl_Hash_Blake2b.c | 437 ++++--- src/msvc/Hacl_Hash_Blake2b_Simd256.c | 471 ++++--- src/msvc/Hacl_Hash_Blake2s.c | 489 ++++--- src/msvc/Hacl_Hash_Blake2s_Simd128.c | 443 ++++--- src/msvc/Hacl_Hash_SHA2.c | 6 +- src/msvc/Hacl_Hash_SHA3.c | 9 +- src/msvc/Hacl_Hash_SHA3_Simd256.c | 12 +- src/msvc/Hacl_K256_ECDSA.c | 24 +- src/msvc/Hacl_RSAPSS.c | 25 +- src/msvc/Lib_Memzero0.c | 4 +- src/wasm/EverCrypt_Hash.wasm | Bin 58084 -> 58076 bytes src/wasm/Hacl_Bignum.wasm | Bin 74661 -> 74589 bytes src/wasm/Hacl_Bignum256.wasm | Bin 70325 -> 70121 bytes src/wasm/Hacl_Bignum256_32.wasm | Bin 32207 -> 32007 bytes src/wasm/Hacl_Bignum32.wasm | Bin 13286 -> 13122 bytes src/wasm/Hacl_Bignum4096.wasm | Bin 52302 -> 52098 bytes src/wasm/Hacl_Bignum4096_32.wasm | Bin 28696 -> 28496 bytes src/wasm/Hacl_Bignum64.wasm | Bin 14527 -> 14359 bytes src/wasm/Hacl_Bignum_Base.wasm | Bin 24692 -> 24692 bytes src/wasm/Hacl_Ed25519.wasm | Bin 77669 -> 77650 bytes src/wasm/Hacl_GenericField32.wasm | Bin 10731 -> 10719 bytes src/wasm/Hacl_GenericField64.wasm | Bin 11718 -> 11706 bytes src/wasm/Hacl_HKDF_Blake2s_128.wasm | Bin 1392 -> 1392 bytes src/wasm/Hacl_HMAC.wasm | Bin 28160 -> 57148 bytes src/wasm/Hacl_HMAC_Blake2b_256.wasm | Bin 1510 -> 1513 bytes src/wasm/Hacl_HMAC_Blake2s_128.wasm | Bin 1508 -> 1511 bytes src/wasm/Hacl_HMAC_DRBG.wasm | Bin 25396 -> 23871 bytes src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm | Bin 21320 -> 19078 bytes src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm | Bin 21448 -> 19206 bytes src/wasm/Hacl_Hash_Base.wasm | Bin 1191 -> 1175 bytes src/wasm/Hacl_Hash_Blake2b.wasm | Bin 22942 -> 24591 bytes src/wasm/Hacl_Hash_Blake2b_Simd256.wasm | Bin 11362 -> 11539 bytes src/wasm/Hacl_Hash_Blake2s.wasm | Bin 21136 -> 22696 bytes src/wasm/Hacl_Hash_Blake2s_Simd128.wasm | Bin 10194 -> 10398 bytes src/wasm/Hacl_Hash_MD5.wasm | Bin 15447 -> 15346 bytes src/wasm/Hacl_Hash_SHA1.wasm | Bin 13044 -> 12448 bytes src/wasm/Hacl_Hash_SHA2.wasm | Bin 23468 -> 23305 bytes 
src/wasm/Hacl_Hash_SHA3.wasm | Bin 55480 -> 55310 bytes src/wasm/Hacl_IntTypes_Intrinsics.wasm | Bin 1412 -> 1412 bytes src/wasm/Hacl_K256_ECDSA.wasm | Bin 98133 -> 97717 bytes src/wasm/Hacl_MAC_Poly1305.wasm | Bin 9539 -> 9438 bytes src/wasm/Hacl_P256.wasm | Bin 83198 -> 83025 bytes src/wasm/INFO.txt | 4 +- src/wasm/WasmSupport.wasm | Bin 1135 -> 1131 bytes src/wasm/layouts.json | 2 +- src/wasm/main.html | 2 +- src/wasm/shell.js | 2 +- 176 files changed, 10265 insertions(+), 5711 deletions(-) diff --git a/include/Hacl_Bignum32.h b/include/Hacl_Bignum32.h index 84a839a9..709f22d9 100644 --- a/include/Hacl_Bignum32.h +++ b/include/Hacl_Bignum32.h @@ -56,9 +56,18 @@ of `len` unsigned 32-bit integers, i.e. uint32_t[len]. /** Write `a + b mod 2 ^ (32 * len)` in `res`. - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + This function returns the carry. + + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly equal memory + location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_add(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res); @@ -67,82 +76,134 @@ Write `a - b mod 2 ^ (32 * len)` in `res`. This functions returns the carry. - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `(a + b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`.
+ Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `(a - b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `a * b` in `res`. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `b` and `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory locations of `a` and `b`. */ void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `a * a` in `res`. - The argument a is meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory location of `a`. */ void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res); /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. 
+ @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `n`. + + @return `false` if any precondition is violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `1 < n` + - `n % 2 = 1` */ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res); /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_vartime( @@ -157,22 +218,30 @@ Hacl_Bignum32_mod_exp_vartime( /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is constant-time over its argument `b`, at the cost of a slower + execution time than `mod_exp_vartime_*`. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. 
Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_consttime( @@ -187,18 +256,23 @@ Hacl_Bignum32_mod_exp_consttime( /** Write `a ^ (-1) mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `n`. + + @return `false` if any preconditions (except the precondition: `n` is a prime) + are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `n % 2 = 1` + - `1 < n` + - `0 < a` + - `a < n` */ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res); @@ -212,15 +286,16 @@ Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint /** Heap-allocate and initialize a montgomery context. - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n + @param n Points to `len` number of limbs, i.e. `uint32_t[len]`. - The caller will need to call Hacl_Bignum32_mont_ctx_free on the return value - to avoid memory leaks. + @return A pointer to an allocated and initialized Montgomery context is returned. + Clients will need to call `Hacl_Bignum32_mont_ctx_free` on the return value to + avoid memory leaks. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` */ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum32_mont_ctx_init(uint32_t len, uint32_t *n); @@ -228,16 +303,18 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 /** Deallocate the memory previously allocated by Hacl_Bignum32_mont_ctx_init. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. 
*/ void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k); /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. */ void Hacl_Bignum32_mod_precomp( @@ -249,21 +326,25 @@ Hacl_Bignum32_mod_precomp( /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_vartime_precomp( @@ -277,21 +358,25 @@ Hacl_Bignum32_mod_exp_vartime_precomp( /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • b < pow2 bBits - • a < n + execution time than `mod_exp_vartime_*`. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_consttime_precomp( @@ -305,14 +390,17 @@ Hacl_Bignum32_mod_exp_consttime_precomp( /** Write `a ^ (-1) mod n` in `res`. - The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n + @param[in] k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `0 < a` + - `a < n` */ void Hacl_Bignum32_mod_inv_prime_vartime_precomp( @@ -330,42 +418,48 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp( /** Load a bid-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b); /** Load a little-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. 
+ + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b); /** Serialize a bignum into big-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res); /** Serialize a bignum into little-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res); @@ -378,14 +472,22 @@ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res); /** Returns 2^32 - 1 if a < b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if `a < b`, otherwise, `0`. */ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b); /** Returns 2^32 - 1 if a = b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if a = b, otherwise, `0`. */ uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b); diff --git a/include/Hacl_HMAC.h b/include/Hacl_HMAC.h index e1dc04f2..0f6a5c27 100644 --- a/include/Hacl_HMAC.h +++ b/include/Hacl_HMAC.h @@ -35,11 +35,28 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" +#include "Hacl_Streaming_Types.h" #include "Hacl_Krmllib.h" +#include "Hacl_Hash_SHA3.h" #include "Hacl_Hash_SHA2.h" #include "Hacl_Hash_Blake2s.h" #include "Hacl_Hash_Blake2b.h" +/** +Write the HMAC-MD5 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. +`dst` must point to 16 bytes of memory. +*/ +void +Hacl_HMAC_compute_md5( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + /** Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`. @@ -55,6 +72,21 @@ Hacl_HMAC_compute_sha1( uint32_t data_len ); +/** +Write the HMAC-SHA-2-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 28 bytes of memory. 
+*/ +void +Hacl_HMAC_compute_sha2_224( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + /** Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`. @@ -100,6 +132,66 @@ Hacl_HMAC_compute_sha2_512( uint32_t data_len ); +/** +Write the HMAC-SHA-3-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 144 bytes. +`dst` must point to 28 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_224( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + +/** +Write the HMAC-SHA-3-256 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 136 bytes. +`dst` must point to 32 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_256( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + +/** +Write the HMAC-SHA-3-384 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 104 bytes. +`dst` must point to 48 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_384( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + +/** +Write the HMAC-SHA-3-512 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 72 bytes. +`dst` must point to 64 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_512( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + /** Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`. diff --git a/include/Hacl_Hash_Blake2b.h b/include/Hacl_Hash_Blake2b.h index 3403fc83..8c3f4405 100644 --- a/include/Hacl_Hash_Blake2b.h +++ b/include/Hacl_Hash_Blake2b.h @@ -53,18 +53,31 @@ typedef struct Hacl_Hash_Blake2b_blake2_params_s } Hacl_Hash_Blake2b_blake2_params; -typedef struct K____uint64_t___uint64_t__s +typedef struct Hacl_Hash_Blake2b_index_s { - uint64_t *fst; - uint64_t *snd; + uint8_t key_length; + uint8_t digest_length; + bool last_node; } -K____uint64_t___uint64_t_; +Hacl_Hash_Blake2b_index; + +#define HACL_HASH_BLAKE2B_BLOCK_BYTES (128U) + +#define HACL_HASH_BLAKE2B_OUT_BYTES (64U) + +#define HACL_HASH_BLAKE2B_KEY_BYTES (64U) + +#define HACL_HASH_BLAKE2B_SALT_BYTES (16U) + +#define HACL_HASH_BLAKE2B_PERSONAL_BYTES (16U) typedef struct Hacl_Hash_Blake2b_block_state_t_s { uint8_t fst; uint8_t snd; - K____uint64_t___uint64_t_ thd; + bool thd; + uint64_t *f3; + uint64_t *f4; } Hacl_Hash_Blake2b_block_state_t; @@ -92,7 +105,11 @@ The caller must satisfy the following requirements. */ Hacl_Hash_Blake2b_state_t -*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k); +*Hacl_Hash_Blake2b_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +); /** Specialized allocation function that picks default values for all @@ -116,7 +133,7 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void); /** General-purpose re-initialization function with parameters and -key. You cannot change digest_length or key_length, meaning those values in +key. 
You cannot change digest_length, key_length, or last_node, meaning those values in the parameters object must be the same as originally decided via one of the malloc functions. All other values of the parameter can be changed. The behavior is unspecified if you violate this precondition. @@ -159,10 +176,14 @@ at least `digest_length` bytes, where `digest_length` was determined by your choice of `malloc` function. Concretely, if you used `malloc` or `malloc_with_key`, then the expected length is 32 for S, or 64 for B (default digest length). If you used `malloc_with_params_and_key`, then the expected -length is whatever you chose for the `digest_length` field of your -parameters. +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_info(Hacl_Hash_Blake2b_state_t *s); /** Free state function when there is no key @@ -198,10 +219,10 @@ Hacl_Hash_Blake2b_hash_with_key( Write the BLAKE2b digest of message `input` using key `key` and parameters `params` into `output`. The `key` array must be of length `params.key_length`. The `output` array must be of length -`params.digest_length`. +`params.digest_length`. */ void -Hacl_Hash_Blake2b_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/Hacl_Hash_Blake2b_Simd256.h b/include/Hacl_Hash_Blake2b_Simd256.h index af309dc8..446b1cd5 100644 --- a/include/Hacl_Hash_Blake2b_Simd256.h +++ b/include/Hacl_Hash_Blake2b_Simd256.h @@ -40,18 +40,23 @@ extern "C" { #include "Hacl_Hash_Blake2b.h" #include "libintvector.h" -typedef struct K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256__s -{ - Lib_IntVector_Intrinsics_vec256 *fst; - Lib_IntVector_Intrinsics_vec256 *snd; -} -K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_; +#define HACL_HASH_BLAKE2B_SIMD256_BLOCK_BYTES (128U) + +#define HACL_HASH_BLAKE2B_SIMD256_OUT_BYTES (64U) + +#define HACL_HASH_BLAKE2B_SIMD256_KEY_BYTES (64U) + +#define HACL_HASH_BLAKE2B_SIMD256_SALT_BYTES (16U) + +#define HACL_HASH_BLAKE2B_SIMD256_PERSONAL_BYTES (16U) typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s { uint8_t fst; uint8_t snd; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ thd; + bool thd; + Lib_IntVector_Intrinsics_vec256 *f3; + Lib_IntVector_Intrinsics_vec256 *f4; } Hacl_Hash_Blake2b_Simd256_block_state_t; @@ -64,34 +69,54 @@ typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s Hacl_Hash_Blake2b_Simd256_state_t; /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (256 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. 
In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 256 for S, 64 for B. +- The digest_length must not exceed 256 for S, 64 for B. + */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ); /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (256 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 256 for S, 64 for B. + */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk); /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void); /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( @@ -101,21 +126,27 @@ Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( ); /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k); /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. 
The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_Simd256_update( @@ -125,10 +156,19 @@ Hacl_Hash_Blake2b_Simd256_update( ); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 256 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_256_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void -Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_Simd256_info(Hacl_Hash_Blake2b_Simd256_state_t *s); /** Free state function when there is no key @@ -136,7 +176,7 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state); /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state); @@ -161,8 +201,14 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ void -Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/Hacl_Hash_Blake2s.h b/include/Hacl_Hash_Blake2s.h index ac783473..bdf4a4b4 100644 --- a/include/Hacl_Hash_Blake2s.h +++ b/include/Hacl_Hash_Blake2s.h @@ -38,18 +38,23 @@ extern "C" { #include "Hacl_Streaming_Types.h" #include "Hacl_Hash_Blake2b.h" -typedef struct K____uint32_t___uint32_t__s -{ - uint32_t *fst; - uint32_t *snd; -} -K____uint32_t___uint32_t_; +#define HACL_HASH_BLAKE2S_BLOCK_BYTES (64U) + +#define HACL_HASH_BLAKE2S_OUT_BYTES (32U) + +#define HACL_HASH_BLAKE2S_KEY_BYTES (32U) + +#define HACL_HASH_BLAKE2S_SALT_BYTES (8U) + +#define HACL_HASH_BLAKE2S_PERSONAL_BYTES (8U) typedef struct Hacl_Hash_Blake2s_block_state_t_s { uint8_t fst; uint8_t snd; - K____uint32_t___uint32_t_ thd; + bool thd; + uint32_t *f3; + uint32_t *f4; } Hacl_Hash_Blake2s_block_state_t; @@ -62,30 +67,53 @@ typedef struct Hacl_Hash_Blake2s_state_t_s Hacl_Hash_Blake2s_state_t; /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (32 for S, 64 for B).) 
+ General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B. + */ Hacl_Hash_Blake2s_state_t -*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k); +*Hacl_Hash_Blake2s_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +); /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (32 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. + */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk); /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void); /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key_and_params( @@ -95,28 +123,44 @@ Hacl_Hash_Blake2s_reset_with_key_and_params( ); /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k); /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. 
This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_info(Hacl_Hash_Blake2s_state_t *s); /** Free state function when there is no key @@ -124,7 +168,7 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state); /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state); @@ -148,8 +192,14 @@ Hacl_Hash_Blake2s_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
+*/ void -Hacl_Hash_Blake2s_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/Hacl_Hash_Blake2s_Simd128.h b/include/Hacl_Hash_Blake2s_Simd128.h index d725ee86..f1e0b641 100644 --- a/include/Hacl_Hash_Blake2s_Simd128.h +++ b/include/Hacl_Hash_Blake2s_Simd128.h @@ -39,18 +39,23 @@ extern "C" { #include "Hacl_Hash_Blake2b.h" #include "libintvector.h" -typedef struct K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128__s -{ - Lib_IntVector_Intrinsics_vec128 *fst; - Lib_IntVector_Intrinsics_vec128 *snd; -} -K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_; +#define HACL_HASH_BLAKE2S_SIMD128_BLOCK_BYTES (64U) + +#define HACL_HASH_BLAKE2S_SIMD128_OUT_BYTES (32U) + +#define HACL_HASH_BLAKE2S_SIMD128_KEY_BYTES (32U) + +#define HACL_HASH_BLAKE2S_SIMD128_SALT_BYTES (8U) + +#define HACL_HASH_BLAKE2S_SIMD128_PERSONAL_BYTES (8U) typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s { uint8_t fst; uint8_t snd; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ thd; + bool thd; + Lib_IntVector_Intrinsics_vec128 *f3; + Lib_IntVector_Intrinsics_vec128 *f4; } Hacl_Hash_Blake2s_Simd128_block_state_t; @@ -63,34 +68,54 @@ typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s Hacl_Hash_Blake2s_Simd128_state_t; /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (128 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 128 for S, 64 for B. +- The digest_length must not exceed 128 for S, 64 for B. + */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ); /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (128 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 128 for S, 64 for B. + */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk); /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. 
Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void); /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( @@ -100,21 +125,27 @@ Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( ); /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k); /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_Simd128_update( @@ -124,10 +155,19 @@ Hacl_Hash_Blake2s_Simd128_update( ); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 128 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_128_OUT_BYTES, then use the return value +to see how many bytes were actually written. 
*/ -void -Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_Simd128_info(Hacl_Hash_Blake2s_Simd128_state_t *s); /** Free state function when there is no key @@ -135,7 +175,7 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state); /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state); @@ -160,8 +200,14 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ void -Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/Hacl_Hash_SHA3.h b/include/Hacl_Hash_SHA3.h index 8fb78fcd..18f23d8d 100644 --- a/include/Hacl_Hash_SHA3.h +++ b/include/Hacl_Hash_SHA3.h @@ -117,7 +117,7 @@ void Hacl_Hash_SHA3_state_free(uint64_t *s); Absorb number of input blocks and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses an input of multiple of 168-bytes (SHAKE128 block size), + It processes an input of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block are ignored. The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] @@ -131,14 +131,14 @@ Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t Absorb a final partial block of input and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses a sequence of bytes at end of input buffer that is less + It processes a sequence of bytes at end of input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffer are ignored. The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] The argument `input` (IN) points to `inputByteLen` bytes of valid memory, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffer must be passed to `inputByteLen` including the number of full-block bytes at start of input buffer that are ignored */ diff --git a/include/Hacl_Hash_SHA3_Simd256.h b/include/Hacl_Hash_SHA3_Simd256.h index 617e8e34..72162d43 100644 --- a/include/Hacl_Hash_SHA3_Simd256.h +++ b/include/Hacl_Hash_SHA3_Simd256.h @@ -139,12 +139,12 @@ void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s); Absorb number of blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses an inputs of multiple of 168-bytes (SHAKE128 block size), + It processes an inputs of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block for each buffer are ignored. 
The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void @@ -161,15 +161,15 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( Absorb a final partial blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses a sequence of bytes at end of each input buffer that is less + It processes a sequence of bytes at end of each input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffers are ignored. The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffers must be passed to `inputByteLen` including the number of full-block bytes at start of each input buffer that are ignored */ @@ -192,7 +192,7 @@ Squeeze a quadruple hash state to 4 output buffers The argument `state` (IN) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void diff --git a/include/internal/Hacl_Bignum_Base.h b/include/internal/Hacl_Bignum_Base.h index f2e282f4..4e0b35cb 100644 --- a/include/internal/Hacl_Bignum_Base.h +++ b/include/internal/Hacl_Bignum_Base.h @@ -72,9 +72,9 @@ Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t * memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint64_t *os = res; uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = res; os[i] = x; } } @@ -372,8 +372,8 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res) memset(res, 0U, (aLen + aLen) * sizeof (uint32_t)); for (uint32_t i0 = 0U; i0 < aLen; i0++) { - uint32_t *ab = a; uint32_t a_j = a[i0]; + uint32_t *ab = a; uint32_t *res_j = res + i0; uint32_t c = 0U; for (uint32_t i = 0U; i < i0 / 4U; i++) @@ -400,7 +400,16 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res) uint32_t r = c; res[i0 + i0] = r; } - uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); + uint32_t a_copy0[aLen + aLen]; + memset(a_copy0, 0U, (aLen + aLen) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); + uint32_t b_copy0[aLen + aLen]; + memset(b_copy0, 0U, (aLen + aLen) * sizeof (uint32_t)); + memcpy(a_copy0, res, (aLen + aLen) * sizeof (uint32_t)); + memcpy(b_copy0, res, (aLen + aLen) * sizeof (uint32_t)); + uint32_t r = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, a_copy0, b_copy0, res); + uint32_t c0 = r; KRML_MAYBE_UNUSED_VAR(c0); KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); uint32_t tmp[aLen + aLen]; @@ -413,7 +422,16 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res) tmp[2U * i] = lo; tmp[2U * i 
+ 1U] = hi; } - uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); + uint32_t a_copy[aLen + aLen]; + memset(a_copy, 0U, (aLen + aLen) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); + uint32_t b_copy[aLen + aLen]; + memset(b_copy, 0U, (aLen + aLen) * sizeof (uint32_t)); + memcpy(a_copy, res, (aLen + aLen) * sizeof (uint32_t)); + memcpy(b_copy, tmp, (aLen + aLen) * sizeof (uint32_t)); + uint32_t r0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, a_copy, b_copy, res); + uint32_t c1 = r0; KRML_MAYBE_UNUSED_VAR(c1); } @@ -423,8 +441,8 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res) memset(res, 0U, (aLen + aLen) * sizeof (uint64_t)); for (uint32_t i0 = 0U; i0 < aLen; i0++) { - uint64_t *ab = a; uint64_t a_j = a[i0]; + uint64_t *ab = a; uint64_t *res_j = res + i0; uint64_t c = 0ULL; for (uint32_t i = 0U; i < i0 / 4U; i++) @@ -451,7 +469,16 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res) uint64_t r = c; res[i0 + i0] = r; } - uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); + uint64_t a_copy0[aLen + aLen]; + memset(a_copy0, 0U, (aLen + aLen) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); + uint64_t b_copy0[aLen + aLen]; + memset(b_copy0, 0U, (aLen + aLen) * sizeof (uint64_t)); + memcpy(a_copy0, res, (aLen + aLen) * sizeof (uint64_t)); + memcpy(b_copy0, res, (aLen + aLen) * sizeof (uint64_t)); + uint64_t r = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, a_copy0, b_copy0, res); + uint64_t c0 = r; KRML_MAYBE_UNUSED_VAR(c0); KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); uint64_t tmp[aLen + aLen]; @@ -464,7 +491,16 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res) tmp[2U * i] = lo; tmp[2U * i + 1U] = hi; } - uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); + uint64_t a_copy[aLen + aLen]; + memset(a_copy, 0U, (aLen + aLen) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); + uint64_t b_copy[aLen + aLen]; + memset(b_copy, 0U, (aLen + aLen) * sizeof (uint64_t)); + memcpy(a_copy, res, (aLen + aLen) * sizeof (uint64_t)); + memcpy(b_copy, tmp, (aLen + aLen) * sizeof (uint64_t)); + uint64_t r0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, a_copy, b_copy, res); + uint64_t c1 = r0; KRML_MAYBE_UNUSED_VAR(c1); } diff --git a/include/internal/Hacl_Bignum_K256.h b/include/internal/Hacl_Bignum_K256.h index fe72fffe..33d77791 100644 --- a/include/internal/Hacl_Bignum_K256.h +++ b/include/internal/Hacl_Bignum_K256.h @@ -70,11 +70,7 @@ static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_ uint64_t f2 = f[2U]; uint64_t f3 = f[3U]; uint64_t f4 = f[4U]; - if (f4 > 0ULL) - { - return false; - } - if (f3 > 0ULL) + if (f4 > 0ULL || f3 > 0ULL) { return false; } @@ -104,11 +100,11 @@ static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b) 0U, 4U, 1U, - uint64_t *os = tmp; uint8_t *bj = b + i * 8U; uint64_t u = load64_be(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = tmp; os[i] = x;); uint64_t s0 = tmp[3U]; uint64_t s1 = tmp[2U]; @@ -589,7 +585,9 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool f[2U] = f2; f[3U] = f3; f[4U] = f4; - Hacl_K256_Field_fnormalize(f, f); + uint64_t f_copy[5U] = { 0U }; + 
memcpy(f_copy, f, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(f, f_copy); return; } } @@ -598,7 +596,9 @@ static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uin { for (uint32_t i = 0U; i < b; i++) { - Hacl_K256_Field_fsqr(out, out); + uint64_t x_copy[5U] = { 0U }; + memcpy(x_copy, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsqr(out, x_copy); } } @@ -607,7 +607,9 @@ static inline void Hacl_Impl_K256_Finv_fsquare_times(uint64_t *out, uint64_t *a, memcpy(out, a, 5U * sizeof (uint64_t)); for (uint32_t i = 0U; i < b; i++) { - Hacl_K256_Field_fsqr(out, out); + uint64_t x_copy[5U] = { 0U }; + memcpy(x_copy, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsqr(out, x_copy); } } @@ -618,29 +620,53 @@ static inline void Hacl_Impl_K256_Finv_fexp_223_23(uint64_t *out, uint64_t *x2, uint64_t x44[5U] = { 0U }; uint64_t x88[5U] = { 0U }; Hacl_Impl_K256_Finv_fsquare_times(x2, f, 1U); - Hacl_K256_Field_fmul(x2, x2, f); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, x2, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x2, f1_copy, f); Hacl_Impl_K256_Finv_fsquare_times(x3, x2, 1U); - Hacl_K256_Field_fmul(x3, x3, f); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x3, f1_copy0, f); Hacl_Impl_K256_Finv_fsquare_times(out, x3, 3U); - Hacl_K256_Field_fmul(out, out, x3); + uint64_t f1_copy1[5U] = { 0U }; + memcpy(f1_copy1, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy1, x3); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U); - Hacl_K256_Field_fmul(out, out, x3); + uint64_t f1_copy2[5U] = { 0U }; + memcpy(f1_copy2, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy2, x3); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U); - Hacl_K256_Field_fmul(out, out, x2); + uint64_t f1_copy3[5U] = { 0U }; + memcpy(f1_copy3, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy3, x2); Hacl_Impl_K256_Finv_fsquare_times(x22, out, 11U); - Hacl_K256_Field_fmul(x22, x22, out); + uint64_t f1_copy4[5U] = { 0U }; + memcpy(f1_copy4, x22, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x22, f1_copy4, out); Hacl_Impl_K256_Finv_fsquare_times(x44, x22, 22U); - Hacl_K256_Field_fmul(x44, x44, x22); + uint64_t f1_copy5[5U] = { 0U }; + memcpy(f1_copy5, x44, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x44, f1_copy5, x22); Hacl_Impl_K256_Finv_fsquare_times(x88, x44, 44U); - Hacl_K256_Field_fmul(x88, x88, x44); + uint64_t f1_copy6[5U] = { 0U }; + memcpy(f1_copy6, x88, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x88, f1_copy6, x44); Hacl_Impl_K256_Finv_fsquare_times(out, x88, 88U); - Hacl_K256_Field_fmul(out, out, x88); + uint64_t f1_copy7[5U] = { 0U }; + memcpy(f1_copy7, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy7, x88); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 44U); - Hacl_K256_Field_fmul(out, out, x44); + uint64_t f1_copy8[5U] = { 0U }; + memcpy(f1_copy8, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy8, x44); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U); - Hacl_K256_Field_fmul(out, out, x3); + uint64_t f1_copy9[5U] = { 0U }; + memcpy(f1_copy9, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy9, x3); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 23U); - Hacl_K256_Field_fmul(out, out, x22); + uint64_t f1_copy10[5U] = { 0U }; + memcpy(f1_copy10, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy10, x22); } static inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f) @@ -648,11 +674,17 @@ static 
inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f) uint64_t x2[5U] = { 0U }; Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 5U); - Hacl_K256_Field_fmul(out, out, f); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy, f); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U); - Hacl_K256_Field_fmul(out, out, x2); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy0, x2); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U); - Hacl_K256_Field_fmul(out, out, f); + uint64_t f1_copy1[5U] = { 0U }; + memcpy(f1_copy1, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy1, f); } static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f) @@ -660,7 +692,9 @@ static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f) uint64_t x2[5U] = { 0U }; Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 6U); - Hacl_K256_Field_fmul(out, out, x2); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, out, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(out, f1_copy, x2); Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U); } diff --git a/include/internal/Hacl_Frodo_KEM.h b/include/internal/Hacl_Frodo_KEM.h index 34b1816a..78593991 100644 --- a/include/internal/Hacl_Frodo_KEM.h +++ b/include/internal/Hacl_Frodo_KEM.h @@ -182,9 +182,9 @@ Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16 { for (uint32_t i = 0U; i < n1 * n2; i++) { - uint16_t *os = res; uint16_t u = load16_le(b + 2U * i); uint16_t x = u; + uint16_t *os = res; os[i] = x; } } diff --git a/include/internal/Hacl_HMAC.h b/include/internal/Hacl_HMAC.h index ad344c4c..a9719654 100644 --- a/include/internal/Hacl_HMAC.h +++ b/include/internal/Hacl_HMAC.h @@ -36,8 +36,10 @@ extern "C" { #include "krml/internal/target.h" #include "internal/Hacl_Krmllib.h" +#include "internal/Hacl_Hash_SHA3.h" #include "internal/Hacl_Hash_SHA2.h" #include "internal/Hacl_Hash_SHA1.h" +#include "internal/Hacl_Hash_MD5.h" #include "internal/Hacl_Hash_Blake2s.h" #include "internal/Hacl_Hash_Blake2b.h" #include "../Hacl_HMAC.h" diff --git a/include/internal/Hacl_Hash_Blake2b.h b/include/internal/Hacl_Hash_Blake2b.h index 6928d205..2dad4b01 100644 --- a/include/internal/Hacl_Hash_Blake2b.h +++ b/include/internal/Hacl_Hash_Blake2b.h @@ -38,12 +38,12 @@ extern "C" { #include "internal/Hacl_Impl_Blake2_Constants.h" #include "../Hacl_Hash_Blake2b.h" -typedef struct Hacl_Hash_Blake2b_index_s +typedef struct Hacl_Hash_Blake2b_params_and_key_s { - uint8_t key_length; - uint8_t digest_length; + Hacl_Hash_Blake2b_blake2_params *fst; + uint8_t *snd; } -Hacl_Hash_Blake2b_index; +Hacl_Hash_Blake2b_params_and_key; void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn); @@ -62,6 +62,7 @@ Hacl_Hash_Blake2b_update_last( uint32_t len, uint64_t *wv, uint64_t *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d @@ -69,13 +70,6 @@ Hacl_Hash_Blake2b_update_last( void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash); -typedef struct K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t__s -{ - Hacl_Hash_Blake2b_blake2_params *fst; - uint8_t *snd; -} -K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_; - #if defined(__cplusplus) } #endif diff --git a/include/internal/Hacl_Hash_Blake2b_Simd256.h b/include/internal/Hacl_Hash_Blake2b_Simd256.h index 
4dd986b2..04b091fc 100644 --- a/include/internal/Hacl_Hash_Blake2b_Simd256.h +++ b/include/internal/Hacl_Hash_Blake2b_Simd256.h @@ -58,6 +58,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec256 *wv, Lib_IntVector_Intrinsics_vec256 *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d diff --git a/include/internal/Hacl_Hash_Blake2s.h b/include/internal/Hacl_Hash_Blake2s.h index eccd92de..279c472e 100644 --- a/include/internal/Hacl_Hash_Blake2s.h +++ b/include/internal/Hacl_Hash_Blake2s.h @@ -56,6 +56,7 @@ Hacl_Hash_Blake2s_update_last( uint32_t len, uint32_t *wv, uint32_t *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d diff --git a/include/internal/Hacl_Hash_Blake2s_Simd128.h b/include/internal/Hacl_Hash_Blake2s_Simd128.h index 2c422949..77505dc2 100644 --- a/include/internal/Hacl_Hash_Blake2s_Simd128.h +++ b/include/internal/Hacl_Hash_Blake2s_Simd128.h @@ -58,6 +58,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec128 *wv, Lib_IntVector_Intrinsics_vec128 *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d diff --git a/include/internal/Hacl_Hash_SHA2.h b/include/internal/Hacl_Hash_SHA2.h index 7dade3f3..d61ef455 100644 --- a/include/internal/Hacl_Hash_SHA2.h +++ b/include/internal/Hacl_Hash_SHA2.h @@ -123,6 +123,8 @@ void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h); void Hacl_Hash_SHA2_sha224_init(uint32_t *hash); +void Hacl_Hash_SHA2_sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st); + void Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st); diff --git a/include/libintvector.h b/include/libintvector.h index 99d11336..11e914f7 100644 --- a/include/libintvector.h +++ b/include/libintvector.h @@ -19,7 +19,7 @@ #define Lib_IntVector_Intrinsics_bit_mask64(x) -((x) & 1) -#if defined(__x86_64__) || defined(_M_X64) +#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) #if defined(HACL_CAN_COMPILE_VEC128) diff --git a/include/msvc/Hacl_Bignum32.h b/include/msvc/Hacl_Bignum32.h index 84a839a9..709f22d9 100644 --- a/include/msvc/Hacl_Bignum32.h +++ b/include/msvc/Hacl_Bignum32.h @@ -56,9 +56,18 @@ of `len` unsigned 32-bit integers, i.e. uint32_t[len]. /** Write `a + b mod 2 ^ (32 * len)` in `res`. - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + This function returns the carry. + + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly equal memory + location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the carry is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_add(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res); @@ -67,82 +76,134 @@ Write `a - b mod 2 ^ (32 * len)` in `res`. This functions returns the carry. - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. 
Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the carry is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `(a + b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `(a - b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `a * b` in `res`. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. 
Must be + disjoint from the memory location of `b` and `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory locations of `a` and `b`. */ void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res); /** Write `a * a` in `res`. - The argument a is meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory location of `a`. */ void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res); /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `n`. + + @return `false` if any precondition is violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `1 < n` + - `n % 2 = 1` */ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res); /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. 
+ @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_vartime( @@ -157,22 +218,30 @@ Hacl_Bignum32_mod_exp_vartime( /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is constant-time over its argument `b`, at the cost of a slower + execution time than `mod_exp_vartime_*`. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_consttime( @@ -187,18 +256,23 @@ Hacl_Bignum32_mod_exp_consttime( /** Write `a ^ (-1) mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `n`. + + @return `false` if any preconditions (except the precondition: `n` is a prime) + are violated, `true` otherwise. 
+ + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `n % 2 = 1` + - `1 < n` + - `0 < a` + - `a < n` */ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res); @@ -212,15 +286,16 @@ Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint /** Heap-allocate and initialize a montgomery context. - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n + @param n Points to `len` number of limbs, i.e. `uint32_t[len]`. - The caller will need to call Hacl_Bignum32_mont_ctx_free on the return value - to avoid memory leaks. + @return A pointer to an allocated and initialized Montgomery context is returned. + Clients will need to call `Hacl_Bignum32_mont_ctx_free` on the return value to + avoid memory leaks. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` */ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum32_mont_ctx_init(uint32_t len, uint32_t *n); @@ -228,16 +303,18 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 /** Deallocate the memory previously allocated by Hacl_Bignum32_mont_ctx_init. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. */ void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k); /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. */ void Hacl_Bignum32_mod_precomp( @@ -249,21 +326,25 @@ Hacl_Bignum32_mod_precomp( /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. 
+ @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_vartime_precomp( @@ -277,21 +358,25 @@ Hacl_Bignum32_mod_exp_vartime_precomp( /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + execution time than `mod_exp_vartime_*`. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_consttime_precomp( @@ -305,14 +390,17 @@ Hacl_Bignum32_mod_exp_consttime_precomp( /** Write `a ^ (-1) mod n` in `res`. - The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n + @param[in] k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. 
+ + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `0 < a` + - `a < n` */ void Hacl_Bignum32_mod_inv_prime_vartime_precomp( @@ -330,42 +418,48 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp( /** Load a bid-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b); /** Load a little-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b); /** Serialize a bignum into big-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res); /** Serialize a bignum into little-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res); @@ -378,14 +472,22 @@ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res); /** Returns 2^32 - 1 if a < b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if `a < b`, otherwise, `0`. 
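Purely as an illustration (not part of the generated header), the mask can drive a branchless
selection between two bignums loaded from caller-supplied 32-byte big-endian buffers `a_bytes`
and `b_bytes` (placeholder names). A 32-byte encoding loads into eight 32-bit limbs, and the
heap-allocated results must be released with free(3):

    uint32_t *a = Hacl_Bignum32_new_bn_from_bytes_be(32U, a_bytes);
    uint32_t *b = Hacl_Bignum32_new_bn_from_bytes_be(32U, b_bytes);
    if (a != NULL && b != NULL)
    {
      uint32_t mask = Hacl_Bignum32_lt_mask(8U, a, b);             // all ones iff a < b
      uint32_t larger[8U];
      for (uint32_t i = 0U; i < 8U; i++)
        larger[i] = (mask & b[i]) | (~mask & a[i]);                // branchless max(a, b)
    }
    free(a);
    free(b);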
*/ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b); /** Returns 2^32 - 1 if a = b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if a = b, otherwise, `0`. */ uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b); diff --git a/include/msvc/Hacl_HMAC.h b/include/msvc/Hacl_HMAC.h index e1dc04f2..0f6a5c27 100644 --- a/include/msvc/Hacl_HMAC.h +++ b/include/msvc/Hacl_HMAC.h @@ -35,11 +35,28 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" +#include "Hacl_Streaming_Types.h" #include "Hacl_Krmllib.h" +#include "Hacl_Hash_SHA3.h" #include "Hacl_Hash_SHA2.h" #include "Hacl_Hash_Blake2s.h" #include "Hacl_Hash_Blake2b.h" +/** +Write the HMAC-MD5 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. +`dst` must point to 16 bytes of memory. +*/ +void +Hacl_HMAC_compute_md5( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + /** Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`. @@ -55,6 +72,21 @@ Hacl_HMAC_compute_sha1( uint32_t data_len ); +/** +Write the HMAC-SHA-2-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 28 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha2_224( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + /** Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`. @@ -100,6 +132,66 @@ Hacl_HMAC_compute_sha2_512( uint32_t data_len ); +/** +Write the HMAC-SHA-3-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 144 bytes. +`dst` must point to 28 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_224( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + +/** +Write the HMAC-SHA-3-256 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 136 bytes. +`dst` must point to 32 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_256( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + +/** +Write the HMAC-SHA-3-384 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 104 bytes. +`dst` must point to 48 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_384( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + +/** +Write the HMAC-SHA-3-512 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 72 bytes. +`dst` must point to 64 bytes of memory. 
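As a usage sketch only (not part of the generated header), with caller-supplied placeholders
`key`, `key_len`, `msg`, and `msg_len`, a tag is produced into a buffer sized to the 64 bytes
required above:

    uint8_t tag[64U];
    Hacl_HMAC_compute_sha3_512(tag, key, key_len, msg, msg_len);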
+*/ +void +Hacl_HMAC_compute_sha3_512( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +); + /** Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`. diff --git a/include/msvc/Hacl_Hash_Blake2b.h b/include/msvc/Hacl_Hash_Blake2b.h index 3403fc83..fcc2d5df 100644 --- a/include/msvc/Hacl_Hash_Blake2b.h +++ b/include/msvc/Hacl_Hash_Blake2b.h @@ -53,6 +53,24 @@ typedef struct Hacl_Hash_Blake2b_blake2_params_s } Hacl_Hash_Blake2b_blake2_params; +typedef struct Hacl_Hash_Blake2b_index_s +{ + uint8_t key_length; + uint8_t digest_length; + bool last_node; +} +Hacl_Hash_Blake2b_index; + +#define HACL_HASH_BLAKE2B_BLOCK_BYTES (128U) + +#define HACL_HASH_BLAKE2B_OUT_BYTES (64U) + +#define HACL_HASH_BLAKE2B_KEY_BYTES (64U) + +#define HACL_HASH_BLAKE2B_SALT_BYTES (16U) + +#define HACL_HASH_BLAKE2B_PERSONAL_BYTES (16U) + typedef struct K____uint64_t___uint64_t__s { uint64_t *fst; @@ -64,7 +82,8 @@ typedef struct Hacl_Hash_Blake2b_block_state_t_s { uint8_t fst; uint8_t snd; - K____uint64_t___uint64_t_ thd; + bool thd; + K____uint64_t___uint64_t_ f3; } Hacl_Hash_Blake2b_block_state_t; @@ -92,7 +111,11 @@ The caller must satisfy the following requirements. */ Hacl_Hash_Blake2b_state_t -*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k); +*Hacl_Hash_Blake2b_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +); /** Specialized allocation function that picks default values for all @@ -116,7 +139,7 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void); /** General-purpose re-initialization function with parameters and -key. You cannot change digest_length or key_length, meaning those values in +key. You cannot change digest_length, key_length, or last_node, meaning those values in the parameters object must be the same as originally decided via one of the malloc functions. All other values of the parameter can be changed. The behavior is unspecified if you violate this precondition. @@ -159,10 +182,14 @@ at least `digest_length` bytes, where `digest_length` was determined by your choice of `malloc` function. Concretely, if you used `malloc` or `malloc_with_key`, then the expected length is 32 for S, or 64 for B (default digest length). If you used `malloc_with_params_and_key`, then the expected -length is whatever you chose for the `digest_length` field of your -parameters. +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_info(Hacl_Hash_Blake2b_state_t *s); /** Free state function when there is no key @@ -198,10 +225,10 @@ Hacl_Hash_Blake2b_hash_with_key( Write the BLAKE2b digest of message `input` using key `key` and parameters `params` into `output`. The `key` array must be of length `params.key_length`. The `output` array must be of length -`params.digest_length`. +`params.digest_length`. 
*/ void -Hacl_Hash_Blake2b_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/msvc/Hacl_Hash_Blake2b_Simd256.h b/include/msvc/Hacl_Hash_Blake2b_Simd256.h index af309dc8..f1799e25 100644 --- a/include/msvc/Hacl_Hash_Blake2b_Simd256.h +++ b/include/msvc/Hacl_Hash_Blake2b_Simd256.h @@ -40,6 +40,16 @@ extern "C" { #include "Hacl_Hash_Blake2b.h" #include "libintvector.h" +#define HACL_HASH_BLAKE2B_SIMD256_BLOCK_BYTES (128U) + +#define HACL_HASH_BLAKE2B_SIMD256_OUT_BYTES (64U) + +#define HACL_HASH_BLAKE2B_SIMD256_KEY_BYTES (64U) + +#define HACL_HASH_BLAKE2B_SIMD256_SALT_BYTES (16U) + +#define HACL_HASH_BLAKE2B_SIMD256_PERSONAL_BYTES (16U) + typedef struct K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256__s { Lib_IntVector_Intrinsics_vec256 *fst; @@ -51,7 +61,8 @@ typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s { uint8_t fst; uint8_t snd; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ thd; + bool thd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ f3; } Hacl_Hash_Blake2b_Simd256_block_state_t; @@ -64,34 +75,54 @@ typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s Hacl_Hash_Blake2b_Simd256_state_t; /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (256 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 256 for S, 64 for B. +- The digest_length must not exceed 256 for S, 64 for B. + */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ); /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (256 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 256 for S, 64 for B. + */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk); /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. 
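For illustration only (not part of the generated header): one state can serve several messages
by calling `reset` between them. `msg1`/`msg2` and their lengths are placeholders, and the
argument order of `update` is assumed to match the other streaming functions in this library
(state, chunk, chunk length):

    Hacl_Hash_Blake2b_Simd256_state_t *st = Hacl_Hash_Blake2b_Simd256_malloc();
    uint8_t out[HACL_HASH_BLAKE2B_SIMD256_OUT_BYTES];
    Hacl_Hash_Blake2b_Simd256_update(st, msg1, msg1_len);
    Hacl_Hash_Blake2b_Simd256_digest(st, out);                     // digest of msg1
    Hacl_Hash_Blake2b_Simd256_reset(st);                           // back to the initial, unkeyed state
    Hacl_Hash_Blake2b_Simd256_update(st, msg2, msg2_len);
    Hacl_Hash_Blake2b_Simd256_digest(st, out);                     // digest of msg2
    Hacl_Hash_Blake2b_Simd256_free(st);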
*/ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void); /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( @@ -101,21 +132,27 @@ Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( ); /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k); /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_Simd256_update( @@ -125,10 +162,19 @@ Hacl_Hash_Blake2b_Simd256_update( ); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 256 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_256_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void -Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_Simd256_info(Hacl_Hash_Blake2b_Simd256_state_t *s); /** Free state function when there is no key @@ -136,7 +182,7 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state); /** - Copying. 
The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state); @@ -161,8 +207,14 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ void -Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/msvc/Hacl_Hash_Blake2s.h b/include/msvc/Hacl_Hash_Blake2s.h index ac783473..870f1edc 100644 --- a/include/msvc/Hacl_Hash_Blake2s.h +++ b/include/msvc/Hacl_Hash_Blake2s.h @@ -38,6 +38,16 @@ extern "C" { #include "Hacl_Streaming_Types.h" #include "Hacl_Hash_Blake2b.h" +#define HACL_HASH_BLAKE2S_BLOCK_BYTES (64U) + +#define HACL_HASH_BLAKE2S_OUT_BYTES (32U) + +#define HACL_HASH_BLAKE2S_KEY_BYTES (32U) + +#define HACL_HASH_BLAKE2S_SALT_BYTES (8U) + +#define HACL_HASH_BLAKE2S_PERSONAL_BYTES (8U) + typedef struct K____uint32_t___uint32_t__s { uint32_t *fst; @@ -49,7 +59,8 @@ typedef struct Hacl_Hash_Blake2s_block_state_t_s { uint8_t fst; uint8_t snd; - K____uint32_t___uint32_t_ thd; + bool thd; + K____uint32_t___uint32_t_ f3; } Hacl_Hash_Blake2s_block_state_t; @@ -62,30 +73,53 @@ typedef struct Hacl_Hash_Blake2s_state_t_s Hacl_Hash_Blake2s_state_t; /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (32 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B. + */ Hacl_Hash_Blake2s_state_t -*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k); +*Hacl_Hash_Blake2s_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +); /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (32 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. 
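As a sketch only (not part of the generated header), a keyed BLAKE2s computation with a
full-length 32-byte key could look as follows; `key`, `msg`, and `msg_len` are placeholders
supplied by the caller:

    Hacl_Hash_Blake2s_state_t *st =
      Hacl_Hash_Blake2s_malloc_with_key(key, (uint8_t)HACL_HASH_BLAKE2S_KEY_BYTES);
    Hacl_Hash_Blake2s_update(st, msg, msg_len);
    uint8_t mac[HACL_HASH_BLAKE2S_OUT_BYTES];
    Hacl_Hash_Blake2s_digest(st, mac);
    Hacl_Hash_Blake2s_reset_with_key(st, key);                     // same key length on every reset
    Hacl_Hash_Blake2s_free(st);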
+ */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk); /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void); /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key_and_params( @@ -95,28 +129,44 @@ Hacl_Hash_Blake2s_reset_with_key_and_params( ); /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k); /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. 
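(Illustrative usage sketch, not part of the patch: the keyless streaming Blake2s API described above, using only declarations visible in this header, namely malloc, update, digest, and free. The helper name and buffers are hypothetical; note that digest now returns the number of bytes written.)

#include <stdint.h>
#include "Hacl_Hash_Blake2s.h"

/* Hypothetical helper: hash `len` bytes of `msg` with the keyless streaming API. */
static void example_blake2s_hash(uint8_t *msg, uint32_t len, uint8_t out[32])
{
  Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc();
  if (st == NULL)
    return;
  /* update: 0 = success, 1 = maximum total input length exceeded */
  if (Hacl_Hash_Blake2s_update(st, msg, len) == 0U)
  {
    /* digest returns digest_length, i.e. 32 for the default configuration */
    uint8_t written = Hacl_Hash_Blake2s_digest(st, out);
    (void)written;
  }
  Hacl_Hash_Blake2s_free(st);
}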
*/ -void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_info(Hacl_Hash_Blake2s_state_t *s); /** Free state function when there is no key @@ -124,7 +174,7 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state); /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state); @@ -148,8 +198,14 @@ Hacl_Hash_Blake2s_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ void -Hacl_Hash_Blake2s_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/msvc/Hacl_Hash_Blake2s_Simd128.h b/include/msvc/Hacl_Hash_Blake2s_Simd128.h index d725ee86..2bae1c8e 100644 --- a/include/msvc/Hacl_Hash_Blake2s_Simd128.h +++ b/include/msvc/Hacl_Hash_Blake2s_Simd128.h @@ -39,6 +39,16 @@ extern "C" { #include "Hacl_Hash_Blake2b.h" #include "libintvector.h" +#define HACL_HASH_BLAKE2S_SIMD128_BLOCK_BYTES (64U) + +#define HACL_HASH_BLAKE2S_SIMD128_OUT_BYTES (32U) + +#define HACL_HASH_BLAKE2S_SIMD128_KEY_BYTES (32U) + +#define HACL_HASH_BLAKE2S_SIMD128_SALT_BYTES (8U) + +#define HACL_HASH_BLAKE2S_SIMD128_PERSONAL_BYTES (8U) + typedef struct K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128__s { Lib_IntVector_Intrinsics_vec128 *fst; @@ -50,7 +60,8 @@ typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s { uint8_t fst; uint8_t snd; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ thd; + bool thd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ f3; } Hacl_Hash_Blake2s_Simd128_block_state_t; @@ -63,34 +74,54 @@ typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s Hacl_Hash_Blake2s_Simd128_state_t; /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (128 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 128 for S, 64 for B. +- The digest_length must not exceed 128 for S, 64 for B. + */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ); /** - State allocation function when there is just a custom key. 
All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (128 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 128 for S, 64 for B. + */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk); /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void); /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( @@ -100,21 +131,27 @@ Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( ); /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k); /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_Simd128_update( @@ -124,10 +161,19 @@ Hacl_Hash_Blake2s_Simd128_update( ); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. 
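(Illustrative sketch of the keyed flow documented above: the key length is fixed when the state is allocated with `malloc_with_key0`, and `reset_with_key` keeps that same length. The helper name and buffer sizes are hypothetical; 32 bytes is the maximum and default key/digest size for the S variant.)

#include <stdint.h>
#include "Hacl_Hash_Blake2s_Simd128.h"

/* Hypothetical helper: compute keyed digests of two messages with one state,
   re-keying in between. The key length (32 bytes here) cannot change after malloc. */
static void
example_keyed_blake2s_128(uint8_t key1[32], uint8_t key2[32],
                          uint8_t *m1, uint32_t m1_len,
                          uint8_t *m2, uint32_t m2_len,
                          uint8_t tag1[32], uint8_t tag2[32])
{
  Hacl_Hash_Blake2s_Simd128_state_t *st =
    Hacl_Hash_Blake2s_Simd128_malloc_with_key0(key1, 32U);
  if (st == NULL)
    return;
  if (Hacl_Hash_Blake2s_Simd128_update(st, m1, m1_len) == 0U)
    (void)Hacl_Hash_Blake2s_Simd128_digest(st, tag1);
  /* key2 must have the same length as the key passed to malloc_with_key0. */
  Hacl_Hash_Blake2s_Simd128_reset_with_key(st, key2);
  if (Hacl_Hash_Blake2s_Simd128_update(st, m2, m2_len) == 0U)
    (void)Hacl_Hash_Blake2s_Simd128_digest(st, tag2);
  Hacl_Hash_Blake2s_Simd128_free(st);
}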
Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 128 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_128_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void -Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output); +uint8_t Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *dst); + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_Simd128_info(Hacl_Hash_Blake2s_Simd128_state_t *s); /** Free state function when there is no key @@ -135,7 +181,7 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state); /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state); @@ -160,8 +206,14 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ void -Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/include/msvc/Hacl_Hash_SHA3.h b/include/msvc/Hacl_Hash_SHA3.h index 8fb78fcd..18f23d8d 100644 --- a/include/msvc/Hacl_Hash_SHA3.h +++ b/include/msvc/Hacl_Hash_SHA3.h @@ -117,7 +117,7 @@ void Hacl_Hash_SHA3_state_free(uint64_t *s); Absorb number of input blocks and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses an input of multiple of 168-bytes (SHAKE128 block size), + It processes an input of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block are ignored. The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] @@ -131,14 +131,14 @@ Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t Absorb a final partial block of input and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses a sequence of bytes at end of input buffer that is less + It processes a sequence of bytes at end of input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffer are ignored. 
The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] The argument `input` (IN) points to `inputByteLen` bytes of valid memory, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffer must be passed to `inputByteLen` including the number of full-block bytes at start of input buffer that are ignored */ diff --git a/include/msvc/Hacl_Hash_SHA3_Simd256.h b/include/msvc/Hacl_Hash_SHA3_Simd256.h index 617e8e34..72162d43 100644 --- a/include/msvc/Hacl_Hash_SHA3_Simd256.h +++ b/include/msvc/Hacl_Hash_SHA3_Simd256.h @@ -139,12 +139,12 @@ void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s); Absorb number of blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses an inputs of multiple of 168-bytes (SHAKE128 block size), + It processes an inputs of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block for each buffer are ignored. The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void @@ -161,15 +161,15 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( Absorb a final partial blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses a sequence of bytes at end of each input buffer that is less + It processes a sequence of bytes at end of each input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffers are ignored. 
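(Sketch of the intended call sequence for the incremental SHAKE128 API whose documentation is corrected above. Only `shake128_absorb_nblocks`, the absorb-final behaviour, and `state_free` appear in these hunks; `Hacl_Hash_SHA3_state_malloc` and `Hacl_Hash_SHA3_shake128_squeeze_nblocks` are assumed companions with the shapes shown below.)

#include <stdint.h>
#include "Hacl_Hash_SHA3.h"

/* Hypothetical sketch: absorb `input_len` bytes, then squeeze `out_len` bytes.
   168 is the SHAKE128 block (rate) size in bytes. */
static void
example_shake128(uint8_t *input, uint32_t input_len, uint8_t *out, uint32_t out_len)
{
  uint64_t *st = Hacl_Hash_SHA3_state_malloc();  /* assumed allocator for uint64_t[25] */
  if (st == NULL)
    return;
  /* Processes only the full 168-byte blocks; the trailing partial block is ignored. */
  Hacl_Hash_SHA3_shake128_absorb_nblocks(st, input, input_len);
  /* Processes the trailing partial block; the full input_len is still passed. */
  Hacl_Hash_SHA3_shake128_absorb_final(st, input, input_len);
  /* Assumed squeeze counterpart; out_len is expected to be a multiple of 168. */
  Hacl_Hash_SHA3_shake128_squeeze_nblocks(st, out, out_len);
  Hacl_Hash_SHA3_state_free(st);
}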
The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffers must be passed to `inputByteLen` including the number of full-block bytes at start of each input buffer that are ignored */ @@ -192,7 +192,7 @@ Squeeze a quadruple hash state to 4 output buffers The argument `state` (IN) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void diff --git a/include/msvc/internal/Hacl_Bignum_K256.h b/include/msvc/internal/Hacl_Bignum_K256.h index fe72fffe..d8212bab 100644 --- a/include/msvc/internal/Hacl_Bignum_K256.h +++ b/include/msvc/internal/Hacl_Bignum_K256.h @@ -70,11 +70,7 @@ static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_ uint64_t f2 = f[2U]; uint64_t f3 = f[3U]; uint64_t f4 = f[4U]; - if (f4 > 0ULL) - { - return false; - } - if (f3 > 0ULL) + if (f4 > 0ULL || f3 > 0ULL) { return false; } diff --git a/include/msvc/internal/Hacl_HMAC.h b/include/msvc/internal/Hacl_HMAC.h index ad344c4c..a9719654 100644 --- a/include/msvc/internal/Hacl_HMAC.h +++ b/include/msvc/internal/Hacl_HMAC.h @@ -36,8 +36,10 @@ extern "C" { #include "krml/internal/target.h" #include "internal/Hacl_Krmllib.h" +#include "internal/Hacl_Hash_SHA3.h" #include "internal/Hacl_Hash_SHA2.h" #include "internal/Hacl_Hash_SHA1.h" +#include "internal/Hacl_Hash_MD5.h" #include "internal/Hacl_Hash_Blake2s.h" #include "internal/Hacl_Hash_Blake2b.h" #include "../Hacl_HMAC.h" diff --git a/include/msvc/internal/Hacl_Hash_Blake2b.h b/include/msvc/internal/Hacl_Hash_Blake2b.h index 6928d205..2dad4b01 100644 --- a/include/msvc/internal/Hacl_Hash_Blake2b.h +++ b/include/msvc/internal/Hacl_Hash_Blake2b.h @@ -38,12 +38,12 @@ extern "C" { #include "internal/Hacl_Impl_Blake2_Constants.h" #include "../Hacl_Hash_Blake2b.h" -typedef struct Hacl_Hash_Blake2b_index_s +typedef struct Hacl_Hash_Blake2b_params_and_key_s { - uint8_t key_length; - uint8_t digest_length; + Hacl_Hash_Blake2b_blake2_params *fst; + uint8_t *snd; } -Hacl_Hash_Blake2b_index; +Hacl_Hash_Blake2b_params_and_key; void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn); @@ -62,6 +62,7 @@ Hacl_Hash_Blake2b_update_last( uint32_t len, uint64_t *wv, uint64_t *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d @@ -69,13 +70,6 @@ Hacl_Hash_Blake2b_update_last( void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash); -typedef struct K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t__s -{ - Hacl_Hash_Blake2b_blake2_params *fst; - uint8_t *snd; -} -K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_; - #if defined(__cplusplus) } #endif diff --git a/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h b/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h index 4dd986b2..04b091fc 100644 --- a/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h +++ b/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h @@ -58,6 +58,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec256 *wv, Lib_IntVector_Intrinsics_vec256 *hash, + bool 
last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d diff --git a/include/msvc/internal/Hacl_Hash_Blake2s.h b/include/msvc/internal/Hacl_Hash_Blake2s.h index eccd92de..279c472e 100644 --- a/include/msvc/internal/Hacl_Hash_Blake2s.h +++ b/include/msvc/internal/Hacl_Hash_Blake2s.h @@ -56,6 +56,7 @@ Hacl_Hash_Blake2s_update_last( uint32_t len, uint32_t *wv, uint32_t *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d diff --git a/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h b/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h index 2c422949..77505dc2 100644 --- a/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h +++ b/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h @@ -58,6 +58,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec128 *wv, Lib_IntVector_Intrinsics_vec128 *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d diff --git a/include/msvc/internal/Hacl_Hash_SHA2.h b/include/msvc/internal/Hacl_Hash_SHA2.h index 7dade3f3..d61ef455 100644 --- a/include/msvc/internal/Hacl_Hash_SHA2.h +++ b/include/msvc/internal/Hacl_Hash_SHA2.h @@ -123,6 +123,8 @@ void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h); void Hacl_Hash_SHA2_sha224_init(uint32_t *hash); +void Hacl_Hash_SHA2_sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st); + void Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st); diff --git a/include/msvc/libintvector.h b/include/msvc/libintvector.h index 99d11336..11e914f7 100644 --- a/include/msvc/libintvector.h +++ b/include/msvc/libintvector.h @@ -19,7 +19,7 @@ #define Lib_IntVector_Intrinsics_bit_mask64(x) -((x) & 1) -#if defined(__x86_64__) || defined(_M_X64) +#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) #if defined(HACL_CAN_COMPILE_VEC128) diff --git a/karamel/include/krml/c_endianness.h b/karamel/include/krml/c_endianness.h index 21d7e1b4..937d8d10 100644 --- a/karamel/include/krml/c_endianness.h +++ b/karamel/include/krml/c_endianness.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef __KRML_ENDIAN_H #define __KRML_ENDIAN_H diff --git a/karamel/include/krml/internal/builtin.h b/karamel/include/krml/internal/builtin.h index 6098f30b..bb47d64d 100644 --- a/karamel/include/krml/internal/builtin.h +++ b/karamel/include/krml/internal/builtin.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef __KRML_BUILTIN_H #define __KRML_BUILTIN_H diff --git a/karamel/include/krml/internal/callconv.h b/karamel/include/krml/internal/callconv.h index aeca0ba7..4bc0f878 100644 --- a/karamel/include/krml/internal/callconv.h +++ b/karamel/include/krml/internal/callconv.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef __KRML_CALLCONV_H #define __KRML_CALLCONV_H diff --git a/karamel/include/krml/internal/compat.h b/karamel/include/krml/internal/compat.h index b557bbc1..f206520f 100644 --- a/karamel/include/krml/internal/compat.h +++ b/karamel/include/krml/internal/compat.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. 
*/ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef KRML_COMPAT_H #define KRML_COMPAT_H diff --git a/karamel/include/krml/internal/debug.h b/karamel/include/krml/internal/debug.h index 786db147..97f06995 100644 --- a/karamel/include/krml/internal/debug.h +++ b/karamel/include/krml/internal/debug.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef __KRML_DEBUG_H #define __KRML_DEBUG_H diff --git a/karamel/include/krml/internal/target.h b/karamel/include/krml/internal/target.h index d4252a10..425ed282 100644 --- a/karamel/include/krml/internal/target.h +++ b/karamel/include/krml/internal/target.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef __KRML_TARGET_H #define __KRML_TARGET_H @@ -69,11 +69,21 @@ # endif #endif +#ifndef KRML_ATTRIBUTE_TARGET +# if defined(__GNUC__) +# define KRML_ATTRIBUTE_TARGET(x) __attribute__((target(x))) +# else +# define KRML_ATTRIBUTE_TARGET(x) +# endif +#endif + #ifndef KRML_NOINLINE # if defined(_MSC_VER) # define KRML_NOINLINE __declspec(noinline) # elif defined (__GNUC__) # define KRML_NOINLINE __attribute__((noinline,unused)) +# elif defined (__SUNPRO_C) +# define KRML_NOINLINE __attribute__((noinline)) # else # define KRML_NOINLINE # warning "The KRML_NOINLINE macro is not defined for this toolchain!" @@ -82,6 +92,20 @@ # endif #endif +#ifndef KRML_MUSTINLINE +# if defined(_MSC_VER) +# define KRML_MUSTINLINE inline __forceinline +# elif defined (__GNUC__) +# define KRML_MUSTINLINE inline __attribute__((always_inline)) +# elif defined (__SUNPRO_C) +# define KRML_MUSTINLINE inline __attribute__((always_inline)) +# else +# define KRML_MUSTINLINE inline +# warning "The KRML_MUSTINLINE macro defaults to plain inline for this toolchain!" +# warning "Please locate target.h and try to fill it out with a suitable definition for this compiler." +# endif +#endif + #ifndef KRML_PRE_ALIGN # ifdef _MSC_VER # define KRML_PRE_ALIGN(X) __declspec(align(X)) @@ -191,6 +215,8 @@ inline static int32_t krml_time(void) { #elif defined(__GNUC__) /* deprecated attribute is not defined in GCC < 4.5. */ # define KRML_DEPRECATED(x) +#elif defined(__SUNPRO_C) +# define KRML_DEPRECATED(x) __attribute__((deprecated(x))) #elif defined(_MSC_VER) # define KRML_DEPRECATED(x) __declspec(deprecated(x)) #endif diff --git a/karamel/include/krml/internal/types.h b/karamel/include/krml/internal/types.h index e41b39be..31476313 100644 --- a/karamel/include/krml/internal/types.h +++ b/karamel/include/krml/internal/types.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef KRML_TYPES_H #define KRML_TYPES_H diff --git a/karamel/include/krml/internal/wasmsupport.h b/karamel/include/krml/internal/wasmsupport.h index b44fa3f7..5aba9756 100644 --- a/karamel/include/krml/internal/wasmsupport.h +++ b/karamel/include/krml/internal/wasmsupport.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. 
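(The KRML_MUSTINLINE and KRML_ATTRIBUTE_TARGET macros added to target.h above are function qualifiers; a minimal sketch of their intended use follows. The helper functions are made up for illustration.)

#include <stdint.h>
#include "krml/internal/target.h"

/* Force-inlined on MSVC, GCC/Clang and Sun CC; degrades to plain `inline`
   on other toolchains. (Assumes 0 < n < 32.) */
static KRML_MUSTINLINE uint32_t rotl32(uint32_t x, uint32_t n)
{
  return (x << n) | (x >> (32U - n));
}

/* Compiled with AVX2 enabled on GCC/Clang; the attribute expands to nothing
   on compilers without __attribute__((target(...))). */
KRML_ATTRIBUTE_TARGET("avx2")
static uint32_t rotl32_avx2_variant(uint32_t x, uint32_t n)
{
  return rotl32(x, n);
}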
*/ /* This file is automatically included when compiling with -wasm -d force-c */ #define WasmSupport_check_buffer_size(X) diff --git a/karamel/include/krml/lowstar_endianness.h b/karamel/include/krml/lowstar_endianness.h index 1aa2ccd6..af6b882c 100644 --- a/karamel/include/krml/lowstar_endianness.h +++ b/karamel/include/krml/lowstar_endianness.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef __LOWSTAR_ENDIANNESS_H #define __LOWSTAR_ENDIANNESS_H diff --git a/karamel/krmllib/dist/minimal/FStar_UInt128.h b/karamel/krmllib/dist/minimal/FStar_UInt128.h index ecc90213..be32ad9b 100644 --- a/karamel/krmllib/dist/minimal/FStar_UInt128.h +++ b/karamel/krmllib/dist/minimal/FStar_UInt128.h @@ -1,6 +1,6 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. + Licensed under the Apache 2.0 and MIT Licenses. */ diff --git a/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h b/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h index 9e4e2290..d4a90220 100644 --- a/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h +++ b/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h @@ -1,6 +1,6 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. + Licensed under the Apache 2.0 and MIT Licenses. */ diff --git a/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h b/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h index 56a2454f..39ac471f 100644 --- a/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h +++ b/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h @@ -1,6 +1,6 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. + Licensed under the Apache 2.0 and MIT Licenses. */ diff --git a/karamel/krmllib/dist/minimal/LowStar_Endianness.h b/karamel/krmllib/dist/minimal/LowStar_Endianness.h index e851c15c..f95743d4 100644 --- a/karamel/krmllib/dist/minimal/LowStar_Endianness.h +++ b/karamel/krmllib/dist/minimal/LowStar_Endianness.h @@ -1,6 +1,6 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. + Licensed under the Apache 2.0 and MIT Licenses. */ diff --git a/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h b/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h index ae109004..10a4dc1a 100644 --- a/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h +++ b/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ /******************************************************************************/ /* Machine integers (128-bit arithmetic) */ diff --git a/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h b/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h index 6ff658f5..89bbc159 100644 --- a/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h +++ b/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. 
*/ /* This file was generated by KaRaMeL * then hand-edited to use MSVC intrinsics KaRaMeL invocation: diff --git a/karamel/krmllib/dist/minimal/fstar_uint128_struct_endianness.h b/karamel/krmllib/dist/minimal/fstar_uint128_struct_endianness.h index e2b6d628..bb736add 100644 --- a/karamel/krmllib/dist/minimal/fstar_uint128_struct_endianness.h +++ b/karamel/krmllib/dist/minimal/fstar_uint128_struct_endianness.h @@ -1,5 +1,5 @@ /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. - Licensed under the Apache 2.0 License. */ + Licensed under the Apache 2.0 and MIT Licenses. */ #ifndef FSTAR_UINT128_STRUCT_ENDIANNESS_H #define FSTAR_UINT128_STRUCT_ENDIANNESS_H diff --git a/ocaml/ctypes.depend b/ocaml/ctypes.depend index d94fad90..6007465b 100644 --- a/ocaml/ctypes.depend +++ b/ocaml/ctypes.depend @@ -1,4 +1,4 @@ -CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx 
lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx 
lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx +CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx 
lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx lib/Hacl_Streaming_Types_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmo: lib_gen/Hacl_Streaming_Types_gen.cmx: lib/Hacl_Streaming_Types_bindings.cmx @@ -27,10 +27,18 @@ lib/Hacl_Hash_Base_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_ lib/Hacl_Hash_Base_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_Base_gen.cmx: 
lib/Hacl_Hash_Base_bindings.cmx lib_gen/Hacl_Hash_Base_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_Base_bindings.cmx lib_gen/Hacl_Hash_Base_gen.cmx +lib/Hacl_Hash_MD5_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx +lib/Hacl_Hash_MD5_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo +lib_gen/Hacl_Hash_MD5_gen.cmx: lib/Hacl_Hash_MD5_bindings.cmx +lib_gen/Hacl_Hash_MD5_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_MD5_bindings.cmx lib_gen/Hacl_Hash_MD5_gen.cmx lib/Hacl_Hash_SHA1_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_SHA1_gen.cmx: lib/Hacl_Hash_SHA1_bindings.cmx lib_gen/Hacl_Hash_SHA1_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_SHA1_bindings.cmx lib_gen/Hacl_Hash_SHA1_gen.cmx +lib/Hacl_Hash_SHA3_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx +lib/Hacl_Hash_SHA3_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo +lib_gen/Hacl_Hash_SHA3_gen.cmx: lib/Hacl_Hash_SHA3_bindings.cmx +lib_gen/Hacl_Hash_SHA3_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_SHA3_bindings.cmx lib_gen/Hacl_Hash_SHA3_gen.cmx lib/Hacl_Hash_SHA2_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_SHA2_gen.cmx: lib/Hacl_Hash_SHA2_bindings.cmx @@ -47,10 +55,6 @@ lib/Hacl_HMAC_Blake2b_256_bindings.cmx: lib/Hacl_HMAC_Blake2b_256_bindings.cmo: lib_gen/Hacl_HMAC_Blake2b_256_gen.cmx: lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib_gen/Hacl_HMAC_Blake2b_256_gen.exe: lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib_gen/Hacl_HMAC_Blake2b_256_gen.cmx -lib/Hacl_Hash_SHA3_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx -lib/Hacl_Hash_SHA3_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo -lib_gen/Hacl_Hash_SHA3_gen.cmx: lib/Hacl_Hash_SHA3_bindings.cmx -lib_gen/Hacl_Hash_SHA3_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_SHA3_bindings.cmx lib_gen/Hacl_Hash_SHA3_gen.cmx lib/Hacl_SHA2_Types_bindings.cmx: lib/Hacl_SHA2_Types_bindings.cmo: lib_gen/Hacl_SHA2_Types_gen.cmx: lib/Hacl_SHA2_Types_bindings.cmx @@ -59,10 +63,6 @@ lib/Hacl_Hash_SHA3_Simd256_bindings.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmo: lib_gen/Hacl_Hash_SHA3_Simd256_gen.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib_gen/Hacl_Hash_SHA3_Simd256_gen.exe: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib_gen/Hacl_Hash_SHA3_Simd256_gen.cmx -lib/Hacl_Hash_MD5_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx -lib/Hacl_Hash_MD5_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo -lib_gen/Hacl_Hash_MD5_gen.cmx: lib/Hacl_Hash_MD5_bindings.cmx -lib_gen/Hacl_Hash_MD5_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx 
lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_MD5_bindings.cmx lib_gen/Hacl_Hash_MD5_gen.cmx lib/EverCrypt_Error_bindings.cmx: lib/EverCrypt_Error_bindings.cmo: lib_gen/EverCrypt_Error_gen.cmx: lib/EverCrypt_Error_bindings.cmx @@ -211,14 +211,14 @@ lib/Hacl_Bignum4096_bindings.cmx: lib/Hacl_Bignum_bindings.cmx lib/Hacl_Bignum_s lib/Hacl_Bignum4096_bindings.cmo: lib/Hacl_Bignum_bindings.cmo lib/Hacl_Bignum_stubs.cmo lib_gen/Hacl_Bignum4096_gen.cmx: lib/Hacl_Bignum4096_bindings.cmx lib_gen/Hacl_Bignum4096_gen.exe: lib/Hacl_Bignum_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_c_stubs.o lib/Hacl_Bignum4096_bindings.cmx lib_gen/Hacl_Bignum4096_gen.cmx -lib/Hacl_Chacha20_Vec32_bindings.cmx: -lib/Hacl_Chacha20_Vec32_bindings.cmo: -lib_gen/Hacl_Chacha20_Vec32_gen.cmx: lib/Hacl_Chacha20_Vec32_bindings.cmx -lib_gen/Hacl_Chacha20_Vec32_gen.exe: lib/Hacl_Chacha20_Vec32_bindings.cmx lib_gen/Hacl_Chacha20_Vec32_gen.cmx lib/EverCrypt_Ed25519_bindings.cmx: lib/EverCrypt_Ed25519_bindings.cmo: lib_gen/EverCrypt_Ed25519_gen.cmx: lib/EverCrypt_Ed25519_bindings.cmx lib_gen/EverCrypt_Ed25519_gen.exe: lib/EverCrypt_Ed25519_bindings.cmx lib_gen/EverCrypt_Ed25519_gen.cmx +lib/Hacl_Chacha20_Vec32_bindings.cmx: +lib/Hacl_Chacha20_Vec32_bindings.cmo: +lib_gen/Hacl_Chacha20_Vec32_gen.cmx: lib/Hacl_Chacha20_Vec32_bindings.cmx +lib_gen/Hacl_Chacha20_Vec32_gen.exe: lib/Hacl_Chacha20_Vec32_bindings.cmx lib_gen/Hacl_Chacha20_Vec32_gen.cmx lib/Hacl_Bignum4096_32_bindings.cmx: lib/Hacl_Bignum_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmo: lib/Hacl_Bignum_bindings.cmo lib/Hacl_Bignum_stubs.cmo lib_gen/Hacl_Bignum4096_32_gen.cmx: lib/Hacl_Bignum4096_32_bindings.cmx @@ -295,14 +295,14 @@ lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Imp lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx -lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx -lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo -lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx: lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx -lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx lib/EverCrypt_Poly1305_bindings.cmx: lib/EverCrypt_Poly1305_bindings.cmo: lib_gen/EverCrypt_Poly1305_gen.cmx: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.exe: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.cmx +lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx: 
lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx +lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo +lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx: lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx +lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA512_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx diff --git a/ocaml/lib/Hacl_HMAC_bindings.ml b/ocaml/lib/Hacl_HMAC_bindings.ml index 725d49b5..869e5c19 100644 --- a/ocaml/lib/Hacl_HMAC_bindings.ml +++ b/ocaml/lib/Hacl_HMAC_bindings.ml @@ -2,11 +2,21 @@ open Ctypes module Bindings(F:Cstubs.FOREIGN) = struct open F + let hacl_HMAC_compute_md5 = + foreign "Hacl_HMAC_compute_md5" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) let hacl_HMAC_compute_sha1 = foreign "Hacl_HMAC_compute_sha1" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) + let hacl_HMAC_compute_sha2_224 = + foreign "Hacl_HMAC_compute_sha2_224" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) let hacl_HMAC_compute_sha2_256 = foreign "Hacl_HMAC_compute_sha2_256" (ocaml_bytes @-> @@ -22,6 +32,26 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) + let hacl_HMAC_compute_sha3_224 = + foreign "Hacl_HMAC_compute_sha3_224" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) + let hacl_HMAC_compute_sha3_256 = + foreign "Hacl_HMAC_compute_sha3_256" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) + let hacl_HMAC_compute_sha3_384 = + foreign "Hacl_HMAC_compute_sha3_384" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) + let hacl_HMAC_compute_sha3_512 = + foreign "Hacl_HMAC_compute_sha3_512" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))))) let hacl_HMAC_compute_blake2s_32 = foreign "Hacl_HMAC_compute_blake2s_32" (ocaml_bytes @-> diff --git a/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml index 1c132a7a..8fdc5be6 100644 --- a/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml @@ -15,8 +15,8 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) - let hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas = - foreign "Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas" 
+ let hacl_Hash_Blake2b_Simd256_hash_with_key_and_params = + foreign "Hacl_Hash_Blake2b_Simd256_hash_with_key_and_params" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> diff --git a/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml index 7ba4fcf6..d57e8b56 100644 --- a/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml @@ -39,22 +39,26 @@ module Bindings(F:Cstubs.FOREIGN) = field hacl_Hash_Blake2b_index "key_length" uint8_t let hacl_Hash_Blake2b_index_digest_length = field hacl_Hash_Blake2b_index "digest_length" uint8_t + let hacl_Hash_Blake2b_index_last_node = + field hacl_Hash_Blake2b_index "last_node" bool let _ = seal hacl_Hash_Blake2b_index + type hacl_Hash_Blake2b_params_and_key = + [ `hacl_Hash_Blake2b_params_and_key ] structure + let (hacl_Hash_Blake2b_params_and_key : + [ `hacl_Hash_Blake2b_params_and_key ] structure typ) = + structure "Hacl_Hash_Blake2b_params_and_key_s" + let hacl_Hash_Blake2b_params_and_key_fst = + field hacl_Hash_Blake2b_params_and_key "fst" + (ptr hacl_Hash_Blake2b_blake2_params) + let hacl_Hash_Blake2b_params_and_key_snd = + field hacl_Hash_Blake2b_params_and_key "snd" (ptr uint8_t) + let _ = seal hacl_Hash_Blake2b_params_and_key let hacl_Hash_Blake2b_init = foreign "Hacl_Hash_Blake2b_init" ((ptr uint64_t) @-> (uint32_t @-> (uint32_t @-> (returning void)))) let hacl_Hash_Blake2b_finish = foreign "Hacl_Hash_Blake2b_finish" (uint32_t @-> (ocaml_bytes @-> ((ptr uint64_t) @-> (returning void)))) - type k____uint64_t___uint64_t_ = [ `k____uint64_t___uint64_t_ ] structure - let (k____uint64_t___uint64_t_ : - [ `k____uint64_t___uint64_t_ ] structure typ) = - structure "K____uint64_t___uint64_t__s" - let k____uint64_t___uint64_t__fst = - field k____uint64_t___uint64_t_ "fst" (ptr uint64_t) - let k____uint64_t___uint64_t__snd = - field k____uint64_t___uint64_t_ "snd" (ptr uint64_t) - let _ = seal k____uint64_t___uint64_t_ type hacl_Hash_Blake2b_block_state_t = [ `hacl_Hash_Blake2b_block_state_t ] structure let (hacl_Hash_Blake2b_block_state_t : @@ -65,7 +69,11 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2b_block_state_t_snd = field hacl_Hash_Blake2b_block_state_t "snd" uint8_t let hacl_Hash_Blake2b_block_state_t_thd = - field hacl_Hash_Blake2b_block_state_t "thd" k____uint64_t___uint64_t_ + field hacl_Hash_Blake2b_block_state_t "thd" bool + let hacl_Hash_Blake2b_block_state_t_f3 = + field hacl_Hash_Blake2b_block_state_t "f3" (ptr uint64_t) + let hacl_Hash_Blake2b_block_state_t_f4 = + field hacl_Hash_Blake2b_block_state_t "f4" (ptr uint64_t) let _ = seal hacl_Hash_Blake2b_block_state_t type hacl_Hash_Blake2b_state_t = [ `hacl_Hash_Blake2b_state_t ] structure let (hacl_Hash_Blake2b_state_t : @@ -82,7 +90,8 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2b_malloc_with_params_and_key = foreign "Hacl_Hash_Blake2b_malloc_with_params_and_key" ((ptr hacl_Hash_Blake2b_blake2_params) @-> - (ocaml_bytes @-> (returning (ptr hacl_Hash_Blake2b_state_t)))) + (bool @-> + (ocaml_bytes @-> (returning (ptr hacl_Hash_Blake2b_state_t))))) let hacl_Hash_Blake2b_malloc_with_key = foreign "Hacl_Hash_Blake2b_malloc_with_key" (ocaml_bytes @-> @@ -110,7 +119,11 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2b_digest = foreign "Hacl_Hash_Blake2b_digest" ((ptr hacl_Hash_Blake2b_state_t) @-> - (ocaml_bytes @-> (returning void))) + (ocaml_bytes @-> (returning uint8_t))) + let hacl_Hash_Blake2b_info = + foreign "Hacl_Hash_Blake2b_info" + ((ptr hacl_Hash_Blake2b_state_t) @-> + (returning 
hacl_Hash_Blake2b_index)) let hacl_Hash_Blake2b_free = foreign "Hacl_Hash_Blake2b_free" ((ptr hacl_Hash_Blake2b_state_t) @-> (returning void)) @@ -125,8 +138,8 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) - let hacl_Hash_Blake2b_hash_with_key_and_paramas = - foreign "Hacl_Hash_Blake2b_hash_with_key_and_paramas" + let hacl_Hash_Blake2b_hash_with_key_and_params = + foreign "Hacl_Hash_Blake2b_hash_with_key_and_params" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> diff --git a/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml index 6533ddbc..75fbbf39 100644 --- a/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml @@ -15,8 +15,8 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) - let hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas = - foreign "Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas" + let hacl_Hash_Blake2s_Simd128_hash_with_key_and_params = + foreign "Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> diff --git a/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml index f6c93e89..b40e2a00 100644 --- a/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml @@ -23,20 +23,12 @@ module Bindings(F:Cstubs.FOREIGN) = (uint32_t @-> ((ptr uint32_t) @-> ((ptr uint32_t) @-> - (uint64_t @-> - (uint32_t @-> (ocaml_bytes @-> (returning void))))))) + (bool @-> + (uint64_t @-> + (uint32_t @-> (ocaml_bytes @-> (returning void)))))))) let hacl_Hash_Blake2s_finish = foreign "Hacl_Hash_Blake2s_finish" (uint32_t @-> (ocaml_bytes @-> ((ptr uint32_t) @-> (returning void)))) - type k____uint32_t___uint32_t_ = [ `k____uint32_t___uint32_t_ ] structure - let (k____uint32_t___uint32_t_ : - [ `k____uint32_t___uint32_t_ ] structure typ) = - structure "K____uint32_t___uint32_t__s" - let k____uint32_t___uint32_t__fst = - field k____uint32_t___uint32_t_ "fst" (ptr uint32_t) - let k____uint32_t___uint32_t__snd = - field k____uint32_t___uint32_t_ "snd" (ptr uint32_t) - let _ = seal k____uint32_t___uint32_t_ type hacl_Hash_Blake2s_block_state_t = [ `hacl_Hash_Blake2s_block_state_t ] structure let (hacl_Hash_Blake2s_block_state_t : @@ -47,7 +39,11 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2s_block_state_t_snd = field hacl_Hash_Blake2s_block_state_t "snd" uint8_t let hacl_Hash_Blake2s_block_state_t_thd = - field hacl_Hash_Blake2s_block_state_t "thd" k____uint32_t___uint32_t_ + field hacl_Hash_Blake2s_block_state_t "thd" bool + let hacl_Hash_Blake2s_block_state_t_f3 = + field hacl_Hash_Blake2s_block_state_t "f3" (ptr uint32_t) + let hacl_Hash_Blake2s_block_state_t_f4 = + field hacl_Hash_Blake2s_block_state_t "f4" (ptr uint32_t) let _ = seal hacl_Hash_Blake2s_block_state_t type hacl_Hash_Blake2s_state_t = [ `hacl_Hash_Blake2s_state_t ] structure let (hacl_Hash_Blake2s_state_t : @@ -64,7 +60,8 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2s_malloc_with_params_and_key = foreign "Hacl_Hash_Blake2s_malloc_with_params_and_key" ((ptr hacl_Hash_Blake2b_blake2_params) @-> - (ocaml_bytes @-> (returning (ptr hacl_Hash_Blake2s_state_t)))) + (bool @-> + (ocaml_bytes @-> (returning (ptr hacl_Hash_Blake2s_state_t))))) let hacl_Hash_Blake2s_malloc_with_key = foreign "Hacl_Hash_Blake2s_malloc_with_key" (ocaml_bytes @-> @@ -92,7 +89,11 @@ module 
Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2s_digest = foreign "Hacl_Hash_Blake2s_digest" ((ptr hacl_Hash_Blake2s_state_t) @-> - (ocaml_bytes @-> (returning void))) + (ocaml_bytes @-> (returning uint8_t))) + let hacl_Hash_Blake2s_info = + foreign "Hacl_Hash_Blake2s_info" + ((ptr hacl_Hash_Blake2s_state_t) @-> + (returning hacl_Hash_Blake2b_index)) let hacl_Hash_Blake2s_free = foreign "Hacl_Hash_Blake2s_free" ((ptr hacl_Hash_Blake2s_state_t) @-> (returning void)) @@ -107,8 +108,8 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) - let hacl_Hash_Blake2s_hash_with_key_and_paramas = - foreign "Hacl_Hash_Blake2s_hash_with_key_and_paramas" + let hacl_Hash_Blake2s_hash_with_key_and_params = + foreign "Hacl_Hash_Blake2s_hash_with_key_and_params" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> diff --git a/ocaml/lib/Hacl_Hash_SHA2_bindings.ml b/ocaml/lib/Hacl_Hash_SHA2_bindings.ml index f0573724..7475a850 100644 --- a/ocaml/lib/Hacl_Hash_SHA2_bindings.ml +++ b/ocaml/lib/Hacl_Hash_SHA2_bindings.ml @@ -22,6 +22,9 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_SHA2_sha224_init = foreign "Hacl_Hash_SHA2_sha224_init" ((ptr uint32_t) @-> (returning void)) + let hacl_Hash_SHA2_sha224_update_nblocks = + foreign "Hacl_Hash_SHA2_sha224_update_nblocks" + (uint32_t @-> (ocaml_bytes @-> ((ptr uint32_t) @-> (returning void)))) let hacl_Hash_SHA2_sha224_update_last = foreign "Hacl_Hash_SHA2_sha224_update_last" (uint64_t @-> diff --git a/src/EverCrypt_AEAD.c b/src/EverCrypt_AEAD.c index b0fb4826..89965054 100644 --- a/src/EverCrypt_AEAD.c +++ b/src/EverCrypt_AEAD.c @@ -538,26 +538,27 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( KRML_MAYBE_UNUSED_VAR(cipher); KRML_MAYBE_UNUSED_VAR(tag); #if HACL_CAN_COMPILE_VALE - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 176U; + uint8_t ek0[480U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 176U; aes128_key_expansion(k, keys_b0); aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 304U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 304U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 176U; uint8_t tmp_iv[16U] = { 0U }; @@ -637,8 +638,9 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; #else KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n", @@ -680,26 +682,27 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( KRML_MAYBE_UNUSED_VAR(cipher); KRML_MAYBE_UNUSED_VAR(tag); #if HACL_CAN_COMPILE_VALE - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 240U; + uint8_t ek0[544U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 240U; aes256_key_expansion(k, keys_b0); 
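/*
 * Illustrative sketch, not from the generated patch: the EverCrypt_AEAD hunks
 * above (and the AES-256 one continuing below) all apply the same mechanical
 * change. The outer expanded-key buffer is renamed ek/ek0 so the inner pointer
 * no longer shadows it, and KRML_HOST_IGNORE(<error constant>) is replaced by
 * a local error code `r` assigned on every branch and then marked unused. A
 * minimal stand-alone version of that pattern, with a hypothetical validate()
 * helper and a plain enum instead of the real EverCrypt entry points:
 */
#include <stdint.h>

typedef enum { ERR_SUCCESS = 0, ERR_INVALID_IV_LENGTH = 1 } err_t;

static err_t validate(uint32_t iv_len)
{
  err_t r;                      /* assigned on every path, like `r` above       */
  if (iv_len == 0U)
  {
    r = ERR_INVALID_IV_LENGTH;  /* recorded for the proof-level result ...      */
  }
  else
  {
    r = ERR_SUCCESS;
  }
  (void)r;                      /* ... then silenced, as KRML_MAYBE_UNUSED_VAR does */
  return ERR_SUCCESS;           /* the no_check variants above return Success here */
}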
aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 368U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 368U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 240U; uint8_t tmp_iv[16U] = { 0U }; @@ -779,8 +782,9 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; #else KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n", @@ -821,26 +825,27 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) { - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 176U; + uint8_t ek0[480U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 176U; aes128_key_expansion(k, keys_b0); aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 304U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 304U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 176U; uint8_t tmp_iv[16U] = { 0U }; @@ -920,8 +925,9 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; } return EverCrypt_Error_UnsupportedAlgorithm; @@ -960,26 +966,27 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) { - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 240U; + uint8_t ek0[544U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 240U; aes256_key_expansion(k, keys_b0); aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } 
else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 368U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 368U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 240U; uint8_t tmp_iv[16U] = { 0U }; @@ -1059,8 +1066,9 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; } return EverCrypt_Error_UnsupportedAlgorithm; diff --git a/src/EverCrypt_HKDF.c b/src/EverCrypt_HKDF.c index 773f86b8..de54cafc 100644 --- a/src/EverCrypt_HKDF.c +++ b/src/EverCrypt_HKDF.c @@ -43,36 +43,45 @@ expand_sha1( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -98,36 +107,45 @@ expand_sha2_256( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -153,36 +171,45 @@ expand_sha2_384( 
KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -208,36 +235,45 @@ expand_sha2_512( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -263,36 +299,45 @@ expand_blake2s( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U); } 
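/*
 * Illustrative sketch, not from the generated patch: every expand_* function
 * in this file is an instance of HKDF-Expand (RFC 5869), with
 * T(1) = HMAC(prk, info || 0x01) and T(i) = HMAC(prk, T(i-1) || info || i).
 * The refactoring above keeps T(i) in a separate `tag` buffer and copies it to
 * the front of `text` before each call, instead of aliasing `tag` with `text`.
 * Assuming a generic hmac() callback with the same argument order as the
 * EverCrypt_HMAC_compute_* functions, the round loop is:
 */
#include <stdint.h>
#include <string.h>

typedef void (*hmac_fn)(uint8_t *dst, uint8_t *key, uint32_t key_len,
                        uint8_t *data, uint32_t data_len);

static void hkdf_expand_sketch(hmac_fn hmac, uint32_t tlen, /* digest length */
                               uint8_t *okm, uint32_t len,
                               uint8_t *prk, uint32_t prklen,
                               uint8_t *info, uint32_t infolen)
{
  uint32_t n = len / tlen;
  uint8_t text[256] = { 0U };   /* tlen + infolen + 1 <= 256 for this sketch */
  uint8_t tag[64];              /* tlen <= 64 for this sketch                */
  memcpy(text + tlen, info, infolen);
  for (uint32_t i = 0U; i < n; i++)
  {
    text[tlen + infolen] = (uint8_t)(i + 1U);              /* counter byte   */
    if (i == 0U)
    {
      hmac(tag, prk, prklen, text + tlen, infolen + 1U);   /* T(1)           */
    }
    else
    {
      memcpy(text, tag, tlen);                             /* prepend T(i-1) */
      hmac(tag, prk, prklen, text, tlen + infolen + 1U);   /* T(i)           */
    }
    memcpy(okm + i * tlen, tag, tlen);
  }
  /* A final partial block, when len is not a multiple of tlen, is produced
     the same way and truncated to len - n * tlen, as in the code above. */
}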
memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -318,36 +363,45 @@ expand_blake2b( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } diff --git a/src/EverCrypt_HMAC.c b/src/EverCrypt_HMAC.c index 90bcaaac..ec48f6e0 100644 --- a/src/EverCrypt_HMAC.c +++ b/src/EverCrypt_HMAC.c @@ -81,10 +81,8 @@ EverCrypt_HMAC_compute_sha1( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -105,26 +103,23 @@ EverCrypt_HMAC_compute_sha1( { Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U }; - uint8_t *dst1 = ipad; if (data_len == 0U) { Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U); @@ -153,6 +148,7 @@ EverCrypt_HMAC_compute_sha1( Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks); Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); } + uint8_t 
*dst1 = ipad; Hacl_Hash_SHA1_finish(s, dst1); uint8_t *hash1 = ipad; Hacl_Hash_SHA1_init(s); @@ -189,10 +185,8 @@ EverCrypt_HMAC_compute_sha2_256( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -213,19 +207,17 @@ EverCrypt_HMAC_compute_sha2_256( { EverCrypt_HMAC_hash_256(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -236,11 +228,10 @@ EverCrypt_HMAC_compute_sha2_256( 0U, 8U, 1U, - uint32_t *os = st; uint32_t x = Hacl_Hash_SHA2_h256[i]; + uint32_t *os = st; os[i] = x;); uint32_t *s = st; - uint8_t *dst1 = ipad; if (data_len == 0U) { Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s); @@ -272,6 +263,7 @@ EverCrypt_HMAC_compute_sha2_256( rem, s); } + uint8_t *dst1 = ipad; Hacl_Hash_SHA2_sha256_finish(s, dst1); uint8_t *hash1 = ipad; Hacl_Hash_SHA2_sha256_init(s); @@ -311,10 +303,8 @@ EverCrypt_HMAC_compute_sha2_384( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -335,19 +325,17 @@ EverCrypt_HMAC_compute_sha2_384( { Hacl_Hash_SHA2_hash_384(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -358,11 +346,10 @@ EverCrypt_HMAC_compute_sha2_384( 0U, 8U, 1U, - uint64_t *os = st; uint64_t x = Hacl_Hash_SHA2_h384[i]; + uint64_t *os = st; os[i] = x;); uint64_t *s = st; - uint8_t *dst1 = ipad; if (data_len == 0U) { Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL), @@ -400,6 +387,7 @@ EverCrypt_HMAC_compute_sha2_384( rem, s); } + uint8_t *dst1 = ipad; Hacl_Hash_SHA2_sha384_finish(s, dst1); uint8_t *hash1 = ipad; Hacl_Hash_SHA2_sha384_init(s); @@ -441,10 +429,8 @@ EverCrypt_HMAC_compute_sha2_512( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; 
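/*
 * Illustrative sketch, not from the generated patch: each compute_* function
 * in this file is HMAC (RFC 2104) specialized to one hash,
 * dst = H((K' ^ opad) || H((K' ^ ipad) || data)), where K' is the key padded
 * (or first hashed) to the block size B. The hunks above only replace the
 * runtime-sized VLAs with fixed 64/128-byte blocks, since B is a constant per
 * hash. A non-streaming version of the construction, with a hypothetical
 * one-shot hash() callback and small fixed bounds chosen for this sketch:
 */
#include <stdint.h>
#include <string.h>

typedef void (*hash_fn)(uint8_t *dst, const uint8_t *msg, uint32_t len);

static void hmac_sketch(hash_fn hash, uint32_t B /* block */, uint32_t D /* digest */,
                        uint8_t *dst, const uint8_t *key, uint32_t key_len,
                        const uint8_t *data, uint32_t data_len)
{
  uint8_t key_block[128] = { 0U };   /* B <= 128 for the hashes above              */
  uint8_t ipad[128 + 1024];          /* (K' ^ ipad) || data, data_len <= 1024 here */
  uint8_t opad[128 + 64];            /* (K' ^ opad) || inner digest, D <= 64       */
  uint8_t inner[64];
  if (key_len <= B)
  {
    memcpy(key_block, key, key_len); /* short keys are zero-padded                 */
  }
  else
  {
    hash(key_block, key, key_len);   /* long keys are hashed down to D bytes       */
  }
  for (uint32_t i = 0U; i < B; i++)
  {
    ipad[i] = (uint8_t)(0x36U ^ key_block[i]);
    opad[i] = (uint8_t)(0x5cU ^ key_block[i]);
  }
  memcpy(ipad + B, data, data_len);
  hash(inner, ipad, B + data_len);   /* inner = H((K' ^ ipad) || data); the code
                                        above feeds ipad and data incrementally    */
  memcpy(opad + B, inner, D);
  hash(dst, opad, B + D);            /* dst   = H((K' ^ opad) || inner)            */
}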
uint32_t ite; if (key_len <= 128U) @@ -465,19 +451,17 @@ EverCrypt_HMAC_compute_sha2_512( { Hacl_Hash_SHA2_hash_512(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -488,11 +472,10 @@ EverCrypt_HMAC_compute_sha2_512( 0U, 8U, 1U, - uint64_t *os = st; uint64_t x = Hacl_Hash_SHA2_h512[i]; + uint64_t *os = st; os[i] = x;); uint64_t *s = st; - uint8_t *dst1 = ipad; if (data_len == 0U) { Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL), @@ -530,6 +513,7 @@ EverCrypt_HMAC_compute_sha2_512( rem, s); } + uint8_t *dst1 = ipad; Hacl_Hash_SHA2_sha512_finish(s, dst1); uint8_t *hash1 = ipad; Hacl_Hash_SHA2_sha512_init(s); @@ -571,10 +555,8 @@ EverCrypt_HMAC_compute_blake2s( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -595,19 +577,17 @@ EverCrypt_HMAC_compute_blake2s( { Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -616,11 +596,10 @@ EverCrypt_HMAC_compute_blake2s( uint32_t s[16U] = { 0U }; Hacl_Hash_Blake2s_init(s, 0U, 32U); uint32_t *s0 = s; - uint8_t *dst1 = ipad; if (data_len == 0U) { uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad); + Hacl_Hash_Blake2s_update_last(64U, wv, s0, false, 0ULL, 64U, ipad); } else { @@ -655,10 +634,12 @@ EverCrypt_HMAC_compute_blake2s( Hacl_Hash_Blake2s_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); } + uint8_t *dst1 = ipad; Hacl_Hash_Blake2s_finish(32U, dst1, s0); uint8_t *hash1 = ipad; Hacl_Hash_Blake2s_init(s0, 0U, 32U); @@ -693,6 +674,7 @@ EverCrypt_HMAC_compute_blake2s( Hacl_Hash_Blake2s_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); @@ -708,10 +690,8 @@ EverCrypt_HMAC_compute_blake2b( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -732,19 +712,17 
@@ EverCrypt_HMAC_compute_blake2b( { Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -753,11 +731,16 @@ EverCrypt_HMAC_compute_blake2b( uint64_t s[16U] = { 0U }; Hacl_Hash_Blake2b_init(s, 0U, 64U); uint64_t *s0 = s; - uint8_t *dst1 = ipad; if (data_len == 0U) { uint64_t wv[16U] = { 0U }; - Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad); + Hacl_Hash_Blake2b_update_last(128U, + wv, + s0, + false, + FStar_UInt128_uint64_to_uint128(0ULL), + 128U, + ipad); } else { @@ -792,11 +775,13 @@ EverCrypt_HMAC_compute_blake2b( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, rem); } + uint8_t *dst1 = ipad; Hacl_Hash_Blake2b_finish(64U, dst1, s0); uint8_t *hash1 = ipad; Hacl_Hash_Blake2b_init(s0, 0U, 64U); @@ -831,6 +816,7 @@ EverCrypt_HMAC_compute_blake2b( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, diff --git a/src/EverCrypt_Hash.c b/src/EverCrypt_Hash.c index bfafa9be..859909d5 100644 --- a/src/EverCrypt_Hash.c +++ b/src/EverCrypt_Hash.c @@ -616,7 +616,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ { uint32_t *p1 = scrut.case_Blake2S_s; uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(last_len, wv, p1, prev_len, last_len, last); + Hacl_Hash_Blake2s_update_last(last_len, wv, p1, false, prev_len, last_len, last); return; } if (scrut.tag == Blake2S_128_s) @@ -624,7 +624,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s; #if HACL_CAN_COMPILE_VEC128 KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Hash_Blake2s_Simd128_update_last(last_len, wv, p1, prev_len, last_len, last); + Hacl_Hash_Blake2s_Simd128_update_last(last_len, wv, p1, false, prev_len, last_len, last); return; #else KRML_MAYBE_UNUSED_VAR(p1); @@ -638,6 +638,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ Hacl_Hash_Blake2b_update_last(last_len, wv, p1, + false, FStar_UInt128_uint64_to_uint128(prev_len), last_len, last); @@ -651,6 +652,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ Hacl_Hash_Blake2b_Simd256_update_last(last_len, wv, p1, + false, FStar_UInt128_uint64_to_uint128(prev_len), last_len, last); @@ -1305,6 +1307,7 @@ EverCrypt_Hash_Incremental_state_t KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a)); uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t)); EverCrypt_Hash_state_s *block_state = create_in(a); + init(block_state); EverCrypt_Hash_Incremental_state_t s 
= { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; EverCrypt_Hash_Incremental_state_t @@ -1313,7 +1316,6 @@ EverCrypt_Hash_Incremental_state_t EverCrypt_Hash_Incremental_state_t )); p[0U] = s; - init(block_state); return p; } @@ -1322,15 +1324,12 @@ Reset an existing state to the initial hash state with empty data. */ void EverCrypt_Hash_Incremental_reset(EverCrypt_Hash_Incremental_state_t *state) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - uint8_t *buf = scrut.buf; - EverCrypt_Hash_state_s *block_state = scrut.block_state; + EverCrypt_Hash_state_s *block_state = (*state).block_state; Spec_Hash_Definitions_hash_alg i = alg_of_state(block_state); KRML_MAYBE_UNUSED_VAR(i); init(block_state); - EverCrypt_Hash_Incremental_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } /** @@ -1347,9 +1346,8 @@ EverCrypt_Hash_Incremental_update( uint32_t chunk_len ) { - EverCrypt_Hash_Incremental_state_t s = *state; - EverCrypt_Hash_state_s *block_state = s.block_state; - uint64_t total_len = s.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; Spec_Hash_Definitions_hash_alg i1 = alg_of_state(block_state); uint64_t sw; switch (i1) @@ -1448,10 +1446,8 @@ EverCrypt_Hash_Incremental_update( } if (chunk_len <= block_len(i1) - sz) { - EverCrypt_Hash_Incremental_state_t s1 = *state; - EverCrypt_Hash_state_s *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL) { @@ -1464,22 +1460,12 @@ EverCrypt_Hash_Incremental_update( uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (EverCrypt_Hash_Incremental_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - EverCrypt_Hash_Incremental_state_t s1 = *state; - EverCrypt_Hash_state_s *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL) { @@ -1492,7 +1478,7 @@ EverCrypt_Hash_Incremental_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - update_multi(block_state1, prevlen, buf, block_len(i1)); + update_multi(block_state, prevlen, buf, block_len(i1)); } uint32_t ite0; if ((uint64_t)chunk_len % (uint64_t)block_len(i1) == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -1508,28 +1494,18 @@ EverCrypt_Hash_Incremental_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - update_multi(block_state1, total_len1, data1, data1_len); + update_multi(block_state, total_len1, data1, data1_len); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (EverCrypt_Hash_Incremental_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = block_len(i1) - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - 
EverCrypt_Hash_Incremental_state_t s1 = *state; - EverCrypt_Hash_state_s *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)block_len(i1) == 0ULL && total_len10 > 0ULL) { @@ -1539,22 +1515,12 @@ EverCrypt_Hash_Incremental_update( { sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i1)); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (EverCrypt_Hash_Incremental_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - EverCrypt_Hash_Incremental_state_t s10 = *state; - EverCrypt_Hash_state_s *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL) { @@ -1567,7 +1533,7 @@ EverCrypt_Hash_Incremental_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - update_multi(block_state1, prevlen, buf, block_len(i1)); + update_multi(block_state, prevlen, buf0, block_len(i1)); } uint32_t ite0; if @@ -1589,18 +1555,10 @@ EverCrypt_Hash_Incremental_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - update_multi(block_state1, total_len1, data1, data1_len); - uint8_t *dst = buf; + update_multi(block_state, total_len1, data1, data1_len); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (EverCrypt_Hash_Incremental_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } ite = Hacl_Streaming_Types_Success; } @@ -1624,10 +1582,9 @@ EverCrypt_Hash_Incremental_update( static void digest_md5(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_MD5) == 0ULL && total_len > 0ULL) { @@ -1643,6 +1600,7 @@ static void digest_md5(EverCrypt_Hash_Incremental_state_t *state, uint8_t *outpu EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_MD5) == 0U && r > 0U) { @@ -1653,7 +1611,6 @@ static void digest_md5(EverCrypt_Hash_Incremental_state_t *state, uint8_t *outpu ite = r % block_len(Spec_Hash_Definitions_MD5); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1662,10 +1619,9 @@ static void digest_md5(EverCrypt_Hash_Incremental_state_t *state, uint8_t *outpu static void digest_sha1(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - 
EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA1) == 0ULL && total_len > 0ULL) { @@ -1681,6 +1637,7 @@ static void digest_sha1(EverCrypt_Hash_Incremental_state_t *state, uint8_t *outp EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA1) == 0U && r > 0U) { @@ -1691,7 +1648,6 @@ static void digest_sha1(EverCrypt_Hash_Incremental_state_t *state, uint8_t *outp ite = r % block_len(Spec_Hash_Definitions_SHA1); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1700,10 +1656,9 @@ static void digest_sha1(EverCrypt_Hash_Incremental_state_t *state, uint8_t *outp static void digest_sha224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224) == 0ULL && total_len > 0ULL) @@ -1720,6 +1675,7 @@ static void digest_sha224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA2_224) == 0U && r > 0U) { @@ -1730,7 +1686,6 @@ static void digest_sha224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou ite = r % block_len(Spec_Hash_Definitions_SHA2_224); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1739,10 +1694,9 @@ static void digest_sha224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou static void digest_sha256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256) == 0ULL && total_len > 0ULL) @@ -1759,6 +1713,7 @@ static void digest_sha256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA2_256) == 0U && r > 0U) { @@ -1769,7 +1724,6 @@ static void digest_sha256(EverCrypt_Hash_Incremental_state_t *state, 
uint8_t *ou ite = r % block_len(Spec_Hash_Definitions_SHA2_256); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1778,10 +1732,9 @@ static void digest_sha256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou static void digest_sha3_224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224) == 0ULL && total_len > 0ULL) @@ -1798,6 +1751,7 @@ static void digest_sha3_224(EverCrypt_Hash_Incremental_state_t *state, uint8_t * EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA3_224) == 0U && r > 0U) { @@ -1808,7 +1762,6 @@ static void digest_sha3_224(EverCrypt_Hash_Incremental_state_t *state, uint8_t * ite = r % block_len(Spec_Hash_Definitions_SHA3_224); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1817,10 +1770,9 @@ static void digest_sha3_224(EverCrypt_Hash_Incremental_state_t *state, uint8_t * static void digest_sha3_256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256) == 0ULL && total_len > 0ULL) @@ -1837,6 +1789,7 @@ static void digest_sha3_256(EverCrypt_Hash_Incremental_state_t *state, uint8_t * EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA3_256) == 0U && r > 0U) { @@ -1847,7 +1800,6 @@ static void digest_sha3_256(EverCrypt_Hash_Incremental_state_t *state, uint8_t * ite = r % block_len(Spec_Hash_Definitions_SHA3_256); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1856,10 +1808,9 @@ static void digest_sha3_256(EverCrypt_Hash_Incremental_state_t *state, uint8_t * static void digest_sha3_384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % 
(uint64_t)block_len(Spec_Hash_Definitions_SHA3_384) == 0ULL && total_len > 0ULL) @@ -1876,6 +1827,7 @@ static void digest_sha3_384(EverCrypt_Hash_Incremental_state_t *state, uint8_t * EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA3_384) == 0U && r > 0U) { @@ -1886,7 +1838,6 @@ static void digest_sha3_384(EverCrypt_Hash_Incremental_state_t *state, uint8_t * ite = r % block_len(Spec_Hash_Definitions_SHA3_384); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1895,10 +1846,9 @@ static void digest_sha3_384(EverCrypt_Hash_Incremental_state_t *state, uint8_t * static void digest_sha3_512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512) == 0ULL && total_len > 0ULL) @@ -1915,6 +1865,7 @@ static void digest_sha3_512(EverCrypt_Hash_Incremental_state_t *state, uint8_t * EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA3_512) == 0U && r > 0U) { @@ -1925,7 +1876,6 @@ static void digest_sha3_512(EverCrypt_Hash_Incremental_state_t *state, uint8_t * ite = r % block_len(Spec_Hash_Definitions_SHA3_512); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1934,10 +1884,9 @@ static void digest_sha3_512(EverCrypt_Hash_Incremental_state_t *state, uint8_t * static void digest_sha384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384) == 0ULL && total_len > 0ULL) @@ -1954,6 +1903,7 @@ static void digest_sha384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA2_384) == 0U && r > 0U) { @@ -1964,7 +1914,6 @@ static void digest_sha384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou ite = r % block_len(Spec_Hash_Definitions_SHA2_384); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -1973,10 +1922,9 @@ static void 
digest_sha384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou static void digest_sha512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512) == 0ULL && total_len > 0ULL) @@ -1993,6 +1941,7 @@ static void digest_sha512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_SHA2_512) == 0U && r > 0U) { @@ -2003,7 +1952,6 @@ static void digest_sha512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou ite = r % block_len(Spec_Hash_Definitions_SHA2_512); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -2012,10 +1960,9 @@ static void digest_sha512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *ou static void digest_blake2s(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S) == 0ULL && total_len > 0ULL) { @@ -2046,6 +1993,7 @@ static void digest_blake2s(EverCrypt_Hash_Incremental_state_t *state, uint8_t *o EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(Spec_Hash_Definitions_Blake2S) == 0U && r > 0U) { @@ -2056,7 +2004,6 @@ static void digest_blake2s(EverCrypt_Hash_Incremental_state_t *state, uint8_t *o ite = r % block_len(Spec_Hash_Definitions_Blake2S); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -2065,10 +2012,9 @@ static void digest_blake2s(EverCrypt_Hash_Incremental_state_t *state, uint8_t *o static void digest_blake2b(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output) { - EverCrypt_Hash_Incremental_state_t scrut = *state; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + EverCrypt_Hash_state_s *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B) == 0ULL && total_len > 0ULL) { @@ -2099,6 +2045,7 @@ static void digest_blake2b(EverCrypt_Hash_Incremental_state_t *state, uint8_t *o EverCrypt_Hash_state_s tmp_block_state = s; copy(block_state, &tmp_block_state); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; 
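/*
 * Illustrative sketch, not from the generated patch: all of the digest_*
 * helpers in this file share one shape, and the hunks above only change how
 * the state fields are read (direct field access instead of copying the whole
 * struct) and where buf_multi is introduced. The shared pattern: compute the
 * number r of buffered bytes, clone the block state so the caller can keep
 * hashing, then feed the buffered blocks to update_multi and the remainder to
 * update_last before finishing. With stand-in function pointers for the
 * per-algorithm operations:
 */
#include <stdint.h>

typedef struct { uint64_t opaque[32]; } blk_state_t;   /* stand-in state type */

static void digest_sketch(uint32_t block_len,
                          void (*copy)(const blk_state_t *src, blk_state_t *dst),
                          void (*update_multi)(blk_state_t *, uint64_t prev, uint8_t *blocks, uint32_t nbytes),
                          void (*update_last)(blk_state_t *, uint64_t prev, uint8_t *last, uint32_t last_len),
                          void (*finish)(blk_state_t *, uint8_t *out),
                          const blk_state_t *block_state, uint8_t *buf,
                          uint64_t total_len, uint8_t *out)
{
  uint32_t r;
  if (total_len % (uint64_t)block_len == 0ULL && total_len > 0ULL)
  {
    r = block_len;                                     /* one full buffered block */
  }
  else
  {
    r = (uint32_t)(total_len % (uint64_t)block_len);   /* a partial block         */
  }
  blk_state_t tmp;
  copy(block_state, &tmp);              /* leave the caller's state untouched      */
  uint32_t last_len;
  if (r % block_len == 0U && r > 0U)
  {
    last_len = block_len;
  }
  else
  {
    last_len = r % block_len;
  }
  uint8_t *buf_last = buf + r - last_len;
  uint8_t *buf_multi = buf;
  update_multi(&tmp, total_len - (uint64_t)r, buf_multi, 0U); /* buffer holds at most one block */
  update_last(&tmp, total_len - (uint64_t)r, buf_last, r);
  finish(&tmp, out);
}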
uint32_t ite; if (r % block_len(Spec_Hash_Definitions_Blake2B) == 0U && r > 0U) { @@ -2109,7 +2056,6 @@ static void digest_blake2b(EverCrypt_Hash_Incremental_state_t *state, uint8_t *o ite = r % block_len(Spec_Hash_Definitions_Blake2B); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; update_multi(&tmp_block_state, prev_len, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; update_last(&tmp_block_state, prev_len_last, buf_last, r); @@ -2228,8 +2174,8 @@ void EverCrypt_Hash_Incremental_hash_256(uint8_t *output, uint8_t *input, uint32 0U, 8U, 1U, - uint32_t *os = st; uint32_t x = Hacl_Hash_SHA2_h256[i]; + uint32_t *os = st; os[i] = x;); uint32_t *s = st; uint32_t blocks_n0 = input_len / 64U; @@ -2266,8 +2212,8 @@ static void hash_224(uint8_t *output, uint8_t *input, uint32_t input_len) 0U, 8U, 1U, - uint32_t *os = st; uint32_t x = Hacl_Hash_SHA2_h224[i]; + uint32_t *os = st; os[i] = x;); uint32_t *s = st; uint32_t blocks_n0 = input_len / 64U; diff --git a/src/Hacl_AEAD_Chacha20Poly1305.c b/src/Hacl_AEAD_Chacha20Poly1305.c index d5926093..4b683308 100644 --- a/src/Hacl_AEAD_Chacha20Poly1305.c +++ b/src/Hacl_AEAD_Chacha20Poly1305.c @@ -579,7 +579,8 @@ Hacl_AEAD_Chacha20Poly1305_encrypt( { Hacl_Chacha20_chacha20_encrypt(input_len, output, input, key, nonce, 1U); uint8_t tmp[64U] = { 0U }; - Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, key, nonce, 0U); + uint8_t tmp_copy[64U] = { 0U }; + Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp_copy, key, nonce, 0U); uint8_t *key1 = tmp; poly1305_do_32(key1, data_len, data, input_len, output, tag); } @@ -618,7 +619,8 @@ Hacl_AEAD_Chacha20Poly1305_decrypt( { uint8_t computed_tag[16U] = { 0U }; uint8_t tmp[64U] = { 0U }; - Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, key, nonce, 0U); + uint8_t tmp_copy[64U] = { 0U }; + Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp_copy, key, nonce, 0U); uint8_t *key1 = tmp; poly1305_do_32(key1, data_len, data, input_len, input, computed_tag); uint8_t res = 255U; diff --git a/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c b/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c index 0cfa41fd..38494f80 100644 --- a/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c +++ b/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c @@ -1095,7 +1095,8 @@ Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt( { Hacl_Chacha20_Vec128_chacha20_encrypt_128(input_len, output, input, key, nonce, 1U); uint8_t tmp[64U] = { 0U }; - Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, key, nonce, 0U); + uint8_t tmp_copy[64U] = { 0U }; + Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp_copy, key, nonce, 0U); uint8_t *key1 = tmp; poly1305_do_128(key1, data_len, data, input_len, output, tag); } @@ -1134,7 +1135,8 @@ Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt( { uint8_t computed_tag[16U] = { 0U }; uint8_t tmp[64U] = { 0U }; - Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, key, nonce, 0U); + uint8_t tmp_copy[64U] = { 0U }; + Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp_copy, key, nonce, 0U); uint8_t *key1 = tmp; poly1305_do_128(key1, data_len, data, input_len, input, computed_tag); uint8_t res = 255U; diff --git a/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c b/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c index 28414516..edf44f38 100644 --- a/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c +++ b/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c @@ -1096,7 +1096,8 @@ Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt( { Hacl_Chacha20_Vec256_chacha20_encrypt_256(input_len, output, input, key, nonce, 1U); uint8_t tmp[64U] = { 0U }; - 
Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, key, nonce, 0U); + uint8_t tmp_copy[64U] = { 0U }; + Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp_copy, key, nonce, 0U); uint8_t *key1 = tmp; poly1305_do_256(key1, data_len, data, input_len, output, tag); } @@ -1135,7 +1136,8 @@ Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt( { uint8_t computed_tag[16U] = { 0U }; uint8_t tmp[64U] = { 0U }; - Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, key, nonce, 0U); + uint8_t tmp_copy[64U] = { 0U }; + Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp_copy, key, nonce, 0U); uint8_t *key1 = tmp; poly1305_do_256(key1, data_len, data, input_len, input, computed_tag); uint8_t res = 255U; diff --git a/src/Hacl_Bignum.c b/src/Hacl_Bignum.c index 568bcc26..fcb722d6 100644 --- a/src/Hacl_Bignum.c +++ b/src/Hacl_Bignum.c @@ -54,8 +54,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0); for (uint32_t i = 0U; i < len2; i++) { - uint32_t *os = t0; uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]); + uint32_t *os = t0; os[i] = x; } KRML_MAYBE_UNUSED_VAR(c10); @@ -64,8 +64,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1); for (uint32_t i = 0U; i < len2; i++) { - uint32_t *os = t1; uint32_t x = ((0U - c010) & t1[i]) | (~(0U - c010) & tmp_[i]); + uint32_t *os = t1; os[i] = x; } KRML_MAYBE_UNUSED_VAR(c1); @@ -77,6 +77,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( uint32_t *r23 = res + aLen; Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, a0, b0, tmp1, r01); Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, a1, b1, tmp1, r23); + KRML_MAYBE_UNUSED_VAR(res); + KRML_MAYBE_UNUSED_VAR(tmp); uint32_t *r011 = res; uint32_t *r231 = res + aLen; uint32_t *t01 = tmp; @@ -92,37 +94,47 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( uint32_t mask = 0U - c_sign; for (uint32_t i = 0U; i < aLen; i++) { - uint32_t *os = t45; uint32_t x = (mask & t45[i]) | (~mask & t67[i]); + uint32_t *os = t45; os[i] = x; } uint32_t c5 = (mask & c41) | (~mask & c31); uint32_t aLen2 = aLen / 2U; + KRML_MAYBE_UNUSED_VAR(res); uint32_t *r0 = res + aLen2; - uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0); - uint32_t c6 = r10; + KRML_CHECK_SIZE(sizeof (uint32_t), aLen); + uint32_t a_copy[aLen]; + memset(a_copy, 0U, aLen * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen); + uint32_t b_copy[aLen]; + memset(b_copy, 0U, aLen * sizeof (uint32_t)); + memcpy(a_copy, r0, aLen * sizeof (uint32_t)); + memcpy(b_copy, t45, aLen * sizeof (uint32_t)); + uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, a_copy, b_copy, r0); + uint32_t r11 = r10; + uint32_t c6 = r11; uint32_t c60 = c6; uint32_t c7 = c5 + c60; + KRML_MAYBE_UNUSED_VAR(res); uint32_t *r = res + aLen + aLen2; uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r); uint32_t r1; if (1U < aLen + aLen - (aLen + aLen2)) { - uint32_t *a11 = r + 1U; uint32_t *res1 = r + 1U; uint32_t c = c01; for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) { - uint32_t t11 = a11[4U * i]; + uint32_t t11 = res1[4U * i]; uint32_t *res_i0 = res1 + 4U * i; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i0); - uint32_t t110 = a11[4U * i + 1U]; + uint32_t t110 = res1[4U * i + 1U]; uint32_t *res_i1 = res1 + 4U * i + 1U; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, 0U, res_i1); - uint32_t t111 = a11[4U * i + 2U]; + uint32_t t111 = res1[4U * i + 
2U]; uint32_t *res_i2 = res1 + 4U * i + 2U; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, 0U, res_i2); - uint32_t t112 = a11[4U * i + 3U]; + uint32_t t112 = res1[4U * i + 3U]; uint32_t *res_i = res1 + 4U * i + 3U; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, 0U, res_i); } @@ -133,7 +145,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( < aLen + aLen - (aLen + aLen2) - 1U; i++) { - uint32_t t11 = a11[i]; + uint32_t t11 = res1[i]; uint32_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i); } @@ -176,8 +188,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0); for (uint32_t i = 0U; i < len2; i++) { - uint64_t *os = t0; uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]); + uint64_t *os = t0; os[i] = x; } KRML_MAYBE_UNUSED_VAR(c10); @@ -186,8 +198,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1); for (uint32_t i = 0U; i < len2; i++) { - uint64_t *os = t1; uint64_t x = ((0ULL - c010) & t1[i]) | (~(0ULL - c010) & tmp_[i]); + uint64_t *os = t1; os[i] = x; } KRML_MAYBE_UNUSED_VAR(c1); @@ -199,6 +211,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( uint64_t *r23 = res + aLen; Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, a0, b0, tmp1, r01); Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, a1, b1, tmp1, r23); + KRML_MAYBE_UNUSED_VAR(res); + KRML_MAYBE_UNUSED_VAR(tmp); uint64_t *r011 = res; uint64_t *r231 = res + aLen; uint64_t *t01 = tmp; @@ -214,37 +228,47 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( uint64_t mask = 0ULL - c_sign; for (uint32_t i = 0U; i < aLen; i++) { - uint64_t *os = t45; uint64_t x = (mask & t45[i]) | (~mask & t67[i]); + uint64_t *os = t45; os[i] = x; } uint64_t c5 = (mask & c41) | (~mask & c31); uint32_t aLen2 = aLen / 2U; + KRML_MAYBE_UNUSED_VAR(res); uint64_t *r0 = res + aLen2; - uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0); - uint64_t c6 = r10; + KRML_CHECK_SIZE(sizeof (uint64_t), aLen); + uint64_t a_copy[aLen]; + memset(a_copy, 0U, aLen * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen); + uint64_t b_copy[aLen]; + memset(b_copy, 0U, aLen * sizeof (uint64_t)); + memcpy(a_copy, r0, aLen * sizeof (uint64_t)); + memcpy(b_copy, t45, aLen * sizeof (uint64_t)); + uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, a_copy, b_copy, r0); + uint64_t r11 = r10; + uint64_t c6 = r11; uint64_t c60 = c6; uint64_t c7 = c5 + c60; + KRML_MAYBE_UNUSED_VAR(res); uint64_t *r = res + aLen + aLen2; uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r); uint64_t r1; if (1U < aLen + aLen - (aLen + aLen2)) { - uint64_t *a11 = r + 1U; uint64_t *res1 = r + 1U; uint64_t c = c01; for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) { - uint64_t t11 = a11[4U * i]; + uint64_t t11 = res1[4U * i]; uint64_t *res_i0 = res1 + 4U * i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i0); - uint64_t t110 = a11[4U * i + 1U]; + uint64_t t110 = res1[4U * i + 1U]; uint64_t *res_i1 = res1 + 4U * i + 1U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, 0ULL, res_i1); - uint64_t t111 = a11[4U * i + 2U]; + uint64_t t111 = res1[4U * i + 2U]; uint64_t *res_i2 = res1 + 4U * i + 2U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, 0ULL, res_i2); - uint64_t t112 = a11[4U * i + 3U]; + uint64_t t112 = res1[4U * i + 3U]; uint64_t *res_i = res1 + 4U * i + 3U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, 0ULL, 
res_i); } @@ -255,7 +279,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( < aLen + aLen - (aLen + aLen2) - 1U; i++) { - uint64_t t11 = a11[i]; + uint64_t t11 = res1[i]; uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i); } @@ -294,8 +318,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0); for (uint32_t i = 0U; i < len2; i++) { - uint32_t *os = t0; uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]); + uint32_t *os = t0; os[i] = x; } KRML_MAYBE_UNUSED_VAR(c1); @@ -308,6 +332,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( uint32_t *r23 = res + aLen; Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, a0, tmp1, r01); Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, a1, tmp1, r23); + KRML_MAYBE_UNUSED_VAR(res); + KRML_MAYBE_UNUSED_VAR(tmp); uint32_t *r011 = res; uint32_t *r231 = res + aLen; uint32_t *t01 = tmp; @@ -317,31 +343,41 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45); uint32_t c5 = c2 - c3; uint32_t aLen2 = aLen / 2U; + KRML_MAYBE_UNUSED_VAR(res); uint32_t *r0 = res + aLen2; - uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0); - uint32_t c4 = r10; + KRML_CHECK_SIZE(sizeof (uint32_t), aLen); + uint32_t a_copy[aLen]; + memset(a_copy, 0U, aLen * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen); + uint32_t b_copy[aLen]; + memset(b_copy, 0U, aLen * sizeof (uint32_t)); + memcpy(a_copy, r0, aLen * sizeof (uint32_t)); + memcpy(b_copy, t45, aLen * sizeof (uint32_t)); + uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, a_copy, b_copy, r0); + uint32_t r11 = r10; + uint32_t c4 = r11; uint32_t c6 = c4; uint32_t c7 = c5 + c6; + KRML_MAYBE_UNUSED_VAR(res); uint32_t *r = res + aLen + aLen2; uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r); uint32_t r1; if (1U < aLen + aLen - (aLen + aLen2)) { - uint32_t *a11 = r + 1U; uint32_t *res1 = r + 1U; uint32_t c = c01; for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) { - uint32_t t1 = a11[4U * i]; + uint32_t t1 = res1[4U * i]; uint32_t *res_i0 = res1 + 4U * i; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i0); - uint32_t t10 = a11[4U * i + 1U]; + uint32_t t10 = res1[4U * i + 1U]; uint32_t *res_i1 = res1 + 4U * i + 1U; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, 0U, res_i1); - uint32_t t11 = a11[4U * i + 2U]; + uint32_t t11 = res1[4U * i + 2U]; uint32_t *res_i2 = res1 + 4U * i + 2U; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i2); - uint32_t t12 = a11[4U * i + 3U]; + uint32_t t12 = res1[4U * i + 3U]; uint32_t *res_i = res1 + 4U * i + 3U; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, 0U, res_i); } @@ -352,7 +388,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( < aLen + aLen - (aLen + aLen2) - 1U; i++) { - uint32_t t1 = a11[i]; + uint32_t t1 = res1[i]; uint32_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i); } @@ -391,8 +427,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0); for (uint32_t i = 0U; i < len2; i++) { - uint64_t *os = t0; uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]); + uint64_t *os = t0; os[i] = x; } KRML_MAYBE_UNUSED_VAR(c1); @@ -405,6 +441,8 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( uint64_t *r23 = res + aLen; Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, a0, tmp1, r01); 
Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, a1, tmp1, r23); + KRML_MAYBE_UNUSED_VAR(res); + KRML_MAYBE_UNUSED_VAR(tmp); uint64_t *r011 = res; uint64_t *r231 = res + aLen; uint64_t *t01 = tmp; @@ -414,31 +452,41 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45); uint64_t c5 = c2 - c3; uint32_t aLen2 = aLen / 2U; + KRML_MAYBE_UNUSED_VAR(res); uint64_t *r0 = res + aLen2; - uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0); - uint64_t c4 = r10; + KRML_CHECK_SIZE(sizeof (uint64_t), aLen); + uint64_t a_copy[aLen]; + memset(a_copy, 0U, aLen * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen); + uint64_t b_copy[aLen]; + memset(b_copy, 0U, aLen * sizeof (uint64_t)); + memcpy(a_copy, r0, aLen * sizeof (uint64_t)); + memcpy(b_copy, t45, aLen * sizeof (uint64_t)); + uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, a_copy, b_copy, r0); + uint64_t r11 = r10; + uint64_t c4 = r11; uint64_t c6 = c4; uint64_t c7 = c5 + c6; + KRML_MAYBE_UNUSED_VAR(res); uint64_t *r = res + aLen + aLen2; uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r); uint64_t r1; if (1U < aLen + aLen - (aLen + aLen2)) { - uint64_t *a11 = r + 1U; uint64_t *res1 = r + 1U; uint64_t c = c01; for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) { - uint64_t t1 = a11[4U * i]; + uint64_t t1 = res1[4U * i]; uint64_t *res_i0 = res1 + 4U * i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0); - uint64_t t10 = a11[4U * i + 1U]; + uint64_t t10 = res1[4U * i + 1U]; uint64_t *res_i1 = res1 + 4U * i + 1U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1); - uint64_t t11 = a11[4U * i + 2U]; + uint64_t t11 = res1[4U * i + 2U]; uint64_t *res_i2 = res1 + 4U * i + 2U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2); - uint64_t t12 = a11[4U * i + 3U]; + uint64_t t12 = res1[4U * i + 3U]; uint64_t *res_i = res1 + 4U * i + 3U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i); } @@ -449,7 +497,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( < aLen + aLen - (aLen + aLen2) - 1U; i++) { - uint64_t t1 = a11[i]; + uint64_t t1 = res1[i]; uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i); } @@ -537,8 +585,8 @@ Hacl_Bignum_bn_add_mod_n_u32( uint32_t c2 = c00 - c1; for (uint32_t i = 0U; i < len1; i++) { - uint32_t *os = res; uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint32_t *os = res; os[i] = x; } } @@ -614,8 +662,8 @@ Hacl_Bignum_bn_add_mod_n_u64( uint64_t c2 = c00 - c1; for (uint32_t i = 0U; i < len1; i++) { - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x; } } @@ -692,8 +740,8 @@ Hacl_Bignum_bn_sub_mod_n_u32( uint32_t c2 = 0U - c00; for (uint32_t i = 0U; i < len1; i++) { - uint32_t *os = res; uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint32_t *os = res; os[i] = x; } } @@ -770,8 +818,8 @@ Hacl_Bignum_bn_sub_mod_n_u64( uint64_t c2 = 0ULL - c00; for (uint32_t i = 0U; i < len1; i++) { - uint64_t *os = res; uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint64_t *os = res; os[i] = x; } } @@ -832,7 +880,7 @@ uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m1 = acc; return m0 & m1; @@ -852,7 +900,15 
@@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32( res[i] = res[i] | 1U << j; for (uint32_t i0 = 0U; i0 < 64U * len - nBits; i0++) { - Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t a_copy[len]; + memset(a_copy, 0U, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t b_copy[len]; + memset(b_copy, 0U, len * sizeof (uint32_t)); + memcpy(a_copy, res, len * sizeof (uint32_t)); + memcpy(b_copy, res, len * sizeof (uint32_t)); + Hacl_Bignum_bn_add_mod_n_u32(len, n, a_copy, b_copy, res); } } @@ -888,8 +944,8 @@ bn_mont_reduction_u32(uint32_t len, uint32_t *n, uint32_t nInv, uint32_t *c, uin } uint32_t r = c1; uint32_t c10 = r; - uint32_t *resb = c + len + i0; uint32_t res_j = c[len + i0]; + uint32_t *resb = c + len + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb); } memcpy(res, c + len, (len + len - len) * sizeof (uint32_t)); @@ -928,8 +984,8 @@ bn_mont_reduction_u32(uint32_t len, uint32_t *n, uint32_t nInv, uint32_t *c, uin uint32_t c2 = c00 - c10; for (uint32_t i = 0U; i < len; i++) { - uint32_t *os = res; uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint32_t *os = res; os[i] = x; } } @@ -1023,7 +1079,7 @@ uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; return m0 & m1; @@ -1043,7 +1099,15 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64( res[i] = res[i] | 1ULL << j; for (uint32_t i0 = 0U; i0 < 128U * len - nBits; i0++) { - Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t a_copy[len]; + memset(a_copy, 0U, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t b_copy[len]; + memset(b_copy, 0U, len * sizeof (uint64_t)); + memcpy(a_copy, res, len * sizeof (uint64_t)); + memcpy(b_copy, res, len * sizeof (uint64_t)); + Hacl_Bignum_bn_add_mod_n_u64(len, n, a_copy, b_copy, res); } } @@ -1079,8 +1143,8 @@ bn_mont_reduction_u64(uint32_t len, uint64_t *n, uint64_t nInv, uint64_t *c, uin } uint64_t r = c1; uint64_t c10 = r; - uint64_t *resb = c + len + i0; uint64_t res_j = c[len + i0]; + uint64_t *resb = c + len + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb); } memcpy(res, c + len, (len + len - len) * sizeof (uint64_t)); @@ -1119,8 +1183,8 @@ bn_mont_reduction_u64(uint32_t len, uint64_t *n, uint64_t nInv, uint64_t *c, uin uint64_t c2 = c00 - c10; for (uint32_t i = 0U; i < len; i++) { - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x; } } @@ -1238,8 +1302,8 @@ Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32( } uint32_t r = c1; uint32_t c10 = r; - uint32_t *resb = c + len + i0; uint32_t res_j = c[len + i0]; + uint32_t *resb = c + len + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb); } memcpy(res, c + len, (len + len - len) * sizeof (uint32_t)); @@ -1252,8 +1316,8 @@ Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32( uint32_t m = 0U - c00; for (uint32_t i = 0U; i < len; i++) { - uint32_t *os = res; uint32_t x = (m & tmp[i]) | (~m & res[i]); + uint32_t *os = res; os[i] = x; } } @@ -1335,8 +1399,8 @@ Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64( } uint64_t r = c1; uint64_t c10 = r; - uint64_t *resb = c + len + i0; uint64_t 
res_j = c[len + i0]; + uint64_t *resb = c + len + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb); } memcpy(res, c + len, (len + len - len) * sizeof (uint64_t)); @@ -1349,8 +1413,8 @@ Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64( uint64_t m = 0ULL - c00; for (uint32_t i = 0U; i < len; i++) { - uint64_t *os = res; uint64_t x = (m & tmp[i]) | (~m & res[i]); + uint64_t *os = res; os[i] = x; } } @@ -1415,7 +1479,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m10 = acc0; uint32_t m00 = m0 & m10; @@ -1442,7 +1506,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( { uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t res = acc; m1 = res; @@ -1456,7 +1520,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t m = m1 & m2; @@ -1489,9 +1553,10 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( memset(ctx, 0U, (len + len) * sizeof (uint32_t)); memcpy(ctx, n, len * sizeof (uint32_t)); memcpy(ctx + len, r2, len * sizeof (uint32_t)); - uint32_t *ctx_n = ctx; + uint32_t *ctx_n0 = ctx; uint32_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 32U; @@ -1500,11 +1565,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( uint32_t bit = tmp >> j & 1U; if (!(bit == 0U)) { - uint32_t *ctx_n0 = ctx; - bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, resM, len * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + bn_almost_mont_mul_u32(len, ctx_n, mu, aM_copy, aM, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } - uint32_t *ctx_n0 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n0, mu, aM, aM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, aM, len * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + bn_almost_mont_sqr_u32(len, ctx_n, mu, aM_copy, aM); + KRML_MAYBE_UNUSED_VAR(ctx); } Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); return; @@ -1541,18 +1616,30 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, len * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * len; + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy0[len]; + memset(aM_copy0, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy0, t11, len * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp); + bn_almost_mont_sqr_u32(len, ctx_n1, mu, aM_copy0, tmp); + 
KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * len; + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, aM, len * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp); + bn_almost_mont_mul_u32(len, ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -1567,6 +1654,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } KRML_CHECK_SIZE(sizeof (uint32_t), len); uint32_t tmp0[len]; @@ -1577,15 +1665,26 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( 0U, 4U, 1U, + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, resM, len * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM);); + bn_almost_mont_sqr_u32(len, ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = bits_l; const uint32_t *a_bits_l = table + bits_l32 * len; memcpy(tmp0, (uint32_t *)a_bits_l, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, resM, len * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM); + bn_almost_mont_mul_u32(len, ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); } @@ -1617,9 +1716,10 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( memcpy(ctx, n, len * sizeof (uint32_t)); memcpy(ctx + len, r2, len * sizeof (uint32_t)); uint32_t sw = 0U; - uint32_t *ctx_n = ctx; + uint32_t *ctx_n0 = ctx; uint32_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 32U; @@ -1633,10 +1733,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( resM[i] = resM[i] ^ dummy; aM[i] = aM[i] ^ dummy; } - uint32_t *ctx_n0 = ctx; - bn_almost_mont_mul_u32(len, ctx_n0, mu, aM, resM, aM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, aM, len * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n1, mu, resM, resM); + bn_almost_mont_mul_u32(len, ctx_n1, mu, aM_copy, resM, aM); + KRML_MAYBE_UNUSED_VAR(ctx); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy0[len]; + memset(aM_copy0, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy0, resM, len * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + bn_almost_mont_sqr_u32(len, ctx_n, mu, aM_copy0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); sw = bit; } uint32_t sw0 = sw; @@ -1681,18 +1791,30 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + len; 
Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, len * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * len; + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy0[len]; + memset(aM_copy0, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy0, t11, len * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp); + bn_almost_mont_sqr_u32(len, ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * len; + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, aM, len * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp); + bn_almost_mont_mul_u32(len, ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -1707,8 +1829,8 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( const uint32_t *res_j = table + (i1 + 1U) * len; for (uint32_t i = 0U; i < len; i++) { - uint32_t *os = resM; uint32_t x = (c & res_j[i]) | (~c & resM[i]); + uint32_t *os = resM; os[i] = x; }); } @@ -1717,6 +1839,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } KRML_CHECK_SIZE(sizeof (uint32_t), len); uint32_t tmp0[len]; @@ -1727,10 +1850,16 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( 0U, 4U, 1U, + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, resM, len * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM);); + bn_almost_mont_sqr_u32(len, ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -1740,12 +1869,17 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( const uint32_t *res_j = table + (i1 + 1U) * len; for (uint32_t i = 0U; i < len; i++) { - uint32_t *os = tmp0; uint32_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint32_t *os = tmp0; os[i] = x; }); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint32_t)); + memcpy(aM_copy, resM, len * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM); + bn_almost_mont_mul_u32(len, ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); } @@ -1809,7 +1943,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m10 = acc0; uint64_t m00 = m0 & m10; @@ -1836,7 +1970,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = 
~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; m1 = res; @@ -1850,7 +1984,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t m = m1 & m2; @@ -1883,9 +2017,10 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( memset(ctx, 0U, (len + len) * sizeof (uint64_t)); memcpy(ctx, n, len * sizeof (uint64_t)); memcpy(ctx + len, r2, len * sizeof (uint64_t)); - uint64_t *ctx_n = ctx; + uint64_t *ctx_n0 = ctx; uint64_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 64U; @@ -1894,11 +2029,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( uint64_t bit = tmp >> j & 1ULL; if (!(bit == 0ULL)) { - uint64_t *ctx_n0 = ctx; - bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, resM, len * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + bn_almost_mont_mul_u64(len, ctx_n, mu, aM_copy, aM, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } - uint64_t *ctx_n0 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n0, mu, aM, aM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, aM, len * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + bn_almost_mont_sqr_u64(len, ctx_n, mu, aM_copy, aM); + KRML_MAYBE_UNUSED_VAR(ctx); } Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); return; @@ -1935,18 +2080,30 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, len * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * len; + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy0[len]; + memset(aM_copy0, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy0, t11, len * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp); + bn_almost_mont_sqr_u64(len, ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * len; + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, aM, len * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp); + bn_almost_mont_mul_u64(len, ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -1961,6 +2118,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } KRML_CHECK_SIZE(sizeof (uint64_t), len); uint64_t tmp0[len]; @@ -1971,15 
+2129,26 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( 0U, 4U, 1U, + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, resM, len * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM);); + bn_almost_mont_sqr_u64(len, ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = (uint32_t)bits_l; const uint64_t *a_bits_l = table + bits_l32 * len; memcpy(tmp0, (uint64_t *)a_bits_l, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, resM, len * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM); + bn_almost_mont_mul_u64(len, ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); } @@ -2011,9 +2180,10 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( memcpy(ctx, n, len * sizeof (uint64_t)); memcpy(ctx + len, r2, len * sizeof (uint64_t)); uint64_t sw = 0ULL; - uint64_t *ctx_n = ctx; + uint64_t *ctx_n0 = ctx; uint64_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 64U; @@ -2027,10 +2197,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( resM[i] = resM[i] ^ dummy; aM[i] = aM[i] ^ dummy; } - uint64_t *ctx_n0 = ctx; - bn_almost_mont_mul_u64(len, ctx_n0, mu, aM, resM, aM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, aM, len * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n1, mu, resM, resM); + bn_almost_mont_mul_u64(len, ctx_n1, mu, aM_copy, resM, aM); + KRML_MAYBE_UNUSED_VAR(ctx); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy0[len]; + memset(aM_copy0, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy0, resM, len * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + bn_almost_mont_sqr_u64(len, ctx_n, mu, aM_copy0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); sw = bit; } uint64_t sw0 = sw; @@ -2075,18 +2255,30 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, len * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * len; + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy0[len]; + memset(aM_copy0, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy0, t11, len * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp); + bn_almost_mont_sqr_u64(len, ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * len; + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, aM, len * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - 
bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp); + bn_almost_mont_mul_u64(len, ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -2101,8 +2293,8 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( const uint64_t *res_j = table + (i1 + 1U) * len; for (uint32_t i = 0U; i < len; i++) { - uint64_t *os = resM; uint64_t x = (c & res_j[i]) | (~c & resM[i]); + uint64_t *os = resM; os[i] = x; }); } @@ -2111,6 +2303,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + len; Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } KRML_CHECK_SIZE(sizeof (uint64_t), len); uint64_t tmp0[len]; @@ -2121,10 +2314,16 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( 0U, 4U, 1U, + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, resM, len * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM);); + bn_almost_mont_sqr_u64(len, ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -2134,12 +2333,17 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( const uint64_t *res_j = table + (i1 + 1U) * len; for (uint32_t i = 0U; i < len; i++) { - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x; }); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t aM_copy[len]; + memset(aM_copy, 0U, len * sizeof (uint64_t)); + memcpy(aM_copy, resM, len * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM); + bn_almost_mont_mul_u64(len, ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); } diff --git a/src/Hacl_Bignum256.c b/src/Hacl_Bignum256.c index 54bbc88a..d1a118c1 100644 --- a/src/Hacl_Bignum256.c +++ b/src/Hacl_Bignum256.c @@ -171,8 +171,8 @@ void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x;); } @@ -235,8 +235,8 @@ void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint64_t *os = res; os[i] = x;); } @@ -287,8 +287,8 @@ void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res) 0U, 4U, 1U, - uint64_t *ab = a; uint64_t a_j = a[i0]; + uint64_t *ab = a; uint64_t *res_j = res + i0; uint64_t c = 0ULL; for (uint32_t i = 0U; i < i0 / 4U; i++) @@ -314,7 +314,12 @@ void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res) } uint64_t r = c; res[i0 + i0] = r;); - uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res); + uint64_t a_copy0[8U] = { 0U }; + uint64_t b_copy0[8U] = { 0U }; + memcpy(a_copy0, res, 8U * sizeof (uint64_t)); + memcpy(b_copy0, res, 8U * sizeof (uint64_t)); + uint64_t r = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, a_copy0, b_copy0, res); + uint64_t c0 = r; KRML_MAYBE_UNUSED_VAR(c0); uint64_t tmp[8U] = { 0U }; KRML_MAYBE_FOR4(i, @@ -326,7 +331,12 @@ void 
Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res) uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); tmp[2U * i] = lo; tmp[2U * i + 1U] = hi;); - uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res); + uint64_t a_copy[8U] = { 0U }; + uint64_t b_copy[8U] = { 0U }; + memcpy(a_copy, res, 8U * sizeof (uint64_t)); + memcpy(b_copy, tmp, 8U * sizeof (uint64_t)); + uint64_t r0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, a_copy, b_copy, res); + uint64_t c1 = r0; KRML_MAYBE_UNUSED_VAR(c1); } @@ -338,7 +348,11 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res) res[i] = res[i] | 1ULL << j; for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++) { - Hacl_Bignum256_add_mod(n, res, res, res); + uint64_t a_copy[4U] = { 0U }; + uint64_t b_copy[4U] = { 0U }; + memcpy(a_copy, res, 4U * sizeof (uint64_t)); + memcpy(b_copy, res, 4U * sizeof (uint64_t)); + Hacl_Bignum256_add_mod(n, a_copy, b_copy, res); } } @@ -368,8 +382,8 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t * } uint64_t r = c1; uint64_t c10 = r; - uint64_t *resb = c + 4U + i0; uint64_t res_j = c[4U + i0]; + uint64_t *resb = c + 4U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);); memcpy(res, c + 4U, 4U * sizeof (uint64_t)); uint64_t c00 = c0; @@ -399,8 +413,8 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t * 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x;); } @@ -444,8 +458,8 @@ static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t } uint64_t r = c1; uint64_t c10 = r; - uint64_t *resb = c + 4U + i0; uint64_t res_j = c[4U + i0]; + uint64_t *resb = c + 4U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);); memcpy(res, c + 4U, 4U * sizeof (uint64_t)); uint64_t c00 = c0; @@ -457,8 +471,8 @@ static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (m & tmp[i]) | (~m & res[i]); + uint64_t *os = res; os[i] = x;); } @@ -512,7 +526,7 @@ bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res) 1U, uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t m1 = acc; uint64_t is_valid_m = m0 & m1; uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n); @@ -544,7 +558,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc0 = (beq & acc0) | (~beq & blt);); uint64_t m10 = acc0; uint64_t m00 = m0 & m10; uint32_t bLen; @@ -570,7 +584,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; m1 = res; @@ -586,7 +600,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 
0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t m2 = acc; uint64_t m = m1 & m2; return m00 & m; @@ -611,9 +625,10 @@ exp_vartime_precomp( uint64_t ctx[8U] = { 0U }; memcpy(ctx, n, 4U * sizeof (uint64_t)); memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t)); - uint64_t *ctx_n = ctx; + uint64_t *ctx_n0 = ctx; uint64_t *ctx_r2 = ctx + 4U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 64U; @@ -622,11 +637,17 @@ exp_vartime_precomp( uint64_t bit = tmp >> j & 1ULL; if (!(bit == 0ULL)) { - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, resM, 4U * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + amont_mul(ctx_n, mu, aM_copy, aM, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } - uint64_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, aM, 4U * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy, aM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); return; @@ -653,18 +674,26 @@ exp_vartime_precomp( uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + 4U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 4U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 4U; + uint64_t aM_copy0[4U] = { 0U }; + memcpy(aM_copy0, t11, 4U * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * 4U; + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, aM, 4U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -679,6 +708,7 @@ exp_vartime_precomp( uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + 4U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint64_t tmp0[4U] = { 0U }; for (uint32_t i = 0U; i < bBits / 4U; i++) @@ -687,15 +717,22 @@ exp_vartime_precomp( 0U, 4U, 1U, + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, resM, 4U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = (uint32_t)bits_l; const uint64_t *a_bits_l = table + bits_l32 * 4U; memcpy(tmp0, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t)); + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, resM, 4U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -720,9 +757,10 @@ exp_consttime_precomp( memcpy(ctx, n, 4U * sizeof (uint64_t)); memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t)); uint64_t sw = 0ULL; - uint64_t *ctx_n = ctx; + uint64_t *ctx_n0 = ctx; uint64_t *ctx_r2 = ctx + 4U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 64U; @@ -737,10 +775,16 @@ exp_consttime_precomp( uint64_t dummy = (0ULL - 
sw1) & (resM[i] ^ aM[i]); resM[i] = resM[i] ^ dummy; aM[i] = aM[i] ^ dummy;); - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, aM, 4U * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); + amont_mul(ctx_n1, mu, aM_copy, resM, aM); + KRML_MAYBE_UNUSED_VAR(ctx); + uint64_t aM_copy0[4U] = { 0U }; + memcpy(aM_copy0, resM, 4U * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); sw = bit; } uint64_t sw0 = sw; @@ -776,18 +820,26 @@ exp_consttime_precomp( uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + 4U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 4U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 4U; + uint64_t aM_copy0[4U] = { 0U }; + memcpy(aM_copy0, t11, 4U * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * 4U; + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, aM, 4U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -804,8 +856,8 @@ exp_consttime_precomp( 0U, 4U, 1U, - uint64_t *os = resM; uint64_t x = (c & res_j[i]) | (~c & resM[i]); + uint64_t *os = resM; os[i] = x;);); } else @@ -813,6 +865,7 @@ exp_consttime_precomp( uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + 4U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint64_t tmp0[4U] = { 0U }; for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++) @@ -821,10 +874,14 @@ exp_consttime_precomp( 0U, 4U, 1U, + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, resM, 4U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)table, 4U * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -836,11 +893,14 @@ exp_consttime_precomp( 0U, 4U, 1U, - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x;);); + uint64_t aM_copy[4U] = { 0U }; + memcpy(aM_copy, resM, 4U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -990,7 +1050,7 @@ bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *re 1U, uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc0 = (beq & acc0) | (~beq & blt);); uint64_t m1 = acc0; uint64_t m00 = m0 & m1; uint64_t bn_zero[4U] = { 0U }; @@ -1011,7 +1071,7 @@ bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *re 1U, uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t 
m2 = acc; uint64_t is_valid_m = (m00 & ~m10) & m2; uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n); @@ -1087,9 +1147,9 @@ Deallocate the memory previously allocated by Hacl_Bignum256_mont_ctx_init. */ void Hacl_Bignum256_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + uint64_t *n = uu____0.n; + uint64_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -1109,8 +1169,10 @@ Hacl_Bignum256_mod_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + bn_slow_precomp(n, mu, r2, a, res); } /** @@ -1141,8 +1203,10 @@ Hacl_Bignum256_mod_exp_vartime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + exp_vartime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1173,8 +1237,10 @@ Hacl_Bignum256_mod_exp_consttime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + exp_consttime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1196,10 +1262,12 @@ Hacl_Bignum256_mod_inv_prime_vartime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; uint64_t n2[4U] = { 0U }; - uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2); - uint64_t *a1 = k1.n + 1U; + uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2); + uint64_t *a1 = n + 1U; uint64_t *res1 = n2 + 1U; uint64_t c = c0; KRML_MAYBE_FOR3(i, @@ -1212,7 +1280,7 @@ Hacl_Bignum256_mod_inv_prime_vartime_precomp( uint64_t c1 = c; uint64_t c2 = c1; KRML_MAYBE_UNUSED_VAR(c2); - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res); + exp_vartime_precomp(n, mu, r2, a, 256U, n2, res); } @@ -1254,9 +1322,9 @@ uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b) memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint64_t *os = res2; uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = res2; os[i] = x; } return res2; @@ -1295,11 +1363,11 @@ uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b) memcpy(tmp, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++) { - uint64_t *os = res2; uint8_t *bj = tmp + i * 8U; uint64_t u = load64_le(bj); uint64_t r1 = u; uint64_t x = r1; + uint64_t *os = res2; os[i] = x; } return res2; @@ -1351,7 +1419,7 @@ uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); return acc; } diff --git a/src/Hacl_Bignum256_32.c b/src/Hacl_Bignum256_32.c index eed6c65c..b734d073 100644 --- a/src/Hacl_Bignum256_32.c +++ b/src/Hacl_Bignum256_32.c @@ -179,8 +179,8 @@ void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, 
uint32_t * 0U, 8U, 1U, - uint32_t *os = res; uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint32_t *os = res; os[i] = x;); } @@ -247,8 +247,8 @@ void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t * 0U, 8U, 1U, - uint32_t *os = res; uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint32_t *os = res; os[i] = x;); } @@ -301,8 +301,8 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res) 0U, 8U, 1U, - uint32_t *ab = a; uint32_t a_j = a[i0]; + uint32_t *ab = a; uint32_t *res_j = res + i0; uint32_t c = 0U; for (uint32_t i = 0U; i < i0 / 4U; i++) @@ -328,7 +328,12 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res) } uint32_t r = c; res[i0 + i0] = r;); - uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, res, res); + uint32_t a_copy0[16U] = { 0U }; + uint32_t b_copy0[16U] = { 0U }; + memcpy(a_copy0, res, 16U * sizeof (uint32_t)); + memcpy(b_copy0, res, 16U * sizeof (uint32_t)); + uint32_t r = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, a_copy0, b_copy0, res); + uint32_t c0 = r; KRML_MAYBE_UNUSED_VAR(c0); uint32_t tmp[16U] = { 0U }; KRML_MAYBE_FOR8(i, @@ -340,7 +345,12 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res) uint32_t lo = (uint32_t)res1; tmp[2U * i] = lo; tmp[2U * i + 1U] = hi;); - uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, tmp, res); + uint32_t a_copy[16U] = { 0U }; + uint32_t b_copy[16U] = { 0U }; + memcpy(a_copy, res, 16U * sizeof (uint32_t)); + memcpy(b_copy, tmp, 16U * sizeof (uint32_t)); + uint32_t r0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, a_copy, b_copy, res); + uint32_t c1 = r0; KRML_MAYBE_UNUSED_VAR(c1); } @@ -352,7 +362,11 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res) res[i] = res[i] | 1U << j; for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++) { - Hacl_Bignum256_32_add_mod(n, res, res, res); + uint32_t a_copy[8U] = { 0U }; + uint32_t b_copy[8U] = { 0U }; + memcpy(a_copy, res, 8U * sizeof (uint32_t)); + memcpy(b_copy, res, 8U * sizeof (uint32_t)); + Hacl_Bignum256_32_add_mod(n, a_copy, b_copy, res); } } @@ -384,8 +398,8 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t * c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);); uint32_t r = c1; uint32_t c10 = r; - uint32_t *resb = c + 8U + i0; uint32_t res_j = c[8U + i0]; + uint32_t *resb = c + 8U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);); memcpy(res, c + 8U, 8U * sizeof (uint32_t)); uint32_t c00 = c0; @@ -417,8 +431,8 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t * 0U, 8U, 1U, - uint32_t *os = res; uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint32_t *os = res; os[i] = x;); } @@ -464,8 +478,8 @@ static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);); uint32_t r = c1; uint32_t c10 = r; - uint32_t *resb = c + 8U + i0; uint32_t res_j = c[8U + i0]; + uint32_t *resb = c + 8U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);); memcpy(res, c + 8U, 8U * sizeof (uint32_t)); uint32_t c00 = c0; @@ -477,8 +491,8 @@ static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t 0U, 8U, 1U, - uint32_t *os = res; uint32_t x = (m & tmp[i]) | (~m & res[i]); + uint32_t *os = res; os[i] = x;); } @@ -532,7 +546,7 @@ bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res) 1U, uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & 
acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); uint32_t m1 = acc; uint32_t is_valid_m = m0 & m1; uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n); @@ -564,7 +578,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) 1U, uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc0 = (beq & acc0) | (~beq & blt);); uint32_t m10 = acc0; uint32_t m00 = m0 & m10; uint32_t bLen; @@ -590,7 +604,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t res = acc; m1 = res; @@ -606,7 +620,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) 1U, uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); uint32_t m2 = acc; uint32_t m = m1 & m2; return m00 & m; @@ -631,9 +645,10 @@ exp_vartime_precomp( uint32_t ctx[16U] = { 0U }; memcpy(ctx, n, 8U * sizeof (uint32_t)); memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t)); - uint32_t *ctx_n = ctx; + uint32_t *ctx_n0 = ctx; uint32_t *ctx_r2 = ctx + 8U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 32U; @@ -642,11 +657,17 @@ exp_vartime_precomp( uint32_t bit = tmp >> j & 1U; if (!(bit == 0U)) { - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, resM, 8U * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + amont_mul(ctx_n, mu, aM_copy, aM, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } - uint32_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, aM, 8U * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy, aM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); return; @@ -673,18 +694,26 @@ exp_vartime_precomp( uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + 8U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 8U * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * 8U; + uint32_t aM_copy0[8U] = { 0U }; + memcpy(aM_copy0, t11, 8U * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * 8U; + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, aM, 8U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -699,6 +728,7 @@ exp_vartime_precomp( uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + 8U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint32_t tmp0[8U] = { 0U }; for (uint32_t i = 0U; i < bBits / 4U; i++) @@ -707,15 +737,22 @@ exp_vartime_precomp( 0U, 4U, 1U, + uint32_t aM_copy[8U] = { 0U }; + 
memcpy(aM_copy, resM, 8U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = bits_l; const uint32_t *a_bits_l = table + bits_l32 * 8U; memcpy(tmp0, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t)); + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, resM, 8U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -740,9 +777,10 @@ exp_consttime_precomp( memcpy(ctx, n, 8U * sizeof (uint32_t)); memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t)); uint32_t sw = 0U; - uint32_t *ctx_n = ctx; + uint32_t *ctx_n0 = ctx; uint32_t *ctx_r2 = ctx + 8U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 32U; @@ -757,10 +795,16 @@ exp_consttime_precomp( uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]); resM[i] = resM[i] ^ dummy; aM[i] = aM[i] ^ dummy;); - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, aM, 8U * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); + amont_mul(ctx_n1, mu, aM_copy, resM, aM); + KRML_MAYBE_UNUSED_VAR(ctx); + uint32_t aM_copy0[8U] = { 0U }; + memcpy(aM_copy0, resM, 8U * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); sw = bit; } uint32_t sw0 = sw; @@ -796,18 +840,26 @@ exp_consttime_precomp( uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + 8U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 8U * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * 8U; + uint32_t aM_copy0[8U] = { 0U }; + memcpy(aM_copy0, t11, 8U * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * 8U; + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, aM, 8U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -824,8 +876,8 @@ exp_consttime_precomp( 0U, 8U, 1U, - uint32_t *os = resM; uint32_t x = (c & res_j[i]) | (~c & resM[i]); + uint32_t *os = resM; os[i] = x;);); } else @@ -833,6 +885,7 @@ exp_consttime_precomp( uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + 8U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint32_t tmp0[8U] = { 0U }; for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++) @@ -841,10 +894,14 @@ exp_consttime_precomp( 0U, 4U, 1U, + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, resM, 8U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint32_t *)table, 8U * sizeof 
(uint32_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -856,11 +913,14 @@ exp_consttime_precomp( 0U, 8U, 1U, - uint32_t *os = tmp0; uint32_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint32_t *os = tmp0; os[i] = x;);); + uint32_t aM_copy[8U] = { 0U }; + memcpy(aM_copy, resM, 8U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -1010,7 +1070,7 @@ bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t 1U, uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc0 = (beq & acc0) | (~beq & blt);); uint32_t m1 = acc0; uint32_t m00 = m0 & m1; uint32_t bn_zero[8U] = { 0U }; @@ -1031,7 +1091,7 @@ bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t 1U, uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); uint32_t m2 = acc; uint32_t is_valid_m = (m00 & ~m10) & m2; uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n); @@ -1121,9 +1181,9 @@ Deallocate the memory previously allocated by Hacl_Bignum256_mont_ctx_init. */ void Hacl_Bignum256_32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + uint32_t *n = uu____0.n; + uint32_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -1143,8 +1203,10 @@ Hacl_Bignum256_32_mod_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + bn_slow_precomp(n, mu, r2, a, res); } /** @@ -1175,8 +1237,10 @@ Hacl_Bignum256_32_mod_exp_vartime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + exp_vartime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1207,8 +1271,10 @@ Hacl_Bignum256_32_mod_exp_consttime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + exp_consttime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1230,10 +1296,12 @@ Hacl_Bignum256_32_mod_inv_prime_vartime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; uint32_t n2[8U] = { 0U }; - uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2); - uint32_t *a1 = k1.n + 1U; + uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2); + uint32_t *a1 = n + 1U; uint32_t *res1 = n2 + 1U; uint32_t c = c0; { @@ -1260,7 +1328,7 @@ Hacl_Bignum256_32_mod_inv_prime_vartime_precomp( uint32_t c1 = c; uint32_t c2 = c1; KRML_MAYBE_UNUSED_VAR(c2); - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res); + exp_vartime_precomp(n, mu, r2, a, 256U, n2, res); } @@ -1302,9 +1370,9 @@ uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) 
memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint32_t *os = res2; uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U); uint32_t x = u; + uint32_t *os = res2; os[i] = x; } return res2; @@ -1343,11 +1411,11 @@ uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) memcpy(tmp, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++) { - uint32_t *os = res2; uint8_t *bj = tmp + i * 4U; uint32_t u = load32_le(bj); uint32_t r1 = u; uint32_t x = r1; + uint32_t *os = res2; os[i] = x; } return res2; @@ -1399,7 +1467,7 @@ uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b) 1U, uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); return acc; } diff --git a/src/Hacl_Bignum32.c b/src/Hacl_Bignum32.c index 34b46324..4a371276 100644 --- a/src/Hacl_Bignum32.c +++ b/src/Hacl_Bignum32.c @@ -46,9 +46,18 @@ of `len` unsigned 32-bit integers, i.e. uint32_t[len]. /** Write `a + b mod 2 ^ (32 * len)` in `res`. - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + This function returns the carry. + + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly equal memory + location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_add(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -60,7 +69,16 @@ Write `a - b mod 2 ^ (32 * len)` in `res`. This function returns the carry. - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -70,27 +88,57 @@ uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res /** Write `(a + b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`.
May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) { - Hacl_Bignum_bn_add_mod_n_u32(len, n, a, b, res); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t a_copy[len]; + memset(a_copy, 0U, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t b_copy[len]; + memset(b_copy, 0U, len * sizeof (uint32_t)); + memcpy(a_copy, a, len * sizeof (uint32_t)); + memcpy(b_copy, b, len * sizeof (uint32_t)); + Hacl_Bignum_bn_add_mod_n_u32(len, n, a_copy, b_copy, res); } /** Write `(a - b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -100,8 +148,13 @@ void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, /** Write `a * b` in `res`. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `b` and `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory locations of `a` and `b`. */ void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -114,8 +167,10 @@ void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) /** Write `a * a` in `res`. 
- The argument a is meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory location of `a`. */ void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res) { @@ -149,13 +204,19 @@ bn_slow_precomp( /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `n`. + + @return `false` if any precondition is violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `1 < n` + - `n % 2 = 1` */ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) { @@ -171,7 +232,7 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m1 = acc; uint32_t is_valid_m = m0 & m1; @@ -195,22 +256,30 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. 
if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_vartime( @@ -238,22 +307,30 @@ Hacl_Bignum32_mod_exp_vartime( /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is constant-time over its argument `b`, at the cost of a slower + execution time than `mod_exp_vartime_*`. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_consttime( @@ -281,18 +358,23 @@ Hacl_Bignum32_mod_exp_consttime( /** Write `a ^ (-1) mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `n`. + + @return `false` if any preconditions (except the precondition: `n` is a prime) + are violated, `true` otherwise. 
+ + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `n % 2 = 1` + - `1 < n` + - `0 < a` + - `a < n` */ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) { @@ -308,7 +390,7 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m1 = acc0; uint32_t m00 = m0 & m1; @@ -329,7 +411,7 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t is_valid_m = (m00 & ~m10) & m2; @@ -393,15 +475,16 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, /** Heap-allocate and initialize a montgomery context. - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. + @param n Points to `len` number of limbs, i.e. `uint32_t[len]`. - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum32_mont_ctx_free on the return value - to avoid memory leaks. + @return A pointer to an allocated and initialized Montgomery context is returned. + Clients will need to call `Hacl_Bignum32_mont_ctx_free` on the return value to + avoid memory leaks. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` */ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum32_mont_ctx_init(uint32_t len, uint32_t *n) @@ -429,13 +512,13 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 /** Deallocate the memory previously allocated by Hacl_Bignum32_mont_ctx_init. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. */ void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + uint32_t *n = uu____0.n; + uint32_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -444,9 +527,11 @@ void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. 
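As an illustrative sketch only (the helper name and buffers are hypothetical, not part of this API): a typical lifecycle pairs `Hacl_Bignum32_mont_ctx_init`, one or more `*_precomp` calls, and `Hacl_Bignum32_mont_ctx_free`, assuming `n` is odd and `1 < n` as required above.

  // Illustrative helper, not part of HACL*: reduce a 2*len-limb value modulo n.
  static void reduce_once(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
  {
    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k = Hacl_Bignum32_mont_ctx_init(len, n);
    Hacl_Bignum32_mod_precomp(k, a, res);  // res <- a mod n; a has 2*len limbs, disjoint from res
    Hacl_Bignum32_mont_ctx_free(k);        // releases the context and its internal copies of n and r2
  }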
*/ void Hacl_Bignum32_mod_precomp( @@ -455,30 +540,35 @@ Hacl_Bignum32_mod_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - bn_slow_precomp(len1, k1.n, k1.mu, k1.r2, a, res); + uint32_t len1 = (*k).len; + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + bn_slow_precomp(len1, n, mu, r2, a, res); } /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_vartime_precomp( @@ -489,37 +579,35 @@ Hacl_Bignum32_mod_exp_vartime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); + uint32_t len1 = (*k).len; + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1, n, mu, r2, a, bBits, b, res); } /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. 
- - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + execution time than `mod_exp_vartime_*`. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_consttime_precomp( @@ -530,30 +618,27 @@ Hacl_Bignum32_mod_exp_consttime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); + uint32_t len1 = (*k).len; + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(len1, n, mu, r2, a, bBits, b, res); } /** Write `a ^ (-1) mod n` in `res`. - The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n + @param[in] k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. 
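For illustration, a hedged sketch that combines the two precomputed-context exponentiations above with the inversion documented here; the helper and variable names are hypothetical, and the preconditions listed just below (including `n` prime) are assumed to hold.

  // Illustrative helper, not part of HACL*.
  // s, out, s_inv are len limbs; e holds at least ceil(eBits/32) limbs.
  static void exp_then_invert(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
                              uint32_t *s, uint32_t eBits, uint32_t *e,
                              uint32_t *out, uint32_t *s_inv)
  {
    Hacl_Bignum32_mod_exp_consttime_precomp(k, s, eBits, e, out);  // timing independent of e
    Hacl_Bignum32_mod_inv_prime_vartime_precomp(k, s, s_inv);      // variable time; n must be prime
  }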
+ + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `0 < a` + - `a < n` */ void Hacl_Bignum32_mod_inv_prime_vartime_precomp( @@ -562,17 +647,18 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; + uint32_t len1 = (*k).len; + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; KRML_CHECK_SIZE(sizeof (uint32_t), len1); uint32_t n2[len1]; memset(n2, 0U, len1 * sizeof (uint32_t)); - uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2); + uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2); uint32_t c1; if (1U < len1) { - uint32_t *a1 = k1.n + 1U; + uint32_t *a1 = n + 1U; uint32_t *res1 = n2 + 1U; uint32_t c = c0; for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++) @@ -605,9 +691,9 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp( } KRML_MAYBE_UNUSED_VAR(c1); Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1, - k1.n, - k1.mu, - k1.r2, + n, + mu, + r2, a, 32U * len1, n2, @@ -623,13 +709,13 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp( /** Load a bid-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) { @@ -653,9 +739,9 @@ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint32_t *os = res2; uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U); uint32_t x = u; + uint32_t *os = res2; os[i] = x; } return res2; @@ -664,13 +750,13 @@ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) /** Load a little-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. 
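A minimal ownership sketch (hypothetical helper; `len` and `bytes` are caller-supplied, and the same pattern applies to the big-endian variant):

  #include <stdbool.h>
  #include <stdlib.h>  // free

  // Illustrative helper, not part of HACL*.
  static bool load_use_free(uint32_t len, uint8_t *bytes)
  {
    uint32_t *bn = Hacl_Bignum32_new_bn_from_bytes_le(len, bytes);
    if (bn == NULL) { return false; }  // allocation failed or the size limit was exceeded
    // ... operate on the ceil(len/4)-limb bignum bn ...
    free(bn);                          // required to avoid a memory leak
    return true;
  }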
*/ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) { @@ -694,11 +780,11 @@ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) memcpy(tmp, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++) { - uint32_t *os = res2; uint8_t *bj = tmp + i * 4U; uint32_t u = load32_le(bj); uint32_t r1 = u; uint32_t x = r1; + uint32_t *os = res2; os[i] = x; } return res2; @@ -707,8 +793,11 @@ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) /** Serialize a bignum into big-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res) { @@ -727,8 +816,11 @@ void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res) /** Serialize a bignum into little-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res) { @@ -753,7 +845,11 @@ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res) /** Returns 2^32 - 1 if a < b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if `a < b`, otherwise, `0`. */ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) { @@ -762,7 +858,7 @@ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } return acc; } @@ -770,7 +866,11 @@ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) /** Returns 2^32 - 1 if a = b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if a = b, otherwise, `0`. 
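As an illustrative use of these masks (hypothetical helper; the limb-wise select mirrors the masking idiom used throughout this file, and the same pattern applies to `Hacl_Bignum32_eq_mask`):

  // Illustrative helper, not part of HACL*: out <- (a < b) ? y : x, without branching on the comparison.
  static void ct_select_if_lt(uint32_t len, uint32_t *a, uint32_t *b,
                              uint32_t *x, uint32_t *y, uint32_t *out)
  {
    uint32_t m = Hacl_Bignum32_lt_mask(len, a, b);  // 2^32 - 1 if a < b, otherwise 0
    for (uint32_t i = 0U; i < len; i++)
    {
      out[i] = (m & y[i]) | (~m & x[i]);
    }
  }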
*/ uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b) { diff --git a/src/Hacl_Bignum4096.c b/src/Hacl_Bignum4096.c index 3572db07..c9ac9573 100644 --- a/src/Hacl_Bignum4096.c +++ b/src/Hacl_Bignum4096.c @@ -180,8 +180,8 @@ void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *re uint64_t c2 = c00 - c1; for (uint32_t i = 0U; i < 64U; i++) { - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x; } } @@ -247,8 +247,8 @@ void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *re uint64_t c2 = 0ULL - c00; for (uint32_t i = 0U; i < 64U; i++) { - uint64_t *os = res; uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint64_t *os = res; os[i] = x; } } @@ -285,7 +285,11 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res) res[i] = res[i] | 1ULL << j; for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++) { - Hacl_Bignum4096_add_mod(n, res, res, res); + uint64_t a_copy[64U] = { 0U }; + uint64_t b_copy[64U] = { 0U }; + memcpy(a_copy, res, 64U * sizeof (uint64_t)); + memcpy(b_copy, res, 64U * sizeof (uint64_t)); + Hacl_Bignum4096_add_mod(n, a_copy, b_copy, res); } } @@ -315,8 +319,8 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t * c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);); uint64_t r = c1; uint64_t c10 = r; - uint64_t *resb = c + 64U + i0; uint64_t res_j = c[64U + i0]; + uint64_t *resb = c + 64U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb); } memcpy(res, c + 64U, 64U * sizeof (uint64_t)); @@ -347,8 +351,8 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t * uint64_t c2 = c00 - c10; for (uint32_t i = 0U; i < 64U; i++) { - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x; } } @@ -393,8 +397,8 @@ static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);); uint64_t r = c1; uint64_t c10 = r; - uint64_t *resb = c + 64U + i0; uint64_t res_j = c[64U + i0]; + uint64_t *resb = c + 64U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb); } memcpy(res, c + 64U, 64U * sizeof (uint64_t)); @@ -405,8 +409,8 @@ static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t uint64_t m = 0ULL - c00; for (uint32_t i = 0U; i < 64U; i++) { - uint64_t *os = res; uint64_t x = (m & tmp[i]) | (~m & res[i]); + uint64_t *os = res; os[i] = x; } } @@ -459,7 +463,7 @@ bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; uint64_t is_valid_m = m0 & m1; @@ -490,7 +494,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m10 = acc0; uint64_t m00 = m0 & m10; @@ -517,7 +521,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | 
(~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; m1 = res; @@ -531,7 +535,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t m = m1 & m2; @@ -557,9 +561,10 @@ exp_vartime_precomp( uint64_t ctx[128U] = { 0U }; memcpy(ctx, n, 64U * sizeof (uint64_t)); memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t)); - uint64_t *ctx_n = ctx; + uint64_t *ctx_n0 = ctx; uint64_t *ctx_r2 = ctx + 64U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 64U; @@ -568,11 +573,17 @@ exp_vartime_precomp( uint64_t bit = tmp >> j & 1ULL; if (!(bit == 0ULL)) { - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, resM, 64U * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + amont_mul(ctx_n, mu, aM_copy, aM, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } - uint64_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, aM, 64U * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy, aM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); return; @@ -599,18 +610,26 @@ exp_vartime_precomp( uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + 64U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 64U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 64U; + uint64_t aM_copy0[64U] = { 0U }; + memcpy(aM_copy0, t11, 64U * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * 64U; + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, aM, 64U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -625,6 +644,7 @@ exp_vartime_precomp( uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + 64U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint64_t tmp0[64U] = { 0U }; for (uint32_t i = 0U; i < bBits / 4U; i++) @@ -633,15 +653,22 @@ exp_vartime_precomp( 0U, 4U, 1U, + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, resM, 64U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = (uint32_t)bits_l; const uint64_t *a_bits_l = table + bits_l32 * 64U; memcpy(tmp0, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t)); + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, resM, 64U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -666,9 +693,10 @@ exp_consttime_precomp( memcpy(ctx, n, 64U * sizeof (uint64_t)); memcpy(ctx + 64U, r2, 
64U * sizeof (uint64_t)); uint64_t sw = 0ULL; - uint64_t *ctx_n = ctx; + uint64_t *ctx_n0 = ctx; uint64_t *ctx_r2 = ctx + 64U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 64U; @@ -682,10 +710,16 @@ exp_consttime_precomp( resM[i] = resM[i] ^ dummy; aM[i] = aM[i] ^ dummy; } - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, aM, 64U * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); + amont_mul(ctx_n1, mu, aM_copy, resM, aM); + KRML_MAYBE_UNUSED_VAR(ctx); + uint64_t aM_copy0[64U] = { 0U }; + memcpy(aM_copy0, resM, 64U * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); sw = bit; } uint64_t sw0 = sw; @@ -720,18 +754,26 @@ exp_consttime_precomp( uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + 64U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 64U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 64U; + uint64_t aM_copy0[64U] = { 0U }; + memcpy(aM_copy0, t11, 64U * sizeof (uint64_t)); uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * 64U; + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, aM, 64U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -746,8 +788,8 @@ exp_consttime_precomp( const uint64_t *res_j = table + (i1 + 1U) * 64U; for (uint32_t i = 0U; i < 64U; i++) { - uint64_t *os = resM; uint64_t x = (c & res_j[i]) | (~c & resM[i]); + uint64_t *os = resM; os[i] = x; }); } @@ -756,6 +798,7 @@ exp_consttime_precomp( uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + 64U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint64_t tmp0[64U] = { 0U }; for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++) @@ -764,10 +807,14 @@ exp_consttime_precomp( 0U, 4U, 1U, + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, resM, 64U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)table, 64U * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -777,12 +824,15 @@ exp_consttime_precomp( const uint64_t *res_j = table + (i1 + 1U) * 64U; for (uint32_t i = 0U; i < 64U; i++) { - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x; }); + uint64_t aM_copy[64U] = { 0U }; + memcpy(aM_copy, resM, 64U * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -930,7 +980,7 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 
0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m1 = acc0; uint64_t m00 = m0 & m1; @@ -949,7 +999,7 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t is_valid_m = (m00 & ~m10) & m2; @@ -1042,9 +1092,9 @@ Deallocate the memory previously allocated by Hacl_Bignum4096_mont_ctx_init. */ void Hacl_Bignum4096_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + uint64_t *n = uu____0.n; + uint64_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -1064,8 +1114,10 @@ Hacl_Bignum4096_mod_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + bn_slow_precomp(n, mu, r2, a, res); } /** @@ -1096,8 +1148,10 @@ Hacl_Bignum4096_mod_exp_vartime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + exp_vartime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1128,8 +1182,10 @@ Hacl_Bignum4096_mod_exp_consttime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + exp_consttime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1151,10 +1207,12 @@ Hacl_Bignum4096_mod_inv_prime_vartime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; uint64_t n2[64U] = { 0U }; - uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2); - uint64_t *a1 = k1.n + 1U; + uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2); + uint64_t *a1 = n + 1U; uint64_t *res1 = n2 + 1U; uint64_t c = c0; KRML_MAYBE_FOR15(i, @@ -1183,7 +1241,7 @@ Hacl_Bignum4096_mod_inv_prime_vartime_precomp( uint64_t c1 = c; uint64_t c2 = c1; KRML_MAYBE_UNUSED_VAR(c2); - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res); + exp_vartime_precomp(n, mu, r2, a, 4096U, n2, res); } @@ -1225,9 +1283,9 @@ uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b) memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint64_t *os = res2; uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = res2; os[i] = x; } return res2; @@ -1266,11 +1324,11 @@ uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b) memcpy(tmp, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++) { - uint64_t *os = res2; uint8_t *bj = tmp + i * 8U; uint64_t u = load64_le(bj); uint64_t r1 = u; uint64_t x = r1; + uint64_t *os = res2; os[i] = x; } return res2; @@ -1326,7 +1384,7 @@ uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], 
b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } return acc; } diff --git a/src/Hacl_Bignum4096_32.c b/src/Hacl_Bignum4096_32.c index 1a8b361c..3b36fbdc 100644 --- a/src/Hacl_Bignum4096_32.c +++ b/src/Hacl_Bignum4096_32.c @@ -177,8 +177,8 @@ void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t uint32_t c2 = c00 - c1; for (uint32_t i = 0U; i < 128U; i++) { - uint32_t *os = res; uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint32_t *os = res; os[i] = x; } } @@ -242,8 +242,8 @@ void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t uint32_t c2 = 0U - c00; for (uint32_t i = 0U; i < 128U; i++) { - uint32_t *os = res; uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint32_t *os = res; os[i] = x; } } @@ -280,7 +280,11 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res) res[i] = res[i] | 1U << j; for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++) { - Hacl_Bignum4096_32_add_mod(n, res, res, res); + uint32_t a_copy[128U] = { 0U }; + uint32_t b_copy[128U] = { 0U }; + memcpy(a_copy, res, 128U * sizeof (uint32_t)); + memcpy(b_copy, res, 128U * sizeof (uint32_t)); + Hacl_Bignum4096_32_add_mod(n, a_copy, b_copy, res); } } @@ -309,8 +313,8 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t * } uint32_t r = c1; uint32_t c10 = r; - uint32_t *resb = c + 128U + i0; uint32_t res_j = c[128U + i0]; + uint32_t *resb = c + 128U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb); } memcpy(res, c + 128U, 128U * sizeof (uint32_t)); @@ -340,8 +344,8 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t * uint32_t c2 = c00 - c10; for (uint32_t i = 0U; i < 128U; i++) { - uint32_t *os = res; uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint32_t *os = res; os[i] = x; } } @@ -385,8 +389,8 @@ static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t } uint32_t r = c1; uint32_t c10 = r; - uint32_t *resb = c + 128U + i0; uint32_t res_j = c[128U + i0]; + uint32_t *resb = c + 128U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb); } memcpy(res, c + 128U, 128U * sizeof (uint32_t)); @@ -397,8 +401,8 @@ static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t uint32_t m = 0U - c00; for (uint32_t i = 0U; i < 128U; i++) { - uint32_t *os = res; uint32_t x = (m & tmp[i]) | (~m & res[i]); + uint32_t *os = res; os[i] = x; } } @@ -451,7 +455,7 @@ bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m1 = acc; uint32_t is_valid_m = m0 & m1; @@ -482,7 +486,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m10 = acc0; uint32_t m00 = m0 & m10; @@ -509,7 +513,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t res 
= acc; m1 = res; @@ -523,7 +527,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t m = m1 & m2; @@ -549,9 +553,10 @@ exp_vartime_precomp( uint32_t ctx[256U] = { 0U }; memcpy(ctx, n, 128U * sizeof (uint32_t)); memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t)); - uint32_t *ctx_n = ctx; + uint32_t *ctx_n0 = ctx; uint32_t *ctx_r2 = ctx + 128U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 32U; @@ -560,11 +565,17 @@ exp_vartime_precomp( uint32_t bit = tmp >> j & 1U; if (!(bit == 0U)) { - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, resM, 128U * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + amont_mul(ctx_n, mu, aM_copy, aM, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } - uint32_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, aM, 128U * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy, aM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); return; @@ -591,18 +602,26 @@ exp_vartime_precomp( uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + 128U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 128U * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * 128U; + uint32_t aM_copy0[128U] = { 0U }; + memcpy(aM_copy0, t11, 128U * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * 128U; + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, aM, 128U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -617,6 +636,7 @@ exp_vartime_precomp( uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + 128U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint32_t tmp0[128U] = { 0U }; for (uint32_t i = 0U; i < bBits / 4U; i++) @@ -625,15 +645,22 @@ exp_vartime_precomp( 0U, 4U, 1U, + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, resM, 128U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = bits_l; const uint32_t *a_bits_l = table + bits_l32 * 128U; memcpy(tmp0, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t)); + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, resM, 128U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -658,9 +685,10 @@ exp_consttime_precomp( memcpy(ctx, n, 128U * sizeof (uint32_t)); memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t)); uint32_t sw = 0U; - uint32_t *ctx_n = 
ctx; + uint32_t *ctx_n0 = ctx; uint32_t *ctx_r2 = ctx + 128U; - from(ctx_n, mu, ctx_r2, resM); + from(ctx_n0, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 32U; @@ -674,10 +702,16 @@ exp_consttime_precomp( resM[i] = resM[i] ^ dummy; aM[i] = aM[i] ^ dummy; } - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, aM, 128U * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); + amont_mul(ctx_n1, mu, aM_copy, resM, aM); + KRML_MAYBE_UNUSED_VAR(ctx); + uint32_t aM_copy0[128U] = { 0U }; + memcpy(aM_copy0, resM, 128U * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + amont_sqr(ctx_n, mu, aM_copy0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); sw = bit; } uint32_t sw0 = sw; @@ -712,18 +746,26 @@ exp_consttime_precomp( uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + 128U; from(ctx_n0, mu, ctx_r20, t0); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(t1, aM, 128U * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * 128U; + uint32_t aM_copy0[128U] = { 0U }; + memcpy(aM_copy0, t11, 128U * sizeof (uint32_t)); uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); + amont_sqr(ctx_n1, mu, aM_copy0, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * 128U; + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, aM, 128U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); + amont_mul(ctx_n, mu, aM_copy, t2, tmp); + KRML_MAYBE_UNUSED_VAR(ctx); memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -738,8 +780,8 @@ exp_consttime_precomp( const uint32_t *res_j = table + (i1 + 1U) * 128U; for (uint32_t i = 0U; i < 128U; i++) { - uint32_t *os = resM; uint32_t x = (c & res_j[i]) | (~c & resM[i]); + uint32_t *os = resM; os[i] = x; }); } @@ -748,6 +790,7 @@ exp_consttime_precomp( uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + 128U; from(ctx_n, mu, ctx_r2, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } uint32_t tmp0[128U] = { 0U }; for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++) @@ -756,10 +799,14 @@ exp_consttime_precomp( 0U, 4U, 1U, + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, resM, 128U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); + amont_sqr(ctx_n, mu, aM_copy, resM); + KRML_MAYBE_UNUSED_VAR(ctx);); uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint32_t *)table, 128U * sizeof (uint32_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -769,12 +816,15 @@ exp_consttime_precomp( const uint32_t *res_j = table + (i1 + 1U) * 128U; for (uint32_t i = 0U; i < 128U; i++) { - uint32_t *os = tmp0; uint32_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint32_t *os = tmp0; os[i] = x; }); + uint32_t aM_copy[128U] = { 0U }; + memcpy(aM_copy, resM, 128U * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, tmp0, resM); + amont_mul(ctx_n, mu, aM_copy, tmp0, resM); + KRML_MAYBE_UNUSED_VAR(ctx); } from(n, mu, resM, res); } @@ -922,7 +972,7 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq 
& acc0) | (~beq & blt); } uint32_t m1 = acc0; uint32_t m00 = m0 & m1; @@ -941,7 +991,7 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t is_valid_m = (m00 & ~m10) & m2; @@ -1034,9 +1084,9 @@ Deallocate the memory previously allocated by Hacl_Bignum4096_mont_ctx_init. */ void Hacl_Bignum4096_32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + uint32_t *n = uu____0.n; + uint32_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -1056,8 +1106,10 @@ Hacl_Bignum4096_32_mod_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + bn_slow_precomp(n, mu, r2, a, res); } /** @@ -1088,8 +1140,10 @@ Hacl_Bignum4096_32_mod_exp_vartime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + exp_vartime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1120,8 +1174,10 @@ Hacl_Bignum4096_32_mod_exp_consttime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; + exp_consttime_precomp(n, mu, r2, a, bBits, b, res); } /** @@ -1143,10 +1199,12 @@ Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp( uint32_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; + uint32_t *n = (*k).n; + uint32_t mu = (*k).mu; + uint32_t *r2 = (*k).r2; uint32_t n2[128U] = { 0U }; - uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2); - uint32_t *a1 = k1.n + 1U; + uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2); + uint32_t *a1 = n + 1U; uint32_t *res1 = n2 + 1U; uint32_t c = c0; for (uint32_t i = 0U; i < 31U; i++) @@ -1174,7 +1232,7 @@ Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp( uint32_t c1 = c; uint32_t c2 = c1; KRML_MAYBE_UNUSED_VAR(c2); - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res); + exp_vartime_precomp(n, mu, r2, a, 4096U, n2, res); } @@ -1216,9 +1274,9 @@ uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint32_t *os = res2; uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U); uint32_t x = u; + uint32_t *os = res2; os[i] = x; } return res2; @@ -1257,11 +1315,11 @@ uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) memcpy(tmp, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++) { - uint32_t *os = res2; uint8_t *bj = tmp + i * 4U; uint32_t u = load32_le(bj); uint32_t r1 = u; uint32_t x = r1; + uint32_t *os = res2; os[i] = x; } return res2; @@ -1317,7 +1375,7 @@ uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | 
(~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } return acc; } diff --git a/src/Hacl_Bignum64.c b/src/Hacl_Bignum64.c index f8f5bb6f..2ee38f17 100644 --- a/src/Hacl_Bignum64.c +++ b/src/Hacl_Bignum64.c @@ -78,7 +78,15 @@ Write `(a + b) mod n` in `res`. */ void Hacl_Bignum64_add_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) { - Hacl_Bignum_bn_add_mod_n_u64(len, n, a, b, res); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t a_copy[len]; + memset(a_copy, 0U, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t b_copy[len]; + memset(b_copy, 0U, len * sizeof (uint64_t)); + memcpy(a_copy, a, len * sizeof (uint64_t)); + memcpy(b_copy, b, len * sizeof (uint64_t)); + Hacl_Bignum_bn_add_mod_n_u64(len, n, a_copy, b_copy, res); } /** @@ -170,7 +178,7 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; uint64_t is_valid_m = m0 & m1; @@ -307,7 +315,7 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a, { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m1 = acc0; uint64_t m00 = m0 & m1; @@ -328,7 +336,7 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a, { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t is_valid_m = (m00 & ~m10) & m2; @@ -432,9 +440,9 @@ Deallocate the memory previously allocated by Hacl_Bignum64_mont_ctx_init. 
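Two patterns dominate the hunks around here. First, the comparison loops are simplified using the identity (blt & 0xFF..FU) | (~blt & 0U) == blt, which does not change behaviour. Second, wrappers such as Hacl_Bignum64_add_mod now copy their inputs into stack temporaries (a_copy/b_copy above) before calling the underlying kernel, which looks intended to make calls with aliased arguments safe. A minimal caller sketch for the add-mod wrapper, assuming the usual Hacl_Bignum64.h header; the 4-limb toy values are illustrative only:

#include <stdint.h>
#include "Hacl_Bignum64.h"

/* Illustrative only: little-endian limb arrays of length `len`, with a < n and b < n. */
static void add_mod_demo(void)
{
  uint32_t len = 4U;
  uint64_t n[4U]   = { 13U, 0U, 0U, 0U };  /* toy modulus */
  uint64_t a[4U]   = { 9U, 0U, 0U, 0U };
  uint64_t b[4U]   = { 7U, 0U, 0U, 0U };
  uint64_t res[4U] = { 0U };
  /* res <- (a + b) mod n; with the defensive copies added in this patch,
     passing res aliased with a or b should also be acceptable. */
  Hacl_Bignum64_add_mod(len, n, a, b, res);  /* expect res = { 3, 0, 0, 0 } */
}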
*/ void Hacl_Bignum64_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + uint64_t *n = uu____0.n; + uint64_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -454,10 +462,11 @@ Hacl_Bignum64_mod_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - bn_slow_precomp(len1, k1.n, k1.mu, k1.r2, a, res); + uint32_t len1 = (*k).len; + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + bn_slow_precomp(len1, n, mu, r2, a, res); } /** @@ -488,17 +497,11 @@ Hacl_Bignum64_mod_exp_vartime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); + uint32_t len1 = (*k).len; + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1, n, mu, r2, a, bBits, b, res); } /** @@ -529,17 +532,11 @@ Hacl_Bignum64_mod_exp_consttime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); + uint32_t len1 = (*k).len; + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; + Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(len1, n, mu, r2, a, bBits, b, res); } /** @@ -561,17 +558,18 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp( uint64_t *res ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; + uint32_t len1 = (*k).len; + uint64_t *n = (*k).n; + uint64_t mu = (*k).mu; + uint64_t *r2 = (*k).r2; KRML_CHECK_SIZE(sizeof (uint64_t), len1); uint64_t n2[len1]; memset(n2, 0U, len1 * sizeof (uint64_t)); - uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2); + uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2); uint64_t c1; if (1U < len1) { - uint64_t *a1 = k1.n + 1U; + uint64_t *a1 = n + 1U; uint64_t *res1 = n2 + 1U; uint64_t c = c0; for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++) @@ -604,9 +602,9 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp( } KRML_MAYBE_UNUSED_VAR(c1); Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1, - k1.n, - k1.mu, - k1.r2, + n, + mu, + r2, a, 64U * len1, n2, @@ -652,9 +650,9 @@ uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b) memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < bnLen; i++) { - uint64_t *os = res2; uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = res2; os[i] = x; } return res2; @@ -693,11 +691,11 @@ uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b) memcpy(tmp, b, len * sizeof (uint8_t)); for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++) { - uint64_t *os = res2; uint8_t *bj = tmp + i * 8U; uint64_t u = load64_le(bj); uint64_t r1 = u; uint64_t x = r1; + uint64_t *os = res2; os[i] = x; } return res2; @@ -761,7 +759,7 @@ uint64_t 
Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } return acc; } diff --git a/src/Hacl_Chacha20.c b/src/Hacl_Chacha20.c index 38a5c373..cc5b5fb4 100644 --- a/src/Hacl_Chacha20.c +++ b/src/Hacl_Chacha20.c @@ -102,45 +102,43 @@ static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr) 0U, 16U, 1U, - uint32_t *os = k; uint32_t x = k[i] + ctx[i]; + uint32_t *os = k; os[i] = x;); k[12U] = k[12U] + ctr_u32; } -static const -uint32_t -chacha20_constants[4U] = { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U }; - void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr) { KRML_MAYBE_FOR4(i, 0U, 4U, 1U, + uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; uint32_t *os = ctx; - uint32_t x = chacha20_constants[i]; os[i] = x;); + uint32_t *uu____0 = ctx + 4U; KRML_MAYBE_FOR8(i, 0U, 8U, 1U, - uint32_t *os = ctx + 4U; uint8_t *bj = k + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); ctx[12U] = ctr; + uint32_t *uu____1 = ctx + 13U; KRML_MAYBE_FOR3(i, 0U, 3U, 1U, - uint32_t *os = ctx + 13U; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); } @@ -153,18 +151,18 @@ static void chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, u 0U, 16U, 1U, - uint32_t *os = bl; uint8_t *bj = text + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = bl; uint32_t x = bl[i] ^ k[i]; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, bl[i]);); } @@ -174,7 +172,9 @@ chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr, { uint8_t plain[64U] = { 0U }; memcpy(plain, text, len * sizeof (uint8_t)); - chacha20_encrypt_block(ctx, plain, incr, plain); + uint8_t plain_copy[64U] = { 0U }; + memcpy(plain_copy, plain, 64U * sizeof (uint8_t)); + chacha20_encrypt_block(ctx, plain, incr, plain_copy); memcpy(out, plain, len * sizeof (uint8_t)); } diff --git a/src/Hacl_Chacha20_Vec128.c b/src/Hacl_Chacha20_Vec128.c index deab1dfc..1c49e409 100644 --- a/src/Hacl_Chacha20_Vec128.c +++ b/src/Hacl_Chacha20_Vec128.c @@ -153,8 +153,8 @@ chacha20_core_128( 0U, 16U, 1U, - Lib_IntVector_Intrinsics_vec128 *os = k; Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]); + Lib_IntVector_Intrinsics_vec128 *os = k; os[i] = x;); k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv); } @@ -167,37 +167,39 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n, 0U, 4U, 1U, - uint32_t *os = ctx1; uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; + uint32_t *os = ctx1; os[i] = x;); + uint32_t *uu____0 = ctx1 + 4U; KRML_MAYBE_FOR8(i, 0U, 8U, 1U, - uint32_t *os = ctx1 + 4U; uint8_t *bj = k + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); ctx1[12U] = ctr; + uint32_t *uu____1 = ctx1 + 13U; KRML_MAYBE_FOR3(i, 0U, 3U, 1U, - uint32_t *os = ctx1 + 13U; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - Lib_IntVector_Intrinsics_vec128 
*os = ctx; uint32_t x = ctx1[i]; Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x); + Lib_IntVector_Intrinsics_vec128 *os = ctx; os[i] = x0;); Lib_IntVector_Intrinsics_vec128 ctr1 = Lib_IntVector_Intrinsics_vec128_load32s(0U, 1U, 2U, 3U); Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U]; diff --git a/src/Hacl_Chacha20_Vec256.c b/src/Hacl_Chacha20_Vec256.c index e61a7cfe..83195c90 100644 --- a/src/Hacl_Chacha20_Vec256.c +++ b/src/Hacl_Chacha20_Vec256.c @@ -153,8 +153,8 @@ chacha20_core_256( 0U, 16U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = k; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]); + Lib_IntVector_Intrinsics_vec256 *os = k; os[i] = x;); k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv); } @@ -167,37 +167,39 @@ chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n, 0U, 4U, 1U, - uint32_t *os = ctx1; uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; + uint32_t *os = ctx1; os[i] = x;); + uint32_t *uu____0 = ctx1 + 4U; KRML_MAYBE_FOR8(i, 0U, 8U, 1U, - uint32_t *os = ctx1 + 4U; uint8_t *bj = k + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); ctx1[12U] = ctr; + uint32_t *uu____1 = ctx1 + 13U; KRML_MAYBE_FOR3(i, 0U, 3U, 1U, - uint32_t *os = ctx1 + 13U; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = ctx; uint32_t x = ctx1[i]; Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x); + Lib_IntVector_Intrinsics_vec256 *os = ctx; os[i] = x0;); Lib_IntVector_Intrinsics_vec256 ctr1 = Lib_IntVector_Intrinsics_vec256_load32s(0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U); diff --git a/src/Hacl_Chacha20_Vec32.c b/src/Hacl_Chacha20_Vec32.c index 0dce915c..63f1e951 100644 --- a/src/Hacl_Chacha20_Vec32.c +++ b/src/Hacl_Chacha20_Vec32.c @@ -147,8 +147,8 @@ static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr) 0U, 16U, 1U, - uint32_t *os = k; uint32_t x = k[i] + ctx[i]; + uint32_t *os = k; os[i] = x;); k[12U] = k[12U] + cv; } @@ -160,36 +160,38 @@ static inline void chacha20_init_32(uint32_t *ctx, uint8_t *k, uint8_t *n, uint3 0U, 4U, 1U, - uint32_t *os = ctx1; uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; + uint32_t *os = ctx1; os[i] = x;); + uint32_t *uu____0 = ctx1 + 4U; KRML_MAYBE_FOR8(i, 0U, 8U, 1U, - uint32_t *os = ctx1 + 4U; uint8_t *bj = k + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); ctx1[12U] = ctr; + uint32_t *uu____1 = ctx1 + 13U; KRML_MAYBE_FOR3(i, 0U, 3U, 1U, - uint32_t *os = ctx1 + 13U; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = ctx; uint32_t x = ctx1[i]; + uint32_t *os = ctx; os[i] = x;); uint32_t ctr1 = 0U; uint32_t c12 = ctx[12U]; diff --git a/src/Hacl_Curve25519_51.c b/src/Hacl_Curve25519_51.c index ca561e89..2d1b7c76 100644 --- a/src/Hacl_Curve25519_51.c +++ b/src/Hacl_Curve25519_51.c @@ -38,64 +38,87 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_ uint64_t *x1 = q; uint64_t *x2 = nq; uint64_t *z2 = nq + 5U; - uint64_t *z3 = nq_p1 + 5U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + 5U; - uint64_t *ab = tmp1; uint64_t *dc = tmp1 + 10U; + uint64_t *ab = tmp1; + uint64_t *a = ab; + uint64_t *b = ab 
+ 5U; Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2); Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2); + uint64_t *ab1 = tmp1; uint64_t *x3 = nq_p1; uint64_t *z31 = nq_p1 + 5U; uint64_t *d0 = dc; uint64_t *c0 = dc + 5U; Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31); Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31); - Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2); - Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0); - Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0); - uint64_t *a1 = tmp1; - uint64_t *b1 = tmp1 + 5U; - uint64_t *d = tmp1 + 10U; - uint64_t *c = tmp1 + 15U; - uint64_t *ab1 = tmp1; + uint64_t f1_copy0[10U] = { 0U }; + memcpy(f1_copy0, dc, 10U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul2(dc, f1_copy0, ab1, tmp2); + uint64_t *d1 = dc; + uint64_t *c1 = dc + 5U; + Hacl_Impl_Curve25519_Field51_fadd(x3, d1, c1); + Hacl_Impl_Curve25519_Field51_fsub(z31, d1, c1); + uint64_t *ab2 = tmp1; uint64_t *dc1 = tmp1 + 10U; - Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2); - Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2); + Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab2, tmp2); + uint64_t f1_copy1[10U] = { 0U }; + memcpy(f1_copy1, nq_p1, 10U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, f1_copy1, tmp2); + uint64_t *a1 = ab2; + uint64_t *b1 = ab2 + 5U; + uint64_t *d = dc1; + uint64_t *c = dc1 + 5U; a1[0U] = c[0U]; a1[1U] = c[1U]; a1[2U] = c[2U]; a1[3U] = c[3U]; a1[4U] = c[4U]; - Hacl_Impl_Curve25519_Field51_fsub(c, d, c); + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, c, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fsub(c, d, f2_copy); Hacl_Impl_Curve25519_Field51_fmul1(b1, c, 121665ULL); - Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d); - Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2); - Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2); + uint64_t f1_copy2[5U] = { 0U }; + memcpy(f1_copy2, b1, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fadd(b1, f1_copy2, d); + uint64_t *ab3 = tmp1; + uint64_t *dc2 = tmp1 + 10U; + Hacl_Impl_Curve25519_Field51_fmul2(nq, dc2, ab3, tmp2); + uint64_t *z310 = nq_p1 + 5U; + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, z310, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(z310, f1_copy, x1, tmp2); } static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2) { uint64_t *x2 = nq; uint64_t *z2 = nq + 5U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + 5U; - uint64_t *d = tmp1 + 10U; - uint64_t *c = tmp1 + 15U; uint64_t *ab = tmp1; uint64_t *dc = tmp1 + 10U; + uint64_t *a = ab; + uint64_t *b = ab + 5U; Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2); Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2); Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2); - a[0U] = c[0U]; - a[1U] = c[1U]; - a[2U] = c[2U]; - a[3U] = c[3U]; - a[4U] = c[4U]; - Hacl_Impl_Curve25519_Field51_fsub(c, d, c); - Hacl_Impl_Curve25519_Field51_fmul1(b, c, 121665ULL); - Hacl_Impl_Curve25519_Field51_fadd(b, b, d); - Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2); + uint64_t *d = dc; + uint64_t *c = dc + 5U; + uint64_t *a1 = ab; + uint64_t *b1 = ab + 5U; + a1[0U] = c[0U]; + a1[1U] = c[1U]; + a1[2U] = c[2U]; + a1[3U] = c[3U]; + a1[4U] = c[4U]; + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, c, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fsub(c, d, f2_copy); + Hacl_Impl_Curve25519_Field51_fmul1(b1, c, 121665ULL); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, b1, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fadd(b1, f1_copy, d); + uint64_t *ab1 = tmp1; + uint64_t *dc1 = tmp1 
+ 10U; + Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2); } static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) @@ -104,7 +127,6 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) for (uint32_t _i = 0U; _i < 10U; ++_i) tmp2[_i] = FStar_UInt128_uint64_to_uint128(0ULL); uint64_t p01_tmp1_swap[41U] = { 0U }; - uint64_t *p0 = p01_tmp1_swap; uint64_t *p01 = p01_tmp1_swap; uint64_t *p03 = p01; uint64_t *p11 = p01 + 10U; @@ -121,34 +143,39 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) z0[2U] = 0ULL; z0[3U] = 0ULL; z0[4U] = 0ULL; + uint64_t *swap = p01_tmp1_swap + 40U; uint64_t *p01_tmp1 = p01_tmp1_swap; + uint64_t *nq0 = p01_tmp1; + uint64_t *nq_p1 = p01_tmp1 + 10U; + Hacl_Impl_Curve25519_Field51_cswap2(1ULL, nq0, nq_p1); uint64_t *p01_tmp11 = p01_tmp1_swap; - uint64_t *nq1 = p01_tmp1_swap; - uint64_t *nq_p11 = p01_tmp1_swap + 10U; - uint64_t *swap = p01_tmp1_swap + 40U; - Hacl_Impl_Curve25519_Field51_cswap2(1ULL, nq1, nq_p11); point_add_and_double(init, p01_tmp11, tmp2); swap[0U] = 1ULL; for (uint32_t i = 0U; i < 251U; i++) { uint64_t *p01_tmp12 = p01_tmp1_swap; uint64_t *swap1 = p01_tmp1_swap + 40U; - uint64_t *nq2 = p01_tmp12; - uint64_t *nq_p12 = p01_tmp12 + 10U; + uint64_t *nq1 = p01_tmp12; + uint64_t *nq_p11 = p01_tmp12 + 10U; uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U); uint64_t sw = swap1[0U] ^ bit; - Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12); + Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11); point_add_and_double(init, p01_tmp12, tmp2); swap1[0U] = bit; } uint64_t sw = swap[0U]; + uint64_t *p01_tmp12 = p01_tmp1_swap; + uint64_t *nq1 = p01_tmp12; + uint64_t *nq_p11 = p01_tmp12 + 10U; Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11); - uint64_t *nq10 = p01_tmp1; - uint64_t *tmp1 = p01_tmp1 + 20U; - point_double(nq10, tmp1, tmp2); - point_double(nq10, tmp1, tmp2); - point_double(nq10, tmp1, tmp2); - memcpy(out, p0, 10U * sizeof (uint64_t)); + uint64_t *p01_tmp10 = p01_tmp1_swap; + uint64_t *nq = p01_tmp10; + uint64_t *tmp1 = p01_tmp10 + 20U; + point_double(nq, tmp1, tmp2); + point_double(nq, tmp1, tmp2); + point_double(nq, tmp1, tmp2); + uint64_t *p010 = p01_tmp1_swap; + memcpy(out, p010, 10U * sizeof (uint64_t)); } void @@ -162,7 +189,9 @@ Hacl_Curve25519_51_fsquare_times( Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp); for (uint32_t i = 0U; i < n - 1U; i++) { - Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, o, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fsqr(o, f1_copy, tmp); } } @@ -176,32 +205,59 @@ void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tm Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, 1U); Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 2U); Hacl_Impl_Curve25519_Field51_fmul(b1, t010, i, tmp); - Hacl_Impl_Curve25519_Field51_fmul(a1, b1, a1, tmp); - Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 1U); - Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp); - Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, 5U); - Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp); + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, a1, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(a1, b1, f2_copy, tmp); + FStar_UInt128_uint128 *tmp11 = tmp; + Hacl_Curve25519_51_fsquare_times(t010, a1, tmp11, 1U); + uint64_t f2_copy0[5U] = { 0U }; + memcpy(f2_copy0, b1, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(b1, t010, f2_copy0, tmp); 
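For context on the finv routine being rewritten here: inversion in the Curve25519 field is done via Fermat's little theorem, so the fsquare_times/fmul chain computes, for the field prime p,

a^{-1} \equiv a^{p-2} \pmod{p}, \qquad p = 2^{255} - 19.

The new f1_copy/f2_copy/i_copy buffers only break up calls whose output previously aliased an input; the addition chain for the exponent p - 2 itself is unchanged.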
+ FStar_UInt128_uint128 *tmp12 = tmp; + Hacl_Curve25519_51_fsquare_times(t010, b1, tmp12, 5U); + uint64_t f2_copy1[5U] = { 0U }; + memcpy(f2_copy1, b1, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(b1, t010, f2_copy1, tmp); uint64_t *b10 = t1 + 5U; uint64_t *c10 = t1 + 10U; uint64_t *t011 = t1 + 15U; - FStar_UInt128_uint128 *tmp11 = tmp; - Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 10U); + FStar_UInt128_uint128 *tmp13 = tmp; + Hacl_Curve25519_51_fsquare_times(t011, b10, tmp13, 10U); Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp); - Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, 20U); - Hacl_Impl_Curve25519_Field51_fmul(t011, t011, c10, tmp); - Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, 10U); - Hacl_Impl_Curve25519_Field51_fmul(b10, t011, b10, tmp); - Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 50U); + FStar_UInt128_uint128 *tmp110 = tmp; + Hacl_Curve25519_51_fsquare_times(t011, c10, tmp110, 20U); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, t011, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(t011, f1_copy, c10, tmp); + FStar_UInt128_uint128 *tmp120 = tmp; + uint64_t i_copy0[5U] = { 0U }; + memcpy(i_copy0, t011, 5U * sizeof (uint64_t)); + Hacl_Curve25519_51_fsquare_times(t011, i_copy0, tmp120, 10U); + uint64_t f2_copy2[5U] = { 0U }; + memcpy(f2_copy2, b10, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(b10, t011, f2_copy2, tmp); + FStar_UInt128_uint128 *tmp130 = tmp; + Hacl_Curve25519_51_fsquare_times(t011, b10, tmp130, 50U); Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp); uint64_t *b11 = t1 + 5U; uint64_t *c1 = t1 + 10U; uint64_t *t01 = t1 + 15U; FStar_UInt128_uint128 *tmp1 = tmp; Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, 100U); - Hacl_Impl_Curve25519_Field51_fmul(t01, t01, c1, tmp); - Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 50U); - Hacl_Impl_Curve25519_Field51_fmul(t01, t01, b11, tmp); - Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 5U); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, t01, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(t01, f1_copy0, c1, tmp); + FStar_UInt128_uint128 *tmp111 = tmp; + uint64_t i_copy1[5U] = { 0U }; + memcpy(i_copy1, t01, 5U * sizeof (uint64_t)); + Hacl_Curve25519_51_fsquare_times(t01, i_copy1, tmp111, 50U); + uint64_t f1_copy1[5U] = { 0U }; + memcpy(f1_copy1, t01, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(t01, f1_copy1, b11, tmp); + FStar_UInt128_uint128 *tmp121 = tmp; + uint64_t i_copy[5U] = { 0U }; + memcpy(i_copy, t01, 5U * sizeof (uint64_t)); + Hacl_Curve25519_51_fsquare_times(t01, i_copy, tmp121, 5U); uint64_t *a = t1; uint64_t *t0 = t1 + 15U; Hacl_Impl_Curve25519_Field51_fmul(o, t0, a, tmp); @@ -217,7 +273,9 @@ static void encode_point(uint8_t *o, uint64_t *i) for (uint32_t _i = 0U; _i < 10U; ++_i) tmp_w[_i] = FStar_UInt128_uint64_to_uint128(0ULL); Hacl_Curve25519_51_finv(tmp, z, tmp_w); - Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, tmp, 5U * sizeof (uint64_t)); + Hacl_Impl_Curve25519_Field51_fmul(tmp, f1_copy, x, tmp_w); Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp); KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]);); } @@ -232,16 +290,17 @@ Compute the scalar multiple of a point. 
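A minimal sketch of how the two public entry points touched below are typically used together, assuming the usual Hacl_Curve25519_51.h header; key generation and the all-zero check on the shared secret are omitted here:

#include <stdint.h>
#include "Hacl_Curve25519_51.h"

/* X25519 with the 51-bit-limb implementation; all buffers are 32 bytes.
   `my_priv` would come from a CSPRNG in real code. */
static void x25519_demo(uint8_t *my_priv, uint8_t *peer_pub)
{
  uint8_t my_pub[32U] = { 0U };
  uint8_t shared[32U] = { 0U };
  /* my_pub <- [my_priv]·9 (the basepoint is built internally, see g25519 below). */
  Hacl_Curve25519_51_secret_to_public(my_pub, my_priv);
  /* shared <- [my_priv]·peer_pub */
  Hacl_Curve25519_51_scalarmult(shared, my_priv, peer_pub);
}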
void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) { uint64_t init[10U] = { 0U }; + uint64_t init_copy[10U] = { 0U }; uint64_t tmp[4U] = { 0U }; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = tmp; uint8_t *bj = pub + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = tmp; os[i] = x;); uint64_t tmp3 = tmp[3U]; tmp[3U] = tmp3 & 0x7fffffffffffffffULL; @@ -265,7 +324,8 @@ void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) x[2U] = f1h | f2l; x[3U] = f2h | f3l; x[4U] = f3h; - montgomery_ladder(init, priv, init); + memcpy(init_copy, init, 10U * sizeof (uint64_t)); + montgomery_ladder(init, priv, init_copy); encode_point(out, init); } @@ -282,8 +342,8 @@ void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv) uint8_t basepoint[32U] = { 0U }; for (uint32_t i = 0U; i < 32U; i++) { - uint8_t *os = basepoint; uint8_t x = g25519[i]; + uint8_t *os = basepoint; os[i] = x; } Hacl_Curve25519_51_scalarmult(pub, priv, basepoint); diff --git a/src/Hacl_Curve25519_64.c b/src/Hacl_Curve25519_64.c index edcab306..0a0dd778 100644 --- a/src/Hacl_Curve25519_64.c +++ b/src/Hacl_Curve25519_64.c @@ -121,69 +121,91 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2 uint64_t *x1 = q; uint64_t *x2 = nq; uint64_t *z2 = nq + 4U; - uint64_t *z3 = nq_p1 + 4U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + 4U; - uint64_t *ab = tmp1; uint64_t *dc = tmp1 + 8U; + uint64_t *ab = tmp1; + uint64_t *a = ab; + uint64_t *b = ab + 4U; fadd0(a, x2, z2); fsub0(b, x2, z2); + uint64_t *ab1 = tmp1; uint64_t *x3 = nq_p1; uint64_t *z31 = nq_p1 + 4U; uint64_t *d0 = dc; uint64_t *c0 = dc + 4U; fadd0(c0, x3, z31); fsub0(d0, x3, z31); - fmul20(dc, dc, ab, tmp2); - fadd0(x3, d0, c0); - fsub0(z31, d0, c0); - uint64_t *a1 = tmp1; - uint64_t *b1 = tmp1 + 4U; - uint64_t *d = tmp1 + 8U; - uint64_t *c = tmp1 + 12U; - uint64_t *ab1 = tmp1; + uint64_t f1_copy0[8U] = { 0U }; + memcpy(f1_copy0, dc, 8U * sizeof (uint64_t)); + fmul20(dc, f1_copy0, ab1, tmp2); + uint64_t *d1 = dc; + uint64_t *c1 = dc + 4U; + fadd0(x3, d1, c1); + fsub0(z31, d1, c1); + uint64_t *ab2 = tmp1; uint64_t *dc1 = tmp1 + 8U; - fsqr20(dc1, ab1, tmp2); - fsqr20(nq_p1, nq_p1, tmp2); + fsqr20(dc1, ab2, tmp2); + uint64_t f1_copy1[8U] = { 0U }; + memcpy(f1_copy1, nq_p1, 8U * sizeof (uint64_t)); + fsqr20(nq_p1, f1_copy1, tmp2); + uint64_t *a1 = ab2; + uint64_t *b1 = ab2 + 4U; + uint64_t *d = dc1; + uint64_t *c = dc1 + 4U; a1[0U] = c[0U]; a1[1U] = c[1U]; a1[2U] = c[2U]; a1[3U] = c[3U]; - fsub0(c, d, c); + uint64_t f2_copy[4U] = { 0U }; + memcpy(f2_copy, c, 4U * sizeof (uint64_t)); + fsub0(c, d, f2_copy); fmul_scalar0(b1, c, 121665ULL); - fadd0(b1, b1, d); - fmul20(nq, dc1, ab1, tmp2); - fmul0(z3, z3, x1, tmp2); + uint64_t f1_copy2[4U] = { 0U }; + memcpy(f1_copy2, b1, 4U * sizeof (uint64_t)); + fadd0(b1, f1_copy2, d); + uint64_t *ab3 = tmp1; + uint64_t *dc2 = tmp1 + 8U; + fmul20(nq, dc2, ab3, tmp2); + uint64_t *z310 = nq_p1 + 4U; + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, z310, 4U * sizeof (uint64_t)); + fmul0(z310, f1_copy, x1, tmp2); } static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2) { uint64_t *x2 = nq; uint64_t *z2 = nq + 4U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + 4U; - uint64_t *d = tmp1 + 8U; - uint64_t *c = tmp1 + 12U; uint64_t *ab = tmp1; uint64_t *dc = tmp1 + 8U; + uint64_t *a = ab; + uint64_t *b = ab + 4U; fadd0(a, x2, z2); fsub0(b, x2, z2); fsqr20(dc, ab, tmp2); - a[0U] = c[0U]; - a[1U] = c[1U]; - a[2U] = c[2U]; - 
a[3U] = c[3U]; - fsub0(c, d, c); - fmul_scalar0(b, c, 121665ULL); - fadd0(b, b, d); - fmul20(nq, dc, ab, tmp2); + uint64_t *d = dc; + uint64_t *c = dc + 4U; + uint64_t *a1 = ab; + uint64_t *b1 = ab + 4U; + a1[0U] = c[0U]; + a1[1U] = c[1U]; + a1[2U] = c[2U]; + a1[3U] = c[3U]; + uint64_t f2_copy[4U] = { 0U }; + memcpy(f2_copy, c, 4U * sizeof (uint64_t)); + fsub0(c, d, f2_copy); + fmul_scalar0(b1, c, 121665ULL); + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, b1, 4U * sizeof (uint64_t)); + fadd0(b1, f1_copy, d); + uint64_t *ab1 = tmp1; + uint64_t *dc1 = tmp1 + 8U; + fmul20(nq, dc1, ab1, tmp2); } static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) { uint64_t tmp2[16U] = { 0U }; uint64_t p01_tmp1_swap[33U] = { 0U }; - uint64_t *p0 = p01_tmp1_swap; uint64_t *p01 = p01_tmp1_swap; uint64_t *p03 = p01; uint64_t *p11 = p01 + 8U; @@ -198,34 +220,39 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) z0[1U] = 0ULL; z0[2U] = 0ULL; z0[3U] = 0ULL; + uint64_t *swap = p01_tmp1_swap + 32U; uint64_t *p01_tmp1 = p01_tmp1_swap; + uint64_t *nq0 = p01_tmp1; + uint64_t *nq_p1 = p01_tmp1 + 8U; + cswap20(1ULL, nq0, nq_p1); uint64_t *p01_tmp11 = p01_tmp1_swap; - uint64_t *nq1 = p01_tmp1_swap; - uint64_t *nq_p11 = p01_tmp1_swap + 8U; - uint64_t *swap = p01_tmp1_swap + 32U; - cswap20(1ULL, nq1, nq_p11); point_add_and_double(init, p01_tmp11, tmp2); swap[0U] = 1ULL; for (uint32_t i = 0U; i < 251U; i++) { uint64_t *p01_tmp12 = p01_tmp1_swap; uint64_t *swap1 = p01_tmp1_swap + 32U; - uint64_t *nq2 = p01_tmp12; - uint64_t *nq_p12 = p01_tmp12 + 8U; + uint64_t *nq1 = p01_tmp12; + uint64_t *nq_p11 = p01_tmp12 + 8U; uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U); uint64_t sw = swap1[0U] ^ bit; - cswap20(sw, nq2, nq_p12); + cswap20(sw, nq1, nq_p11); point_add_and_double(init, p01_tmp12, tmp2); swap1[0U] = bit; } uint64_t sw = swap[0U]; + uint64_t *p01_tmp12 = p01_tmp1_swap; + uint64_t *nq1 = p01_tmp12; + uint64_t *nq_p11 = p01_tmp12 + 8U; cswap20(sw, nq1, nq_p11); - uint64_t *nq10 = p01_tmp1; - uint64_t *tmp1 = p01_tmp1 + 16U; - point_double(nq10, tmp1, tmp2); - point_double(nq10, tmp1, tmp2); - point_double(nq10, tmp1, tmp2); - memcpy(out, p0, 8U * sizeof (uint64_t)); + uint64_t *p01_tmp10 = p01_tmp1_swap; + uint64_t *nq = p01_tmp10; + uint64_t *tmp1 = p01_tmp10 + 16U; + point_double(nq, tmp1, tmp2); + point_double(nq, tmp1, tmp2); + point_double(nq, tmp1, tmp2); + uint64_t *p010 = p01_tmp1_swap; + memcpy(out, p010, 8U * sizeof (uint64_t)); } static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n) @@ -233,7 +260,9 @@ static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n) fsqr0(o, inp, tmp); for (uint32_t i = 0U; i < n - 1U; i++) { - fsqr0(o, o, tmp); + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, o, 4U * sizeof (uint64_t)); + fsqr0(o, f1_copy, tmp); } } @@ -247,32 +276,59 @@ static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp) fsquare_times(a1, i, tmp10, 1U); fsquare_times(t010, a1, tmp10, 2U); fmul0(b1, t010, i, tmp); - fmul0(a1, b1, a1, tmp); - fsquare_times(t010, a1, tmp10, 1U); - fmul0(b1, t010, b1, tmp); - fsquare_times(t010, b1, tmp10, 5U); - fmul0(b1, t010, b1, tmp); + uint64_t f2_copy[4U] = { 0U }; + memcpy(f2_copy, a1, 4U * sizeof (uint64_t)); + fmul0(a1, b1, f2_copy, tmp); + uint64_t *tmp11 = tmp; + fsquare_times(t010, a1, tmp11, 1U); + uint64_t f2_copy0[4U] = { 0U }; + memcpy(f2_copy0, b1, 4U * sizeof (uint64_t)); + fmul0(b1, t010, f2_copy0, tmp); + uint64_t *tmp12 = tmp; 
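The Montgomery ladder above walks the scalar from bit 253 downward: key[(253 - i) / 8] >> ((253 - i) % 8) & 1 extracts bit 253 - i of the little-endian scalar (for i = 0 that is byte 31, shift 5), and sw = swap ^ bit decides whether the two working points must be exchanged. The exchange is done without branching on secret data; a generic illustration of the masked-swap idea (not the actual cswap2/cswap20 implementation) is:

#include <stdint.h>

/* Branch-free conditional swap of two limbs: swaps when bit == 1,
   leaves both unchanged when bit == 0. The ladder applies the same
   trick limb-by-limb to whole points. */
static void cswap_limb(uint64_t bit, uint64_t *a, uint64_t *b)
{
  uint64_t mask = 0ULL - bit;        /* all-zeros or all-ones */
  uint64_t t = mask & (*a ^ *b);
  *a ^= t;
  *b ^= t;
}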
+ fsquare_times(t010, b1, tmp12, 5U); + uint64_t f2_copy1[4U] = { 0U }; + memcpy(f2_copy1, b1, 4U * sizeof (uint64_t)); + fmul0(b1, t010, f2_copy1, tmp); uint64_t *b10 = t1 + 4U; uint64_t *c10 = t1 + 8U; uint64_t *t011 = t1 + 12U; - uint64_t *tmp11 = tmp; - fsquare_times(t011, b10, tmp11, 10U); + uint64_t *tmp13 = tmp; + fsquare_times(t011, b10, tmp13, 10U); fmul0(c10, t011, b10, tmp); - fsquare_times(t011, c10, tmp11, 20U); - fmul0(t011, t011, c10, tmp); - fsquare_times(t011, t011, tmp11, 10U); - fmul0(b10, t011, b10, tmp); - fsquare_times(t011, b10, tmp11, 50U); + uint64_t *tmp110 = tmp; + fsquare_times(t011, c10, tmp110, 20U); + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, t011, 4U * sizeof (uint64_t)); + fmul0(t011, f1_copy, c10, tmp); + uint64_t *tmp120 = tmp; + uint64_t i_copy0[4U] = { 0U }; + memcpy(i_copy0, t011, 4U * sizeof (uint64_t)); + fsquare_times(t011, i_copy0, tmp120, 10U); + uint64_t f2_copy2[4U] = { 0U }; + memcpy(f2_copy2, b10, 4U * sizeof (uint64_t)); + fmul0(b10, t011, f2_copy2, tmp); + uint64_t *tmp130 = tmp; + fsquare_times(t011, b10, tmp130, 50U); fmul0(c10, t011, b10, tmp); uint64_t *b11 = t1 + 4U; uint64_t *c1 = t1 + 8U; uint64_t *t01 = t1 + 12U; uint64_t *tmp1 = tmp; fsquare_times(t01, c1, tmp1, 100U); - fmul0(t01, t01, c1, tmp); - fsquare_times(t01, t01, tmp1, 50U); - fmul0(t01, t01, b11, tmp); - fsquare_times(t01, t01, tmp1, 5U); + uint64_t f1_copy0[4U] = { 0U }; + memcpy(f1_copy0, t01, 4U * sizeof (uint64_t)); + fmul0(t01, f1_copy0, c1, tmp); + uint64_t *tmp111 = tmp; + uint64_t i_copy1[4U] = { 0U }; + memcpy(i_copy1, t01, 4U * sizeof (uint64_t)); + fsquare_times(t01, i_copy1, tmp111, 50U); + uint64_t f1_copy1[4U] = { 0U }; + memcpy(f1_copy1, t01, 4U * sizeof (uint64_t)); + fmul0(t01, f1_copy1, b11, tmp); + uint64_t *tmp121 = tmp; + uint64_t i_copy[4U] = { 0U }; + memcpy(i_copy, t01, 4U * sizeof (uint64_t)); + fsquare_times(t01, i_copy, tmp121, 5U); uint64_t *a = t1; uint64_t *t0 = t1 + 12U; fmul0(o, t0, a, tmp); @@ -319,7 +375,9 @@ static void encode_point(uint8_t *o, uint64_t *i) uint64_t u64s[4U] = { 0U }; uint64_t tmp_w[16U] = { 0U }; finv(tmp, z, tmp_w); - fmul0(tmp, tmp, x, tmp_w); + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, tmp, 4U * sizeof (uint64_t)); + fmul0(tmp, f1_copy, x, tmp_w); store_felem(u64s, tmp); KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]);); } @@ -334,16 +392,17 @@ Compute the scalar multiple of a point. 
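The encode_point step above converts the projective ladder output (x : z) back to the affine u-coordinate before the little-endian store, i.e.

u = x \cdot z^{-1} \bmod p = x \cdot z^{\,p-2} \bmod p, \qquad p = 2^{255} - 19;

the finv/fmul0 pair computes exactly this, and the new f1_copy buffer again only avoids an aliased fmul0 call.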
void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) { uint64_t init[8U] = { 0U }; + uint64_t init_copy[8U] = { 0U }; uint64_t tmp[4U] = { 0U }; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = tmp; uint8_t *bj = pub + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = tmp; os[i] = x;); uint64_t tmp3 = tmp[3U]; tmp[3U] = tmp3 & 0x7fffffffffffffffULL; @@ -357,7 +416,8 @@ void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) x[1U] = tmp[1U]; x[2U] = tmp[2U]; x[3U] = tmp[3U]; - montgomery_ladder(init, priv, init); + memcpy(init_copy, init, 8U * sizeof (uint64_t)); + montgomery_ladder(init, priv, init_copy); encode_point(out, init); } @@ -374,8 +434,8 @@ void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv) uint8_t basepoint[32U] = { 0U }; for (uint32_t i = 0U; i < 32U; i++) { - uint8_t *os = basepoint; uint8_t x = g25519[i]; + uint8_t *os = basepoint; os[i] = x; } Hacl_Curve25519_64_scalarmult(pub, priv, basepoint); diff --git a/src/Hacl_EC_K256.c b/src/Hacl_EC_K256.c index 581c223b..d5f6e1a9 100644 --- a/src/Hacl_EC_K256.c +++ b/src/Hacl_EC_K256.c @@ -267,9 +267,9 @@ void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out) 0U, 4U, 1U, - uint64_t *os = scalar_q; uint64_t u = load64_be(scalar + (4U - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = scalar_q; os[i] = x;); Hacl_Impl_K256_PointMul_point_mul(out, scalar_q, p); } diff --git a/src/Hacl_Ed25519.c b/src/Hacl_Ed25519.c index d1f8edf2..e347b02e 100644 --- a/src/Hacl_Ed25519.c +++ b/src/Hacl_Ed25519.c @@ -144,7 +144,9 @@ static inline void fsquare_times_inplace(uint64_t *output, uint32_t count) FStar_UInt128_uint128 tmp[5U]; for (uint32_t _i = 0U; _i < 5U; ++_i) tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL); - Hacl_Curve25519_51_fsquare_times(output, output, tmp, count); + uint64_t input[5U] = { 0U }; + memcpy(input, output, 5U * sizeof (uint64_t)); + Hacl_Curve25519_51_fsquare_times(output, input, tmp, count); } void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a) @@ -215,11 +217,11 @@ void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input) 0U, 4U, 1U, - uint64_t *os = u64s; uint8_t *bj = input + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = u64s; os[i] = x;); uint64_t u64s3 = u64s[3U]; u64s[3U] = u64s3 & 0x7fffffffffffffffULL; @@ -252,7 +254,9 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p) fsum(tmp30, tmp1, tmp20); fdifference(tmp40, tmp1, tmp20); fsquare(tmp1, z1); - times_2(tmp1, tmp1); + uint64_t a_copy[5U] = { 0U }; + memcpy(a_copy, tmp1, 5U * sizeof (uint64_t)); + times_2(tmp1, a_copy); uint64_t *tmp10 = tmp; uint64_t *tmp2 = tmp + 5U; uint64_t *tmp3 = tmp + 10U; @@ -260,12 +264,18 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p) uint64_t *x1 = p; uint64_t *y1 = p + 5U; fsum(tmp2, x1, y1); - fsquare(tmp2, tmp2); + uint64_t a_copy0[5U] = { 0U }; + memcpy(a_copy0, tmp2, 5U * sizeof (uint64_t)); + fsquare(tmp2, a_copy0); Hacl_Bignum25519_reduce_513(tmp3); - fdifference(tmp2, tmp3, tmp2); + uint64_t b_copy[5U] = { 0U }; + memcpy(b_copy, tmp2, 5U * sizeof (uint64_t)); + fdifference(tmp2, tmp3, b_copy); Hacl_Bignum25519_reduce_513(tmp10); Hacl_Bignum25519_reduce_513(tmp4); - fsum(tmp10, tmp10, tmp4); + uint64_t a_copy1[5U] = { 0U }; + memcpy(a_copy1, tmp10, 5U * sizeof (uint64_t)); + fsum(tmp10, a_copy1, tmp4); uint64_t *tmp_f = tmp; uint64_t *tmp_e = tmp + 5U; uint64_t *tmp_h = tmp + 10U; @@ -308,12 +318,18 @@ 
void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t * uint64_t *z2 = q + 10U; uint64_t *t2 = q + 15U; times_2d(tmp10, t1); - fmul0(tmp10, tmp10, t2); + uint64_t inp_copy[5U] = { 0U }; + memcpy(inp_copy, tmp10, 5U * sizeof (uint64_t)); + fmul0(tmp10, inp_copy, t2); times_2(tmp2, z1); - fmul0(tmp2, tmp2, z2); + uint64_t inp_copy0[5U] = { 0U }; + memcpy(inp_copy0, tmp2, 5U * sizeof (uint64_t)); + fmul0(tmp2, inp_copy0, z2); fdifference(tmp5, tmp4, tmp3); fdifference(tmp6, tmp2, tmp10); - fsum(tmp10, tmp2, tmp10); + uint64_t a_copy[5U] = { 0U }; + memcpy(a_copy, tmp10, 5U * sizeof (uint64_t)); + fsum(tmp10, a_copy, tmp2); fsum(tmp2, tmp4, tmp3); uint64_t *tmp_g = tmp; uint64_t *tmp_h = tmp + 5U; @@ -367,17 +383,27 @@ static inline void pow2_252m2(uint64_t *out, uint64_t *z) fsquare_times(a, z, 1U); fsquare_times(t00, a, 2U); fmul0(b0, t00, z); - fmul0(a, b0, a); + uint64_t inp_copy0[5U] = { 0U }; + memcpy(inp_copy0, a, 5U * sizeof (uint64_t)); + fmul0(a, inp_copy0, b0); fsquare_times(t00, a, 1U); - fmul0(b0, t00, b0); + uint64_t inp_copy1[5U] = { 0U }; + memcpy(inp_copy1, b0, 5U * sizeof (uint64_t)); + fmul0(b0, inp_copy1, t00); fsquare_times(t00, b0, 5U); - fmul0(b0, t00, b0); + uint64_t inp_copy2[5U] = { 0U }; + memcpy(inp_copy2, b0, 5U * sizeof (uint64_t)); + fmul0(b0, inp_copy2, t00); fsquare_times(t00, b0, 10U); fmul0(c0, t00, b0); fsquare_times(t00, c0, 20U); - fmul0(t00, t00, c0); + uint64_t inp_copy3[5U] = { 0U }; + memcpy(inp_copy3, t00, 5U * sizeof (uint64_t)); + fmul0(t00, inp_copy3, c0); fsquare_times_inplace(t00, 10U); - fmul0(b0, t00, b0); + uint64_t inp_copy4[5U] = { 0U }; + memcpy(inp_copy4, b0, 5U * sizeof (uint64_t)); + fmul0(b0, inp_copy4, t00); fsquare_times(t00, b0, 50U); uint64_t *a0 = buf; uint64_t *t0 = buf + 5U; @@ -386,9 +412,13 @@ static inline void pow2_252m2(uint64_t *out, uint64_t *z) fsquare_times(a0, z, 1U); fmul0(c, t0, b); fsquare_times(t0, c, 100U); - fmul0(t0, t0, c); + uint64_t inp_copy[5U] = { 0U }; + memcpy(inp_copy, t0, 5U * sizeof (uint64_t)); + fmul0(t0, inp_copy, c); fsquare_times_inplace(t0, 50U); - fmul0(t0, t0, b); + uint64_t inp_copy5[5U] = { 0U }; + memcpy(inp_copy5, t0, 5U * sizeof (uint64_t)); + fmul0(t0, inp_copy5, b); fsquare_times_inplace(t0, 2U); fmul0(out, t0, a0); } @@ -411,7 +441,9 @@ static inline void mul_modp_sqrt_m1(uint64_t *x) sqrt_m1[2U] = 0x0007ef5e9cbd0c60ULL; sqrt_m1[3U] = 0x00078595a6804c9eULL; sqrt_m1[4U] = 0x0002b8324804fc1dULL; - fmul0(x, x, sqrt_m1); + uint64_t inp_copy[5U] = { 0U }; + memcpy(inp_copy, x, 5U * sizeof (uint64_t)); + fmul0(x, inp_copy, sqrt_m1); } static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) @@ -450,11 +482,15 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) one[4U] = 0ULL; fsquare(y2, y); times_d(dyy, y2); - fsum(dyy, dyy, one); + uint64_t a_copy0[5U] = { 0U }; + memcpy(a_copy0, dyy, 5U * sizeof (uint64_t)); + fsum(dyy, a_copy0, one); Hacl_Bignum25519_reduce_513(dyy); Hacl_Bignum25519_inverse(dyyi, dyy); fdifference(x2, y2, one); - fmul0(x2, x2, dyyi); + uint64_t inp_copy[5U] = { 0U }; + memcpy(inp_copy, x2, 5U * sizeof (uint64_t)); + fmul0(x2, inp_copy, dyyi); reduce(x2); bool x2_is_0 = is_0(x2); uint8_t z; @@ -493,7 +529,9 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) uint64_t *t00 = tmp + 10U; pow2_252m2(x31, x210); fsquare(t00, x31); - fdifference(t00, t00, x210); + uint64_t a_copy1[5U] = { 0U }; + memcpy(a_copy1, t00, 5U * sizeof (uint64_t)); + fdifference(t00, a_copy1, x210); 
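For readers following recover_x below: this appears to be the x-coordinate recovery of RFC 8032. With u = y^2 - 1 and v = d\,y^2 + 1 (the x2 = (y2 - one)·dyyi computation), the candidate root is

x = (u/v)^{(p+3)/8} \bmod p, \qquad p = 2^{255} - 19, \qquad (p+3)/8 = 2^{252} - 2,

which is what pow2_252m2 raises to. The candidate is squared and compared against u/v (the fsquare/fdifference/is_0 checks); if it differs, the candidate is multiplied by \sqrt{-1} (mul_modp_sqrt_m1) and re-checked, decompression fails if neither matches, and a final adjustment replaces x by p - x when its parity does not match the requested sign bit.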
Hacl_Bignum25519_reduce_513(t00); reduce(t00); bool t0_is_0 = is_0(t00); @@ -505,15 +543,13 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) uint64_t *x3 = tmp + 5U; uint64_t *t01 = tmp + 10U; fsquare(t01, x3); - fdifference(t01, t01, x211); + uint64_t a_copy[5U] = { 0U }; + memcpy(a_copy, t01, 5U * sizeof (uint64_t)); + fdifference(t01, a_copy, x211); Hacl_Bignum25519_reduce_513(t01); reduce(t01); bool z1 = is_0(t01); - if (z1 == false) - { - res = false; - } - else + if (z1) { uint64_t *x32 = tmp + 5U; uint64_t *t0 = tmp + 10U; @@ -527,13 +563,19 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) t0[2U] = 0ULL; t0[3U] = 0ULL; t0[4U] = 0ULL; - fdifference(x32, t0, x32); + uint64_t b_copy[5U] = { 0U }; + memcpy(b_copy, x32, 5U * sizeof (uint64_t)); + fdifference(x32, t0, b_copy); Hacl_Bignum25519_reduce_513(x32); reduce(x32); } memcpy(x, x32, 5U * sizeof (uint64_t)); res = true; } + else + { + res = false; + } } } bool res0 = res; @@ -551,11 +593,7 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t * Hacl_Bignum25519_load_51(y, s); bool z0 = recover_x(x, y, sign); bool res; - if (z0 == false) - { - res = false; - } - else + if (z0) { uint64_t *outx = out; uint64_t *outy = out + 5U; @@ -571,6 +609,10 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t * fmul0(outt, x, y); res = true; } + else + { + res = false; + } bool res0 = res; return res0; } @@ -578,20 +620,20 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t * void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p) { uint64_t tmp[15U] = { 0U }; + uint64_t *zinv = tmp; uint64_t *x = tmp + 5U; - uint64_t *out = tmp + 10U; - uint64_t *zinv1 = tmp; - uint64_t *x1 = tmp + 5U; - uint64_t *out1 = tmp + 10U; + uint64_t *out0 = tmp + 10U; uint64_t *px = p; uint64_t *py = p + 5U; uint64_t *pz = p + 10U; - Hacl_Bignum25519_inverse(zinv1, pz); - fmul0(x1, px, zinv1); - reduce(x1); - fmul0(out1, py, zinv1); - Hacl_Bignum25519_reduce_513(out1); - uint64_t x0 = x[0U]; + Hacl_Bignum25519_inverse(zinv, pz); + fmul0(x, px, zinv); + reduce(x); + fmul0(out0, py, zinv); + Hacl_Bignum25519_reduce_513(out0); + uint64_t *x1 = tmp + 5U; + uint64_t *out = tmp + 10U; + uint64_t x0 = x1[0U]; uint64_t b = x0 & 1ULL; Hacl_Bignum25519_store_51(z, out); uint8_t xbyte = (uint8_t)b; @@ -1150,11 +1192,7 @@ static inline bool gte_q(uint64_t *s) { return false; } - if (s3 > 0x00000000000000ULL) - { - return true; - } - if (s2 > 0x000000000014deULL) + if (s3 > 0x00000000000000ULL || s2 > 0x000000000014deULL) { return true; } @@ -1170,11 +1208,7 @@ static inline bool gte_q(uint64_t *s) { return false; } - if (s0 >= 0x12631a5cf5d3edULL) - { - return true; - } - return false; + return s0 >= 0x12631a5cf5d3edULL; } static inline bool eq(uint64_t *a, uint64_t *b) @@ -1246,11 +1280,11 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t 0U, 4U, 1U, - uint64_t *os = bscalar; uint8_t *bj = scalar + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = bscalar; os[i] = x;); uint64_t table[320U] = { 0U }; uint64_t tmp[20U] = { 0U }; @@ -1258,23 +1292,35 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t uint64_t *t1 = table + 20U; Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0); memcpy(t1, q, 20U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 20U; - 
Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11); + uint64_t p_copy0[20U] = { 0U }; + memcpy(p_copy0, t11, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointDouble_point_double(tmp, p_copy0); memcpy(table + (2U * i + 2U) * 20U, tmp, 20U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * 20U; - Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2); + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, q, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(tmp, p_copy, t2); memcpy(table + (2U * i + 3U) * 20U, tmp, 20U * sizeof (uint64_t));); Hacl_Impl_Ed25519_PointConstants_make_point_inf(out); uint64_t tmp0[20U] = { 0U }; for (uint32_t i0 = 0U; i0 < 64U; i0++) { - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointDouble_point_double(out, p_copy);); uint32_t k = 256U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)table, 20U * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -1284,11 +1330,13 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t const uint64_t *res_j = table + (i1 + 1U) * 20U; for (uint32_t i = 0U; i < 20U; i++) { - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x; }); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp0); + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy, tmp0); } } @@ -1303,8 +1351,8 @@ static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, const uint64_t *res_j = table + (i0 + 1U) * 20U; for (uint32_t i = 0U; i < 20U; i++) { - uint64_t *os = tmp; uint64_t x = (c & res_j[i]) | (~c & tmp[i]); + uint64_t *os = tmp; os[i] = x; }); } @@ -1316,11 +1364,11 @@ static inline void point_mul_g(uint64_t *out, uint8_t *scalar) 0U, 4U, 1U, - uint64_t *os = bscalar; uint8_t *bj = scalar + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = bscalar; os[i] = x;); uint64_t q1[20U] = { 0U }; uint64_t *gx = q1; @@ -1384,23 +1432,41 @@ static inline void point_mul_g(uint64_t *out, uint8_t *scalar) 0U, 16U, 1U, - KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR4(i0, + 0U, + 4U, + 1U, + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointDouble_point_double(out, p_copy);); uint32_t k = 64U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U); + KRML_HOST_IGNORE(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4); precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp); + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy, tmp); uint32_t k0 = 64U - 4U * i - 4U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U); + KRML_HOST_IGNORE(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4); precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp); + uint64_t p_copy0[20U] = { 0U }; + memcpy(p_copy0, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, 
p_copy0, tmp); uint32_t k1 = 64U - 4U * i - 4U; uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U); + KRML_HOST_IGNORE(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4); precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp); + uint64_t p_copy1[20U] = { 0U }; + memcpy(p_copy1, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy1, tmp); uint32_t k2 = 64U - 4U * i - 4U; uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U); + KRML_HOST_IGNORE(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4); precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);); + uint64_t p_copy2[20U] = { 0U }; + memcpy(p_copy2, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy2, tmp);); KRML_MAYBE_UNUSED_VAR(q2); KRML_MAYBE_UNUSED_VAR(q3); KRML_MAYBE_UNUSED_VAR(q4); @@ -1441,21 +1507,21 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui 0U, 4U, 1U, - uint64_t *os = bscalar1; uint8_t *bj = scalar1 + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = bscalar1; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = bscalar2; uint8_t *bj = scalar2 + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = bscalar2; os[i] = x;); uint64_t table2[640U] = { 0U }; uint64_t tmp1[20U] = { 0U }; @@ -1463,15 +1529,20 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui uint64_t *t1 = table2 + 20U; Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0); memcpy(t1, q2, 20U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table2); KRML_MAYBE_FOR15(i, 0U, 15U, 1U, uint64_t *t11 = table2 + (i + 1U) * 20U; - Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, t11); + uint64_t p_copy0[20U] = { 0U }; + memcpy(p_copy0, t11, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, p_copy0); memcpy(table2 + (2U * i + 2U) * 20U, tmp1, 20U * sizeof (uint64_t)); uint64_t *t2 = table2 + (2U * i + 2U) * 20U; - Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, q2, t2); + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, q2, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, p_copy, t2); memcpy(table2 + (2U * i + 3U) * 20U, tmp1, 20U * sizeof (uint64_t));); uint64_t tmp10[20U] = { 0U }; uint32_t i0 = 255U; @@ -1486,25 +1557,39 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui uint32_t bits_l320 = (uint32_t)bits_c0; const uint64_t *a_bits_l0 = table2 + bits_l320 * 20U; memcpy(tmp10, (uint64_t *)a_bits_l0, 20U * sizeof (uint64_t)); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp10); + uint64_t p_copy[20U] = { 0U }; + memcpy(p_copy, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy, tmp10); uint64_t tmp11[20U] = { 0U }; for (uint32_t i = 0U; i < 51U; i++) { - KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t p_copy0[20U] = { 0U }; + memcpy(p_copy0, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointDouble_point_double(out, p_copy0);); uint32_t k = 255U - 5U * i - 5U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, k, 5U); + KRML_MAYBE_UNUSED_VAR(table2); uint32_t bits_l321 = (uint32_t)bits_l; const uint64_t *a_bits_l1 = table2 + bits_l321 * 20U; 
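The point_mul_g loop above splits the 256-bit scalar into four 64-bit limbs r1..r4 and uses one precomputed 4-bit-window table per limb (for B, 2^64 B, 2^128 B and 2^192 B), so that with s = r1 + r2·2^64 + r3·2^128 + r4·2^192 the result is assembled as

[s]B = [r_1]B + [r_2](2^{64}B) + [r_3](2^{128}B) + [r_4](2^{192}B),

processed in 16 iterations of four point doublings followed by one constant-time table lookup and point addition per limb. The KRML_HOST_IGNORE and p_copy additions in this patch do not change that structure; they only silence unused-table warnings and avoid aliased point_add/point_double calls.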
memcpy(tmp11, (uint64_t *)a_bits_l1, 20U * sizeof (uint64_t)); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11); + uint64_t p_copy0[20U] = { 0U }; + memcpy(p_copy0, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy0, tmp11); uint32_t k0 = 255U - 5U * i - 5U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, k0, 5U); + KRML_HOST_IGNORE(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5); uint32_t bits_l322 = (uint32_t)bits_l0; const uint64_t *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 20U; memcpy(tmp11, (uint64_t *)a_bits_l2, 20U * sizeof (uint64_t)); - Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11); + uint64_t p_copy1[20U] = { 0U }; + memcpy(p_copy1, out, 20U * sizeof (uint64_t)); + Hacl_Impl_Ed25519_PointAdd_point_add(out, p_copy1, tmp11); } } @@ -1624,10 +1709,10 @@ static inline void sha512_pre_msg(uint8_t *hash, uint8_t *prefix, uint32_t len, { uint8_t buf[128U] = { 0U }; uint64_t block_state[8U] = { 0U }; + Hacl_Hash_SHA2_sha512_init(block_state); Hacl_Streaming_MD_state_64 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_64 p = s; - Hacl_Hash_SHA2_sha512_init(block_state); Hacl_Streaming_MD_state_64 *st = &p; Hacl_Streaming_Types_error_code err0 = Hacl_Hash_SHA2_update_512(st, prefix, 32U); Hacl_Streaming_Types_error_code err1 = Hacl_Hash_SHA2_update_512(st, input, len); @@ -1647,10 +1732,10 @@ sha512_pre_pre2_msg( { uint8_t buf[128U] = { 0U }; uint64_t block_state[8U] = { 0U }; + Hacl_Hash_SHA2_sha512_init(block_state); Hacl_Streaming_MD_state_64 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_64 p = s; - Hacl_Hash_SHA2_sha512_init(block_state); Hacl_Streaming_MD_state_64 *st = &p; Hacl_Streaming_Types_error_code err0 = Hacl_Hash_SHA2_update_512(st, prefix, 32U); Hacl_Streaming_Types_error_code err1 = Hacl_Hash_SHA2_update_512(st, prefix2, 32U); @@ -1734,10 +1819,10 @@ Compute the expanded keys for an Ed25519 signature. 
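A minimal signing sketch for the two entry points below, assuming the prototypes from Hacl_Ed25519.h (only expand_keys is fully visible in this hunk): expanded_keys is 96 bytes laid out as public key || s || prefix, and signatures are 64 bytes.

#include <stdint.h>
#include "Hacl_Ed25519.h"

/* `private_key` is the 32-byte Ed25519 seed; `signature` must hold 64 bytes. */
static void ed25519_sign_demo(uint8_t *private_key, uint8_t *msg, uint32_t msg_len,
                              uint8_t *signature)
{
  uint8_t expanded_keys[96U] = { 0U };
  /* One-time expansion: hashes the seed and derives the public key. */
  Hacl_Ed25519_expand_keys(expanded_keys, private_key);
  /* The expanded key can then sign any number of messages. */
  Hacl_Ed25519_sign_expanded(signature, expanded_keys, msg_len, msg);
}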
*/ void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key) { - uint8_t *public_key = expanded_keys; uint8_t *s_prefix = expanded_keys + 32U; - uint8_t *s = expanded_keys + 32U; secret_expand(s_prefix, private_key); + uint8_t *public_key = expanded_keys; + uint8_t *s = expanded_keys + 32U; point_mul_g_compress(public_key, s); } @@ -1774,8 +1859,12 @@ Hacl_Ed25519_sign_expanded( sha512_modq_pre_pre2(hq, rs, public_key, msg_len, msg); uint64_t aq[5U] = { 0U }; load_32_bytes(aq, s); - mul_modq(aq, hq, aq); - add_modq(aq, rq, aq); + uint64_t y_copy[5U] = { 0U }; + memcpy(y_copy, aq, 5U * sizeof (uint64_t)); + mul_modq(aq, hq, y_copy); + uint64_t y_copy0[5U] = { 0U }; + memcpy(y_copy0, aq, 5U * sizeof (uint64_t)); + add_modq(aq, rq, y_copy0); store_56(ss, aq); } diff --git a/src/Hacl_FFDHE.c b/src/Hacl_FFDHE.c index 098aa607..55f5ce31 100644 --- a/src/Hacl_FFDHE.c +++ b/src/Hacl_FFDHE.c @@ -140,8 +140,8 @@ static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n) uint32_t len = ffdhe_len(a); for (uint32_t i = 0U; i < len; i++) { - uint8_t *os = p_s; uint8_t x = p[i]; + uint8_t *os = p_s; os[i] = x; } Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ffdhe_len(a), p_s, p_n); @@ -158,6 +158,7 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui uint64_t p_n1[nLen]; memset(p_n1, 0U, nLen * sizeof (uint64_t)); uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, p_n[0U], 1ULL, p_n1); + uint64_t c1; if (1U < nLen) { uint64_t *a1 = p_n + 1U; @@ -184,13 +185,14 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i); } - uint64_t c1 = c; - KRML_MAYBE_UNUSED_VAR(c1); + uint64_t c10 = c; + c1 = c10; } else { - KRML_MAYBE_UNUSED_VAR(c0); + c1 = c0; } + KRML_MAYBE_UNUSED_VAR(c1); KRML_CHECK_SIZE(sizeof (uint64_t), nLen); uint64_t b2[nLen]; memset(b2, 0U, nLen * sizeof (uint64_t)); @@ -202,7 +204,7 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui { uint64_t beq = FStar_UInt64_eq_mask(b2[i], pk_n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], pk_n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t res = acc0; uint64_t m0 = res; @@ -211,7 +213,7 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui { uint64_t beq = FStar_UInt64_eq_mask(pk_n[i], p_n1[i]); uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i], p_n1[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; return m0 & m1; @@ -279,8 +281,8 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp( memset(g_n, 0U, nLen * sizeof (uint64_t)); uint8_t g = 0U; { - uint8_t *os = &g; uint8_t x = Hacl_Impl_FFDHE_Constants_ffdhe_g2[0U]; + uint8_t *os = &g; os[0U] = x; } Hacl_Bignum_Convert_bn_from_bytes_be_uint64(1U, &g, g_n); diff --git a/src/Hacl_Frodo1344.c b/src/Hacl_Frodo1344.c index 9fe78471..33f87629 100644 --- a/src/Hacl_Frodo1344.c +++ b/src/Hacl_Frodo1344.c @@ -210,10 +210,10 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t kp_s[32U] = { 0U }; for (uint32_t i = 0U; i < 32U; i++) { - uint8_t *os = kp_s; uint8_t uu____0 = s[i]; uint8_t x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0)); + uint8_t *os = kp_s; os[i] = x; } uint32_t ss_init_len = 21664U; diff --git 
a/src/Hacl_Frodo64.c b/src/Hacl_Frodo64.c index 19f1562d..f88c5d63 100644 --- a/src/Hacl_Frodo64.c +++ b/src/Hacl_Frodo64.c @@ -214,10 +214,10 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) 0U, 16U, 1U, - uint8_t *os = kp_s; uint8_t uu____0 = s[i]; uint8_t x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0)); + uint8_t *os = kp_s; os[i] = x;); uint32_t ss_init_len = 1096U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); diff --git a/src/Hacl_Frodo640.c b/src/Hacl_Frodo640.c index 8cf0253e..95feeb20 100644 --- a/src/Hacl_Frodo640.c +++ b/src/Hacl_Frodo640.c @@ -212,10 +212,10 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) 0U, 16U, 1U, - uint8_t *os = kp_s; uint8_t uu____0 = s[i]; uint8_t x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0)); + uint8_t *os = kp_s; os[i] = x;); uint32_t ss_init_len = 9736U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); diff --git a/src/Hacl_Frodo976.c b/src/Hacl_Frodo976.c index 9360e3af..879fb5b2 100644 --- a/src/Hacl_Frodo976.c +++ b/src/Hacl_Frodo976.c @@ -210,10 +210,10 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t kp_s[24U] = { 0U }; for (uint32_t i = 0U; i < 24U; i++) { - uint8_t *os = kp_s; uint8_t uu____0 = s[i]; uint8_t x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0)); + uint8_t *os = kp_s; os[i] = x; } uint32_t ss_init_len = 15768U; diff --git a/src/Hacl_Frodo_KEM.c b/src/Hacl_Frodo_KEM.c index e0a65a47..f15d57ac 100644 --- a/src/Hacl_Frodo_KEM.c +++ b/src/Hacl_Frodo_KEM.c @@ -30,6 +30,7 @@ void randombytes_(uint32_t len, uint8_t *res) { - Lib_RandomBuffer_System_randombytes(res, len); + bool b = Lib_RandomBuffer_System_randombytes(res, len); + KRML_MAYBE_UNUSED_VAR(b); } diff --git a/src/Hacl_GenericField32.c b/src/Hacl_GenericField32.c index f509e6d4..3e7597bd 100644 --- a/src/Hacl_GenericField32.c +++ b/src/Hacl_GenericField32.c @@ -102,9 +102,9 @@ Deallocate the memory previously allocated by Hacl_GenericField32_field_init. */ void Hacl_GenericField32_field_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + uint32_t *n = uu____0.n; + uint32_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -117,8 +117,7 @@ Return the size of a modulus `n` in limbs. 
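The FFDHE and Frodo hunks above both rely on branch-free selection with all-zero/all-one masks: (m & x) | (~m & y) yields x when m is all ones and y when m is zero, so (blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL) collapses to blt, which is exactly the rewrite applied to acc and acc0. A small self-contained sketch of the two select idioms used above; the values in main are placeholders.

#include <stdint.h>
#include <stdio.h>

/* Returns x when mask is all ones, y when mask is zero; no branches. */
static uint64_t ct_select_u64(uint64_t mask, uint64_t x, uint64_t y)
{
  return (mask & x) | (~mask & y);
}

/* The same selection written XOR-style, as in the Frodo kem_dec loops above. */
static uint8_t ct_select_u8(uint8_t mask, uint8_t x, uint8_t y)
{
  return (uint8_t)((uint32_t)y ^ ((uint32_t)mask & ((uint32_t)x ^ (uint32_t)y)));
}

int main(void)
{
  printf("%llu %llu\n",
    (unsigned long long)ct_select_u64(~0ULL, 1ULL, 2ULL),   /* prints 1 */
    (unsigned long long)ct_select_u64(0ULL, 1ULL, 2ULL));   /* prints 2 */
  printf("%u %u\n",
    (uint32_t)ct_select_u8(0xFFU, 7U, 9U),                  /* prints 7 */
    (uint32_t)ct_select_u8(0x00U, 7U, 9U));                 /* prints 9 */
  return 0;
}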
*/ uint32_t Hacl_GenericField32_field_get_len(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - return k1.len; + return (*k).len; } /** @@ -137,8 +136,8 @@ Hacl_GenericField32_to_field( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_to_mont_u32(len1, k1.n, k1.mu, k1.r2, a, aM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_to_mont_u32(len1, uu____0.n, uu____0.mu, uu____0.r2, a, aM); } /** @@ -158,8 +157,8 @@ Hacl_GenericField32_from_field( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, k1.n, k1.mu, aM, a); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, uu____0.n, uu____0.mu, aM, a); } /** @@ -177,8 +176,16 @@ Hacl_GenericField32_add( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_bn_add_mod_n_u32(len1, k1.n, aM, bM, cM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + KRML_CHECK_SIZE(sizeof (uint32_t), len1); + uint32_t a_copy[len1]; + memset(a_copy, 0U, len1 * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len1); + uint32_t b_copy[len1]; + memset(b_copy, 0U, len1 * sizeof (uint32_t)); + memcpy(a_copy, aM, len1 * sizeof (uint32_t)); + memcpy(b_copy, bM, len1 * sizeof (uint32_t)); + Hacl_Bignum_bn_add_mod_n_u32(len1, uu____0.n, a_copy, b_copy, cM); } /** @@ -196,8 +203,7 @@ Hacl_GenericField32_sub( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_bn_sub_mod_n_u32(len1, k1.n, aM, bM, cM); + Hacl_Bignum_bn_sub_mod_n_u32(len1, (*k).n, aM, bM, cM); } /** @@ -215,8 +221,8 @@ Hacl_GenericField32_mul( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, k1.n, k1.mu, aM, bM, cM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, uu____0.n, uu____0.mu, aM, bM, cM); } /** @@ -233,8 +239,8 @@ Hacl_GenericField32_sqr( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, k1.n, k1.mu, aM, cM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, uu____0.n, uu____0.mu, aM, cM); } /** @@ -246,8 +252,8 @@ Convert a bignum `one` to its Montgomery representation. 
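The Hacl_GenericField32_add hunk above copies aM and bM into stack buffers before Hacl_Bignum_bn_add_mod_n_u32, so the destination may now alias an operand. A short caller sketch of the field API as it appears in this file; it assumes a context `k` previously obtained from Hacl_GenericField32_field_init (not part of this hunk), assumes the parameter orders of the public Hacl_GenericField32.h header, and keeps error handling minimal.

#include <stdint.h>
#include <stdlib.h>
#include "Hacl_GenericField32.h"

/* res = (a * b + a) mod n, via the Montgomery domain managed by `k`.
   a, b and res are bignums of Hacl_GenericField32_field_get_len(k) limbs. */
static void mul_then_add(
  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
  uint32_t *a,
  uint32_t *b,
  uint32_t *res
)
{
  uint32_t len = Hacl_GenericField32_field_get_len(k);
  uint32_t *aM = (uint32_t *)calloc(len, sizeof (uint32_t));
  uint32_t *bM = (uint32_t *)calloc(len, sizeof (uint32_t));
  uint32_t *cM = (uint32_t *)calloc(len, sizeof (uint32_t));
  if (aM == NULL || bM == NULL || cM == NULL)
  {
    free(aM); free(bM); free(cM);
    return;
  }
  Hacl_GenericField32_to_field(k, a, aM);
  Hacl_GenericField32_to_field(k, b, bM);
  Hacl_GenericField32_mul(k, aM, bM, cM);   /* cM <- a*b in Montgomery form */
  Hacl_GenericField32_add(k, cM, aM, cM);   /* cM <- a*b + a; aliasing cM is fine after the hunk above */
  Hacl_GenericField32_from_field(k, cM, res);
  free(aM); free(bM); free(cM);
}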
void Hacl_GenericField32_one(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, uint32_t *oneM) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, k1.n, k1.mu, k1.r2, oneM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, uu____0.n, uu____0.mu, uu____0.r2, oneM); } /** @@ -278,22 +284,22 @@ Hacl_GenericField32_exp_consttime( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint32_t), k1.len); - uint32_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint32_t)); - memcpy(aMc, aM, k1.len * sizeof (uint32_t)); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + KRML_CHECK_SIZE(sizeof (uint32_t), uu____0.len); + uint32_t aMc[uu____0.len]; + memset(aMc, 0U, uu____0.len * sizeof (uint32_t)); + memcpy(aMc, aM, uu____0.len * sizeof (uint32_t)); if (bBits < 200U) { KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); uint32_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint32_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint32_t)); uint32_t sw = 0U; uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, uu____0.mu, ctx_r2, resM); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 32U; @@ -308,9 +314,9 @@ Hacl_GenericField32_exp_consttime( aMc[i] = aMc[i] ^ dummy; } uint32_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, aMc, resM, aMc); + Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, uu____0.mu, aMc, resM, aMc); uint32_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, resM, resM); + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, uu____0.mu, resM, resM); sw = bit; } uint32_t sw0 = sw; @@ -335,8 +341,8 @@ Hacl_GenericField32_exp_consttime( KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); uint32_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint32_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint32_t)); KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1); uint32_t table[16U * len1]; memset(table, 0U, 16U * len1 * sizeof (uint32_t)); @@ -347,19 +353,20 @@ Hacl_GenericField32_exp_consttime( uint32_t *t1 = table + len1; uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, uu____0.mu, ctx_r20, t0); memcpy(t1, aMc, len1 * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * len1; uint32_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp); + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, uu____0.mu, t11, tmp); memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * len1; uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp); + 
Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, uu____0.mu, aMc, t2, tmp); memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -374,8 +381,8 @@ Hacl_GenericField32_exp_consttime( const uint32_t *res_j = table + (i1 + 1U) * len1; for (uint32_t i = 0U; i < len1; i++) { - uint32_t *os = resM; uint32_t x = (c & res_j[i]) | (~c & resM[i]); + uint32_t *os = resM; os[i] = x; }); } @@ -383,7 +390,7 @@ Hacl_GenericField32_exp_consttime( { uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, uu____0.mu, ctx_r2, resM); } KRML_CHECK_SIZE(sizeof (uint32_t), len1); uint32_t tmp0[len1]; @@ -395,9 +402,10 @@ Hacl_GenericField32_exp_consttime( 4U, 1U, uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM);); + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, uu____0.mu, resM, resM);); uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -407,12 +415,12 @@ Hacl_GenericField32_exp_consttime( const uint32_t *res_j = table + (i1 + 1U) * len1; for (uint32_t i = 0U; i < len1; i++) { - uint32_t *os = tmp0; uint32_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint32_t *os = tmp0; os[i] = x; }); uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, resM, tmp0, resM); + Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, uu____0.mu, resM, tmp0, resM); } } } @@ -445,21 +453,21 @@ Hacl_GenericField32_exp_vartime( ) { uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint32_t), k1.len); - uint32_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint32_t)); - memcpy(aMc, aM, k1.len * sizeof (uint32_t)); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + KRML_CHECK_SIZE(sizeof (uint32_t), uu____0.len); + uint32_t aMc[uu____0.len]; + memset(aMc, 0U, uu____0.len * sizeof (uint32_t)); + memcpy(aMc, aM, uu____0.len * sizeof (uint32_t)); if (bBits < 200U) { KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); uint32_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint32_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint32_t)); uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, uu____0.mu, ctx_r2, resM); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 32U; @@ -469,10 +477,10 @@ Hacl_GenericField32_exp_vartime( if (!(bit == 0U)) { uint32_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, resM, aMc, resM); + Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, uu____0.mu, resM, aMc, resM); } uint32_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n0, k1.mu, aMc, aMc); + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n0, uu____0.mu, aMc, aMc); } } else @@ -489,8 +497,8 @@ Hacl_GenericField32_exp_vartime( KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); uint32_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - 
memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint32_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint32_t)); KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1); uint32_t table[16U * len1]; memset(table, 0U, 16U * len1 * sizeof (uint32_t)); @@ -501,19 +509,20 @@ Hacl_GenericField32_exp_vartime( uint32_t *t1 = table + len1; uint32_t *ctx_n0 = ctx; uint32_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, uu____0.mu, ctx_r20, t0); memcpy(t1, aMc, len1 * sizeof (uint32_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint32_t *t11 = table + (i + 1U) * len1; uint32_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp); + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, uu____0.mu, t11, tmp); memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t)); uint32_t *t2 = table + (2U * i + 2U) * len1; uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp); + Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, uu____0.mu, aMc, t2, tmp); memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t));); if (bBits % 4U != 0U) { @@ -527,7 +536,7 @@ Hacl_GenericField32_exp_vartime( { uint32_t *ctx_n = ctx; uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, uu____0.mu, ctx_r2, resM); } KRML_CHECK_SIZE(sizeof (uint32_t), len1); uint32_t tmp0[len1]; @@ -539,14 +548,15 @@ Hacl_GenericField32_exp_vartime( 4U, 1U, uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM);); + Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, uu____0.mu, resM, resM);); uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U; uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = bits_l; const uint32_t *a_bits_l = table + bits_l32 * len1; memcpy(tmp0, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t)); uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, resM, tmp0, resM); + Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, uu____0.mu, resM, tmp0, resM); } } } @@ -569,16 +579,16 @@ Hacl_GenericField32_inverse( uint32_t *aInvM ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t len1 = k1.len; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 uu____0 = *k; + uint32_t len1 = uu____0.len; KRML_CHECK_SIZE(sizeof (uint32_t), len1); uint32_t n2[len1]; memset(n2, 0U, len1 * sizeof (uint32_t)); - uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2); + uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, uu____0.n[0U], 2U, n2); uint32_t c1; if (1U < len1) { - uint32_t *a1 = k1.n + 1U; + uint32_t *a1 = uu____0.n + 1U; uint32_t *res1 = n2 + 1U; uint32_t c = c0; for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++) @@ -610,6 +620,6 @@ Hacl_GenericField32_inverse( c1 = c0; } KRML_MAYBE_UNUSED_VAR(c1); - Hacl_GenericField32_exp_vartime(k, aM, k1.len * 32U, n2, aInvM); + Hacl_GenericField32_exp_vartime(k, aM, uu____0.len * 32U, n2, aInvM); } diff --git a/src/Hacl_GenericField64.c b/src/Hacl_GenericField64.c index 3f291d36..3092ac02 100644 --- a/src/Hacl_GenericField64.c +++ b/src/Hacl_GenericField64.c @@ -101,9 +101,9 @@ Deallocate the memory previously allocated by 
Hacl_GenericField64_field_init. */ void Hacl_GenericField64_field_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + uint64_t *n = uu____0.n; + uint64_t *r2 = uu____0.r2; KRML_HOST_FREE(n); KRML_HOST_FREE(r2); KRML_HOST_FREE(k); @@ -116,8 +116,7 @@ Return the size of a modulus `n` in limbs. */ uint32_t Hacl_GenericField64_field_get_len(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - return k1.len; + return (*k).len; } /** @@ -136,8 +135,8 @@ Hacl_GenericField64_to_field( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_to_mont_u64(len1, k1.n, k1.mu, k1.r2, a, aM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_to_mont_u64(len1, uu____0.n, uu____0.mu, uu____0.r2, a, aM); } /** @@ -157,8 +156,8 @@ Hacl_GenericField64_from_field( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, k1.n, k1.mu, aM, a); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, uu____0.n, uu____0.mu, aM, a); } /** @@ -176,8 +175,16 @@ Hacl_GenericField64_add( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_bn_add_mod_n_u64(len1, k1.n, aM, bM, cM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + KRML_CHECK_SIZE(sizeof (uint64_t), len1); + uint64_t a_copy[len1]; + memset(a_copy, 0U, len1 * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len1); + uint64_t b_copy[len1]; + memset(b_copy, 0U, len1 * sizeof (uint64_t)); + memcpy(a_copy, aM, len1 * sizeof (uint64_t)); + memcpy(b_copy, bM, len1 * sizeof (uint64_t)); + Hacl_Bignum_bn_add_mod_n_u64(len1, uu____0.n, a_copy, b_copy, cM); } /** @@ -195,8 +202,7 @@ Hacl_GenericField64_sub( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_bn_sub_mod_n_u64(len1, k1.n, aM, bM, cM); + Hacl_Bignum_bn_sub_mod_n_u64(len1, (*k).n, aM, bM, cM); } /** @@ -214,8 +220,8 @@ Hacl_GenericField64_mul( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, k1.n, k1.mu, aM, bM, cM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, uu____0.n, uu____0.mu, aM, bM, cM); } /** @@ -232,8 +238,8 @@ Hacl_GenericField64_sqr( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, k1.n, k1.mu, aM, cM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, uu____0.n, uu____0.mu, aM, cM); } /** @@ -245,8 +251,8 @@ Convert a bignum `one` to its Montgomery representation. 
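The exp_consttime and exp_vartime hunks in this file (the u32 variants above and the u64 variants below) keep the same 4-bit fixed-window structure: a 16-entry table of powers, four squarings per window, then one multiplication by the selected table entry. The sketch below shows that control flow over plain uint64_t modular arithmetic (using the GCC/Clang __uint128_t extension), not the Montgomery-form code itself; the constant-time variant additionally scans the whole table with masks instead of indexing it with a secret value.

#include <stdint.h>

/* (a * b) mod n without overflow, via the __uint128_t extension. */
static uint64_t mul_mod(uint64_t a, uint64_t b, uint64_t n)
{
  return (uint64_t)(((__uint128_t)a * b) % n);
}

/* a^e mod n with a fixed 4-bit window: table[i] = a^i mod n, then for each
   window (most significant first) square four times and multiply by the
   selected table entry. */
static uint64_t pow_mod_window4(uint64_t a, uint64_t e, uint64_t n)
{
  uint64_t table[16U];
  table[0U] = 1ULL % n;
  for (uint32_t i = 1U; i < 16U; i++)
  {
    table[i] = mul_mod(table[i - 1U], a, n);
  }
  uint64_t res = 1ULL % n;
  for (int32_t w = 15; w >= 0; w--)        /* 64 exponent bits = 16 windows */
  {
    for (uint32_t j = 0U; j < 4U; j++)
    {
      res = mul_mod(res, res, n);
    }
    uint64_t bits = (e >> (4 * w)) & 0xFULL;
    res = mul_mod(res, table[bits], n);    /* the vartime code skips this when bits == 0 */
  }
  return res;
}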
void Hacl_GenericField64_one(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, uint64_t *oneM) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, k1.n, k1.mu, k1.r2, oneM); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, uu____0.n, uu____0.mu, uu____0.r2, oneM); } /** @@ -277,22 +283,22 @@ Hacl_GenericField64_exp_consttime( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint64_t), k1.len); - uint64_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint64_t)); - memcpy(aMc, aM, k1.len * sizeof (uint64_t)); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + KRML_CHECK_SIZE(sizeof (uint64_t), uu____0.len); + uint64_t aMc[uu____0.len]; + memset(aMc, 0U, uu____0.len * sizeof (uint64_t)); + memcpy(aMc, aM, uu____0.len * sizeof (uint64_t)); if (bBits < 200U) { KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); uint64_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint64_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint64_t)); uint64_t sw = 0ULL; uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, uu____0.mu, ctx_r2, resM); for (uint32_t i0 = 0U; i0 < bBits; i0++) { uint32_t i1 = (bBits - i0 - 1U) / 64U; @@ -307,9 +313,9 @@ Hacl_GenericField64_exp_consttime( aMc[i] = aMc[i] ^ dummy; } uint64_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, aMc, resM, aMc); + Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, uu____0.mu, aMc, resM, aMc); uint64_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, resM, resM); + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, uu____0.mu, resM, resM); sw = bit; } uint64_t sw0 = sw; @@ -334,8 +340,8 @@ Hacl_GenericField64_exp_consttime( KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); uint64_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint64_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint64_t)); KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1); uint64_t table[16U * len1]; memset(table, 0U, 16U * len1 * sizeof (uint64_t)); @@ -346,19 +352,20 @@ Hacl_GenericField64_exp_consttime( uint64_t *t1 = table + len1; uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, uu____0.mu, ctx_r20, t0); memcpy(t1, aMc, len1 * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * len1; uint64_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp); + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, uu____0.mu, t11, tmp); memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * len1; uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp); + 
Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, uu____0.mu, aMc, t2, tmp); memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -373,8 +380,8 @@ Hacl_GenericField64_exp_consttime( const uint64_t *res_j = table + (i1 + 1U) * len1; for (uint32_t i = 0U; i < len1; i++) { - uint64_t *os = resM; uint64_t x = (c & res_j[i]) | (~c & resM[i]); + uint64_t *os = resM; os[i] = x; }); } @@ -382,7 +389,7 @@ Hacl_GenericField64_exp_consttime( { uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, uu____0.mu, ctx_r2, resM); } KRML_CHECK_SIZE(sizeof (uint64_t), len1); uint64_t tmp0[len1]; @@ -394,9 +401,10 @@ Hacl_GenericField64_exp_consttime( 4U, 1U, uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM);); + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, uu____0.mu, resM, resM);); uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -406,12 +414,12 @@ Hacl_GenericField64_exp_consttime( const uint64_t *res_j = table + (i1 + 1U) * len1; for (uint32_t i = 0U; i < len1; i++) { - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x; }); uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, resM, tmp0, resM); + Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, uu____0.mu, resM, tmp0, resM); } } } @@ -444,21 +452,21 @@ Hacl_GenericField64_exp_vartime( ) { uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint64_t), k1.len); - uint64_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint64_t)); - memcpy(aMc, aM, k1.len * sizeof (uint64_t)); + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + KRML_CHECK_SIZE(sizeof (uint64_t), uu____0.len); + uint64_t aMc[uu____0.len]; + memset(aMc, 0U, uu____0.len * sizeof (uint64_t)); + memcpy(aMc, aM, uu____0.len * sizeof (uint64_t)); if (bBits < 200U) { KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); uint64_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint64_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint64_t)); uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, uu____0.mu, ctx_r2, resM); for (uint32_t i = 0U; i < bBits; i++) { uint32_t i1 = i / 64U; @@ -468,10 +476,10 @@ Hacl_GenericField64_exp_vartime( if (!(bit == 0ULL)) { uint64_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, resM, aMc, resM); + Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, uu____0.mu, resM, aMc, resM); } uint64_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n0, k1.mu, aMc, aMc); + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n0, uu____0.mu, aMc, aMc); } } else @@ -488,8 +496,8 @@ Hacl_GenericField64_exp_vartime( KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); uint64_t ctx[len1 + len1]; memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - 
memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); + memcpy(ctx, uu____0.n, len1 * sizeof (uint64_t)); + memcpy(ctx + len1, uu____0.r2, len1 * sizeof (uint64_t)); KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1); uint64_t table[16U * len1]; memset(table, 0U, 16U * len1 * sizeof (uint64_t)); @@ -500,19 +508,20 @@ Hacl_GenericField64_exp_vartime( uint64_t *t1 = table + len1; uint64_t *ctx_n0 = ctx; uint64_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, uu____0.mu, ctx_r20, t0); memcpy(t1, aMc, len1 * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * len1; uint64_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp); + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, uu____0.mu, t11, tmp); memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * len1; uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp); + Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, uu____0.mu, aMc, t2, tmp); memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t));); if (bBits % 4U != 0U) { @@ -526,7 +535,7 @@ Hacl_GenericField64_exp_vartime( { uint64_t *ctx_n = ctx; uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); + Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, uu____0.mu, ctx_r2, resM); } KRML_CHECK_SIZE(sizeof (uint64_t), len1); uint64_t tmp0[len1]; @@ -538,14 +547,15 @@ Hacl_GenericField64_exp_vartime( 4U, 1U, uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM);); + Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, uu____0.mu, resM, resM);); uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U); + KRML_MAYBE_UNUSED_VAR(table); uint32_t bits_l32 = (uint32_t)bits_l; const uint64_t *a_bits_l = table + bits_l32 * len1; memcpy(tmp0, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t)); uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, resM, tmp0, resM); + Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, uu____0.mu, resM, tmp0, resM); } } } @@ -568,16 +578,16 @@ Hacl_GenericField64_inverse( uint64_t *aInvM ) { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint32_t len1 = k1.len; + Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 uu____0 = *k; + uint32_t len1 = uu____0.len; KRML_CHECK_SIZE(sizeof (uint64_t), len1); uint64_t n2[len1]; memset(n2, 0U, len1 * sizeof (uint64_t)); - uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2); + uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, uu____0.n[0U], 2ULL, n2); uint64_t c1; if (1U < len1) { - uint64_t *a1 = k1.n + 1U; + uint64_t *a1 = uu____0.n + 1U; uint64_t *res1 = n2 + 1U; uint64_t c = c0; for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++) @@ -609,6 +619,6 @@ Hacl_GenericField64_inverse( c1 = c0; } KRML_MAYBE_UNUSED_VAR(c1); - Hacl_GenericField64_exp_vartime(k, aM, k1.len * 64U, n2, aInvM); + Hacl_GenericField64_exp_vartime(k, aM, uu____0.len * 64U, n2, aInvM); } diff --git a/src/Hacl_HKDF.c b/src/Hacl_HKDF.c index 027b719f..be05c08c 100644 --- a/src/Hacl_HKDF.c +++ b/src/Hacl_HKDF.c @@ -51,36 +51,45 @@ Hacl_HKDF_expand_sha2_256( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + 
infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -131,36 +140,45 @@ Hacl_HKDF_expand_sha2_384( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -211,36 +229,45 @@ Hacl_HKDF_expand_sha2_512( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen 
< len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -291,36 +318,45 @@ Hacl_HKDF_expand_blake2s_32( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } @@ -371,36 +407,45 @@ Hacl_HKDF_expand_blake2b_32( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } diff --git a/src/Hacl_HKDF_Blake2b_256.c b/src/Hacl_HKDF_Blake2b_256.c index fe89115d..82a3ea15 100644 --- a/src/Hacl_HKDF_Blake2b_256.c +++ b/src/Hacl_HKDF_Blake2b_256.c @@ -51,36 +51,45 @@ Hacl_HKDF_Blake2b_256_expand_blake2b_256( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, 
(tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } diff --git a/src/Hacl_HKDF_Blake2s_128.c b/src/Hacl_HKDF_Blake2s_128.c index 4c9e9450..879432a4 100644 --- a/src/Hacl_HKDF_Blake2s_128.c +++ b/src/Hacl_HKDF_Blake2s_128.c @@ -51,36 +51,45 @@ Hacl_HKDF_Blake2s_128_expand_blake2s_128( KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U); uint8_t text[tlen + infolen + 1U]; memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t)); - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; memcpy(text + tlen, info, infolen * sizeof (uint8_t)); + KRML_CHECK_SIZE(sizeof (uint8_t), tlen); + uint8_t tag[tlen]; + memset(tag, 0U, tlen * sizeof (uint8_t)); for (uint32_t i = 0U; i < n; i++) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(i + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (i == 0U) { Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U); } memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); } if (n * tlen < len) { + uint8_t *ctr = text + tlen + infolen; ctr[0U] = (uint8_t)(n + 1U); + KRML_MAYBE_UNUSED_VAR(text); + uint8_t *text0 = text + tlen; if (n == 0U) { Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U); } else { + memcpy(text, tag, tlen * sizeof (uint8_t)); Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U); } uint8_t *block = okm + n * tlen; memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); + return; } } diff --git a/src/Hacl_HMAC.c b/src/Hacl_HMAC.c index b03bc7ac..66e18c5a 100644 --- a/src/Hacl_HMAC.c +++ b/src/Hacl_HMAC.c @@ -26,19 +26,753 @@ #include "internal/Hacl_HMAC.h" #include "internal/Hacl_Krmllib.h" +#include "internal/Hacl_Hash_SHA3.h" #include "internal/Hacl_Hash_SHA2.h" #include "internal/Hacl_Hash_SHA1.h" +#include "internal/Hacl_Hash_MD5.h" #include "internal/Hacl_Hash_Blake2s.h" #include "internal/Hacl_Hash_Blake2b.h" +/** +Write the HMAC-MD5 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. +`dst` must point to 16 bytes of memory. 
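The HKDF hunks above separate `tag` from `text` and copy the previous tag back into `text` at the start of each round; that is the T(i) = HMAC(prk, T(i-1) || info || counter) chaining of RFC 5869. A minimal sketch of that chaining for the SHA-256 instance, written against the one-shot Hacl_HMAC_compute_sha2_256 defined later in this file; okm_len is assumed to be at most 255 * 32, info_len small enough for a stack VLA, and bounds checks are omitted.

#include <stdint.h>
#include <string.h>
#include "Hacl_HMAC.h"

/* HKDF-Expand, SHA-256 instance: T(i) = HMAC(prk, T(i-1) || info || i),
   with the counter starting at 1 and T(0) empty. */
static void hkdf_expand_sha2_256_sketch(
  uint8_t *okm, uint32_t okm_len,
  uint8_t *prk, uint32_t prk_len,
  uint8_t *info, uint32_t info_len
)
{
  uint8_t tag[32U] = { 0U };
  uint8_t text[32U + info_len + 1U];               /* T(i-1) || info || counter */
  memcpy(text + 32U, info, info_len * sizeof (uint8_t));
  uint32_t n = okm_len / 32U + (okm_len % 32U == 0U ? 0U : 1U);
  for (uint32_t i = 0U; i < n; i++)
  {
    text[32U + info_len] = (uint8_t)(i + 1U);
    if (i == 0U)
    {
      /* First round: no previous tag, MAC info || 0x01 only. */
      Hacl_HMAC_compute_sha2_256(tag, prk, prk_len, text + 32U, info_len + 1U);
    }
    else
    {
      memcpy(text, tag, 32U * sizeof (uint8_t));
      Hacl_HMAC_compute_sha2_256(tag, prk, prk_len, text, 32U + info_len + 1U);
    }
    uint32_t out_len = (i == n - 1U) ? okm_len - i * 32U : 32U;
    memcpy(okm + i * 32U, tag, out_len * sizeof (uint8_t));
  }
}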
+*/ +void +Hacl_HMAC_compute_md5( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 16U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_MD5_hash_oneshot(nkey, key, key_len); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U }; + if (data_len == 0U) + { + Hacl_Hash_MD5_update_last(s, 0ULL, ipad, 64U); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_MD5_update_multi(s, ipad, 1U); + Hacl_Hash_MD5_update_multi(s, full_blocks, n_blocks); + Hacl_Hash_MD5_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + } + uint8_t *dst1 = ipad; + Hacl_Hash_MD5_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_MD5_init(s); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 16U / block_len; + uint32_t rem0 = 16U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 16U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_MD5_update_multi(s, opad, 1U); + Hacl_Hash_MD5_update_multi(s, full_blocks, n_blocks); + Hacl_Hash_MD5_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + Hacl_Hash_MD5_finish(s, dst); +} + /** Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`. -The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. -`dst` must point to 20 bytes of memory. +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. +`dst` must point to 20 bytes of memory. 
+*/ +void +Hacl_HMAC_compute_sha1( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 20U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U }; + if (data_len == 0U) + { + Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA1_update_multi(s, ipad, 1U); + Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks); + Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + } + uint8_t *dst1 = ipad; + Hacl_Hash_SHA1_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_SHA1_init(s); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 20U / block_len; + uint32_t rem0 = 20U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA1_update_multi(s, opad, 1U); + Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks); + Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + Hacl_Hash_SHA1_finish(s, dst); +} + +/** +Write the HMAC-SHA-2-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 28 bytes of memory. 
+*/ +void +Hacl_HMAC_compute_sha2_224( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 28U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA2_hash_224(nkey, key, key_len); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t st[8U] = { 0U }; + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint32_t x = Hacl_Hash_SHA2_h224[i]; + uint32_t *os = st; + os[i] = x;); + uint32_t *s = st; + if (data_len == 0U) + { + Hacl_Hash_SHA2_sha224_update_last(0ULL + (uint64_t)64U, 64U, ipad, s); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA2_sha224_update_nblocks(64U, ipad, s); + Hacl_Hash_SHA2_sha224_update_nblocks(n_blocks * 64U, full_blocks, s); + Hacl_Hash_SHA2_sha224_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, + rem_len, + rem, + s); + } + uint8_t *dst1 = ipad; + Hacl_Hash_SHA2_sha224_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_SHA2_sha224_init(s); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 28U / block_len; + uint32_t rem0 = 28U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 28U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA2_sha224_update_nblocks(64U, opad, s); + Hacl_Hash_SHA2_sha224_update_nblocks(n_blocks * 64U, full_blocks, s); + Hacl_Hash_SHA2_sha224_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, + rem_len, + rem, + s); + Hacl_Hash_SHA2_sha224_finish(s, dst); +} + +/** +Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 32 bytes of memory. 
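A minimal caller for the one-shot API documented directly above (Hacl_HMAC_compute_sha2_256, defined below); the key and message values are placeholders, and `mac` is sized to the 32-byte output that HMAC-SHA-2-256 requires.

#include <stdint.h>
#include <stdio.h>
#include "Hacl_HMAC.h"

int main(void)
{
  uint8_t key[] = "placeholder-key";                /* any key length is accepted */
  uint8_t msg[] = "message to authenticate";
  uint8_t mac[32U] = { 0U };                        /* 32 bytes for HMAC-SHA-2-256 */
  Hacl_HMAC_compute_sha2_256(mac, key, (uint32_t)sizeof key - 1U,
    msg, (uint32_t)sizeof msg - 1U);
  for (uint32_t i = 0U; i < 32U; i++)
  {
    printf("%02x", mac[i]);
  }
  printf("\n");
  return 0;
}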
+*/ +void +Hacl_HMAC_compute_sha2_256( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 32U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA2_hash_256(nkey, key, key_len); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t st[8U] = { 0U }; + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint32_t x = Hacl_Hash_SHA2_h256[i]; + uint32_t *os = st; + os[i] = x;); + uint32_t *s = st; + if (data_len == 0U) + { + Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA2_sha256_update_nblocks(64U, ipad, s); + Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s); + Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, + rem_len, + rem, + s); + } + uint8_t *dst1 = ipad; + Hacl_Hash_SHA2_sha256_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_SHA2_sha256_init(s); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 32U / block_len; + uint32_t rem0 = 32U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA2_sha256_update_nblocks(64U, opad, s); + Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s); + Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, + rem_len, + rem, + s); + Hacl_Hash_SHA2_sha256_finish(s, dst); +} + +/** +Write the HMAC-SHA-2-384 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 48 bytes of memory. 
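Each compute_* body in this file evaluates HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)) incrementally, one block at a time, so no message-sized buffer is ever allocated. For reference, the same value computed non-incrementally with the one-shot Hacl_Hash_SHA2_hash_256; this is a cross-check sketch only (it heap-allocates a message-sized buffer and assumes the public Hacl_Hash_SHA2.h header), not a substitute for the streaming code.

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include "Hacl_Hash_SHA2.h"

/* Reference HMAC-SHA-2-256: dst <- H((K' ^ opad) || H((K' ^ ipad) || data)),
   where K' is the key zero-padded (or hashed, if longer) to the 64-byte block. */
static void hmac_sha2_256_reference(
  uint8_t *dst, uint8_t *key, uint32_t key_len,
  uint8_t *data, uint32_t data_len
)
{
  uint8_t key_block[64U] = { 0U };
  if (key_len <= 64U)
  {
    memcpy(key_block, key, key_len * sizeof (uint8_t));
  }
  else
  {
    Hacl_Hash_SHA2_hash_256(key_block, key, key_len);   /* long keys are hashed first */
  }
  uint8_t *inner = (uint8_t *)malloc((size_t)data_len + 64U);
  if (inner == NULL) { return; }
  uint8_t outer[64U + 32U];
  for (uint32_t i = 0U; i < 64U; i++)
  {
    inner[i] = (uint8_t)(key_block[i] ^ 0x36U);          /* ipad */
    outer[i] = (uint8_t)(key_block[i] ^ 0x5cU);          /* opad */
  }
  memcpy(inner + 64U, data, data_len * sizeof (uint8_t));
  Hacl_Hash_SHA2_hash_256(outer + 64U, inner, 64U + data_len);  /* inner hash */
  Hacl_Hash_SHA2_hash_256(dst, outer, 64U + 32U);               /* outer hash */
  free(inner);
}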
+*/ +void +Hacl_HMAC_compute_sha2_384( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 128U) + { + ite = key_len; + } + else + { + ite = 48U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 128U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA2_hash_384(nkey, key, key_len); + } + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint64_t st[8U] = { 0U }; + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint64_t x = Hacl_Hash_SHA2_h384[i]; + uint64_t *os = st; + os[i] = x;); + uint64_t *s = st; + if (data_len == 0U) + { + Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL), + FStar_UInt128_uint64_to_uint128((uint64_t)128U)), + 128U, + ipad, + s); + } + else + { + uint32_t block_len = 128U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA2_sha384_update_nblocks(128U, ipad, s); + Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s); + Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), + FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), + FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), + rem_len, + rem, + s); + } + uint8_t *dst1 = ipad; + Hacl_Hash_SHA2_sha384_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_SHA2_sha384_init(s); + uint32_t block_len = 128U; + uint32_t n_blocks0 = 48U / block_len; + uint32_t rem0 = 48U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA2_sha384_update_nblocks(128U, opad, s); + Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s); + Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), + FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), + FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), + rem_len, + rem, + s); + Hacl_Hash_SHA2_sha384_finish(s, dst); +} + +/** +Write the HMAC-SHA-2-512 MAC of a message (`data`) by using a key (`key`) 
into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 64 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha2_512( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 128U) + { + ite = key_len; + } + else + { + ite = 64U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 128U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA2_hash_512(nkey, key, key_len); + } + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint64_t st[8U] = { 0U }; + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint64_t x = Hacl_Hash_SHA2_h512[i]; + uint64_t *os = st; + os[i] = x;); + uint64_t *s = st; + if (data_len == 0U) + { + Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL), + FStar_UInt128_uint64_to_uint128((uint64_t)128U)), + 128U, + ipad, + s); + } + else + { + uint32_t block_len = 128U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA2_sha512_update_nblocks(128U, ipad, s); + Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s); + Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), + FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), + FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), + rem_len, + rem, + s); + } + uint8_t *dst1 = ipad; + Hacl_Hash_SHA2_sha512_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_SHA2_sha512_init(s); + uint32_t block_len = 128U; + uint32_t n_blocks0 = 64U / block_len; + uint32_t rem0 = 64U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA2_sha512_update_nblocks(128U, opad, s); + Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s); + Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), + FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), + 
FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), + rem_len, + rem, + s); + Hacl_Hash_SHA2_sha512_finish(s, dst); +} + +/** +Write the HMAC-SHA-3-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 144 bytes. +`dst` must point to 28 bytes of memory. */ void -Hacl_HMAC_compute_sha1( +Hacl_HMAC_compute_sha3_224( uint8_t *dst, uint8_t *key, uint32_t key_len, @@ -46,57 +780,52 @@ Hacl_HMAC_compute_sha1( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[144U]; + memset(key_block, 0U, 144U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; - if (key_len <= 64U) + if (key_len <= 144U) { ite = key_len; } else { - ite = 20U; + ite = 28U; } uint8_t *zeroes = key_block + ite; KRML_MAYBE_UNUSED_VAR(zeroes); - if (key_len <= 64U) + if (key_len <= 144U) { memcpy(nkey, key, key_len * sizeof (uint8_t)); } else { - Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len); + Hacl_Hash_SHA3_sha3_224(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[144U]; + memset(ipad, 0x36U, 144U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 144U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[144U]; + memset(opad, 0x5cU, 144U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 144U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } - uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U }; - uint8_t *dst1 = ipad; + uint64_t s[25U] = { 0U }; if (data_len == 0U) { - Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_224, s, ipad, 144U); } else { - uint32_t block_len = 64U; + uint32_t block_len = 144U; uint32_t n_blocks0 = data_len / block_len; uint32_t rem0 = data_len % block_len; K___uint32_t_uint32_t scrut; @@ -114,21 +843,30 @@ Hacl_HMAC_compute_sha1( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = data; uint8_t *rem = data + full_blocks_len; - Hacl_Hash_SHA1_update_multi(s, ipad, 1U); - Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks); - Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_224, s, rem, rem_len); } - Hacl_Hash_SHA1_finish(s, dst1); + uint8_t *dst1 = ipad; + uint32_t remOut = 28U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 28U - remOut, hbuf0, remOut * sizeof (uint8_t)); uint8_t *hash1 = ipad; - Hacl_Hash_SHA1_init(s); - uint32_t block_len = 64U; - uint32_t n_blocks0 = 20U / block_len; - uint32_t rem0 = 20U % block_len; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 144U; + uint32_t n_blocks0 = 28U / block_len; + uint32_t rem0 = 28U % 
block_len; K___uint32_t_uint32_t scrut; if (n_blocks0 > 0U && rem0 == 0U) { uint32_t n_blocks_ = n_blocks0 - 1U; - scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len }); + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 28U - n_blocks_ * block_len }); } else { @@ -139,20 +877,28 @@ Hacl_HMAC_compute_sha1( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = hash1; uint8_t *rem = hash1 + full_blocks_len; - Hacl_Hash_SHA1_update_multi(s, opad, 1U); - Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks); - Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); - Hacl_Hash_SHA1_finish(s, dst); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_224, s, rem, rem_len); + uint32_t remOut0 = 28U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 28U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); } /** -Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`. +Write the HMAC-SHA-3-256 MAC of a message (`data`) by using a key (`key`) into `dst`. -The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +The key can be any length and will be hashed if it is longer and padded if it is shorter than 136 bytes. `dst` must point to 32 bytes of memory. */ void -Hacl_HMAC_compute_sha2_256( +Hacl_HMAC_compute_sha3_256( uint8_t *dst, uint8_t *key, uint32_t key_len, @@ -160,13 +906,11 @@ Hacl_HMAC_compute_sha2_256( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[136U]; + memset(key_block, 0U, 136U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; - if (key_len <= 64U) + if (key_len <= 136U) { ite = key_len; } @@ -176,49 +920,38 @@ Hacl_HMAC_compute_sha2_256( } uint8_t *zeroes = key_block + ite; KRML_MAYBE_UNUSED_VAR(zeroes); - if (key_len <= 64U) + if (key_len <= 136U) { memcpy(nkey, key, key_len * sizeof (uint8_t)); } else { - Hacl_Hash_SHA2_hash_256(nkey, key, key_len); + Hacl_Hash_SHA3_sha3_256(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[136U]; + memset(ipad, 0x36U, 136U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 136U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[136U]; + memset(opad, 0x5cU, 136U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 136U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } - uint32_t st[8U] = { 0U }; - KRML_MAYBE_FOR8(i, - 0U, - 8U, - 1U, - uint32_t *os = st; - uint32_t x = Hacl_Hash_SHA2_h256[i]; - os[i] = x;); - uint32_t *s = st; - uint8_t *dst1 = ipad; + uint64_t s[25U] = { 0U }; if (data_len == 0U) { - Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_256, s, ipad, 136U); } 
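  /* A worked example of the block split in the else-branch below, assuming a
     hypothetical data_len of 272 with block_len = 136 (the SHA3-256 rate):
       n_blocks0 = 272 / 136 = 2, rem0 = 0
       -> n_blocks = 1, rem_len = 136
     so one full block is absorbed with update_multi (after the ipad block)
     and the final 136 bytes always go through update_last, which applies the
     Keccak padding to a non-empty block. */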
else { - uint32_t block_len = 64U; + uint32_t block_len = 136U; uint32_t n_blocks0 = data_len / block_len; uint32_t rem0 = data_len % block_len; K___uint32_t_uint32_t scrut; @@ -236,17 +969,23 @@ Hacl_HMAC_compute_sha2_256( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = data; uint8_t *rem = data + full_blocks_len; - Hacl_Hash_SHA2_sha256_update_nblocks(64U, ipad, s); - Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s); - Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, - rem_len, - rem, - s); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_256, s, rem, rem_len); } - Hacl_Hash_SHA2_sha256_finish(s, dst1); + uint8_t *dst1 = ipad; + uint32_t remOut = 32U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 32U - remOut, hbuf0, remOut * sizeof (uint8_t)); uint8_t *hash1 = ipad; - Hacl_Hash_SHA2_sha256_init(s); - uint32_t block_len = 64U; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 136U; uint32_t n_blocks0 = 32U / block_len; uint32_t rem0 = 32U % block_len; K___uint32_t_uint32_t scrut; @@ -264,23 +1003,28 @@ Hacl_HMAC_compute_sha2_256( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = hash1; uint8_t *rem = hash1 + full_blocks_len; - Hacl_Hash_SHA2_sha256_update_nblocks(64U, opad, s); - Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s); - Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, - rem_len, - rem, - s); - Hacl_Hash_SHA2_sha256_finish(s, dst); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_256, s, rem, rem_len); + uint32_t remOut0 = 32U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 32U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); } /** -Write the HMAC-SHA-2-384 MAC of a message (`data`) by using a key (`key`) into `dst`. +Write the HMAC-SHA-3-384 MAC of a message (`data`) by using a key (`key`) into `dst`. -The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +The key can be any length and will be hashed if it is longer and padded if it is shorter than 104 bytes. `dst` must point to 48 bytes of memory. 
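
As an illustration only (a sketch, not taken from the generated sources; the
32-byte key and 5-byte message are arbitrary):

  uint8_t mac[48U];
  uint8_t key[32U] = { 0U };
  uint8_t msg[5U] = { 'h', 'e', 'l', 'l', 'o' };
  Hacl_HMAC_compute_sha3_384(mac, key, 32U, msg, 5U);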
*/ void -Hacl_HMAC_compute_sha2_384( +Hacl_HMAC_compute_sha3_384( uint8_t *dst, uint8_t *key, uint32_t key_len, @@ -288,13 +1032,11 @@ Hacl_HMAC_compute_sha2_384( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[104U]; + memset(key_block, 0U, 104U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; - if (key_len <= 128U) + if (key_len <= 104U) { ite = key_len; } @@ -304,53 +1046,38 @@ Hacl_HMAC_compute_sha2_384( } uint8_t *zeroes = key_block + ite; KRML_MAYBE_UNUSED_VAR(zeroes); - if (key_len <= 128U) + if (key_len <= 104U) { memcpy(nkey, key, key_len * sizeof (uint8_t)); } else { - Hacl_Hash_SHA2_hash_384(nkey, key, key_len); + Hacl_Hash_SHA3_sha3_384(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[104U]; + memset(ipad, 0x36U, 104U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 104U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[104U]; + memset(opad, 0x5cU, 104U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 104U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } - uint64_t st[8U] = { 0U }; - KRML_MAYBE_FOR8(i, - 0U, - 8U, - 1U, - uint64_t *os = st; - uint64_t x = Hacl_Hash_SHA2_h384[i]; - os[i] = x;); - uint64_t *s = st; - uint8_t *dst1 = ipad; + uint64_t s[25U] = { 0U }; if (data_len == 0U) { - Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL), - FStar_UInt128_uint64_to_uint128((uint64_t)128U)), - 128U, - ipad, - s); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_384, s, ipad, 104U); } else { - uint32_t block_len = 128U; + uint32_t block_len = 104U; uint32_t n_blocks0 = data_len / block_len; uint32_t rem0 = data_len % block_len; K___uint32_t_uint32_t scrut; @@ -368,19 +1095,23 @@ Hacl_HMAC_compute_sha2_384( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = data; uint8_t *rem = data + full_blocks_len; - Hacl_Hash_SHA2_sha384_update_nblocks(128U, ipad, s); - Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s); - Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), - rem_len, - rem, - s); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_384, s, rem, rem_len); } - Hacl_Hash_SHA2_sha384_finish(s, dst1); + uint8_t *dst1 = ipad; + uint32_t remOut = 48U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 48U - remOut, hbuf0, remOut * sizeof (uint8_t)); uint8_t *hash1 = ipad; - Hacl_Hash_SHA2_sha384_init(s); - uint32_t block_len = 128U; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 104U; uint32_t n_blocks0 = 48U / block_len; uint32_t rem0 = 48U % block_len; 
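  /* At this point the 48-byte inner digest (held in hash1, i.e. the reused
     ipad buffer) is fed to the outer keyed hash. Checking the split below
     with block_len = 104 (the SHA3-384 rate): 48 / 104 = 0 and 48 % 104 = 48,
     so n_blocks = 0 and rem_len = 48, and the whole inner digest is absorbed
     by update_last right after the opad block. */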
K___uint32_t_uint32_t scrut; @@ -398,25 +1129,28 @@ Hacl_HMAC_compute_sha2_384( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = hash1; uint8_t *rem = hash1 + full_blocks_len; - Hacl_Hash_SHA2_sha384_update_nblocks(128U, opad, s); - Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s); - Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), - rem_len, - rem, - s); - Hacl_Hash_SHA2_sha384_finish(s, dst); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_384, s, rem, rem_len); + uint32_t remOut0 = 48U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 48U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); } /** -Write the HMAC-SHA-2-512 MAC of a message (`data`) by using a key (`key`) into `dst`. +Write the HMAC-SHA-3-512 MAC of a message (`data`) by using a key (`key`) into `dst`. -The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +The key can be any length and will be hashed if it is longer and padded if it is shorter than 72 bytes. `dst` must point to 64 bytes of memory. */ void -Hacl_HMAC_compute_sha2_512( +Hacl_HMAC_compute_sha3_512( uint8_t *dst, uint8_t *key, uint32_t key_len, @@ -424,13 +1158,11 @@ Hacl_HMAC_compute_sha2_512( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[72U]; + memset(key_block, 0U, 72U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; - if (key_len <= 128U) + if (key_len <= 72U) { ite = key_len; } @@ -440,53 +1172,38 @@ Hacl_HMAC_compute_sha2_512( } uint8_t *zeroes = key_block + ite; KRML_MAYBE_UNUSED_VAR(zeroes); - if (key_len <= 128U) + if (key_len <= 72U) { memcpy(nkey, key, key_len * sizeof (uint8_t)); } else { - Hacl_Hash_SHA2_hash_512(nkey, key, key_len); + Hacl_Hash_SHA3_sha3_512(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[72U]; + memset(ipad, 0x36U, 72U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 72U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[72U]; + memset(opad, 0x5cU, 72U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 72U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } - uint64_t st[8U] = { 0U }; - KRML_MAYBE_FOR8(i, - 0U, - 8U, - 1U, - uint64_t *os = st; - uint64_t x = Hacl_Hash_SHA2_h512[i]; - os[i] = x;); - uint64_t *s = st; - uint8_t *dst1 = ipad; + uint64_t s[25U] = { 0U }; if (data_len == 0U) { - Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL), - FStar_UInt128_uint64_to_uint128((uint64_t)128U)), - 128U, - ipad, - s); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_512, 
s, ipad, 72U); } else { - uint32_t block_len = 128U; + uint32_t block_len = 72U; uint32_t n_blocks0 = data_len / block_len; uint32_t rem0 = data_len % block_len; K___uint32_t_uint32_t scrut; @@ -504,19 +1221,23 @@ Hacl_HMAC_compute_sha2_512( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = data; uint8_t *rem = data + full_blocks_len; - Hacl_Hash_SHA2_sha512_update_nblocks(128U, ipad, s); - Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s); - Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), - rem_len, - rem, - s); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_512, s, rem, rem_len); } - Hacl_Hash_SHA2_sha512_finish(s, dst1); + uint8_t *dst1 = ipad; + uint32_t remOut = 64U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 64U - remOut, hbuf0, remOut * sizeof (uint8_t)); uint8_t *hash1 = ipad; - Hacl_Hash_SHA2_sha512_init(s); - uint32_t block_len = 128U; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 72U; uint32_t n_blocks0 = 64U / block_len; uint32_t rem0 = 64U % block_len; K___uint32_t_uint32_t scrut; @@ -534,15 +1255,18 @@ Hacl_HMAC_compute_sha2_512( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = hash1; uint8_t *rem = hash1 + full_blocks_len; - Hacl_Hash_SHA2_sha512_update_nblocks(128U, opad, s); - Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s); - Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)), - rem_len, - rem, - s); - Hacl_Hash_SHA2_sha512_finish(s, dst); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_512, s, rem, rem_len); + uint32_t remOut0 = 64U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 64U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); } /** @@ -560,10 +1284,8 @@ Hacl_HMAC_compute_blake2s_32( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -584,19 +1306,17 @@ Hacl_HMAC_compute_blake2s_32( { Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } 
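  /* This loop and the one just below build the standard HMAC pads in place:
     ipad = key_block XOR 0x36 in every byte, opad = key_block XOR 0x5c. For
     example, wherever key_block[i] is 0 (unused key bytes) ipad[i] becomes
     0x36 and opad[i] becomes 0x5c; for key_block[i] = 0x0f they become 0x39
     and 0x53. */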
- KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -605,11 +1325,10 @@ Hacl_HMAC_compute_blake2s_32( uint32_t s[16U] = { 0U }; Hacl_Hash_Blake2s_init(s, 0U, 32U); uint32_t *s0 = s; - uint8_t *dst1 = ipad; if (data_len == 0U) { uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad); + Hacl_Hash_Blake2s_update_last(64U, wv, s0, false, 0ULL, 64U, ipad); } else { @@ -644,10 +1363,12 @@ Hacl_HMAC_compute_blake2s_32( Hacl_Hash_Blake2s_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); } + uint8_t *dst1 = ipad; Hacl_Hash_Blake2s_finish(32U, dst1, s0); uint8_t *hash1 = ipad; Hacl_Hash_Blake2s_init(s0, 0U, 32U); @@ -682,6 +1403,7 @@ Hacl_HMAC_compute_blake2s_32( Hacl_Hash_Blake2s_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); @@ -703,10 +1425,8 @@ Hacl_HMAC_compute_blake2b_32( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -727,19 +1447,17 @@ Hacl_HMAC_compute_blake2b_32( { Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -748,11 +1466,16 @@ Hacl_HMAC_compute_blake2b_32( uint64_t s[16U] = { 0U }; Hacl_Hash_Blake2b_init(s, 0U, 64U); uint64_t *s0 = s; - uint8_t *dst1 = ipad; if (data_len == 0U) { uint64_t wv[16U] = { 0U }; - Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad); + Hacl_Hash_Blake2b_update_last(128U, + wv, + s0, + false, + FStar_UInt128_uint64_to_uint128(0ULL), + 128U, + ipad); } else { @@ -787,11 +1510,13 @@ Hacl_HMAC_compute_blake2b_32( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, rem); } + uint8_t *dst1 = ipad; Hacl_Hash_Blake2b_finish(64U, dst1, s0); uint8_t *hash1 = ipad; Hacl_Hash_Blake2b_init(s0, 0U, 64U); @@ -826,6 +1551,7 @@ Hacl_HMAC_compute_blake2b_32( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, diff --git a/src/Hacl_HMAC_Blake2b_256.c b/src/Hacl_HMAC_Blake2b_256.c index 6197490a..9be9fe7f 100644 --- a/src/Hacl_HMAC_Blake2b_256.c +++ b/src/Hacl_HMAC_Blake2b_256.c @@ -44,10 +44,8 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( uint32_t data_len ) { - uint32_t l = 128U; - 
KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -68,19 +66,17 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( { Hacl_Hash_Blake2b_Simd256_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -89,13 +85,13 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[4U] KRML_POST_ALIGN(32) = { 0U }; Hacl_Hash_Blake2b_Simd256_init(s, 0U, 64U); Lib_IntVector_Intrinsics_vec256 *s0 = s; - uint8_t *dst1 = ipad; if (data_len == 0U) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U }; Hacl_Hash_Blake2b_Simd256_update_last(128U, wv, s0, + false, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad); @@ -138,11 +134,13 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( Hacl_Hash_Blake2b_Simd256_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, rem); } + uint8_t *dst1 = ipad; Hacl_Hash_Blake2b_Simd256_finish(64U, dst1, s0); uint8_t *hash1 = ipad; Hacl_Hash_Blake2b_Simd256_init(s0, 0U, 64U); @@ -182,6 +180,7 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( Hacl_Hash_Blake2b_Simd256_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, diff --git a/src/Hacl_HMAC_Blake2s_128.c b/src/Hacl_HMAC_Blake2s_128.c index 0741bffb..76cc2b62 100644 --- a/src/Hacl_HMAC_Blake2s_128.c +++ b/src/Hacl_HMAC_Blake2s_128.c @@ -43,10 +43,8 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -67,19 +65,17 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( { Hacl_Hash_Blake2s_Simd128_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t ipad[l]; - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t opad[l]; - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -88,11 +84,10 @@ 
Hacl_HMAC_Blake2s_128_compute_blake2s_128( KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 s[4U] KRML_POST_ALIGN(16) = { 0U }; Hacl_Hash_Blake2s_Simd128_init(s, 0U, 32U); Lib_IntVector_Intrinsics_vec128 *s0 = s; - uint8_t *dst1 = ipad; if (data_len == 0U) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Hash_Blake2s_Simd128_update_last(64U, wv, s0, 0ULL, 64U, ipad); + Hacl_Hash_Blake2s_Simd128_update_last(64U, wv, s0, false, 0ULL, 64U, ipad); } else { @@ -127,10 +122,12 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( Hacl_Hash_Blake2s_Simd128_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); } + uint8_t *dst1 = ipad; Hacl_Hash_Blake2s_Simd128_finish(32U, dst1, s0); uint8_t *hash1 = ipad; Hacl_Hash_Blake2s_Simd128_init(s0, 0U, 32U); @@ -165,6 +162,7 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( Hacl_Hash_Blake2s_Simd128_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); diff --git a/src/Hacl_Hash_Blake2b.c b/src/Hacl_Hash_Blake2b.c index d490a1a5..980b9997 100644 --- a/src/Hacl_Hash_Blake2b.c +++ b/src/Hacl_Hash_Blake2b.c @@ -29,18 +29,25 @@ #include "lib_memzero0.h" static void -update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totlen, uint8_t *d) +update_block( + uint64_t *wv, + uint64_t *hash, + bool flag, + bool last_node, + FStar_UInt128_uint128 totlen, + uint8_t *d +) { uint64_t m_w[16U] = { 0U }; KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint64_t *os = m_w; uint8_t *bj = d + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = m_w; os[i] = x;); uint64_t mask[4U] = { 0U }; uint64_t wv_14; @@ -52,7 +59,15 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl { wv_14 = 0ULL; } - uint64_t wv_15 = 0ULL; + uint64_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFFFFFFFFFULL; + } + else + { + wv_15 = 0ULL; + } mask[0U] = FStar_UInt128_uint128_to_uint64(totlen); mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)); mask[2U] = wv_14; @@ -63,8 +78,8 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl 0U, 4U, 1U, - uint64_t *os = wv3; uint64_t x = wv3[i] ^ mask[i]; + uint64_t *os = wv3; os[i] = x;); KRML_MAYBE_FOR12(i0, 0U, @@ -124,131 +139,127 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl uint64_t *y = m_st + 4U; uint64_t *z = m_st + 8U; uint64_t *w = m_st + 12U; - uint32_t a = 0U; - uint32_t b0 = 1U; - uint32_t c0 = 2U; - uint32_t d10 = 3U; - uint64_t *wv_a0 = wv + a * 4U; - uint64_t *wv_b0 = wv + b0 * 4U; + uint64_t *wv_a = wv; + uint64_t *wv_b0 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + wv_b0[i]; + uint64_t x1 = wv_a[i] + wv_b0[i]; + uint64_t *os = wv_a; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + x[i]; + uint64_t x1 = wv_a[i] + x[i]; + uint64_t *os = wv_a; os[i] = x1;); - uint64_t *wv_a1 = wv + d10 * 4U; - uint64_t *wv_b1 = wv + a * 4U; + uint64_t *wv_a0 = wv + 12U; + uint64_t *wv_b1 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a1; - uint64_t x1 = wv_a1[i] ^ wv_b1[i]; + uint64_t x1 = wv_a0[i] ^ wv_b1[i]; + uint64_t *os = wv_a0; os[i] = x1;); - uint64_t *r10 = wv_a1; + uint64_t *r10 = wv_a0; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r10; uint64_t x1 = r10[i]; uint64_t x10 = x1 >> 32U | x1 << 32U; + uint64_t *os = r10; os[i] = x10;); - uint64_t *wv_a2 = wv 
+ c0 * 4U; - uint64_t *wv_b2 = wv + d10 * 4U; + uint64_t *wv_a1 = wv + 8U; + uint64_t *wv_b2 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a2; - uint64_t x1 = wv_a2[i] + wv_b2[i]; + uint64_t x1 = wv_a1[i] + wv_b2[i]; + uint64_t *os = wv_a1; os[i] = x1;); - uint64_t *wv_a3 = wv + b0 * 4U; - uint64_t *wv_b3 = wv + c0 * 4U; + uint64_t *wv_a2 = wv + 4U; + uint64_t *wv_b3 = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a3; - uint64_t x1 = wv_a3[i] ^ wv_b3[i]; + uint64_t x1 = wv_a2[i] ^ wv_b3[i]; + uint64_t *os = wv_a2; os[i] = x1;); - uint64_t *r12 = wv_a3; + uint64_t *r12 = wv_a2; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r12; uint64_t x1 = r12[i]; uint64_t x10 = x1 >> 24U | x1 << 40U; + uint64_t *os = r12; os[i] = x10;); - uint64_t *wv_a4 = wv + a * 4U; - uint64_t *wv_b4 = wv + b0 * 4U; + uint64_t *wv_a3 = wv; + uint64_t *wv_b4 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + wv_b4[i]; + uint64_t x1 = wv_a3[i] + wv_b4[i]; + uint64_t *os = wv_a3; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + y[i]; + uint64_t x1 = wv_a3[i] + y[i]; + uint64_t *os = wv_a3; os[i] = x1;); - uint64_t *wv_a5 = wv + d10 * 4U; - uint64_t *wv_b5 = wv + a * 4U; + uint64_t *wv_a4 = wv + 12U; + uint64_t *wv_b5 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a5; - uint64_t x1 = wv_a5[i] ^ wv_b5[i]; + uint64_t x1 = wv_a4[i] ^ wv_b5[i]; + uint64_t *os = wv_a4; os[i] = x1;); - uint64_t *r13 = wv_a5; + uint64_t *r13 = wv_a4; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r13; uint64_t x1 = r13[i]; uint64_t x10 = x1 >> 16U | x1 << 48U; + uint64_t *os = r13; os[i] = x10;); - uint64_t *wv_a6 = wv + c0 * 4U; - uint64_t *wv_b6 = wv + d10 * 4U; + uint64_t *wv_a5 = wv + 8U; + uint64_t *wv_b6 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a6; - uint64_t x1 = wv_a6[i] + wv_b6[i]; + uint64_t x1 = wv_a5[i] + wv_b6[i]; + uint64_t *os = wv_a5; os[i] = x1;); - uint64_t *wv_a7 = wv + b0 * 4U; - uint64_t *wv_b7 = wv + c0 * 4U; + uint64_t *wv_a6 = wv + 4U; + uint64_t *wv_b7 = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a7; - uint64_t x1 = wv_a7[i] ^ wv_b7[i]; + uint64_t x1 = wv_a6[i] ^ wv_b7[i]; + uint64_t *os = wv_a6; os[i] = x1;); - uint64_t *r14 = wv_a7; + uint64_t *r14 = wv_a6; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r14; uint64_t x1 = r14[i]; uint64_t x10 = x1 >> 63U | x1 << 1U; + uint64_t *os = r14; os[i] = x10;); uint64_t *r15 = wv + 4U; uint64_t *r21 = wv + 8U; @@ -280,131 +291,127 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl r112[1U] = x12; r112[2U] = x22; r112[3U] = x32; - uint32_t a0 = 0U; - uint32_t b = 1U; - uint32_t c = 2U; - uint32_t d1 = 3U; - uint64_t *wv_a = wv + a0 * 4U; - uint64_t *wv_b8 = wv + b * 4U; + uint64_t *wv_a7 = wv; + uint64_t *wv_b8 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + wv_b8[i]; + uint64_t x1 = wv_a7[i] + wv_b8[i]; + uint64_t *os = wv_a7; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + z[i]; + uint64_t x1 = wv_a7[i] + z[i]; + uint64_t *os = wv_a7; os[i] = x1;); - uint64_t *wv_a8 = wv + d1 * 4U; - uint64_t *wv_b9 = wv + a0 * 4U; + uint64_t *wv_a8 = wv + 12U; + uint64_t *wv_b9 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a8; uint64_t x1 = wv_a8[i] ^ wv_b9[i]; + uint64_t *os = wv_a8; os[i] = x1;); uint64_t *r16 = wv_a8; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r16; uint64_t x1 = 
r16[i]; uint64_t x13 = x1 >> 32U | x1 << 32U; + uint64_t *os = r16; os[i] = x13;); - uint64_t *wv_a9 = wv + c * 4U; - uint64_t *wv_b10 = wv + d1 * 4U; + uint64_t *wv_a9 = wv + 8U; + uint64_t *wv_b10 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a9; uint64_t x1 = wv_a9[i] + wv_b10[i]; + uint64_t *os = wv_a9; os[i] = x1;); - uint64_t *wv_a10 = wv + b * 4U; - uint64_t *wv_b11 = wv + c * 4U; + uint64_t *wv_a10 = wv + 4U; + uint64_t *wv_b11 = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a10; uint64_t x1 = wv_a10[i] ^ wv_b11[i]; + uint64_t *os = wv_a10; os[i] = x1;); uint64_t *r17 = wv_a10; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r17; uint64_t x1 = r17[i]; uint64_t x13 = x1 >> 24U | x1 << 40U; + uint64_t *os = r17; os[i] = x13;); - uint64_t *wv_a11 = wv + a0 * 4U; - uint64_t *wv_b12 = wv + b * 4U; + uint64_t *wv_a11 = wv; + uint64_t *wv_b12 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a11; uint64_t x1 = wv_a11[i] + wv_b12[i]; + uint64_t *os = wv_a11; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a11; uint64_t x1 = wv_a11[i] + w[i]; + uint64_t *os = wv_a11; os[i] = x1;); - uint64_t *wv_a12 = wv + d1 * 4U; - uint64_t *wv_b13 = wv + a0 * 4U; + uint64_t *wv_a12 = wv + 12U; + uint64_t *wv_b13 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a12; uint64_t x1 = wv_a12[i] ^ wv_b13[i]; + uint64_t *os = wv_a12; os[i] = x1;); uint64_t *r18 = wv_a12; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r18; uint64_t x1 = r18[i]; uint64_t x13 = x1 >> 16U | x1 << 48U; + uint64_t *os = r18; os[i] = x13;); - uint64_t *wv_a13 = wv + c * 4U; - uint64_t *wv_b14 = wv + d1 * 4U; + uint64_t *wv_a13 = wv + 8U; + uint64_t *wv_b14 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a13; uint64_t x1 = wv_a13[i] + wv_b14[i]; + uint64_t *os = wv_a13; os[i] = x1;); - uint64_t *wv_a14 = wv + b * 4U; - uint64_t *wv_b = wv + c * 4U; + uint64_t *wv_a14 = wv + 4U; + uint64_t *wv_b = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = wv_a14; uint64_t x1 = wv_a14[i] ^ wv_b[i]; + uint64_t *os = wv_a14; os[i] = x1;); uint64_t *r19 = wv_a14; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = r19; uint64_t x1 = r19[i]; uint64_t x13 = x1 >> 63U | x1 << 1U; + uint64_t *os = r19; os[i] = x13;); uint64_t *r113 = wv + 4U; uint64_t *r2 = wv + 8U; @@ -446,29 +453,29 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl 0U, 4U, 1U, - uint64_t *os = s0; uint64_t x = s0[i] ^ r0[i]; + uint64_t *os = s0; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = s0; uint64_t x = s0[i] ^ r2[i]; + uint64_t *os = s0; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = s1; uint64_t x = s1[i] ^ r1[i]; + uint64_t *os = s1; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = s1; uint64_t x = s1[i] ^ r3[i]; + uint64_t *os = s1; os[i] = x;); } @@ -505,25 +512,27 @@ void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) r3[3U] = iv7; uint8_t kk1 = (uint8_t)kk; uint8_t nn1 = (uint8_t)nn; + uint64_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 4U; uint8_t *bj = p.salt + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____0; os[i] = x;); + uint64_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 6U; uint8_t *bj = p.personal + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____1; os[i] = x;); tmp[0U] = (uint64_t)nn1 @@ -560,86 +569,6 @@ void 
Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) r1[3U] = iv7_; } -static void init_with_params(uint64_t *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint64_t tmp[8U] = { 0U }; - uint64_t *r0 = hash; - uint64_t *r1 = hash + 4U; - uint64_t *r2 = hash + 8U; - uint64_t *r3 = hash + 12U; - uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - uint8_t kk = p.key_length; - uint8_t nn = p.digest_length; - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - tmp[0U] = - (uint64_t)nn - ^ - ((uint64_t)kk - << 8U - ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); - tmp[1U] = p.node_offset; - tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; - tmp[3U] = 0ULL; - uint64_t tmp0 = tmp[0U]; - uint64_t tmp1 = tmp[1U]; - uint64_t tmp2 = tmp[2U]; - uint64_t tmp3 = tmp[3U]; - uint64_t tmp4 = tmp[4U]; - uint64_t tmp5 = tmp[5U]; - uint64_t tmp6 = tmp[6U]; - uint64_t tmp7 = tmp[7U]; - uint64_t iv0_ = iv0 ^ tmp0; - uint64_t iv1_ = iv1 ^ tmp1; - uint64_t iv2_ = iv2 ^ tmp2; - uint64_t iv3_ = iv3 ^ tmp3; - uint64_t iv4_ = iv4 ^ tmp4; - uint64_t iv5_ = iv5 ^ tmp5; - uint64_t iv6_ = iv6 ^ tmp6; - uint64_t iv7_ = iv7 ^ tmp7; - r0[0U] = iv0_; - r0[1U] = iv1_; - r0[2U] = iv2_; - r0[3U] = iv3_; - r1[0U] = iv4_; - r1[1U] = iv5_; - r1[2U] = iv6_; - r1[3U] = iv7_; -} - static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) { FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U); @@ -647,11 +576,11 @@ static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, ui memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -674,7 +603,7 @@ Hacl_Hash_Blake2b_update_multi( FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U))); uint8_t *b = blocks + i * 128U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -683,6 +612,7 @@ Hacl_Hash_Blake2b_update_last( uint32_t len, uint64_t *wv, uint64_t *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d @@ -693,7 +623,7 @@ Hacl_Hash_Blake2b_update_last( memcpy(b, last, rem * sizeof (uint8_t)); FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -727,7 +657,7 @@ update_blocks( rem = rem0; } Hacl_Hash_Blake2b_update_multi(len, wv, 
hash, prev, blocks, nb); - Hacl_Hash_Blake2b_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2b_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -756,22 +686,115 @@ void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash) uint64_t *row1 = hash + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(first + i * 8U, row0[i]);); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]);); + KRML_MAYBE_UNUSED_VAR(b); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } static Hacl_Hash_Blake2b_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); Hacl_Hash_Blake2b_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { .fst = kk.key_length, .snd = kk.digest_length, .thd = kk.last_node, .f3 = wv, .f4 = b }; + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint64_t *h = block_state.f4; + uint32_t kk20 = (uint32_t)i0.key_length; + uint8_t *k_ = key.snd; + if (!(kk20 == 0U)) + { + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_, kk20 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = h; + uint64_t *r1 = h + 4U; + uint64_t *r2 = h + 8U; + uint64_t *r3 = h + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + uint64_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + uint64_t *os = uu____0; + os[i] = x;); + uint64_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + uint64_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + 
uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -785,23 +808,9 @@ static Hacl_Hash_Blake2b_state_t Hacl_Hash_Blake2b_state_t s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2b_state_t - *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t)); - p[0U] = s; - Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; - uint8_t kk1 = p1->key_length; - uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i.key_length; - uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) - { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); - } - Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); - return p; + *p0 = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t)); + p0[0U] = s; + return p0; } /** @@ -820,14 +829,16 @@ The caller must satisfy the following requirements. */ Hacl_Hash_Blake2b_state_t -*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +*Hacl_Hash_Blake2b_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** @@ -844,7 +855,7 @@ The caller must satisfy the following requirements. 
Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t kk) { uint8_t nn = 64U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; uint8_t salt[16U] = { 0U }; uint8_t personal[16U] = { 0U }; Hacl_Hash_Blake2b_blake2_params @@ -855,7 +866,7 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - Hacl_Hash_Blake2b_state_t *s = Hacl_Hash_Blake2b_malloc_with_params_and_key(&p0, k); + Hacl_Hash_Blake2b_state_t *s = Hacl_Hash_Blake2b_malloc_with_params_and_key(&p0, false, k); return s; } @@ -872,39 +883,117 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_state_t *s) { Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } -static void -reset_raw( - Hacl_Hash_Blake2b_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +static void reset_raw(Hacl_Hash_Blake2b_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { - Hacl_Hash_Blake2b_state_t scrut = *state; - uint8_t *buf = scrut.buf; - Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; + Hacl_Hash_Blake2b_block_state_t block_state = (*state).block_state; + uint8_t *buf = (*state).buf; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; - KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i1.key_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint64_t *h = block_state.f4; + uint32_t kk20 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) + if (!(kk20 == 0U)) { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_1, kk20 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); - uint8_t kk11 = i.key_length; + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = h; + uint64_t *r1 = h + 4U; + uint64_t *r2 = h + 8U; + uint64_t *r3 = h + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk2 = 
pv.key_length; + uint8_t nn1 = pv.digest_length; + uint64_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + uint64_t *os = uu____0; + os[i] = x;); + uint64_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + uint64_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; + uint8_t kk11 = i0.key_length; uint32_t ite; if (kk11 != 0U) { @@ -914,14 +1003,13 @@ reset_raw( { ite = 0U; } - Hacl_Hash_Blake2b_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)ite; + state->total_len = total_len; } /** General-purpose re-initialization function with parameters and -key. You cannot change digest_length or key_length, meaning those values in +key. You cannot change digest_length, key_length, or last_node, meaning those values in the parameters object must be the same as originally decided via one of the malloc functions. All other values of the parameter can be changed. The behavior is unspecified if you violate this precondition. 
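/*
  A minimal usage sketch of the streaming API touched by these hunks (not part
  of the generated patch; the key, message and parameter values are arbitrary,
  and Hacl_Hash_Blake2b_free is assumed to be the matching deallocator from
  this header):

    #include "Hacl_Hash_Blake2b.h"

    uint8_t key[32U] = { 0U };
    uint8_t salt[16U] = { 0U };
    uint8_t personal[16U] = { 0U };
    Hacl_Hash_Blake2b_blake2_params p = {
      .digest_length = 32U, .key_length = 32U, .fanout = 1U, .depth = 1U,
      .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
      .inner_length = 0U, .salt = salt, .personal = personal
    };
    // last_node is fixed at allocation time; digest_length, key_length and
    // last_node must be identical on every later reset of this state.
    Hacl_Hash_Blake2b_state_t *st =
      Hacl_Hash_Blake2b_malloc_with_params_and_key(&p, false, key);
    uint8_t msg[3U] = { 'a', 'b', 'c' };
    Hacl_Hash_Blake2b_update(st, msg, 3U);
    uint8_t out[64U];
    uint8_t written = Hacl_Hash_Blake2b_digest(st, out);  // written == 32
    Hacl_Hash_Blake2b_reset_with_key_and_params(st, &p, key);
    Hacl_Hash_Blake2b_free(st);
*/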
@@ -933,8 +1021,9 @@ Hacl_Hash_Blake2b_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** @@ -957,7 +1046,7 @@ void Hacl_Hash_Blake2b_reset_with_key(Hacl_Hash_Blake2b_state_t *s, uint8_t *k) .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** @@ -979,8 +1068,8 @@ void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *s) Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Hash_Blake2b_state_t s = *state; - uint64_t total_len = s.total_len; + Hacl_Hash_Blake2b_block_state_t block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -996,10 +1085,8 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 } if (chunk_len <= 128U - sz) { - Hacl_Hash_Blake2b_state_t s1 = *state; - Hacl_Hash_Blake2b_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -1012,22 +1099,12 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Hash_Blake2b_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Hash_Blake2b_state_t s1 = *state; - Hacl_Hash_Blake2b_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -1040,9 +1117,8 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint64_t___uint64_t_ acc = block_state1.thd; - uint64_t *wv = acc.fst; - uint64_t *hash = acc.snd; + uint64_t *hash = block_state.f4; + uint64_t *wv = block_state.f3; uint32_t nb = 1U; Hacl_Hash_Blake2b_update_multi(128U, wv, @@ -1065,9 +1141,8 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____uint64_t___uint64_t_ acc = block_state1.thd; - uint64_t *wv = acc.fst; - uint64_t *hash = acc.snd; + uint64_t *hash = block_state.f4; + uint64_t *wv = block_state.f3; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_update_multi(data1_len, wv, @@ -1077,25 +1152,15 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 nb); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2b_state_t){ - .block_state = block_state1, - .buf = buf, - 
.total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 128U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Hash_Blake2b_state_t s1 = *state; - Hacl_Hash_Blake2b_block_state_t block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL) { @@ -1105,22 +1170,12 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 { sz10 = (uint32_t)(total_len10 % (uint64_t)128U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Hash_Blake2b_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Hash_Blake2b_state_t s10 = *state; - Hacl_Hash_Blake2b_block_state_t block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -1133,15 +1188,14 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint64_t___uint64_t_ acc = block_state1.thd; - uint64_t *wv = acc.fst; - uint64_t *hash = acc.snd; + uint64_t *hash = block_state.f4; + uint64_t *wv = block_state.f3; uint32_t nb = 1U; Hacl_Hash_Blake2b_update_multi(128U, wv, hash, FStar_UInt128_uint64_to_uint128(prevlen), - buf, + buf0, nb); } uint32_t ite; @@ -1159,9 +1213,8 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____uint64_t___uint64_t_ acc = block_state1.thd; - uint64_t *wv = acc.fst; - uint64_t *hash = acc.snd; + uint64_t *hash = block_state.f4; + uint64_t *wv = block_state.f3; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_update_multi(data1_len, wv, @@ -1169,17 +1222,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 FStar_UInt128_uint64_to_uint128(total_len1), data1, nb); - uint8_t *dst = buf; + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2b_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } @@ -1190,19 +1235,22 @@ at least `digest_length` bytes, where `digest_length` was determined by your choice of `malloc` function. Concretely, if you used `malloc` or `malloc_with_key`, then the expected length is 32 for S, or 64 for B (default digest length). If you used `malloc_with_params_and_key`, then the expected -length is whatever you chose for the `digest_length` field of your -parameters. +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. 
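+
+A minimal usage sketch (illustrative only; it assumes the default 64-byte
+digest, a caller-supplied `msg`/`msg_len`, and the `malloc`/`update`/`free`
+functions declared alongside this one in Hacl_Hash_Blake2b.h):
+
+  Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
+  Hacl_Hash_Blake2b_update(st, msg, msg_len);           // feed the message
+  uint8_t out[64U] = { 0U };                            // >= digest_length bytes
+  uint8_t written = Hacl_Hash_Blake2b_digest(st, out);  // written == 64 here
+  Hacl_Hash_Blake2b_free(st);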
*/ -void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2b_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2b_state_t scrut = *state; - Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + Hacl_Hash_Blake2b_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + uint8_t *buf_ = (*s).buf; + uint64_t total_len = (*s).total_len; uint32_t r; if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL) { @@ -1217,11 +1265,12 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) uint64_t b[16U] = { 0U }; Hacl_Hash_Blake2b_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - uint64_t *src_b = block_state.thd.snd; - uint64_t *dst_b = tmp_block_state.thd.snd; + { .fst = i1.key_length, .snd = i1.digest_length, .thd = i1.last_node, .f3 = wv0, .f4 = b }; + uint64_t *src_b = block_state.f4; + uint64_t *dst_b = tmp_block_state.f4; memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 128U == 0U && r > 0U) { @@ -1232,10 +1281,8 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) ite = r % 128U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; - K____uint64_t___uint64_t_ acc0 = tmp_block_state.thd; - uint64_t *wv1 = acc0.fst; - uint64_t *hash0 = acc0.snd; + uint64_t *hash0 = tmp_block_state.f4; + uint64_t *wv1 = tmp_block_state.f3; uint32_t nb = 0U; Hacl_Hash_Blake2b_update_multi(0U, wv1, @@ -1244,17 +1291,34 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - K____uint64_t___uint64_t_ acc = tmp_block_state.thd; - uint64_t *wv = acc.fst; - uint64_t *hash = acc.snd; + uint64_t *hash = tmp_block_state.f4; + uint64_t *wv = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; Hacl_Hash_Blake2b_update_last(r, wv, hash, + last_node1, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2b_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + uint8_t nn1 = tmp_block_state.snd; + Hacl_Hash_Blake2b_finish((uint32_t)nn1, dst, tmp_block_state.f4); + Hacl_Hash_Blake2b_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_info(Hacl_Hash_Blake2b_state_t *s) +{ + Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** 
@@ -1265,8 +1329,8 @@ void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state) Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - uint64_t *b = block_state.thd.snd; - uint64_t *wv = block_state.thd.fst; + uint64_t *b = block_state.f4; + uint64_t *wv = block_state.f3; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); @@ -1278,21 +1342,22 @@ void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state) */ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_copy(Hacl_Hash_Blake2b_state_t *state) { - Hacl_Hash_Blake2b_state_t scrut = *state; - Hacl_Hash_Blake2b_block_state_t block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + Hacl_Hash_Blake2b_block_state_t block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); memcpy(buf, buf0, 128U * sizeof (uint8_t)); uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); Hacl_Hash_Blake2b_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - uint64_t *src_b = block_state0.thd.snd; - uint64_t *dst_b = block_state.thd.snd; + block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = i.last_node, .f3 = wv, .f4 = b }; + uint64_t *src_b = block_state0.f4; + uint64_t *dst_b = block_state.f4; memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); Hacl_Hash_Blake2b_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1335,10 +1400,10 @@ Hacl_Hash_Blake2b_hash_with_key( Write the BLAKE2b digest of message `input` using key `key` and parameters `params` into `output`. The `key` array must be of length `params.key_length`. The `output` array must be of length -`params.digest_length`. +`params.digest_length`. 
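+
+A minimal calling sketch (illustrative only; it assumes a 32-byte digest, a
+16-byte key, zeroed salt/personalization, caller-supplied `input`/`input_len`,
+and the trailing `params`/`key` argument order as declared below):
+
+  uint8_t salt[16U] = { 0U };
+  uint8_t personal[16U] = { 0U };
+  uint8_t key[16U] = { 0U };                            // caller-chosen key bytes
+  Hacl_Hash_Blake2b_blake2_params params =
+    { .digest_length = 32U, .key_length = 16U, .fanout = 1U, .depth = 1U,
+      .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
+      .inner_length = 0U, .salt = salt, .personal = personal };
+  uint8_t output[32U] = { 0U };                         // params.digest_length bytes
+  Hacl_Hash_Blake2b_hash_with_key_and_params(output, input, input_len, params, key);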
*/ void -Hacl_Hash_Blake2b_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, @@ -1371,25 +1436,27 @@ Hacl_Hash_Blake2b_hash_with_key_and_paramas( r3[3U] = iv7; uint8_t kk = params.key_length; uint8_t nn = params.digest_length; + uint64_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 4U; uint8_t *bj = params.salt + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____0; os[i] = x;); + uint64_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 6U; uint8_t *bj = params.personal + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____1; os[i] = x;); tmp[0U] = (uint64_t)nn diff --git a/src/Hacl_Hash_Blake2b_Simd256.c b/src/Hacl_Hash_Blake2b_Simd256.c index 0afd93bc..fec92c90 100644 --- a/src/Hacl_Hash_Blake2b_Simd256.c +++ b/src/Hacl_Hash_Blake2b_Simd256.c @@ -34,6 +34,7 @@ update_block( Lib_IntVector_Intrinsics_vec256 *wv, Lib_IntVector_Intrinsics_vec256 *hash, bool flag, + bool last_node, FStar_UInt128_uint128 totlen, uint8_t *d ) @@ -43,11 +44,11 @@ update_block( 0U, 16U, 1U, - uint64_t *os = m_w; uint8_t *bj = d + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = m_w; os[i] = x;); Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero; uint64_t wv_14; @@ -59,7 +60,15 @@ update_block( { wv_14 = 0ULL; } - uint64_t wv_15 = 0ULL; + uint64_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFFFFFFFFFULL; + } + else + { + wv_15 = 0ULL; + } mask = Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen), FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)), @@ -102,40 +111,36 @@ update_block( Lib_IntVector_Intrinsics_vec256 *y = m_st + 1U; Lib_IntVector_Intrinsics_vec256 *z = m_st + 2U; Lib_IntVector_Intrinsics_vec256 *w = m_st + 3U; - uint32_t a = 0U; - uint32_t b0 = 1U; - uint32_t c0 = 2U; - uint32_t d10 = 3U; - Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * 1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * 1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], 32U); - Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * 1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * 1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], 24U); - Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * 1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * 1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], 16U); - 
Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * 1U; - wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * 1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], 63U); + Lib_IntVector_Intrinsics_vec256 *wv_a = wv; + Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + 1U; + wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b0[0U]); + wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], x[0U]); + Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + 3U; + Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv; + wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a0[0U], wv_b1[0U]); + wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a0[0U], 32U); + Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + 2U; + Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + 3U; + wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a1[0U], wv_b2[0U]); + Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + 1U; + Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + 2U; + wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a2[0U], wv_b3[0U]); + wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a2[0U], 24U); + Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv; + Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + 1U; + wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a3[0U], wv_b4[0U]); + wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a3[0U], y[0U]); + Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + 3U; + Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv; + wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a4[0U], wv_b5[0U]); + wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a4[0U], 16U); + Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + 2U; + Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + 3U; + wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a5[0U], wv_b6[0U]); + Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + 1U; + Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + 2U; + wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a6[0U], wv_b7[0U]); + wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a6[0U], 63U); Lib_IntVector_Intrinsics_vec256 *r10 = wv + 1U; Lib_IntVector_Intrinsics_vec256 *r21 = wv + 2U; Lib_IntVector_Intrinsics_vec256 *r31 = wv + 3U; @@ -151,38 +156,34 @@ update_block( Lib_IntVector_Intrinsics_vec256 v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, 3U); r31[0U] = v11; - uint32_t a0 = 0U; - uint32_t b = 1U; - uint32_t c = 2U; - uint32_t d1 = 3U; - Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * 1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv; + Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + 1U; + wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a7[0U], wv_b8[0U]); + wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a7[0U], z[0U]); + Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + 3U; + Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv; wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]); wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], 32U); - 
Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + 2U; + Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + 3U; wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + 1U; + Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + 2U; wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]); wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], 24U); - Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv; + Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + 1U; wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]); wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + 3U; + Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv; wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]); wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], 16U); - Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + 2U; + Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + 3U; wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * 1U; - Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * 1U; + Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + 1U; + Lib_IntVector_Intrinsics_vec256 *wv_b = wv + 2U; wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]); wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], 63U); Lib_IntVector_Intrinsics_vec256 *r11 = wv + 1U; @@ -240,25 +241,27 @@ Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t k r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); uint8_t kk1 = (uint8_t)kk; uint8_t nn1 = (uint8_t)nn; + uint64_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 4U; uint8_t *bj = p.salt + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____0; os[i] = x;); + uint64_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 6U; uint8_t *bj = p.personal + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____1; os[i] = x;); tmp[0U] = (uint64_t)nn1 @@ -289,75 +292,6 @@ Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t k r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); } -static void -init_with_params(Lib_IntVector_Intrinsics_vec256 *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint64_t tmp[8U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 *r0 = hash; - Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; - Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; - Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U; - uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; - uint64_t iv4 = 
Hacl_Hash_Blake2b_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; - r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - uint8_t kk = p.key_length; - uint8_t nn = p.digest_length; - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - tmp[0U] = - (uint64_t)nn - ^ - ((uint64_t)kk - << 8U - ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); - tmp[1U] = p.node_offset; - tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; - tmp[3U] = 0ULL; - uint64_t tmp0 = tmp[0U]; - uint64_t tmp1 = tmp[1U]; - uint64_t tmp2 = tmp[2U]; - uint64_t tmp3 = tmp[3U]; - uint64_t tmp4 = tmp[4U]; - uint64_t tmp5 = tmp[5U]; - uint64_t tmp6 = tmp[6U]; - uint64_t tmp7 = tmp[7U]; - uint64_t iv0_ = iv0 ^ tmp0; - uint64_t iv1_ = iv1 ^ tmp1; - uint64_t iv2_ = iv2 ^ tmp2; - uint64_t iv3_ = iv3 ^ tmp3; - uint64_t iv4_ = iv4 ^ tmp4; - uint64_t iv5_ = iv5 ^ tmp5; - uint64_t iv6_ = iv6 ^ tmp6; - uint64_t iv7_ = iv7 ^ tmp7; - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); -} - static void update_key( Lib_IntVector_Intrinsics_vec256 *wv, @@ -372,11 +306,11 @@ update_key( memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -399,7 +333,7 @@ Hacl_Hash_Blake2b_Simd256_update_multi( FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U))); uint8_t *b = blocks + i * 128U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -408,6 +342,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec256 *wv, Lib_IntVector_Intrinsics_vec256 *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d @@ -418,7 +353,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( memcpy(b, last, rem * sizeof (uint8_t)); FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -452,7 +387,7 @@ update_blocks( rem = rem0; } Hacl_Hash_Blake2b_Simd256_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2b_Simd256_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2b_Simd256_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -493,6 +428,7 @@ Hacl_Hash_Blake2b_Simd256_finish( Lib_IntVector_Intrinsics_vec256 *row1 = hash + 1U; Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]); Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]); + KRML_MAYBE_UNUSED_VAR(b); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); Lib_Memzero0_memzero(b, 64U, uint8_t, void *); @@ -538,11 
+474,11 @@ Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32( 0U, 4U, 1U, - uint64_t *os = b0; uint8_t *bj = b8 + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = b0; os[i] = x;); uint8_t b80[32U] = { 0U }; Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]); @@ -550,11 +486,11 @@ Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32( 0U, 4U, 1U, - uint64_t *os = b1; uint8_t *bj = b80 + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = b1; os[i] = x;); uint8_t b81[32U] = { 0U }; Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]); @@ -562,11 +498,11 @@ Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32( 0U, 4U, 1U, - uint64_t *os = b2; uint8_t *bj = b81 + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = b2; os[i] = x;); uint8_t b82[32U] = { 0U }; Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]); @@ -574,11 +510,11 @@ Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32( 0U, 4U, 1U, - uint64_t *os = b3; uint8_t *bj = b82 + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = b3; os[i] = x;); } @@ -593,10 +529,7 @@ Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void) } static Hacl_Hash_Blake2b_Simd256_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec256 @@ -610,7 +543,90 @@ static Hacl_Hash_Blake2b_Simd256_state_t sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); Hacl_Hash_Blake2b_Simd256_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { .fst = kk.key_length, .snd = kk.digest_length, .thd = kk.last_node, .f3 = wv, .f4 = b }; + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec256 *h = block_state.f4; + uint32_t kk20 = (uint32_t)i0.key_length; + uint8_t *k_ = key.snd; + if (!(kk20 == 0U)) + { + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_, kk20 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = h; + Lib_IntVector_Intrinsics_vec256 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = h + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + uint64_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 8U; + uint64_t u = load64_le(bj); 
+ uint64_t r4 = u; + uint64_t x = r4; + uint64_t *os = uu____0; + os[i] = x;); + uint64_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + uint64_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -624,60 +640,60 @@ static Hacl_Hash_Blake2b_Simd256_state_t Hacl_Hash_Blake2b_Simd256_state_t s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2b_Simd256_state_t - *p = + *p0 = (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof ( Hacl_Hash_Blake2b_Simd256_state_t )); - p[0U] = s; - Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; - uint8_t kk1 = p1->key_length; - uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i.key_length; - uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) - { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); - } - Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); - return p; + p0[0U] = s; + return p0; } /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (256 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 256 for S, 64 for B. +- The digest_length must not exceed 256 for S, 64 for B. 
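+
+A minimal allocation sketch (illustrative only; it assumes a 16-byte key, the
+default 64-byte digest, `last_node = false`, and zeroed salt/personalization):
+
+  uint8_t salt[16U] = { 0U };
+  uint8_t personal[16U] = { 0U };
+  uint8_t key[16U] = { 0U };                            // caller-chosen key bytes
+  Hacl_Hash_Blake2b_blake2_params p =
+    { .digest_length = 64U, .key_length = 16U, .fanout = 1U, .depth = 1U,
+      .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
+      .inner_length = 0U, .salt = salt, .personal = personal };
+  Hacl_Hash_Blake2b_Simd256_state_t
+  *st = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(&p, false, key);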
+ */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (256 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 256 for S, 64 for B. + */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk) { uint8_t nn = 64U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; - uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); - uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; Hacl_Hash_Blake2b_blake2_params p = { @@ -685,21 +701,16 @@ Hacl_Hash_Blake2b_Simd256_state_t .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; - Hacl_Hash_Blake2b_blake2_params - *p0 = - (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); - p0[0U] = p; + Hacl_Hash_Blake2b_blake2_params p0 = p; Hacl_Hash_Blake2b_Simd256_state_t - *s = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(p0, k); - Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; - KRML_HOST_FREE(p1.salt); - KRML_HOST_FREE(p1.personal); - KRML_HOST_FREE(p0); + *s = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(&p0, false, k); return s; } /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. 
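+
+A minimal hashing sketch (illustrative only; it assumes a caller-supplied
+`msg`/`msg_len` and the streaming functions defined in this file):
+
+  Hacl_Hash_Blake2b_Simd256_state_t *st = Hacl_Hash_Blake2b_Simd256_malloc();
+  Hacl_Hash_Blake2b_Simd256_update(st, msg, msg_len);
+  uint8_t out[64U] = { 0U };                            // default digest length
+  uint8_t written = Hacl_Hash_Blake2b_Simd256_digest(st, out);  // written == 64 here
+  Hacl_Hash_Blake2b_Simd256_free(st);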
*/ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void) { @@ -709,39 +720,106 @@ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_Simd256_state_t *s) { Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } static void -reset_raw( - Hacl_Hash_Blake2b_Simd256_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +reset_raw(Hacl_Hash_Blake2b_Simd256_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { - Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; - uint8_t *buf = scrut.buf; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*state).block_state; + uint8_t *buf = (*state).buf; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; - KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i1.key_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec256 *h = block_state.f4; + uint32_t kk20 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) + if (!(kk20 == 0U)) { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_1, kk20 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); - uint8_t kk11 = i.key_length; + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = h; + Lib_IntVector_Intrinsics_vec256 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = h + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + uint64_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + uint64_t *os = uu____0; + os[i] = x;); + uint64_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + uint64_t *os = 
uu____1; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); + uint8_t kk11 = i0.key_length; uint32_t ite; if (kk11 != 0U) { @@ -751,15 +829,16 @@ reset_raw( { ite = 0U; } - Hacl_Hash_Blake2b_Simd256_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)ite; + state->total_len = total_len; } /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( @@ -768,15 +847,17 @@ Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k) { @@ -791,11 +872,16 @@ void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. 
The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s) { @@ -803,7 +889,7 @@ void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s) } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_Simd256_update( @@ -812,8 +898,8 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t chunk_len ) { - Hacl_Hash_Blake2b_Simd256_state_t s = *state; - uint64_t total_len = s.total_len; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -829,10 +915,8 @@ Hacl_Hash_Blake2b_Simd256_update( } if (chunk_len <= 128U - sz) { - Hacl_Hash_Blake2b_Simd256_state_t s1 = *state; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -845,22 +929,12 @@ Hacl_Hash_Blake2b_Simd256_update( uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Hash_Blake2b_Simd256_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Hash_Blake2b_Simd256_state_t s1 = *state; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -873,10 +947,8 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec256 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.f3; uint32_t nb = 1U; Hacl_Hash_Blake2b_Simd256_update_multi(128U, wv, @@ -899,9 +971,8 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec256 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.f3; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_Simd256_update_multi(data1_len, wv, @@ -911,25 +982,15 @@ Hacl_Hash_Blake2b_Simd256_update( nb); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2b_Simd256_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + 
state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 128U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Hash_Blake2b_Simd256_state_t s1 = *state; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL) { @@ -939,22 +1000,12 @@ Hacl_Hash_Blake2b_Simd256_update( { sz10 = (uint32_t)(total_len10 % (uint64_t)128U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Hash_Blake2b_Simd256_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Hash_Blake2b_Simd256_state_t s10 = *state; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -967,16 +1018,14 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec256 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.f3; uint32_t nb = 1U; Hacl_Hash_Blake2b_Simd256_update_multi(128U, wv, hash, FStar_UInt128_uint64_to_uint128(prevlen), - buf, + buf0, nb); } uint32_t ite; @@ -994,9 +1043,8 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec256 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.f3; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_Simd256_update_multi(data1_len, wv, @@ -1004,35 +1052,35 @@ Hacl_Hash_Blake2b_Simd256_update( FStar_UInt128_uint64_to_uint128(total_len1), data1, nb); - uint8_t *dst = buf; + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2b_Simd256_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 256 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. 
When in doubt, callers +can pass an array of size HACL_BLAKE2B_256_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void -Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state; + uint8_t *buf_ = (*s).buf; + uint64_t total_len = (*s).total_len; uint32_t r; if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL) { @@ -1047,11 +1095,12 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; Hacl_Hash_Blake2b_Simd256_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - Lib_IntVector_Intrinsics_vec256 *src_b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.thd.snd; + { .fst = i1.key_length, .snd = i1.digest_length, .thd = i1.last_node, .f3 = wv0, .f4 = b }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state.f4; + Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.f4; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 128U == 0U && r > 0U) { @@ -1062,11 +1111,8 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 ite = r % 128U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc0 = tmp_block_state.thd; - Lib_IntVector_Intrinsics_vec256 *wv1 = acc0.fst; - Lib_IntVector_Intrinsics_vec256 *hash0 = acc0.snd; + Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.f3; uint32_t nb = 0U; Hacl_Hash_Blake2b_Simd256_update_multi(0U, wv1, @@ -1075,18 +1121,34 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc = tmp_block_state.thd; - Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; Hacl_Hash_Blake2b_Simd256_update_last(r, wv, hash, + last_node1, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2b_Simd256_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + uint8_t nn1 = tmp_block_state.snd; + 
Hacl_Hash_Blake2b_Simd256_finish((uint32_t)nn1, dst, tmp_block_state.f4); + Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_Simd256_info(Hacl_Hash_Blake2b_Simd256_state_t *s) +{ + Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1097,8 +1159,8 @@ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state) Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec256 *b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec256 *wv = block_state.thd.fst; + Lib_IntVector_Intrinsics_vec256 *b = block_state.f4; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.f3; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); @@ -1106,18 +1168,18 @@ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state) } /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state) { - Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; - Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); memcpy(buf, buf0, 128U * sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec256 @@ -1131,9 +1193,10 @@ Hacl_Hash_Blake2b_Simd256_state_t sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); Hacl_Hash_Blake2b_Simd256_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - Lib_IntVector_Intrinsics_vec256 *src_b = block_state0.thd.snd; - Lib_IntVector_Intrinsics_vec256 *dst_b = block_state.thd.snd; + block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = i.last_node, .f3 = wv, .f4 = b }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state0.f4; + Lib_IntVector_Intrinsics_vec256 *dst_b = block_state.f4; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); Hacl_Hash_Blake2b_Simd256_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1175,8 +1238,14 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256, void *); } +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. 
The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ void -Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, @@ -1203,25 +1272,27 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); uint8_t kk = params.key_length; uint8_t nn = params.digest_length; + uint64_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 4U; uint8_t *bj = params.salt + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____0; os[i] = x;); + uint64_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint64_t *os = tmp + 6U; uint8_t *bj = params.personal + i * 8U; uint64_t u = load64_le(bj); uint64_t r = u; uint64_t x = r; + uint64_t *os = uu____1; os[i] = x;); tmp[0U] = (uint64_t)nn diff --git a/src/Hacl_Hash_Blake2s.c b/src/Hacl_Hash_Blake2s.c index 6e19d83d..60cc5c7c 100644 --- a/src/Hacl_Hash_Blake2s.c +++ b/src/Hacl_Hash_Blake2s.c @@ -30,18 +30,25 @@ #include "lib_memzero0.h" static inline void -update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d) +update_block( + uint32_t *wv, + uint32_t *hash, + bool flag, + bool last_node, + uint64_t totlen, + uint8_t *d +) { uint32_t m_w[16U] = { 0U }; KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = m_w; uint8_t *bj = d + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = m_w; os[i] = x;); uint32_t mask[4U] = { 0U }; uint32_t wv_14; @@ -53,7 +60,15 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * { wv_14 = 0U; } - uint32_t wv_15 = 0U; + uint32_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFU; + } + else + { + wv_15 = 0U; + } mask[0U] = (uint32_t)totlen; mask[1U] = (uint32_t)(totlen >> 32U); mask[2U] = wv_14; @@ -64,8 +79,8 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * 0U, 4U, 1U, - uint32_t *os = wv3; uint32_t x = wv3[i] ^ mask[i]; + uint32_t *os = wv3; os[i] = x;); KRML_MAYBE_FOR10(i0, 0U, @@ -125,131 +140,127 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * uint32_t *y = m_st + 4U; uint32_t *z = m_st + 8U; uint32_t *w = m_st + 12U; - uint32_t a = 0U; - uint32_t b0 = 1U; - uint32_t c0 = 2U; - uint32_t d10 = 3U; - uint32_t *wv_a0 = wv + a * 4U; - uint32_t *wv_b0 = wv + b0 * 4U; + uint32_t *wv_a = wv; + uint32_t *wv_b0 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + wv_b0[i]; + uint32_t x1 = wv_a[i] + wv_b0[i]; + uint32_t *os = wv_a; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + x[i]; + uint32_t x1 = wv_a[i] + x[i]; + uint32_t *os = wv_a; os[i] = x1;); - uint32_t *wv_a1 = wv + d10 * 4U; - uint32_t *wv_b1 = wv + a * 4U; + uint32_t *wv_a0 = wv + 12U; + uint32_t *wv_b1 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a1; - uint32_t x1 = wv_a1[i] ^ wv_b1[i]; + uint32_t x1 = wv_a0[i] ^ wv_b1[i]; + uint32_t *os = wv_a0; os[i] = x1;); - uint32_t *r10 = wv_a1; + uint32_t *r10 = wv_a0; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r10; uint32_t x1 = r10[i]; uint32_t x10 = x1 >> 16U | x1 << 16U; + uint32_t *os = r10; os[i] = x10;); - uint32_t *wv_a2 = wv + c0 * 4U; - uint32_t *wv_b2 = wv + d10 * 4U; + uint32_t *wv_a1 = wv + 8U; + uint32_t *wv_b2 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os 
= wv_a2; - uint32_t x1 = wv_a2[i] + wv_b2[i]; + uint32_t x1 = wv_a1[i] + wv_b2[i]; + uint32_t *os = wv_a1; os[i] = x1;); - uint32_t *wv_a3 = wv + b0 * 4U; - uint32_t *wv_b3 = wv + c0 * 4U; + uint32_t *wv_a2 = wv + 4U; + uint32_t *wv_b3 = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a3; - uint32_t x1 = wv_a3[i] ^ wv_b3[i]; + uint32_t x1 = wv_a2[i] ^ wv_b3[i]; + uint32_t *os = wv_a2; os[i] = x1;); - uint32_t *r12 = wv_a3; + uint32_t *r12 = wv_a2; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r12; uint32_t x1 = r12[i]; uint32_t x10 = x1 >> 12U | x1 << 20U; + uint32_t *os = r12; os[i] = x10;); - uint32_t *wv_a4 = wv + a * 4U; - uint32_t *wv_b4 = wv + b0 * 4U; + uint32_t *wv_a3 = wv; + uint32_t *wv_b4 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + wv_b4[i]; + uint32_t x1 = wv_a3[i] + wv_b4[i]; + uint32_t *os = wv_a3; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + y[i]; + uint32_t x1 = wv_a3[i] + y[i]; + uint32_t *os = wv_a3; os[i] = x1;); - uint32_t *wv_a5 = wv + d10 * 4U; - uint32_t *wv_b5 = wv + a * 4U; + uint32_t *wv_a4 = wv + 12U; + uint32_t *wv_b5 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a5; - uint32_t x1 = wv_a5[i] ^ wv_b5[i]; + uint32_t x1 = wv_a4[i] ^ wv_b5[i]; + uint32_t *os = wv_a4; os[i] = x1;); - uint32_t *r13 = wv_a5; + uint32_t *r13 = wv_a4; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r13; uint32_t x1 = r13[i]; uint32_t x10 = x1 >> 8U | x1 << 24U; + uint32_t *os = r13; os[i] = x10;); - uint32_t *wv_a6 = wv + c0 * 4U; - uint32_t *wv_b6 = wv + d10 * 4U; + uint32_t *wv_a5 = wv + 8U; + uint32_t *wv_b6 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a6; - uint32_t x1 = wv_a6[i] + wv_b6[i]; + uint32_t x1 = wv_a5[i] + wv_b6[i]; + uint32_t *os = wv_a5; os[i] = x1;); - uint32_t *wv_a7 = wv + b0 * 4U; - uint32_t *wv_b7 = wv + c0 * 4U; + uint32_t *wv_a6 = wv + 4U; + uint32_t *wv_b7 = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a7; - uint32_t x1 = wv_a7[i] ^ wv_b7[i]; + uint32_t x1 = wv_a6[i] ^ wv_b7[i]; + uint32_t *os = wv_a6; os[i] = x1;); - uint32_t *r14 = wv_a7; + uint32_t *r14 = wv_a6; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r14; uint32_t x1 = r14[i]; uint32_t x10 = x1 >> 7U | x1 << 25U; + uint32_t *os = r14; os[i] = x10;); uint32_t *r15 = wv + 4U; uint32_t *r21 = wv + 8U; @@ -281,131 +292,127 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * r112[1U] = x12; r112[2U] = x22; r112[3U] = x32; - uint32_t a0 = 0U; - uint32_t b = 1U; - uint32_t c = 2U; - uint32_t d1 = 3U; - uint32_t *wv_a = wv + a0 * 4U; - uint32_t *wv_b8 = wv + b * 4U; + uint32_t *wv_a7 = wv; + uint32_t *wv_b8 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + wv_b8[i]; + uint32_t x1 = wv_a7[i] + wv_b8[i]; + uint32_t *os = wv_a7; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + z[i]; + uint32_t x1 = wv_a7[i] + z[i]; + uint32_t *os = wv_a7; os[i] = x1;); - uint32_t *wv_a8 = wv + d1 * 4U; - uint32_t *wv_b9 = wv + a0 * 4U; + uint32_t *wv_a8 = wv + 12U; + uint32_t *wv_b9 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a8; uint32_t x1 = wv_a8[i] ^ wv_b9[i]; + uint32_t *os = wv_a8; os[i] = x1;); uint32_t *r16 = wv_a8; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r16; uint32_t x1 = r16[i]; uint32_t x13 = x1 >> 16U | x1 << 16U; + uint32_t *os = r16; os[i] = x13;); - uint32_t *wv_a9 = wv + c * 4U; - uint32_t *wv_b10 = wv + d1 * 4U; + 
uint32_t *wv_a9 = wv + 8U; + uint32_t *wv_b10 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a9; uint32_t x1 = wv_a9[i] + wv_b10[i]; + uint32_t *os = wv_a9; os[i] = x1;); - uint32_t *wv_a10 = wv + b * 4U; - uint32_t *wv_b11 = wv + c * 4U; + uint32_t *wv_a10 = wv + 4U; + uint32_t *wv_b11 = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a10; uint32_t x1 = wv_a10[i] ^ wv_b11[i]; + uint32_t *os = wv_a10; os[i] = x1;); uint32_t *r17 = wv_a10; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r17; uint32_t x1 = r17[i]; uint32_t x13 = x1 >> 12U | x1 << 20U; + uint32_t *os = r17; os[i] = x13;); - uint32_t *wv_a11 = wv + a0 * 4U; - uint32_t *wv_b12 = wv + b * 4U; + uint32_t *wv_a11 = wv; + uint32_t *wv_b12 = wv + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a11; uint32_t x1 = wv_a11[i] + wv_b12[i]; + uint32_t *os = wv_a11; os[i] = x1;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a11; uint32_t x1 = wv_a11[i] + w[i]; + uint32_t *os = wv_a11; os[i] = x1;); - uint32_t *wv_a12 = wv + d1 * 4U; - uint32_t *wv_b13 = wv + a0 * 4U; + uint32_t *wv_a12 = wv + 12U; + uint32_t *wv_b13 = wv; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a12; uint32_t x1 = wv_a12[i] ^ wv_b13[i]; + uint32_t *os = wv_a12; os[i] = x1;); uint32_t *r18 = wv_a12; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r18; uint32_t x1 = r18[i]; uint32_t x13 = x1 >> 8U | x1 << 24U; + uint32_t *os = r18; os[i] = x13;); - uint32_t *wv_a13 = wv + c * 4U; - uint32_t *wv_b14 = wv + d1 * 4U; + uint32_t *wv_a13 = wv + 8U; + uint32_t *wv_b14 = wv + 12U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a13; uint32_t x1 = wv_a13[i] + wv_b14[i]; + uint32_t *os = wv_a13; os[i] = x1;); - uint32_t *wv_a14 = wv + b * 4U; - uint32_t *wv_b = wv + c * 4U; + uint32_t *wv_a14 = wv + 4U; + uint32_t *wv_b = wv + 8U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = wv_a14; uint32_t x1 = wv_a14[i] ^ wv_b[i]; + uint32_t *os = wv_a14; os[i] = x1;); uint32_t *r19 = wv_a14; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = r19; uint32_t x1 = r19[i]; uint32_t x13 = x1 >> 7U | x1 << 25U; + uint32_t *os = r19; os[i] = x13;); uint32_t *r113 = wv + 4U; uint32_t *r2 = wv + 8U; @@ -447,29 +454,29 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * 0U, 4U, 1U, - uint32_t *os = s0; uint32_t x = s0[i] ^ r0[i]; + uint32_t *os = s0; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = s0; uint32_t x = s0[i] ^ r2[i]; + uint32_t *os = s0; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = s1; uint32_t x = s1[i] ^ r1[i]; + uint32_t *os = s1; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = s1; uint32_t x = s1[i] ^ r3[i]; + uint32_t *os = s1; os[i] = x;); } @@ -504,25 +511,27 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; + uint32_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 4U; uint8_t *bj = p.salt + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); + uint32_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 6U; uint8_t *bj = p.personal + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); tmp[0U] = (uint32_t)(uint8_t)nn @@ -558,83 +567,6 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) r1[3U] = iv7_; } -static void init_with_params(uint32_t *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint32_t tmp[8U] = { 0U }; - 
uint32_t *r0 = hash; - uint32_t *r1 = hash + 4U; - uint32_t *r2 = hash + 8U; - uint32_t *r3 = hash + 12U; - uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - tmp[0U] = - (uint32_t)p.digest_length - ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); - tmp[1U] = p.leaf_length; - tmp[2U] = (uint32_t)p.node_offset; - tmp[3U] = - (uint32_t)(p.node_offset >> 32U) - ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); - uint32_t tmp0 = tmp[0U]; - uint32_t tmp1 = tmp[1U]; - uint32_t tmp2 = tmp[2U]; - uint32_t tmp3 = tmp[3U]; - uint32_t tmp4 = tmp[4U]; - uint32_t tmp5 = tmp[5U]; - uint32_t tmp6 = tmp[6U]; - uint32_t tmp7 = tmp[7U]; - uint32_t iv0_ = iv0 ^ tmp0; - uint32_t iv1_ = iv1 ^ tmp1; - uint32_t iv2_ = iv2 ^ tmp2; - uint32_t iv3_ = iv3 ^ tmp3; - uint32_t iv4_ = iv4 ^ tmp4; - uint32_t iv5_ = iv5 ^ tmp5; - uint32_t iv6_ = iv6 ^ tmp6; - uint32_t iv7_ = iv7 ^ tmp7; - r0[0U] = iv0_; - r0[1U] = iv1_; - r0[2U] = iv2_; - r0[3U] = iv3_; - r1[0U] = iv4_; - r1[1U] = iv5_; - r1[2U] = iv6_; - r1[3U] = iv7_; -} - static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) { uint64_t lb = (uint64_t)64U; @@ -642,11 +574,11 @@ static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, ui memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -666,7 +598,7 @@ Hacl_Hash_Blake2s_update_multi( { uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U); uint8_t *b = blocks + i * 64U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -675,6 +607,7 @@ Hacl_Hash_Blake2s_update_last( uint32_t len, uint32_t *wv, uint32_t *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d @@ -684,7 +617,7 @@ Hacl_Hash_Blake2s_update_last( uint8_t *last = d + len - rem; memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -712,7 +645,7 @@ update_blocks(uint32_t len, uint32_t *wv, uint32_t *hash, uint64_t prev, uint8_t rem = rem0; } Hacl_Hash_Blake2s_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2s_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2s_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -741,22 +674,112 @@ void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t 
*hash) uint32_t *row1 = hash + 4U; KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(first + i * 4U, row0[i]);); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]);); + KRML_MAYBE_UNUSED_VAR(b); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); Lib_Memzero0_memzero(b, 32U, uint8_t, void *); } static Hacl_Hash_Blake2s_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); Hacl_Hash_Blake2s_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { .fst = kk.key_length, .snd = kk.digest_length, .thd = kk.last_node, .f3 = wv, .f4 = b }; + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint32_t *h = block_state.f4; + uint32_t kk2 = (uint32_t)i0.key_length; + uint8_t *k_ = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 = h; + uint32_t *r1 = h + 4U; + uint32_t *r2 = h + 8U; + uint32_t *r3 = h + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint32_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + uint32_t *os = uu____0; + os[i] = x;); + uint32_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + uint32_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; uint8_t kk10 = 
kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -770,53 +793,56 @@ static Hacl_Hash_Blake2s_state_t Hacl_Hash_Blake2s_state_t s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2s_state_t - *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t)); - p[0U] = s; - Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; - uint8_t kk1 = p1->key_length; - uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i.key_length; - uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) - { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); - } - Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); - return p; + *p0 = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t)); + p0[0U] = s; + return p0; } /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (32 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B. + */ Hacl_Hash_Blake2s_state_t -*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +*Hacl_Hash_Blake2s_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (32 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. 
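For illustration, here is a minimal keyed-hashing sketch against the allocation, reset and digest contracts documented above. It is an editorial example, not part of the generated patch; the header name, the `Hacl_Streaming_Types_Success` check, and the chosen parameter values (16-byte key, 16-byte digest, sequential-mode fanout/depth) are assumptions.

#include <stdint.h>
#include "Hacl_Hash_Blake2s.h"   /* assumed header for the declarations used below */

static void example_keyed_blake2s(uint8_t *msg, uint32_t msg_len, uint8_t mac[16])
{
  uint8_t key[16] = { 0 };      /* must match p.key_length (at most 32 for S) */
  uint8_t salt[8] = { 0 };      /* Blake2s salt: 8 bytes */
  uint8_t personal[8] = { 0 };  /* Blake2s personalization: 8 bytes */
  Hacl_Hash_Blake2b_blake2_params p = {
    .digest_length = 16U, .key_length = 16U, .fanout = 1U, .depth = 1U,
    .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U,
    .salt = salt, .personal = personal
  };
  /* last_node = false: ordinary sequential hashing, no tree mode. */
  Hacl_Hash_Blake2s_state_t *st =
    Hacl_Hash_Blake2s_malloc_with_params_and_key(&p, false, key);
  if (Hacl_Hash_Blake2s_update(st, msg, msg_len) == Hacl_Streaming_Types_Success)
  {
    Hacl_Hash_Blake2s_digest(st, mac);  /* writes p.digest_length = 16 bytes */
  }
  /* Any later reset must keep key_length, digest_length and last_node unchanged. */
  Hacl_Hash_Blake2s_reset_with_key_and_params(st, &p, key);
  Hacl_Hash_Blake2s_free(st);
}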
+ */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk) { uint8_t nn = 32U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; - uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); - uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; Hacl_Hash_Blake2b_blake2_params p = { @@ -824,20 +850,15 @@ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; - Hacl_Hash_Blake2b_blake2_params - *p0 = - (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); - p0[0U] = p; - Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_params_and_key(p0, k); - Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; - KRML_HOST_FREE(p1.salt); - KRML_HOST_FREE(p1.personal); - KRML_HOST_FREE(p0); + Hacl_Hash_Blake2b_blake2_params p0 = p; + Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_params_and_key(&p0, false, k); return s; } /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) { @@ -847,28 +868,29 @@ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_state_t *s) { Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } -static void -reset_raw( - Hacl_Hash_Blake2s_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +static void reset_raw(Hacl_Hash_Blake2s_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { - Hacl_Hash_Blake2s_state_t scrut = *state; - uint8_t *buf = scrut.buf; - Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; + Hacl_Hash_Blake2s_block_state_t block_state = (*state).block_state; + uint8_t *buf = (*state).buf; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; - KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint32_t *h = block_state.f4; uint32_t kk2 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; if (!(kk2 == 0U)) @@ -878,8 +900,82 @@ reset_raw( memcpy(buf, k_1, kk2 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); - uint8_t kk11 = i.key_length; + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 
= h; + uint32_t *r1 = h + 4U; + uint32_t *r2 = h + 8U; + uint32_t *r3 = h + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint32_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + uint32_t *os = uu____0; + os[i] = x;); + uint32_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + uint32_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; + uint8_t kk11 = i0.key_length; uint32_t ite; if (kk11 != 0U) { @@ -889,15 +985,16 @@ reset_raw( { ite = 0U; } - Hacl_Hash_Blake2s_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)ite; + state->total_len = total_len; } /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key_and_params( @@ -906,15 +1003,17 @@ Hacl_Hash_Blake2s_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. 
The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k) { @@ -929,11 +1028,16 @@ void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k) .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s) { @@ -941,13 +1045,13 @@ void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s) } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Hash_Blake2s_state_t s = *state; - uint64_t total_len = s.total_len; + Hacl_Hash_Blake2s_block_state_t block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -963,10 +1067,8 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 } if (chunk_len <= 64U - sz) { - Hacl_Hash_Blake2s_state_t s1 = *state; - Hacl_Hash_Blake2s_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -979,22 +1081,12 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Hash_Blake2s_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Hash_Blake2s_state_t s1 = *state; - Hacl_Hash_Blake2s_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1007,9 +1099,8 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint32_t___uint32_t_ acc = block_state1.thd; - uint32_t *wv = acc.fst; - uint32_t *hash = acc.snd; + uint32_t *hash = block_state.f4; + uint32_t *wv = block_state.f3; uint32_t nb = 1U; Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ 
-1027,32 +1118,21 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____uint32_t___uint32_t_ acc = block_state1.thd; - uint32_t *wv = acc.fst; - uint32_t *hash = acc.snd; + uint32_t *hash = block_state.f4; + uint32_t *wv = block_state.f3; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2s_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 64U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Hash_Blake2s_state_t s1 = *state; - Hacl_Hash_Blake2s_block_state_t block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL) { @@ -1062,22 +1142,12 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 { sz10 = (uint32_t)(total_len10 % (uint64_t)64U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Hash_Blake2s_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Hash_Blake2s_state_t s10 = *state; - Hacl_Hash_Blake2s_block_state_t block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1090,11 +1160,10 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint32_t___uint32_t_ acc = block_state1.thd; - uint32_t *wv = acc.fst; - uint32_t *hash = acc.snd; + uint32_t *hash = block_state.f4; + uint32_t *wv = block_state.f3; uint32_t nb = 1U; - Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb); + Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf0, nb); } uint32_t ite; if @@ -1111,39 +1180,39 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____uint32_t___uint32_t_ acc = block_state1.thd; - uint32_t *wv = acc.fst; - uint32_t *hash = acc.snd; + uint32_t *hash = block_state.f4; + uint32_t *wv = block_state.f3; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb); - uint8_t *dst = buf; + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2s_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } /** - Finish function when there is no key + Digest function. 
This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2s_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2s_state_t scrut = *state; - Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + Hacl_Hash_Blake2s_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + uint8_t *buf_ = (*s).buf; + uint64_t total_len = (*s).total_len; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -1158,11 +1227,12 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) uint32_t b[16U] = { 0U }; Hacl_Hash_Blake2s_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - uint32_t *src_b = block_state.thd.snd; - uint32_t *dst_b = tmp_block_state.thd.snd; + { .fst = i1.key_length, .snd = i1.digest_length, .thd = i1.last_node, .f3 = wv0, .f4 = b }; + uint32_t *src_b = block_state.f4; + uint32_t *dst_b = tmp_block_state.f4; memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 64U == 0U && r > 0U) { @@ -1173,19 +1243,33 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) ite = r % 64U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; - K____uint32_t___uint32_t_ acc0 = tmp_block_state.thd; - uint32_t *wv1 = acc0.fst; - uint32_t *hash0 = acc0.snd; + uint32_t *hash0 = tmp_block_state.f4; + uint32_t *wv1 = tmp_block_state.f3; uint32_t nb = 0U; Hacl_Hash_Blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - K____uint32_t___uint32_t_ acc = tmp_block_state.thd; - uint32_t *wv = acc.fst; - uint32_t *hash = acc.snd; - Hacl_Hash_Blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2s_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + uint32_t *hash = tmp_block_state.f4; + uint32_t *wv = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; + Hacl_Hash_Blake2s_update_last(r, wv, hash, last_node1, prev_len_last, r, buf_last); + uint8_t nn1 = tmp_block_state.snd; + Hacl_Hash_Blake2s_finish((uint32_t)nn1, dst, tmp_block_state.f4); + Hacl_Hash_Blake2s_block_state_t block_state1 = (*s).block_state; 
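  /* The block state is re-read below only to rebuild the index; the digest was
     already written to `dst` above, and the return value is its digest_length. */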
+ bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_info(Hacl_Hash_Blake2s_state_t *s) +{ + Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1196,8 +1280,8 @@ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state) Hacl_Hash_Blake2s_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - uint32_t *b = block_state.thd.snd; - uint32_t *wv = block_state.thd.fst; + uint32_t *b = block_state.f4; + uint32_t *wv = block_state.f3; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); @@ -1205,25 +1289,26 @@ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state) } /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state) { - Hacl_Hash_Blake2s_state_t scrut = *state; - Hacl_Hash_Blake2s_block_state_t block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + Hacl_Hash_Blake2s_block_state_t block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); Hacl_Hash_Blake2s_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - uint32_t *src_b = block_state0.thd.snd; - uint32_t *dst_b = block_state.thd.snd; + block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = i.last_node, .f3 = wv, .f4 = b }; + uint32_t *src_b = block_state0.f4; + uint32_t *dst_b = block_state.f4; memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); Hacl_Hash_Blake2s_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1262,8 +1347,14 @@ Hacl_Hash_Blake2s_hash_with_key( Lib_Memzero0_memzero(b, 16U, uint32_t, void *); } +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
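As a concrete illustration of this contract (an editorial sketch, not part of the patch), a one-shot call could look as follows. The header name is assumed, and the trailing arguments are assumed to be the parameter struct passed by value followed by the key pointer, matching the declaration of this function in the corresponding header.

#include <stdint.h>
#include "Hacl_Hash_Blake2s.h"   /* assumed header */

static void example_one_shot(uint8_t *out, uint8_t *msg, uint32_t msg_len)
{
  uint8_t key[16] = { 0 };     /* length must equal params.key_length */
  uint8_t salt[8] = { 0 };
  uint8_t personal[8] = { 0 };
  Hacl_Hash_Blake2b_blake2_params params = {
    .digest_length = 32U, .key_length = 16U, .fanout = 1U, .depth = 1U,
    .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U,
    .salt = salt, .personal = personal
  };
  /* `out` must hold params.digest_length = 32 bytes. */
  Hacl_Hash_Blake2s_hash_with_key_and_params(out, msg, msg_len, params, key);
}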
+*/ void -Hacl_Hash_Blake2s_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, @@ -1294,25 +1385,27 @@ Hacl_Hash_Blake2s_hash_with_key_and_paramas( r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; + uint32_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 4U; uint8_t *bj = params.salt + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); + uint32_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 6U; uint8_t *bj = params.personal + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); tmp[0U] = (uint32_t)params.digest_length diff --git a/src/Hacl_Hash_Blake2s_Simd128.c b/src/Hacl_Hash_Blake2s_Simd128.c index c02da8fa..57fd9b25 100644 --- a/src/Hacl_Hash_Blake2s_Simd128.c +++ b/src/Hacl_Hash_Blake2s_Simd128.c @@ -34,6 +34,7 @@ update_block( Lib_IntVector_Intrinsics_vec128 *wv, Lib_IntVector_Intrinsics_vec128 *hash, bool flag, + bool last_node, uint64_t totlen, uint8_t *d ) @@ -43,11 +44,11 @@ update_block( 0U, 16U, 1U, - uint32_t *os = m_w; uint8_t *bj = d + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = m_w; os[i] = x;); Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero; uint32_t wv_14; @@ -59,7 +60,15 @@ update_block( { wv_14 = 0U; } - uint32_t wv_15 = 0U; + uint32_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFU; + } + else + { + wv_15 = 0U; + } mask = Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen, (uint32_t)(totlen >> 32U), @@ -102,40 +111,36 @@ update_block( Lib_IntVector_Intrinsics_vec128 *y = m_st + 1U; Lib_IntVector_Intrinsics_vec128 *z = m_st + 2U; Lib_IntVector_Intrinsics_vec128 *w = m_st + 3U; - uint32_t a = 0U; - uint32_t b0 = 1U; - uint32_t c0 = 2U; - uint32_t d10 = 3U; - Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * 1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * 1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], 16U); - Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * 1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * 1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], 12U); - Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * 1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * 1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], 8U); - Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * 1U; - wv_a6[0U] = 
Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * 1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], 7U); + Lib_IntVector_Intrinsics_vec128 *wv_a = wv; + Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + 1U; + wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b0[0U]); + wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], x[0U]); + Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + 3U; + Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv; + wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a0[0U], wv_b1[0U]); + wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a0[0U], 16U); + Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + 2U; + Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + 3U; + wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a1[0U], wv_b2[0U]); + Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + 1U; + Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + 2U; + wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a2[0U], wv_b3[0U]); + wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a2[0U], 12U); + Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv; + Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + 1U; + wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a3[0U], wv_b4[0U]); + wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a3[0U], y[0U]); + Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + 3U; + Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv; + wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a4[0U], wv_b5[0U]); + wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a4[0U], 8U); + Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + 2U; + Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + 3U; + wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a5[0U], wv_b6[0U]); + Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + 1U; + Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + 2U; + wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a6[0U], wv_b7[0U]); + wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a6[0U], 7U); Lib_IntVector_Intrinsics_vec128 *r10 = wv + 1U; Lib_IntVector_Intrinsics_vec128 *r21 = wv + 2U; Lib_IntVector_Intrinsics_vec128 *r31 = wv + 3U; @@ -151,38 +156,34 @@ update_block( Lib_IntVector_Intrinsics_vec128 v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, 3U); r31[0U] = v11; - uint32_t a0 = 0U; - uint32_t b = 1U; - uint32_t c = 2U; - uint32_t d1 = 3U; - Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * 1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * 1U; + Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv; + Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + 1U; + wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a7[0U], wv_b8[0U]); + wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a7[0U], z[0U]); + Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + 3U; + Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv; wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]); wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], 16U); - Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * 1U; + 
Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + 2U; + Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + 3U; wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * 1U; + Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + 1U; + Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + 2U; wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]); wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], 12U); - Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * 1U; + Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv; + Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + 1U; wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]); wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * 1U; + Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + 3U; + Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv; wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]); wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], 8U); - Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * 1U; + Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + 2U; + Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + 3U; wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * 1U; - Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * 1U; + Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + 1U; + Lib_IntVector_Intrinsics_vec128 *wv_b = wv + 2U; wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]); wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], 7U); Lib_IntVector_Intrinsics_vec128 *r11 = wv + 1U; @@ -238,25 +239,27 @@ Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t k uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + uint32_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 4U; uint8_t *bj = p.salt + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); + uint32_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 6U; uint8_t *bj = p.personal + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); tmp[0U] = (uint32_t)(uint8_t)nn @@ -286,72 +289,6 @@ Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t k r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); } -static void -init_with_params(Lib_IntVector_Intrinsics_vec128 *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint32_t tmp[8U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 *r0 = hash; - Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; - Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; - Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; - uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; - uint32_t iv5 = 
Hacl_Hash_Blake2b_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; - r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - tmp[0U] = - (uint32_t)p.digest_length - ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); - tmp[1U] = p.leaf_length; - tmp[2U] = (uint32_t)p.node_offset; - tmp[3U] = - (uint32_t)(p.node_offset >> 32U) - ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); - uint32_t tmp0 = tmp[0U]; - uint32_t tmp1 = tmp[1U]; - uint32_t tmp2 = tmp[2U]; - uint32_t tmp3 = tmp[3U]; - uint32_t tmp4 = tmp[4U]; - uint32_t tmp5 = tmp[5U]; - uint32_t tmp6 = tmp[6U]; - uint32_t tmp7 = tmp[7U]; - uint32_t iv0_ = iv0 ^ tmp0; - uint32_t iv1_ = iv1 ^ tmp1; - uint32_t iv2_ = iv2 ^ tmp2; - uint32_t iv3_ = iv3 ^ tmp3; - uint32_t iv4_ = iv4 ^ tmp4; - uint32_t iv5_ = iv5 ^ tmp5; - uint32_t iv6_ = iv6 ^ tmp6; - uint32_t iv7_ = iv7 ^ tmp7; - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); -} - static void update_key( Lib_IntVector_Intrinsics_vec128 *wv, @@ -366,11 +303,11 @@ update_key( memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -390,7 +327,7 @@ Hacl_Hash_Blake2s_Simd128_update_multi( { uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U); uint8_t *b = blocks + i * 64U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -399,6 +336,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec128 *wv, Lib_IntVector_Intrinsics_vec128 *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d @@ -408,7 +346,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( uint8_t *last = d + len - rem; memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -442,7 +380,7 @@ update_blocks( rem = rem0; } Hacl_Hash_Blake2s_Simd128_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2s_Simd128_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2s_Simd128_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -483,6 +421,7 @@ Hacl_Hash_Blake2s_Simd128_finish( Lib_IntVector_Intrinsics_vec128 *row1 = hash + 1U; Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]); Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]); + KRML_MAYBE_UNUSED_VAR(b); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); Lib_Memzero0_memzero(b, 32U, uint8_t, void *); @@ -508,11 +447,11 @@ Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32( 0U, 4U, 1U, - uint32_t *os = b0; uint8_t *bj = b8 + i * 4U; uint32_t u = load32_le(bj); 
uint32_t r = u; uint32_t x = r; + uint32_t *os = b0; os[i] = x;); uint8_t b80[16U] = { 0U }; Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]); @@ -520,11 +459,11 @@ Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32( 0U, 4U, 1U, - uint32_t *os = b1; uint8_t *bj = b80 + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = b1; os[i] = x;); uint8_t b81[16U] = { 0U }; Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]); @@ -532,11 +471,11 @@ Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32( 0U, 4U, 1U, - uint32_t *os = b2; uint8_t *bj = b81 + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = b2; os[i] = x;); uint8_t b82[16U] = { 0U }; Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]); @@ -544,11 +483,11 @@ Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32( 0U, 4U, 1U, - uint32_t *os = b3; uint8_t *bj = b82 + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = b3; os[i] = x;); } @@ -583,10 +522,7 @@ Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void) } static Hacl_Hash_Blake2s_Simd128_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec128 @@ -600,7 +536,87 @@ static Hacl_Hash_Blake2s_Simd128_state_t sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); Hacl_Hash_Blake2s_Simd128_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { .fst = kk.key_length, .snd = kk.digest_length, .thd = kk.last_node, .f3 = wv, .f4 = b }; + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec128 *h = block_state.f4; + uint32_t kk2 = (uint32_t)i0.key_length; + uint8_t *k_ = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = h; + Lib_IntVector_Intrinsics_vec128 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = h + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + uint32_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + uint32_t *os = uu____0; + os[i] = x;); + uint32_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 4U; + uint32_t u = load32_le(bj); + 
uint32_t r4 = u; + uint32_t x = r4; + uint32_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -614,60 +630,60 @@ static Hacl_Hash_Blake2s_Simd128_state_t Hacl_Hash_Blake2s_Simd128_state_t s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2s_Simd128_state_t - *p = + *p0 = (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof ( Hacl_Hash_Blake2s_Simd128_state_t )); - p[0U] = s; - Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; - uint8_t kk1 = p1->key_length; - uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i.key_length; - uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) - { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); - } - Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); - return p; + p0[0U] = s; + return p0; } /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (128 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 128 for S, 64 for B. +- The digest_length must not exceed 128 for S, 64 for B. 
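To make the role of the new `last_node` argument concrete, here is a hedged sketch of allocating a tree-mode leaf state with the vectorized variant. This is an editorial example: the streaming `update`/`digest`/`free` names are assumed to mirror the scalar API shown earlier in this patch, and the tree parameter values are purely illustrative, in the style of the BLAKE2 specification.

#include <stdint.h>
#include "Hacl_Hash_Blake2s_Simd128.h"   /* assumed header */

static void example_tree_last_leaf(uint8_t *out32, uint8_t *leaf, uint32_t leaf_len)
{
  uint8_t salt[8] = { 0 };
  uint8_t personal[8] = { 0 };
  Hacl_Hash_Blake2b_blake2_params p = {
    .digest_length = 32U, .key_length = 0U,    /* unkeyed leaf */
    .fanout = 2U, .depth = 2U,                 /* example 2-ary, 2-level tree */
    .leaf_length = 4096U, .node_offset = 3ULL, /* example position of this leaf */
    .node_depth = 0U, .inner_length = 32U,
    .salt = salt, .personal = personal
  };
  uint8_t no_key[1] = { 0 };                   /* unread, since key_length = 0 */
  /* last_node = true: this state hashes the final node of its tree layer,
     which sets the f1 finalization word (see update_block above). */
  Hacl_Hash_Blake2s_Simd128_state_t *st =
    Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(&p, true, no_key);
  (void)Hacl_Hash_Blake2s_Simd128_update(st, leaf, leaf_len);
  Hacl_Hash_Blake2s_Simd128_digest(st, out32);
  Hacl_Hash_Blake2s_Simd128_free(st);
}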
+ */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (128 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 128 for S, 64 for B. + */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk) { uint8_t nn = 32U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; - uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); - uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; Hacl_Hash_Blake2b_blake2_params p = { @@ -675,21 +691,16 @@ Hacl_Hash_Blake2s_Simd128_state_t .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; - Hacl_Hash_Blake2b_blake2_params - *p0 = - (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); - p0[0U] = p; + Hacl_Hash_Blake2b_blake2_params p0 = p; Hacl_Hash_Blake2s_Simd128_state_t - *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(p0, k); - Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; - KRML_HOST_FREE(p1.salt); - KRML_HOST_FREE(p1.personal); - KRML_HOST_FREE(p0); + *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(&p0, false, k); return s; } /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. 
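A minimal sketch (outside this diff) of the plain hash-function path the comment above describes, using only functions whose signatures appear in this patch; error handling is reduced to a single check on the streaming length limit.

    #include <stdint.h>
    #include "Hacl_Hash_Blake2s_Simd128.h"  /* assumed header name */

    /* Hash `len` bytes of `msg` into a 32-byte BLAKE2s digest, streaming one chunk. */
    static int blake2s_simd128_oneshot_stream(uint8_t out[32], uint8_t *msg, uint32_t len)
    {
      Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
      if (Hacl_Hash_Blake2s_Simd128_update(st, msg, len) != Hacl_Streaming_Types_Success)
      {
        Hacl_Hash_Blake2s_Simd128_free(st);
        return -1;  /* total input length limit exceeded */
      }
      (void)Hacl_Hash_Blake2s_Simd128_digest(st, out);  /* returns digest_length (32 here) */
      Hacl_Hash_Blake2s_Simd128_free(st);
      return 0;
    }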
*/ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) { @@ -699,28 +710,30 @@ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_Simd128_state_t *s) { Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } static void -reset_raw( - Hacl_Hash_Blake2s_Simd128_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +reset_raw(Hacl_Hash_Blake2s_Simd128_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { - Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; - uint8_t *buf = scrut.buf; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*state).block_state; + uint8_t *buf = (*state).buf; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; - KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_index + i0 = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec128 *h = block_state.f4; uint32_t kk2 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; if (!(kk2 == 0U)) @@ -730,8 +743,70 @@ reset_raw( memcpy(buf, k_1, kk2 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); - uint8_t kk11 = i.key_length; + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = h; + Lib_IntVector_Intrinsics_vec128 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = h + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + uint32_t *uu____0 = tmp + 4U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + uint32_t *os = uu____0; + os[i] = x;); + uint32_t *uu____1 = tmp + 6U; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint8_t *bj = pv.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + uint32_t *os = uu____1; + os[i] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ 
(uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); + uint8_t kk11 = i0.key_length; uint32_t ite; if (kk11 != 0U) { @@ -741,15 +816,16 @@ reset_raw( { ite = 0U; } - Hacl_Hash_Blake2s_Simd128_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)ite; + state->total_len = total_len; } /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( @@ -758,15 +834,17 @@ Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k) { @@ -781,11 +859,16 @@ void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. 
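A small illustrative sketch (not part of the patch, same assumed header as above) of the reuse pattern the reset comments describe: one state allocated with `malloc`, reset between messages so the digest and key lengths never change.

    /* Hash two messages with one state; `reset` restores the default
       (keyless, 32-byte-digest) configuration chosen by `malloc`. */
    static void blake2s_simd128_hash_two(uint8_t d1[32], uint8_t *m1, uint32_t l1,
                                         uint8_t d2[32], uint8_t *m2, uint32_t l2)
    {
      Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
      (void)Hacl_Hash_Blake2s_Simd128_update(st, m1, l1);
      (void)Hacl_Hash_Blake2s_Simd128_digest(st, d1);
      Hacl_Hash_Blake2s_Simd128_reset(st);              /* same index, fresh hash */
      (void)Hacl_Hash_Blake2s_Simd128_update(st, m2, l2);
      (void)Hacl_Hash_Blake2s_Simd128_digest(st, d2);
      Hacl_Hash_Blake2s_Simd128_free(st);
    }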
*/ void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s) { @@ -793,7 +876,7 @@ void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s) } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_Simd128_update( @@ -802,8 +885,8 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t chunk_len ) { - Hacl_Hash_Blake2s_Simd128_state_t s = *state; - uint64_t total_len = s.total_len; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -819,10 +902,8 @@ Hacl_Hash_Blake2s_Simd128_update( } if (chunk_len <= 64U - sz) { - Hacl_Hash_Blake2s_Simd128_state_t s1 = *state; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -835,22 +916,12 @@ Hacl_Hash_Blake2s_Simd128_update( uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Hash_Blake2s_Simd128_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Hash_Blake2s_Simd128_state_t s1 = *state; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -863,10 +934,8 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec128 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.f3; uint32_t nb = 1U; Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -884,32 +953,21 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec128 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.f3; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2s_Simd128_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 64U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Hash_Blake2s_Simd128_state_t s1 = *state; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state10 = s1.block_state; - uint8_t *buf0 = 
s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL) { @@ -919,22 +977,12 @@ Hacl_Hash_Blake2s_Simd128_update( { sz10 = (uint32_t)(total_len10 % (uint64_t)64U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Hash_Blake2s_Simd128_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Hash_Blake2s_Simd128_state_t s10 = *state; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -947,12 +995,10 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec128 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.f3; uint32_t nb = 1U; - Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb); + Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf0, nb); } uint32_t ite; if @@ -969,40 +1015,39 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; - Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; + Lib_IntVector_Intrinsics_vec128 *hash = block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.f3; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb); - uint8_t *dst = buf; + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_Blake2s_Simd128_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 128 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_128_OUT_BYTES, then use the return value +to see how many bytes were actually written. 
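To make the sizing contract above concrete, a hedged caller-side sketch (not from this diff): the output is sized to 32 bytes, the BLAKE2s maximum digest length, and the return value reports how many of those bytes were actually written; the HACL_BLAKE2S_128_OUT_BYTES macro mentioned above is assumed to denote that maximum.

    /* `digest` reports how many output bytes are meaningful. */
    static uint8_t blake2s_simd128_finish(Hacl_Hash_Blake2s_Simd128_state_t *st,
                                          uint8_t out[32])
    {
      /* 32 bytes is always enough for BLAKE2s; the returned value is the
         digest_length that was fixed at allocation time. */
      return Hacl_Hash_Blake2s_Simd128_digest(st, out);
    }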
*/ -void -Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + uint8_t *buf_ = (*s).buf; + uint64_t total_len = (*s).total_len; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -1017,11 +1062,12 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; Hacl_Hash_Blake2s_Simd128_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - Lib_IntVector_Intrinsics_vec128 *src_b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.thd.snd; + { .fst = i1.key_length, .snd = i1.digest_length, .thd = i1.last_node, .f3 = wv0, .f4 = b }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state.f4; + Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.f4; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); uint64_t prev_len = total_len - (uint64_t)r; + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 64U == 0U && r > 0U) { @@ -1032,21 +1078,33 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 ite = r % 64U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc0 = tmp_block_state.thd; - Lib_IntVector_Intrinsics_vec128 *wv1 = acc0.fst; - Lib_IntVector_Intrinsics_vec128 *hash0 = acc0.snd; + Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.f3; uint32_t nb = 0U; Hacl_Hash_Blake2s_Simd128_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc = tmp_block_state.thd; - Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; - Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; - Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, prev_len_last, r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2s_Simd128_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; + Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, last_node1, prev_len_last, r, buf_last); + uint8_t nn1 = tmp_block_state.snd; + Hacl_Hash_Blake2s_Simd128_finish((uint32_t)nn1, dst, tmp_block_state.f4); + Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn 
= block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_Simd128_info(Hacl_Hash_Blake2s_Simd128_state_t *s) +{ + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1057,8 +1115,8 @@ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state) Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec128 *b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec128 *wv = block_state.thd.fst; + Lib_IntVector_Intrinsics_vec128 *b = block_state.f4; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.f3; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); @@ -1066,18 +1124,18 @@ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state) } /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state) { - Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; - Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec128 @@ -1091,9 +1149,10 @@ Hacl_Hash_Blake2s_Simd128_state_t sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); Hacl_Hash_Blake2s_Simd128_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - Lib_IntVector_Intrinsics_vec128 *src_b = block_state0.thd.snd; - Lib_IntVector_Intrinsics_vec128 *dst_b = block_state.thd.snd; + block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = i.last_node, .f3 = wv, .f4 = b }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state0.f4; + Lib_IntVector_Intrinsics_vec128 *dst_b = block_state.f4; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); Hacl_Hash_Blake2s_Simd128_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1135,8 +1194,14 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); } +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
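A hedged sketch (not part of the patch) of the one-shot API described above. The portion of the prototype after `input_len` is cut off in this hunk, so the (params, key) argument order is an assumption, mirroring the state-based API earlier in this file.

    /* One-shot keyed, personalized BLAKE2s-128 (argument order after input_len assumed). */
    static void blake2s_simd128_oneshot_params(uint8_t *out, uint8_t *msg, uint32_t msg_len,
                                               uint8_t *key, uint8_t key_len)
    {
      uint8_t salt[8U] = { 0U };
      uint8_t personal[8U] = { 'm', 'y', 'a', 'p', 'p', 0U, 0U, 0U };
      Hacl_Hash_Blake2b_blake2_params params = {
        .digest_length = 32U, .key_length = key_len, .fanout = 1U, .depth = 1U,
        .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U,
        .salt = salt, .personal = personal
      };
      /* out must hold params.digest_length bytes; key must hold params.key_length bytes. */
      Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params(out, msg, msg_len, params, key);
    }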
+*/ void -Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, @@ -1161,25 +1226,27 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + uint32_t *uu____0 = tmp + 4U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 4U; uint8_t *bj = params.salt + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____0; os[i] = x;); + uint32_t *uu____1 = tmp + 6U; KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = tmp + 6U; uint8_t *bj = params.personal + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = uu____1; os[i] = x;); tmp[0U] = (uint32_t)params.digest_length diff --git a/src/Hacl_Hash_MD5.c b/src/Hacl_Hash_MD5.c index ed294839..55c755f2 100644 --- a/src/Hacl_Hash_MD5.c +++ b/src/Hacl_Hash_MD5.c @@ -1167,24 +1167,21 @@ Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t)); + Hacl_Hash_MD5_init(block_state); Hacl_Streaming_MD_state_32 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_32 *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32)); p[0U] = s; - Hacl_Hash_MD5_init(block_state); return p; } void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; + uint32_t *block_state = (*state).block_state; Hacl_Hash_MD5_init(block_state); - Hacl_Streaming_MD_state_32 - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } /** @@ -1193,8 +1190,8 @@ void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state) Hacl_Streaming_Types_error_code Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Streaming_MD_state_32 s = *state; - uint64_t total_len = s.total_len; + uint32_t *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -1210,10 +1207,8 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t } if (chunk_len <= 64U - sz) { - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1226,22 +1221,12 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = 
s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1253,7 +1238,7 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t } if (!(sz1 == 0U)) { - Hacl_Hash_MD5_update_multi(block_state1, buf, 1U); + Hacl_Hash_MD5_update_multi(block_state, buf, 1U); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -1269,28 +1254,18 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U); + Hacl_Hash_MD5_update_multi(block_state, data1, data1_len / 64U); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 64U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL) { @@ -1300,22 +1275,12 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t { sz10 = (uint32_t)(total_len10 % (uint64_t)64U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Streaming_MD_state_32 s10 = *state; - uint32_t *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1327,7 +1292,7 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t } if (!(sz1 == 0U)) { - Hacl_Hash_MD5_update_multi(block_state1, buf, 1U); + Hacl_Hash_MD5_update_multi(block_state, buf0, 1U); } uint32_t ite; if @@ -1344,28 +1309,19 @@ Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U); - uint8_t *dst = buf; + Hacl_Hash_MD5_update_multi(block_state, data1, data1_len / 64U); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + uint32_t *block_state = (*state).block_state; + 
uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -1378,6 +1334,7 @@ void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output) uint8_t *buf_1 = buf_; uint32_t tmp_block_state[4U] = { 0U }; memcpy(tmp_block_state, block_state, 4U * sizeof (uint32_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 64U == 0U && r > 0U) { @@ -1388,7 +1345,6 @@ void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output) ite = r % 64U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; Hacl_Hash_MD5_update_multi(tmp_block_state, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_MD5_update_last(tmp_block_state, prev_len_last, buf_last, r); @@ -1407,10 +1363,9 @@ void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state) Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + uint32_t *block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t)); diff --git a/src/Hacl_Hash_SHA1.c b/src/Hacl_Hash_SHA1.c index 1a8b09b1..ec9ce312 100644 --- a/src/Hacl_Hash_SHA1.c +++ b/src/Hacl_Hash_SHA1.c @@ -200,24 +200,21 @@ Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t)); + Hacl_Hash_SHA1_init(block_state); Hacl_Streaming_MD_state_32 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_32 *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32)); p[0U] = s; - Hacl_Hash_SHA1_init(block_state); return p; } void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; + uint32_t *block_state = (*state).block_state; Hacl_Hash_SHA1_init(block_state); - Hacl_Streaming_MD_state_32 - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } /** @@ -226,8 +223,8 @@ void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state) Hacl_Streaming_Types_error_code Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Streaming_MD_state_32 s = *state; - uint64_t total_len = s.total_len; + uint32_t *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -243,10 +240,8 @@ Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ } if (chunk_len <= 64U - sz) { - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -259,22 +254,12 @@ 
Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -286,7 +271,7 @@ Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ } if (!(sz1 == 0U)) { - Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U); + Hacl_Hash_SHA1_update_multi(block_state, buf, 1U); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -302,28 +287,18 @@ Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U); + Hacl_Hash_SHA1_update_multi(block_state, data1, data1_len / 64U); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 64U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL) { @@ -333,22 +308,12 @@ Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ { sz10 = (uint32_t)(total_len10 % (uint64_t)64U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Streaming_MD_state_32 s10 = *state; - uint32_t *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -360,7 +325,7 @@ Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ } if (!(sz1 == 0U)) { - Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U); + Hacl_Hash_SHA1_update_multi(block_state, buf0, 1U); } uint32_t ite; if @@ -377,28 +342,19 @@ Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_ uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U); - uint8_t *dst = buf; + Hacl_Hash_SHA1_update_multi(block_state, data1, data1_len / 64U); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - 
(Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + uint32_t *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -411,6 +367,7 @@ void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output) uint8_t *buf_1 = buf_; uint32_t tmp_block_state[5U] = { 0U }; memcpy(tmp_block_state, block_state, 5U * sizeof (uint32_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 64U == 0U && r > 0U) { @@ -421,7 +378,6 @@ void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output) ite = r % 64U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; Hacl_Hash_SHA1_update_multi(tmp_block_state, buf_multi, 0U); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_SHA1_update_last(tmp_block_state, prev_len_last, buf_last, r); @@ -440,10 +396,9 @@ void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state) Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + uint32_t *block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t)); diff --git a/src/Hacl_Hash_SHA2.c b/src/Hacl_Hash_SHA2.c index 995fe707..7f2e606d 100644 --- a/src/Hacl_Hash_SHA2.c +++ b/src/Hacl_Hash_SHA2.c @@ -33,8 +33,8 @@ void Hacl_Hash_SHA2_sha256_init(uint32_t *hash) 0U, 8U, 1U, - uint32_t *os = hash; uint32_t x = Hacl_Hash_SHA2_h256[i]; + uint32_t *os = hash; os[i] = x;); } @@ -140,8 +140,8 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash) 0U, 8U, 1U, - uint32_t *os = hash; uint32_t x = hash[i] + hash_old[i]; + uint32_t *os = hash; os[i] = x;); } @@ -206,12 +206,12 @@ void Hacl_Hash_SHA2_sha224_init(uint32_t *hash) 0U, 8U, 1U, - uint32_t *os = hash; uint32_t x = Hacl_Hash_SHA2_h224[i]; + uint32_t *os = hash; os[i] = x;); } -static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) +void Hacl_Hash_SHA2_sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) { Hacl_Hash_SHA2_sha256_update_nblocks(len, b, st); } @@ -234,8 +234,8 @@ void Hacl_Hash_SHA2_sha512_init(uint64_t *hash) 0U, 8U, 1U, - uint64_t *os = hash; uint64_t x = Hacl_Hash_SHA2_h512[i]; + uint64_t *os = hash; os[i] = x;); } @@ -341,8 +341,8 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash) 0U, 8U, 1U, - uint64_t *os = hash; uint64_t x = hash[i] + hash_old[i]; + uint64_t *os = hash; os[i] = x;); } @@ -412,8 +412,8 @@ void Hacl_Hash_SHA2_sha384_init(uint64_t *hash) 0U, 8U, 1U, - uint64_t *os = hash; uint64_t x = Hacl_Hash_SHA2_h384[i]; + uint64_t *os = hash; os[i] = x;); } @@ -448,12 +448,12 @@ Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void) { uint8_t *buf = (uint8_t 
*)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t)); + Hacl_Hash_SHA2_sha256_init(block_state); Hacl_Streaming_MD_state_32 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_32 *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32)); p[0U] = s; - Hacl_Hash_SHA2_sha256_init(block_state); return p; } @@ -465,10 +465,9 @@ more (different) data into the hash in each branch. */ Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + uint32_t *block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t)); @@ -486,20 +485,17 @@ Reset an existing state to the initial hash state with empty data. */ void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; + uint32_t *block_state = (*state).block_state; Hacl_Hash_SHA2_sha256_init(block_state); - Hacl_Streaming_MD_state_32 - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } static inline Hacl_Streaming_Types_error_code update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Streaming_MD_state_32 s = *state; - uint64_t total_len = s.total_len; + uint32_t *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -515,10 +511,8 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk } if (chunk_len <= 64U - sz) { - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -531,22 +525,12 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -558,7 +542,7 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk } if (!(sz1 == 0U)) { - Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1); + Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && 
(uint64_t)chunk_len > 0ULL) @@ -574,28 +558,18 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1); + Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 64U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Streaming_MD_state_32 s1 = *state; - uint32_t *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL) { @@ -605,22 +579,12 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk { sz10 = (uint32_t)(total_len10 % (uint64_t)64U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Streaming_MD_state_32 s10 = *state; - uint32_t *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -632,7 +596,7 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk } if (!(sz1 == 0U)) { - Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1); + Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf0, block_state); } uint32_t ite; if @@ -649,18 +613,10 @@ update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1); - uint8_t *dst = buf; + Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Streaming_MD_state_32){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } @@ -690,10 +646,9 @@ the state and therefore does not invalidate the client-held state `p`.) 
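Illustrative sketch (not from this diff) of the streaming SHA-256 API around `digest_256`: the update and free companions are not shown in this hunk, so the names `Hacl_Hash_SHA2_update_256` and `Hacl_Hash_SHA2_free_256` are assumed from the usual HACL* naming scheme.

    #include <stdint.h>
    #include "Hacl_Hash_SHA2.h"  /* assumed header name */

    /* Incremental SHA-256 over two chunks. */
    static void sha256_two_chunks(uint8_t out[32],
                                  uint8_t *a, uint32_t a_len,
                                  uint8_t *b, uint32_t b_len)
    {
      Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();
      (void)Hacl_Hash_SHA2_update_256(st, a, a_len);   /* assumed name */
      (void)Hacl_Hash_SHA2_update_256(st, b, b_len);
      /* digest_256 works on a copy of the block state, so `st` stays valid
         and can keep absorbing data afterwards. */
      Hacl_Hash_SHA2_digest_256(st, out);
      Hacl_Hash_SHA2_free_256(st);                     /* assumed name */
    }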
*/ void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + uint32_t *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -706,6 +661,7 @@ void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *outpu uint8_t *buf_1 = buf_; uint32_t tmp_block_state[8U] = { 0U }; memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 64U == 0U && r > 0U) { @@ -716,7 +672,6 @@ void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *outpu ite = r % 64U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; Hacl_Hash_SHA2_sha256_update_nblocks(0U, buf_multi, tmp_block_state); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_SHA2_sha256_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state); @@ -761,24 +716,21 @@ Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t)); + Hacl_Hash_SHA2_sha224_init(block_state); Hacl_Streaming_MD_state_32 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_32 *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32)); p[0U] = s; - Hacl_Hash_SHA2_sha224_init(block_state); return p; } void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; + uint32_t *block_state = (*state).block_state; Hacl_Hash_SHA2_sha224_init(block_state); - Hacl_Streaming_MD_state_32 - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } Hacl_Streaming_Types_error_code @@ -798,10 +750,9 @@ the hash via `update_224`. 
*/ void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output) { - Hacl_Streaming_MD_state_32 scrut = *state; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + uint32_t *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -814,6 +765,7 @@ void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *outpu uint8_t *buf_1 = buf_; uint32_t tmp_block_state[8U] = { 0U }; memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 64U == 0U && r > 0U) { @@ -824,8 +776,7 @@ void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *outpu ite = r % 64U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; - sha224_update_nblocks(0U, buf_multi, tmp_block_state); + Hacl_Hash_SHA2_sha224_update_nblocks(0U, buf_multi, tmp_block_state); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_SHA2_sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state); Hacl_Hash_SHA2_sha224_finish(tmp_block_state, output); @@ -847,7 +798,7 @@ void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len Hacl_Hash_SHA2_sha224_init(st); uint32_t rem = input_len % 64U; uint64_t len_ = (uint64_t)input_len; - sha224_update_nblocks(input_len, ib, st); + Hacl_Hash_SHA2_sha224_update_nblocks(input_len, ib, st); uint32_t rem1 = input_len % 64U; uint8_t *b0 = ib; uint8_t *lb = b0 + input_len - rem1; @@ -859,12 +810,12 @@ Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t)); + Hacl_Hash_SHA2_sha512_init(block_state); Hacl_Streaming_MD_state_64 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_64 *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64)); p[0U] = s; - Hacl_Hash_SHA2_sha512_init(block_state); return p; } @@ -876,10 +827,9 @@ more (different) data into the hash in each branch. 
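A sketch (outside the patch) of the branching use case the `copy_512` comment alludes to: hash a shared prefix once, copy the state, then feed each branch its own suffix. Only `malloc_512`, `copy_512`, and `digest_512` appear in this diff; `Hacl_Hash_SHA2_update_512` and `Hacl_Hash_SHA2_free_512` are assumed names.

    /* Fork a SHA-512 computation after a shared prefix. */
    static void sha512_fork(uint8_t out_a[64], uint8_t out_b[64],
                            uint8_t *prefix, uint32_t prefix_len,
                            uint8_t *suffix_a, uint32_t a_len,
                            uint8_t *suffix_b, uint32_t b_len)
    {
      Hacl_Streaming_MD_state_64 *base = Hacl_Hash_SHA2_malloc_512();
      (void)Hacl_Hash_SHA2_update_512(base, prefix, prefix_len);   /* assumed name */
      Hacl_Streaming_MD_state_64 *branch = Hacl_Hash_SHA2_copy_512(base);
      (void)Hacl_Hash_SHA2_update_512(base, suffix_a, a_len);
      (void)Hacl_Hash_SHA2_update_512(branch, suffix_b, b_len);
      Hacl_Hash_SHA2_digest_512(base, out_a);
      Hacl_Hash_SHA2_digest_512(branch, out_b);
      Hacl_Hash_SHA2_free_512(base);                               /* assumed name */
      Hacl_Hash_SHA2_free_512(branch);
    }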
*/ Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state) { - Hacl_Streaming_MD_state_64 scrut = *state; - uint64_t *block_state0 = scrut.block_state; - uint8_t *buf0 = scrut.buf; - uint64_t total_len0 = scrut.total_len; + uint64_t *block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); memcpy(buf, buf0, 128U * sizeof (uint8_t)); uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t)); @@ -894,20 +844,17 @@ Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 * void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state) { - Hacl_Streaming_MD_state_64 scrut = *state; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; + uint64_t *block_state = (*state).block_state; Hacl_Hash_SHA2_sha512_init(block_state); - Hacl_Streaming_MD_state_64 - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } static inline Hacl_Streaming_Types_error_code update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Streaming_MD_state_64 s = *state; - uint64_t total_len = s.total_len; + uint64_t *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 18446744073709551615ULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -923,10 +870,8 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk } if (chunk_len <= 128U - sz) { - Hacl_Streaming_MD_state_64 s1 = *state; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -939,22 +884,12 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_Streaming_MD_state_64){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2 - } - ); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Streaming_MD_state_64 s1 = *state; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -966,7 +901,7 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk } if (!(sz1 == 0U)) { - Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1); + Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -982,28 +917,18 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1); + Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - 
(Hacl_Streaming_MD_state_64){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = 128U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Streaming_MD_state_64 s1 = *state; - uint64_t *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL) { @@ -1013,22 +938,12 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk { sz10 = (uint32_t)(total_len10 % (uint64_t)128U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Streaming_MD_state_64){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Streaming_MD_state_64 s10 = *state; - uint64_t *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL) { @@ -1040,7 +955,7 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk } if (!(sz1 == 0U)) { - Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1); + Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf0, block_state); } uint32_t ite; if @@ -1057,18 +972,10 @@ update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1); - uint8_t *dst = buf; + Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Streaming_MD_state_64){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } @@ -1098,10 +1005,9 @@ the state and therefore does not invalidate the client-held state `p`.) 
*/ void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output) { - Hacl_Streaming_MD_state_64 scrut = *state; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + uint64_t *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL) { @@ -1114,6 +1020,7 @@ void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *outpu uint8_t *buf_1 = buf_; uint64_t tmp_block_state[8U] = { 0U }; memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 128U == 0U && r > 0U) { @@ -1124,7 +1031,6 @@ void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *outpu ite = r % 128U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; Hacl_Hash_SHA2_sha512_update_nblocks(0U, buf_multi, tmp_block_state); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last), @@ -1173,24 +1079,21 @@ Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t)); + Hacl_Hash_SHA2_sha384_init(block_state); Hacl_Streaming_MD_state_64 s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; Hacl_Streaming_MD_state_64 *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64)); p[0U] = s; - Hacl_Hash_SHA2_sha384_init(block_state); return p; } void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state) { - Hacl_Streaming_MD_state_64 scrut = *state; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; + uint64_t *block_state = (*state).block_state; Hacl_Hash_SHA2_sha384_init(block_state); - Hacl_Streaming_MD_state_64 - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } Hacl_Streaming_Types_error_code @@ -1210,10 +1113,9 @@ the hash via `update_384`. 
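/* --- Illustrative usage sketch (editor's addition, not produced by the CI bot) ---
 * A minimal sketch of the streaming SHA-384 path touched below: allocate a state,
 * feed the message in chunks via update_384 (which reports MaximumLengthExceeded on
 * overflow), extract the 48-byte digest, and reuse the state after reset_384. The
 * names Hacl_Hash_SHA2_update_384 and Hacl_Hash_SHA2_free_384 are assumed from the
 * public Hacl_Hash_SHA2.h header; only malloc_384, reset_384 and digest_384 appear
 * in this diff. */
#include "Hacl_Hash_SHA2.h"
#include <stdint.h>

/* `chunk` must be non-zero. Returns 0 on success, -1 if the input limit is hit. */
static int sha384_chunked(uint8_t *data, uint32_t len, uint32_t chunk,
                          uint8_t out[48])
{
  Hacl_Streaming_MD_state_64 *st = Hacl_Hash_SHA2_malloc_384();
  for (uint32_t off = 0U; off < len; off += chunk)
  {
    uint32_t n = (len - off < chunk) ? (len - off) : chunk;
    if (Hacl_Hash_SHA2_update_384(st, data + off, n) != Hacl_Streaming_Types_Success)
    {
      Hacl_Hash_SHA2_free_384(st);
      return -1;  /* total input exceeded the 2^64 - 1 byte limit */
    }
  }
  Hacl_Hash_SHA2_digest_384(st, out);
  Hacl_Hash_SHA2_reset_384(st);  /* state is ready for a fresh message */
  Hacl_Hash_SHA2_free_384(st);
  return 0;
}
/* ------------------------------------------------------------------------------- */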
*/ void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output) { - Hacl_Streaming_MD_state_64 scrut = *state; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; + uint64_t *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL) { @@ -1226,6 +1128,7 @@ void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *outpu uint8_t *buf_1 = buf_; uint64_t tmp_block_state[8U] = { 0U }; memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 128U == 0U && r > 0U) { @@ -1236,7 +1139,6 @@ void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *outpu ite = r % 128U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; Hacl_Hash_SHA2_sha384_update_nblocks(0U, buf_multi, tmp_block_state); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last), diff --git a/src/Hacl_Hash_SHA3.c b/src/Hacl_Hash_SHA3.c index 89bb0491..8d35d4fe 100644 --- a/src/Hacl_Hash_SHA3.c +++ b/src/Hacl_Hash_SHA3.c @@ -251,7 +251,8 @@ Hacl_Hash_SHA3_update_multi_sha3( uint8_t *bl0 = b_; uint8_t *uu____0 = b0 + i * block_len(a); memcpy(bl0, uu____0, block_len(a) * sizeof (uint8_t)); - block_len(a); + uint32_t unused = block_len(a); + KRML_MAYBE_UNUSED_VAR(unused); absorb_inner_32(b_, s); } } @@ -544,13 +545,6 @@ Hacl_Hash_SHA3_update_last_sha3( absorb_inner_32(b3, s); } -typedef struct hash_buf2_s -{ - Hacl_Hash_SHA3_hash_buf fst; - Hacl_Hash_SHA3_hash_buf snd; -} -hash_buf2; - Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s) { Hacl_Hash_SHA3_hash_buf block_state = (*s).block_state; @@ -563,13 +557,13 @@ Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a) uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t)); uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t)); Hacl_Hash_SHA3_hash_buf block_state = { .fst = a, .snd = buf }; + uint64_t *s = block_state.snd; + memset(s, 0U, 25U * sizeof (uint64_t)); Hacl_Hash_SHA3_state_t - s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U }; + s0 = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U }; Hacl_Hash_SHA3_state_t *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t)); - p[0U] = s; - uint64_t *s1 = block_state.snd; - memset(s1, 0U, 25U * sizeof (uint64_t)); + p[0U] = s0; return p; } @@ -586,19 +580,17 @@ void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state) Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state) { - Hacl_Hash_SHA3_state_t scrut0 = *state; - Hacl_Hash_SHA3_hash_buf block_state0 = scrut0.block_state; - uint8_t *buf0 = scrut0.buf; - uint64_t total_len0 = scrut0.total_len; + Hacl_Hash_SHA3_hash_buf block_state0 = (*state).block_state; + uint8_t *buf0 = (*state).buf; + uint64_t total_len0 = (*state).total_len; Spec_Hash_Definitions_hash_alg i = block_state0.fst; KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i)); uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t)); memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t)); uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t)); Hacl_Hash_SHA3_hash_buf block_state = { .fst = i, .snd = buf }; - hash_buf2 scrut = { 
.fst = block_state0, .snd = block_state }; - uint64_t *s_dst = scrut.snd.snd; - uint64_t *s_src = scrut.fst.snd; + uint64_t *s_src = block_state0.snd; + uint64_t *s_dst = block_state.snd; memcpy(s_dst, s_src, 25U * sizeof (uint64_t)); Hacl_Hash_SHA3_state_t s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 }; @@ -610,24 +602,18 @@ Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state) void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state) { - Hacl_Hash_SHA3_state_t scrut = *state; - uint8_t *buf = scrut.buf; - Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state; - Spec_Hash_Definitions_hash_alg i = block_state.fst; - KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_SHA3_hash_buf block_state = (*state).block_state; uint64_t *s = block_state.snd; memset(s, 0U, 25U * sizeof (uint64_t)); - Hacl_Hash_SHA3_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; } Hacl_Streaming_Types_error_code Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_Hash_SHA3_state_t s = *state; - Hacl_Hash_SHA3_hash_buf block_state = s.block_state; - uint64_t total_len = s.total_len; + Hacl_Hash_SHA3_hash_buf block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; Spec_Hash_Definitions_hash_alg i = block_state.fst; if ((uint64_t)chunk_len > 0xFFFFFFFFFFFFFFFFULL - total_len) { @@ -644,10 +630,8 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch } if (chunk_len <= block_len(i) - sz) { - Hacl_Hash_SHA3_state_t s1 = *state; - Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL) { @@ -660,16 +644,12 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ((Hacl_Hash_SHA3_state_t){ .block_state = block_state1, .buf = buf, .total_len = total_len2 }); + state->total_len = total_len2; } else if (sz == 0U) { - Hacl_Hash_SHA3_state_t s1 = *state; - Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL) { @@ -681,9 +661,9 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch } if (!(sz1 == 0U)) { - Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; - uint64_t *s2 = block_state1.snd; - Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1)); + Spec_Hash_Definitions_hash_alg a1 = block_state.fst; + uint64_t *s1 = block_state.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s1, buf, block_len(i) / block_len(a1)); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)block_len(i) == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -699,30 +679,20 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; - uint64_t *s2 = block_state1.snd; - 
Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1)); + Spec_Hash_Definitions_hash_alg a1 = block_state.fst; + uint64_t *s1 = block_state.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s1, data1, data1_len / block_len(a1)); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_SHA3_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; } else { uint32_t diff = block_len(i) - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_Hash_SHA3_state_t s1 = *state; - Hacl_Hash_SHA3_hash_buf block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; uint32_t sz10; if (total_len10 % (uint64_t)block_len(i) == 0ULL && total_len10 > 0ULL) { @@ -732,22 +702,12 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch { sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i)); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_Hash_SHA3_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2 - } - ); - Hacl_Hash_SHA3_state_t s10 = *state; - Hacl_Hash_SHA3_hash_buf block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; + state->total_len = total_len2; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; uint32_t sz1; if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL) { @@ -759,9 +719,9 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch } if (!(sz1 == 0U)) { - Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; - uint64_t *s2 = block_state1.snd; - Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1)); + Spec_Hash_Definitions_hash_alg a1 = block_state.fst; + uint64_t *s1 = block_state.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s1, buf0, block_len(i) / block_len(a1)); } uint32_t ite; if @@ -783,20 +743,12 @@ Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t ch uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; - uint64_t *s2 = block_state1.snd; - Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1)); - uint8_t *dst = buf; + Spec_Hash_Definitions_hash_alg a1 = block_state.fst; + uint64_t *s1 = block_state.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s1, data1, data1_len / block_len(a1)); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_Hash_SHA3_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff) - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); } return Hacl_Streaming_Types_Success; } @@ -809,10 +761,9 @@ digest_( uint32_t l ) { - Hacl_Hash_SHA3_state_t scrut0 = *state; - Hacl_Hash_SHA3_hash_buf block_state = scrut0.block_state; - uint8_t *buf_ = scrut0.buf; - uint64_t total_len = scrut0.total_len; + Hacl_Hash_SHA3_hash_buf block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; uint32_t r; if (total_len % (uint64_t)block_len(a) == 0ULL 
&& total_len > 0ULL) { @@ -825,10 +776,10 @@ digest_( uint8_t *buf_1 = buf_; uint64_t buf[25U] = { 0U }; Hacl_Hash_SHA3_hash_buf tmp_block_state = { .fst = a, .snd = buf }; - hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state }; - uint64_t *s_dst = scrut.snd.snd; - uint64_t *s_src = scrut.fst.snd; + uint64_t *s_src = block_state.snd; + uint64_t *s_dst = tmp_block_state.snd; memcpy(s_dst, s_src, 25U * sizeof (uint64_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % block_len(a) == 0U && r > 0U) { @@ -839,7 +790,6 @@ digest_( ite = r % block_len(a); } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst; uint64_t *s0 = tmp_block_state.snd; Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, 0U / block_len(a1)); @@ -2166,7 +2116,7 @@ void Hacl_Hash_SHA3_state_free(uint64_t *s) Absorb number of input blocks and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses an input of multiple of 168-bytes (SHAKE128 block size), + It processes an input of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block are ignored. The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] @@ -2191,14 +2141,14 @@ Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t Absorb a final partial block of input and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses a sequence of bytes at end of input buffer that is less + It processes a sequence of bytes at end of input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffer are ignored. The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] The argument `input` (IN) points to `inputByteLen` bytes of valid memory, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffer must be passed to `inputByteLen` including the number of full-block bytes at start of input buffer that are ignored */ diff --git a/src/Hacl_Hash_SHA3_Simd256.c b/src/Hacl_Hash_SHA3_Simd256.c index 131c34e6..e0bb7e0b 100644 --- a/src/Hacl_Hash_SHA3_Simd256.c +++ b/src/Hacl_Hash_SHA3_Simd256.c @@ -5992,12 +5992,12 @@ void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s) Absorb number of blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses an inputs of multiple of 168-bytes (SHAKE128 block size), + It processes an inputs of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block for each buffer are ignored. The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void @@ -6038,15 +6038,15 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( Absorb a final partial blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses a sequence of bytes at end of each input buffer that is less + It processes a sequence of bytes at end of each input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffers are ignored. 
The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffers must be passed to `inputByteLen` including the number of full-block bytes at start of each input buffer that are ignored */ @@ -6378,7 +6378,7 @@ Squeeze a quadruple hash state to 4 output buffers The argument `state` (IN) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void diff --git a/src/Hacl_K256_ECDSA.c b/src/Hacl_K256_ECDSA.c index 0b72b166..26138792 100644 --- a/src/Hacl_K256_ECDSA.c +++ b/src/Hacl_K256_ECDSA.c @@ -30,34 +30,32 @@ #include "internal/Hacl_Bignum_K256.h" #include "internal/Hacl_Bignum_Base.h" -static inline uint64_t -bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res) +static inline uint64_t bn_add_sa(uint32_t aLen, uint32_t bLen, uint64_t *b, uint64_t *res) { - uint64_t *a0 = a; uint64_t *res0 = res; uint64_t c0 = 0ULL; for (uint32_t i = 0U; i < bLen / 4U; i++) { - uint64_t t1 = a0[4U * i]; + uint64_t t1 = res0[4U * i]; uint64_t t20 = b[4U * i]; uint64_t *res_i0 = res0 + 4U * i; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0); - uint64_t t10 = a0[4U * i + 1U]; + uint64_t t10 = res0[4U * i + 1U]; uint64_t t21 = b[4U * i + 1U]; uint64_t *res_i1 = res0 + 4U * i + 1U; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1); - uint64_t t11 = a0[4U * i + 2U]; + uint64_t t11 = res0[4U * i + 2U]; uint64_t t22 = b[4U * i + 2U]; uint64_t *res_i2 = res0 + 4U * i + 2U; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2); - uint64_t t12 = a0[4U * i + 3U]; + uint64_t t12 = res0[4U * i + 3U]; uint64_t t2 = b[4U * i + 3U]; uint64_t *res_i = res0 + 4U * i + 3U; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i); } for (uint32_t i = bLen / 4U * 4U; i < bLen; i++) { - uint64_t t1 = a0[i]; + uint64_t t1 = res0[i]; uint64_t t2 = b[i]; uint64_t *res_i = res0 + i; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t2, res_i); @@ -65,27 +63,26 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res) uint64_t c00 = c0; if (bLen < aLen) { - uint64_t *a1 = a + bLen; uint64_t *res1 = res + bLen; uint64_t c = c00; for (uint32_t i = 0U; i < (aLen - bLen) / 4U; i++) { - uint64_t t1 = a1[4U * i]; + uint64_t t1 = res1[4U * i]; uint64_t *res_i0 = res1 + 4U * i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0); - uint64_t t10 = a1[4U * i + 1U]; + uint64_t t10 = res1[4U * i + 1U]; uint64_t *res_i1 = res1 + 4U * i + 1U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1); - uint64_t t11 = a1[4U * i + 2U]; + uint64_t t11 = res1[4U * i + 2U]; uint64_t *res_i2 = res1 + 4U * i + 2U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2); - uint64_t t12 = a1[4U * i + 3U]; + uint64_t t12 = res1[4U * i + 3U]; uint64_t *res_i = res1 + 4U * i + 3U; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i); } for (uint32_t i = (aLen - bLen) / 4U * 4U; i < aLen - bLen; i++) { - uint64_t t1 = a1[i]; + uint64_t t1 = res1[i]; uint64_t *res_i = res1 + i; c = 
Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i); } @@ -167,8 +164,8 @@ static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x;); } @@ -221,8 +218,8 @@ static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]); + uint64_t *os = res; os[i] = x;); } @@ -261,8 +258,8 @@ static void sqr4(uint64_t *a, uint64_t *res) 0U, 4U, 1U, - uint64_t *ab = a; uint64_t a_j = a[i0]; + uint64_t *ab = a; uint64_t *res_j = res + i0; uint64_t c = 0ULL; for (uint32_t i = 0U; i < i0 / 4U; i++) @@ -288,7 +285,12 @@ static void sqr4(uint64_t *a, uint64_t *res) } uint64_t r = c; res[i0 + i0] = r;); - uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res); + uint64_t a_copy0[8U] = { 0U }; + uint64_t b_copy0[8U] = { 0U }; + memcpy(a_copy0, res, 8U * sizeof (uint64_t)); + memcpy(b_copy0, res, 8U * sizeof (uint64_t)); + uint64_t r = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, a_copy0, b_copy0, res); + uint64_t c0 = r; KRML_MAYBE_UNUSED_VAR(c0); uint64_t tmp[8U] = { 0U }; KRML_MAYBE_FOR4(i, @@ -300,7 +302,12 @@ static void sqr4(uint64_t *a, uint64_t *res) uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); tmp[2U * i] = lo; tmp[2U * i + 1U] = hi;); - uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res); + uint64_t a_copy[8U] = { 0U }; + uint64_t b_copy[8U] = { 0U }; + memcpy(a_copy, res, 8U * sizeof (uint64_t)); + memcpy(b_copy, tmp, 8U * sizeof (uint64_t)); + uint64_t r0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, a_copy, b_copy, res); + uint64_t c1 = r0; KRML_MAYBE_UNUSED_VAR(c1); } @@ -339,9 +346,9 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b) 0U, 4U, 1U, - uint64_t *os = f; uint64_t u = load64_be(b + (4U - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = f; os[i] = x;); uint64_t is_zero = is_qelem_zero(f); uint64_t acc = 0ULL; @@ -351,7 +358,7 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t is_lt_q = acc; return ~is_zero & is_lt_q; } @@ -362,9 +369,9 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b) 0U, 4U, 1U, - uint64_t *os = f; uint64_t u = load64_be(b + (4U - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = f; os[i] = x;); bool is_zero = is_qelem_zero_vartime(f); uint64_t a0 = f[0U]; @@ -372,11 +379,7 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b) uint64_t a2 = f[2U]; uint64_t a3 = f[3U]; bool is_lt_q_b; - if (a3 < 0xffffffffffffffffULL) - { - is_lt_q_b = true; - } - else if (a2 < 0xfffffffffffffffeULL) + if (a3 < 0xffffffffffffffffULL || a2 < 0xfffffffffffffffeULL) { is_lt_q_b = true; } @@ -412,8 +415,8 @@ static inline void modq_short(uint64_t *out, uint64_t *a) 0U, 4U, 1U, - uint64_t *os = out; uint64_t x = (mask & out[i]) | (~mask & a[i]); + uint64_t *os = out; os[i] = x;); } @@ -424,9 +427,9 @@ static inline void load_qelem_modq(uint64_t *f, uint8_t *b) 0U, 4U, 1U, - uint64_t *os = f; uint64_t u = load64_be(b + (4U - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = f; os[i] = x;); memcpy(tmp, f, 4U * sizeof (uint64_t)); modq_short(f, tmp); @@ -494,8 +497,8 @@ mul_pow2_256_minus_q_add( uint64_t r = c; tmp[len + i0] = r;); memcpy(res + 2U, a, 
len * sizeof (uint64_t)); - bn_add(resLen, res, len + 2U, tmp, res); - uint64_t c = bn_add(resLen, res, 4U, e, res); + bn_add_sa(resLen, len + 2U, tmp, res); + uint64_t c = bn_add_sa(resLen, 4U, e, res); return c; } @@ -510,18 +513,26 @@ static inline void modq(uint64_t *out, uint64_t *a) uint64_t *t01 = tmp; uint64_t m[7U] = { 0U }; uint64_t p[5U] = { 0U }; - mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m); - mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p); - uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p + 4U, p, r); - uint64_t c0 = c2; + uint64_t *a0 = a; + uint64_t *a1 = a + 4U; + uint64_t c0 = mul_pow2_256_minus_q_add(4U, 7U, t01, a1, a0, m); + KRML_MAYBE_UNUSED_VAR(c0); + uint64_t *m0 = m; + uint64_t *m1 = m + 4U; + uint64_t c10 = mul_pow2_256_minus_q_add(3U, 5U, t01, m1, m0, p); + KRML_MAYBE_UNUSED_VAR(c10); + uint64_t *p0 = p; + uint64_t *p1 = p + 4U; + uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p1, p0, r); + uint64_t c00 = c2; uint64_t c1 = add4(r, tmp, out); - uint64_t mask = 0ULL - (c0 + c1); + uint64_t mask = 0ULL - (c00 + c1); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint64_t *os = out; uint64_t x = (mask & out[i]) | (~mask & r[i]); + uint64_t *os = out; os[i] = x;); } @@ -549,7 +560,9 @@ static inline void qnegate_conditional_vartime(uint64_t *f, bool is_negate) uint64_t zero[4U] = { 0U }; if (is_negate) { - sub_mod4(n, zero, f, f); + uint64_t b_copy[4U] = { 0U }; + memcpy(b_copy, f, 4U * sizeof (uint64_t)); + sub_mod4(n, zero, b_copy, f); } } @@ -567,11 +580,7 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f) { return false; } - if (a2 < 0xffffffffffffffffULL) - { - return true; - } - if (a1 < 0x5d576e7357a4501dULL) + if (a2 < 0xffffffffffffffffULL || a1 < 0x5d576e7357a4501dULL) { return true; } @@ -607,8 +616,8 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b) 0U, 4U, 1U, - uint64_t *os = res; uint64_t x = (mask & res[i]) | (~mask & res_b_padded[i]); + uint64_t *os = res; os[i] = x;); } @@ -616,7 +625,9 @@ static inline void qsquare_times_in_place(uint64_t *out, uint32_t b) { for (uint32_t i = 0U; i < b; i++) { - qsqr(out, out); + uint64_t f_copy[4U] = { 0U }; + memcpy(f_copy, out, 4U * sizeof (uint64_t)); + qsqr(out, f_copy); } } @@ -625,7 +636,9 @@ static inline void qsquare_times(uint64_t *out, uint64_t *a, uint32_t b) memcpy(out, a, 4U * sizeof (uint64_t)); for (uint32_t i = 0U; i < b; i++) { - qsqr(out, out); + uint64_t f_copy[4U] = { 0U }; + memcpy(f_copy, out, 4U * sizeof (uint64_t)); + qsqr(out, f_copy); } } @@ -649,68 +662,130 @@ static inline void qinv(uint64_t *out, uint64_t *f) uint64_t x8[4U] = { 0U }; uint64_t x14[4U] = { 0U }; qsquare_times(x6, x_1101, 2U); - qmul(x6, x6, x_1011); + uint64_t f1_copy0[4U] = { 0U }; + memcpy(f1_copy0, x6, 4U * sizeof (uint64_t)); + qmul(x6, f1_copy0, x_1011); qsquare_times(x8, x6, 2U); - qmul(x8, x8, x_11); + uint64_t f1_copy1[4U] = { 0U }; + memcpy(f1_copy1, x8, 4U * sizeof (uint64_t)); + qmul(x8, f1_copy1, x_11); qsquare_times(x14, x8, 6U); - qmul(x14, x14, x6); + uint64_t f1_copy2[4U] = { 0U }; + memcpy(f1_copy2, x14, 4U * sizeof (uint64_t)); + qmul(x14, f1_copy2, x6); uint64_t x56[4U] = { 0U }; qsquare_times(out, x14, 14U); - qmul(out, out, x14); + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy, x14); qsquare_times(x56, out, 28U); - qmul(x56, x56, out); + uint64_t f1_copy3[4U] = { 0U }; + memcpy(f1_copy3, x56, 4U * sizeof (uint64_t)); + qmul(x56, f1_copy3, out); qsquare_times(out, x56, 56U); - qmul(out, out, x56); + 
uint64_t f1_copy4[4U] = { 0U }; + memcpy(f1_copy4, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy4, x56); qsquare_times_in_place(out, 14U); - qmul(out, out, x14); + uint64_t f1_copy5[4U] = { 0U }; + memcpy(f1_copy5, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy5, x14); qsquare_times_in_place(out, 3U); - qmul(out, out, x_101); + uint64_t f1_copy6[4U] = { 0U }; + memcpy(f1_copy6, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy6, x_101); qsquare_times_in_place(out, 4U); - qmul(out, out, x_111); + uint64_t f1_copy7[4U] = { 0U }; + memcpy(f1_copy7, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy7, x_111); qsquare_times_in_place(out, 4U); - qmul(out, out, x_101); + uint64_t f1_copy8[4U] = { 0U }; + memcpy(f1_copy8, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy8, x_101); qsquare_times_in_place(out, 5U); - qmul(out, out, x_1011); + uint64_t f1_copy9[4U] = { 0U }; + memcpy(f1_copy9, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy9, x_1011); qsquare_times_in_place(out, 4U); - qmul(out, out, x_1011); + uint64_t f1_copy10[4U] = { 0U }; + memcpy(f1_copy10, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy10, x_1011); qsquare_times_in_place(out, 4U); - qmul(out, out, x_111); + uint64_t f1_copy11[4U] = { 0U }; + memcpy(f1_copy11, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy11, x_111); qsquare_times_in_place(out, 5U); - qmul(out, out, x_111); + uint64_t f1_copy12[4U] = { 0U }; + memcpy(f1_copy12, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy12, x_111); qsquare_times_in_place(out, 6U); - qmul(out, out, x_1101); + uint64_t f1_copy13[4U] = { 0U }; + memcpy(f1_copy13, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy13, x_1101); qsquare_times_in_place(out, 4U); - qmul(out, out, x_101); + uint64_t f1_copy14[4U] = { 0U }; + memcpy(f1_copy14, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy14, x_101); qsquare_times_in_place(out, 3U); - qmul(out, out, x_111); + uint64_t f1_copy15[4U] = { 0U }; + memcpy(f1_copy15, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy15, x_111); qsquare_times_in_place(out, 5U); - qmul(out, out, x_1001); + uint64_t f1_copy16[4U] = { 0U }; + memcpy(f1_copy16, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy16, x_1001); qsquare_times_in_place(out, 6U); - qmul(out, out, x_101); + uint64_t f1_copy17[4U] = { 0U }; + memcpy(f1_copy17, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy17, x_101); qsquare_times_in_place(out, 10U); - qmul(out, out, x_111); + uint64_t f1_copy18[4U] = { 0U }; + memcpy(f1_copy18, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy18, x_111); qsquare_times_in_place(out, 4U); - qmul(out, out, x_111); + uint64_t f1_copy19[4U] = { 0U }; + memcpy(f1_copy19, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy19, x_111); qsquare_times_in_place(out, 9U); - qmul(out, out, x8); + uint64_t f1_copy20[4U] = { 0U }; + memcpy(f1_copy20, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy20, x8); qsquare_times_in_place(out, 5U); - qmul(out, out, x_1001); + uint64_t f1_copy21[4U] = { 0U }; + memcpy(f1_copy21, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy21, x_1001); qsquare_times_in_place(out, 6U); - qmul(out, out, x_1011); + uint64_t f1_copy22[4U] = { 0U }; + memcpy(f1_copy22, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy22, x_1011); qsquare_times_in_place(out, 4U); - qmul(out, out, x_1101); + uint64_t f1_copy23[4U] = { 0U }; + memcpy(f1_copy23, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy23, x_1101); qsquare_times_in_place(out, 5U); - qmul(out, out, x_11); + uint64_t f1_copy24[4U] = { 0U }; + memcpy(f1_copy24, out, 4U * sizeof (uint64_t)); + 
qmul(out, f1_copy24, x_11); qsquare_times_in_place(out, 6U); - qmul(out, out, x_1101); + uint64_t f1_copy25[4U] = { 0U }; + memcpy(f1_copy25, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy25, x_1101); qsquare_times_in_place(out, 10U); - qmul(out, out, x_1101); + uint64_t f1_copy26[4U] = { 0U }; + memcpy(f1_copy26, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy26, x_1101); qsquare_times_in_place(out, 4U); - qmul(out, out, x_1001); + uint64_t f1_copy27[4U] = { 0U }; + memcpy(f1_copy27, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy27, x_1001); qsquare_times_in_place(out, 6U); - qmul(out, out, f); + uint64_t f1_copy28[4U] = { 0U }; + memcpy(f1_copy28, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy28, f); qsquare_times_in_place(out, 8U); - qmul(out, out, x6); + uint64_t f1_copy29[4U] = { 0U }; + memcpy(f1_copy29, out, 4U * sizeof (uint64_t)); + qmul(out, f1_copy29, x6); } void Hacl_Impl_K256_Point_make_point_at_inf(uint64_t *p) @@ -735,8 +810,12 @@ static inline void to_aff_point(uint64_t *p_aff, uint64_t *p) Hacl_Impl_K256_Finv_finv(zinv, z1); Hacl_K256_Field_fmul(x, x1, zinv); Hacl_K256_Field_fmul(y, y1, zinv); - Hacl_K256_Field_fnormalize(x, x); - Hacl_K256_Field_fnormalize(y, y); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, x, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(x, f_copy); + uint64_t f_copy0[5U] = { 0U }; + memcpy(f_copy0, y, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(y, f_copy0); } static inline void to_aff_point_x(uint64_t *x, uint64_t *p) @@ -746,7 +825,9 @@ static inline void to_aff_point_x(uint64_t *x, uint64_t *p) uint64_t zinv[5U] = { 0U }; Hacl_Impl_K256_Finv_finv(zinv, z1); Hacl_K256_Field_fmul(x, x1, zinv); - Hacl_K256_Field_fnormalize(x, x); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, x, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(x, f_copy); } static inline bool is_on_curve_vartime(uint64_t *p) @@ -761,12 +842,20 @@ static inline bool is_on_curve_vartime(uint64_t *p) b[3U] = 0ULL; b[4U] = 0ULL; Hacl_K256_Field_fsqr(y2_exp, x); - Hacl_K256_Field_fmul(y2_exp, y2_exp, x); - Hacl_K256_Field_fadd(y2_exp, y2_exp, b); - Hacl_K256_Field_fnormalize(y2_exp, y2_exp); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, y2_exp, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(y2_exp, f1_copy, x); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, y2_exp, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fadd(y2_exp, f1_copy0, b); + uint64_t f_copy0[5U] = { 0U }; + memcpy(f_copy0, y2_exp, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(y2_exp, f_copy0); uint64_t y2_comp[5U] = { 0U }; Hacl_K256_Field_fsqr(y2_comp, y); - Hacl_K256_Field_fnormalize(y2_comp, y2_comp); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, y2_comp, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(y2_comp, f_copy); bool res = Hacl_K256_Field_is_felem_eq_vartime(y2_exp, y2_comp); bool res0 = res; return res0; @@ -810,14 +899,18 @@ void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p) oy[2U] = f2; oy[3U] = f3; oy[4U] = f4; - Hacl_K256_Field_fnormalize_weak(oy, oy); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, oy, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(oy, f_copy); } static inline void point_negate_conditional_vartime(uint64_t *p, bool is_negate) { if (is_negate) { - Hacl_Impl_K256_Point_point_negate(p, p); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, p, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_Point_point_negate(p, p_copy); return; } } @@ -894,14 +987,24 @@ static inline bool aff_point_decompress_vartime(uint64_t 
*x, uint64_t *y, uint8_ b[3U] = 0ULL; b[4U] = 0ULL; Hacl_K256_Field_fsqr(y2, x); - Hacl_K256_Field_fmul(y2, y2, x); - Hacl_K256_Field_fadd(y2, y2, b); - Hacl_K256_Field_fnormalize(y2, y2); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, y2, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(y2, f1_copy, x); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, y2, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fadd(y2, f1_copy0, b); + uint64_t f_copy0[5U] = { 0U }; + memcpy(f_copy0, y2, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(y2, f_copy0); Hacl_Impl_K256_Finv_fsqrt(y, y2); - Hacl_K256_Field_fnormalize(y, y); + uint64_t f_copy1[5U] = { 0U }; + memcpy(f_copy1, y, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(y, f_copy1); uint64_t y2_comp[5U] = { 0U }; Hacl_K256_Field_fsqr(y2_comp, y); - Hacl_K256_Field_fnormalize(y2_comp, y2_comp); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, y2_comp, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(y2_comp, f_copy); bool res = Hacl_K256_Field_is_felem_eq_vartime(y2, y2_comp); bool is_y_valid = res; bool is_y_valid0 = is_y_valid; @@ -932,22 +1035,42 @@ void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p) Hacl_K256_Field_fsqr(yy, y1); Hacl_K256_Field_fsqr(zz, z1); Hacl_K256_Field_fmul_small_num(x3, x1, 2ULL); - Hacl_K256_Field_fmul(x3, x3, y1); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x3, f1_copy, y1); Hacl_K256_Field_fmul(tmp1, yy, y1); Hacl_K256_Field_fmul(z3, tmp1, z1); - Hacl_K256_Field_fmul_small_num(z3, z3, 8ULL); - Hacl_K256_Field_fnormalize_weak(z3, z3); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, z3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul_small_num(z3, f_copy, 8ULL); + uint64_t f_copy1[5U] = { 0U }; + memcpy(f_copy1, z3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(z3, f_copy1); Hacl_K256_Field_fmul_small_num(bzz3, zz, 21ULL); - Hacl_K256_Field_fnormalize_weak(bzz3, bzz3); + uint64_t f_copy0[5U] = { 0U }; + memcpy(f_copy0, bzz3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(bzz3, f_copy0); Hacl_K256_Field_fmul_small_num(bzz9, bzz3, 3ULL); - Hacl_K256_Field_fsub(bzz9, yy, bzz9, 6ULL); + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, bzz9, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsub(bzz9, yy, f2_copy, 6ULL); Hacl_K256_Field_fadd(tmp1, yy, bzz3); - Hacl_K256_Field_fmul(tmp1, bzz9, tmp1); + uint64_t f2_copy0[5U] = { 0U }; + memcpy(f2_copy0, tmp1, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(tmp1, bzz9, f2_copy0); Hacl_K256_Field_fmul(y3, yy, zz); - Hacl_K256_Field_fmul(x3, x3, bzz9); - Hacl_K256_Field_fmul_small_num(y3, y3, 168ULL); - Hacl_K256_Field_fadd(y3, tmp1, y3); - Hacl_K256_Field_fnormalize_weak(y3, y3); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x3, f1_copy0, bzz9); + uint64_t f_copy2[5U] = { 0U }; + memcpy(f_copy2, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul_small_num(y3, f_copy2, 168ULL); + uint64_t f2_copy1[5U] = { 0U }; + memcpy(f2_copy1, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fadd(y3, tmp1, f2_copy1); + uint64_t f_copy3[5U] = { 0U }; + memcpy(f_copy3, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(y3, f_copy3); } void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q) @@ -976,40 +1099,76 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q) Hacl_K256_Field_fmul(zz, z1, z2); Hacl_K256_Field_fadd(xy_pairs, x1, y1); Hacl_K256_Field_fadd(tmp1, 
x2, y2); - Hacl_K256_Field_fmul(xy_pairs, xy_pairs, tmp1); + uint64_t f1_copy[5U] = { 0U }; + memcpy(f1_copy, xy_pairs, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(xy_pairs, f1_copy, tmp1); Hacl_K256_Field_fadd(tmp1, xx, yy); - Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, 4ULL); + uint64_t f1_copy0[5U] = { 0U }; + memcpy(f1_copy0, xy_pairs, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsub(xy_pairs, f1_copy0, tmp1, 4ULL); Hacl_K256_Field_fadd(yz_pairs, y1, z1); Hacl_K256_Field_fadd(tmp1, y2, z2); - Hacl_K256_Field_fmul(yz_pairs, yz_pairs, tmp1); + uint64_t f1_copy1[5U] = { 0U }; + memcpy(f1_copy1, yz_pairs, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(yz_pairs, f1_copy1, tmp1); Hacl_K256_Field_fadd(tmp1, yy, zz); - Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, 4ULL); + uint64_t f1_copy2[5U] = { 0U }; + memcpy(f1_copy2, yz_pairs, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsub(yz_pairs, f1_copy2, tmp1, 4ULL); Hacl_K256_Field_fadd(xz_pairs, x1, z1); Hacl_K256_Field_fadd(tmp1, x2, z2); - Hacl_K256_Field_fmul(xz_pairs, xz_pairs, tmp1); + uint64_t f1_copy3[5U] = { 0U }; + memcpy(f1_copy3, xz_pairs, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(xz_pairs, f1_copy3, tmp1); Hacl_K256_Field_fadd(tmp1, xx, zz); - Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, 4ULL); + uint64_t f1_copy4[5U] = { 0U }; + memcpy(f1_copy4, xz_pairs, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsub(xz_pairs, f1_copy4, tmp1, 4ULL); Hacl_K256_Field_fmul_small_num(tmp1, zz, 21ULL); - Hacl_K256_Field_fnormalize_weak(tmp1, tmp1); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, tmp1, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(tmp1, f_copy); Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, 2ULL); Hacl_K256_Field_fadd(yy_p_bzz3, yy, tmp1); Hacl_K256_Field_fmul_small_num(x3, yz_pairs, 21ULL); - Hacl_K256_Field_fnormalize_weak(x3, x3); + uint64_t f_copy0[5U] = { 0U }; + memcpy(f_copy0, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(x3, f_copy0); Hacl_K256_Field_fmul_small_num(z3, xx, 3ULL); Hacl_K256_Field_fmul_small_num(y3, z3, 21ULL); - Hacl_K256_Field_fnormalize_weak(y3, y3); + uint64_t f_copy1[5U] = { 0U }; + memcpy(f_copy1, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(y3, f_copy1); Hacl_K256_Field_fmul(tmp1, xy_pairs, yy_m_bzz3); - Hacl_K256_Field_fmul(x3, x3, xz_pairs); - Hacl_K256_Field_fsub(x3, tmp1, x3, 2ULL); - Hacl_K256_Field_fnormalize_weak(x3, x3); + uint64_t f1_copy5[5U] = { 0U }; + memcpy(f1_copy5, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(x3, f1_copy5, xz_pairs); + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fsub(x3, tmp1, f2_copy, 2ULL); + uint64_t f_copy2[5U] = { 0U }; + memcpy(f_copy2, x3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(x3, f_copy2); Hacl_K256_Field_fmul(tmp1, yy_p_bzz3, yy_m_bzz3); - Hacl_K256_Field_fmul(y3, y3, xz_pairs); - Hacl_K256_Field_fadd(y3, tmp1, y3); - Hacl_K256_Field_fnormalize_weak(y3, y3); + uint64_t f1_copy6[5U] = { 0U }; + memcpy(f1_copy6, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(y3, f1_copy6, xz_pairs); + uint64_t f2_copy0[5U] = { 0U }; + memcpy(f2_copy0, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fadd(y3, tmp1, f2_copy0); + uint64_t f_copy3[5U] = { 0U }; + memcpy(f_copy3, y3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(y3, f_copy3); Hacl_K256_Field_fmul(tmp1, yz_pairs, yy_p_bzz3); - Hacl_K256_Field_fmul(z3, z3, xy_pairs); - Hacl_K256_Field_fadd(z3, tmp1, z3); - Hacl_K256_Field_fnormalize_weak(z3, z3); + uint64_t f1_copy7[5U] = 
{ 0U }; + memcpy(f1_copy7, z3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(z3, f1_copy7, xy_pairs); + uint64_t f2_copy1[5U] = { 0U }; + memcpy(f2_copy1, z3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fadd(z3, tmp1, f2_copy1); + uint64_t f_copy4[5U] = { 0U }; + memcpy(f_copy4, z3, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize_weak(z3, f_copy4); } static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k) @@ -1034,13 +1193,19 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k) tmp2[1U] = 0x8a280ac50774346dULL; tmp2[2U] = 0xfffffffffffffffeULL; tmp2[3U] = 0xffffffffffffffffULL; - qmul(r1, r1, tmp1); - qmul(r2, r2, tmp2); + uint64_t f1_copy[4U] = { 0U }; + memcpy(f1_copy, r1, 4U * sizeof (uint64_t)); + qmul(r1, f1_copy, tmp1); + uint64_t f1_copy0[4U] = { 0U }; + memcpy(f1_copy0, r2, 4U * sizeof (uint64_t)); + qmul(r2, f1_copy0, tmp2); tmp1[0U] = 0xe0cfc810b51283cfULL; tmp1[1U] = 0xa880b9fc8ec739c2ULL; tmp1[2U] = 0x5ad9e3fd77ed9ba4ULL; tmp1[3U] = 0xac9c52b33fa3cf1fULL; - qadd(r2, r1, r2); + uint64_t f2_copy[4U] = { 0U }; + memcpy(f2_copy, r2, 4U * sizeof (uint64_t)); + qadd(r2, r1, f2_copy); qmul(tmp2, r2, tmp1); qadd(r1, k, tmp2); } @@ -1081,7 +1246,9 @@ static inline void point_mul_lambda_inplace(uint64_t *res) beta[2U] = 0xc3434e99cf049ULL; beta[3U] = 0x7106e64479eaULL; beta[4U] = 0x7ae96a2b657cULL; - Hacl_K256_Field_fmul(rx, beta, rx); + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, rx, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fmul(rx, beta, f2_copy); } typedef struct __bool_bool_s @@ -1123,23 +1290,35 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t uint64_t *t1 = table + 15U; Hacl_Impl_K256_Point_make_point_at_inf(t0); memcpy(t1, q, 15U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 15U; - Hacl_Impl_K256_PointDouble_point_double(tmp, t11); + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, t11, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(tmp, p_copy0); memcpy(table + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) * 15U; - Hacl_Impl_K256_PointAdd_point_add(tmp, q, t2); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, q, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(tmp, p_copy, t2); memcpy(table + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t));); Hacl_Impl_K256_Point_make_point_at_inf(out); uint64_t tmp0[15U] = { 0U }; for (uint32_t i0 = 0U; i0 < 64U; i0++) { - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(out, p_copy);); uint32_t k = 256U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)table, 15U * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -1151,10 +1330,12 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t 0U, 15U, 1U, - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x;);); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy, tmp0); } } @@ -1171,8 +1352,8 @@ static inline void precomp_get_consttime(const uint64_t *table, 
uint64_t bits_l, 0U, 15U, 1U, - uint64_t *os = tmp; uint64_t x = (c & res_j[i]) | (~c & tmp[i]); + uint64_t *os = tmp; os[i] = x;);); } @@ -1231,23 +1412,41 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) 0U, 16U, 1U, - KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR4(i0, + 0U, + 4U, + 1U, + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(out, p_copy);); uint32_t k = 64U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4); precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy, tmp); uint32_t k0 = 64U - 4U * i - 4U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4); precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp); + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy0, tmp); uint32_t k1 = 64U - 4U * i - 4U; uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4); precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp); + uint64_t p_copy1[15U] = { 0U }; + memcpy(p_copy1, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy1, tmp); uint32_t k2 = 64U - 4U * i - 4U; uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_basepoint_table_w4); precomp_get_consttime(Hacl_K256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);); + uint64_t p_copy2[15U] = { 0U }; + memcpy(p_copy2, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy2, tmp);); } static inline void @@ -1275,15 +1474,20 @@ point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2, uint64_t *t1 = table2 + 15U; Hacl_Impl_K256_Point_make_point_at_inf(t0); memcpy(t1, q2, 15U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table2); KRML_MAYBE_FOR15(i, 0U, 15U, 1U, uint64_t *t11 = table2 + (i + 1U) * 15U; - Hacl_Impl_K256_PointDouble_point_double(tmp, t11); + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, t11, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(tmp, p_copy0); memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t)); uint64_t *t2 = table2 + (2U * i + 2U) * 15U; - Hacl_Impl_K256_PointAdd_point_add(tmp, q2, t2); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, q2, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(tmp, p_copy, t2); memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t));); uint64_t tmp0[15U] = { 0U }; uint32_t i0 = 255U; @@ -1296,25 +1500,39 @@ point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2, uint32_t bits_l320 = (uint32_t)bits_c0; const uint64_t *a_bits_l0 = table2 + bits_l320 * 15U; memcpy(tmp0, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t)); - 
Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy, tmp0); uint64_t tmp1[15U] = { 0U }; for (uint32_t i = 0U; i < 51U; i++) { - KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(out, p_copy0);); uint32_t k = 255U - 5U * i - 5U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U); + KRML_MAYBE_UNUSED_VAR(table2); uint32_t bits_l321 = (uint32_t)bits_l; const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U; memcpy(tmp1, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t)); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1); + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy0, tmp1); uint32_t k0 = 255U - 5U * i - 5U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_basepoint_table_w5); uint32_t bits_l322 = (uint32_t)bits_l0; const uint64_t *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 15U; memcpy(tmp1, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t)); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1); + uint64_t p_copy1[15U] = { 0U }; + memcpy(p_copy1, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy1, tmp1); } } @@ -1338,15 +1556,20 @@ point_mul_g_double_split_lambda_table( uint64_t *t1 = table2 + 15U; Hacl_Impl_K256_Point_make_point_at_inf(t0); memcpy(t1, p2, 15U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table2); KRML_MAYBE_FOR15(i, 0U, 15U, 1U, uint64_t *t11 = table2 + (i + 1U) * 15U; - Hacl_Impl_K256_PointDouble_point_double(tmp, t11); + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, t11, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(tmp, p_copy0); memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t)); uint64_t *t2 = table2 + (2U * i + 2U) * 15U; - Hacl_Impl_K256_PointAdd_point_add(tmp, p2, t2); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, p2, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(tmp, p_copy, t2); memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t));); uint64_t tmp0[15U] = { 0U }; uint64_t tmp1[15U] = { 0U }; @@ -1365,7 +1588,9 @@ point_mul_g_double_split_lambda_table( memcpy(tmp1, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t)); point_negate_conditional_vartime(tmp1, is_negate2); point_mul_lambda_inplace(tmp1); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1); + uint64_t p_copy[15U] = { 0U }; + memcpy(p_copy, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy, tmp1); uint64_t tmp10[15U] = { 0U }; uint32_t i2 = 125U; uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, i2, 5U); @@ -1380,29 +1605,46 @@ point_mul_g_double_split_lambda_table( memcpy(tmp10, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t)); point_negate_conditional_vartime(tmp10, is_negate4); point_mul_lambda_inplace(tmp10); - Hacl_Impl_K256_PointAdd_point_add(tmp0, tmp0, tmp10); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0); + uint64_t p_copy0[15U] = { 0U }; + memcpy(p_copy0, tmp0, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(tmp0, p_copy0, tmp10); + uint64_t p_copy1[15U] = { 0U }; + memcpy(p_copy1, out, 15U * sizeof (uint64_t)); + 
Hacl_Impl_K256_PointAdd_point_add(out, p_copy1, tmp0); uint64_t tmp2[15U] = { 0U }; for (uint32_t i = 0U; i < 25U; i++) { - KRML_MAYBE_FOR5(i4, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out);); + KRML_MAYBE_FOR5(i4, + 0U, + 5U, + 1U, + uint64_t p_copy2[15U] = { 0U }; + memcpy(p_copy2, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointDouble_point_double(out, p_copy2);); uint32_t k = 125U - 5U * i - 5U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, k, 5U); + KRML_MAYBE_UNUSED_VAR(table2); uint32_t bits_l323 = (uint32_t)bits_l; const uint64_t *a_bits_l3 = table2 + bits_l323 * 15U; memcpy(tmp2, (uint64_t *)a_bits_l3, 15U * sizeof (uint64_t)); point_negate_conditional_vartime(tmp2, is_negate4); point_mul_lambda_inplace(tmp2); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2); + uint64_t p_copy2[15U] = { 0U }; + memcpy(p_copy2, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy2, tmp2); uint32_t k0 = 125U - 5U * i - 5U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, k0, 5U); + KRML_MAYBE_UNUSED_VAR(table2); uint32_t bits_l324 = (uint32_t)bits_l0; const uint64_t *a_bits_l4 = table2 + bits_l324 * 15U; memcpy(tmp2, (uint64_t *)a_bits_l4, 15U * sizeof (uint64_t)); point_negate_conditional_vartime(tmp2, is_negate3); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2); + uint64_t p_copy3[15U] = { 0U }; + memcpy(p_copy3, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy3, tmp2); uint32_t k1 = 125U - 5U * i - 5U; uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, k1, 5U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_basepoint_table_w5); uint32_t bits_l325 = (uint32_t)bits_l1; const uint64_t @@ -1410,16 +1652,21 @@ point_mul_g_double_split_lambda_table( memcpy(tmp2, (uint64_t *)a_bits_l5, 15U * sizeof (uint64_t)); point_negate_conditional_vartime(tmp2, is_negate2); point_mul_lambda_inplace(tmp2); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2); + uint64_t p_copy4[15U] = { 0U }; + memcpy(p_copy4, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy4, tmp2); uint32_t k2 = 125U - 5U * i - 5U; uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, k2, 5U); + KRML_HOST_IGNORE(Hacl_K256_PrecompTable_precomp_basepoint_table_w5); uint32_t bits_l326 = (uint32_t)bits_l2; const uint64_t *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * 15U; memcpy(tmp2, (uint64_t *)a_bits_l6, 15U * sizeof (uint64_t)); point_negate_conditional_vartime(tmp2, is_negate1); - Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2); + uint64_t p_copy5[15U] = { 0U }; + memcpy(p_copy5, out, 15U * sizeof (uint64_t)); + Hacl_Impl_K256_PointAdd_point_add(out, p_copy5, tmp2); } } @@ -1520,7 +1767,9 @@ static inline bool fmul_eq_vartime(uint64_t *r, uint64_t *z, uint64_t *x) { uint64_t tmp[5U] = { 0U }; Hacl_K256_Field_fmul(tmp, r, z); - Hacl_K256_Field_fnormalize(tmp, tmp); + uint64_t f_copy[5U] = { 0U }; + memcpy(f_copy, tmp, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fnormalize(tmp, f_copy); bool b = Hacl_K256_Field_is_felem_eq_vartime(tmp, x); return b; } @@ -1573,9 +1822,9 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( 0U, 4U, 1U, - uint64_t *os = d_a; uint64_t uu____0 = oneq10[i]; uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0)); + uint64_t *os = d_a; os[i] = x;); uint64_t is_sk_valid = is_b_valid0; uint64_t is_b_valid = load_qelem_check(k_q, nonce); @@ -1584,9 +1833,9 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( 0U, 4U, 1U, - uint64_t *os = k_q; uint64_t uu____1 = 
oneq1[i]; uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1)); + uint64_t *os = k_q; os[i] = x;); uint64_t is_nonce_valid = is_b_valid; uint64_t are_sk_nonce_valid = is_sk_valid & is_nonce_valid; @@ -1602,8 +1851,12 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( load_qelem_modq(z, msgHash); qinv(kinv, k_q); qmul(s_q, r_q, d_a); - qadd(s_q, z, s_q); - qmul(s_q, kinv, s_q); + uint64_t f2_copy[4U] = { 0U }; + memcpy(f2_copy, s_q, 4U * sizeof (uint64_t)); + qadd(s_q, z, f2_copy); + uint64_t f2_copy0[4U] = { 0U }; + memcpy(f2_copy0, s_q, 4U * sizeof (uint64_t)); + qmul(s_q, kinv, f2_copy0); store_qelem(signature, r_q); store_qelem(signature + 32U, s_q); uint64_t is_r_zero = is_qelem_zero(r_q); @@ -1706,7 +1959,9 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t tmp_q[2U] = 0xffffffebaaedcULL; tmp_q[3U] = 0xfffffffffffffULL; tmp_q[4U] = 0xffffffffffffULL; - Hacl_K256_Field_fadd(tmp_q, r_fe, tmp_q); + uint64_t f2_copy[5U] = { 0U }; + memcpy(f2_copy, tmp_q, 5U * sizeof (uint64_t)); + Hacl_K256_Field_fadd(tmp_q, r_fe, f2_copy); return fmul_eq_vartime(tmp_q, z, tmp_x); } return false; @@ -1952,8 +2207,8 @@ bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk) { uint64_t xa[5U] = { 0U }; uint64_t ya[5U] = { 0U }; - uint8_t *pk_xb = pk + 1U; bool b = aff_point_decompress_vartime(xa, ya, pk); + uint8_t *pk_xb = pk + 1U; if (b) { memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t)); @@ -2059,9 +2314,9 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key) 0U, 4U, 1U, - uint64_t *os = sk; uint64_t uu____0 = oneq[i]; uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0)); + uint64_t *os = sk; os[i] = x;); uint64_t is_sk_valid = is_b_valid; point_mul_g(pk, sk); @@ -2094,9 +2349,9 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t 0U, 4U, 1U, - uint64_t *os = sk; uint64_t uu____0 = oneq[i]; uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0)); + uint64_t *os = sk; os[i] = x;); uint64_t is_sk_valid = is_b_valid; if (is_pk_valid) diff --git a/src/Hacl_MAC_Poly1305.c b/src/Hacl_MAC_Poly1305.c index 28cbca5a..a3816bfa 100644 --- a/src/Hacl_MAC_Poly1305.c +++ b/src/Hacl_MAC_Poly1305.c @@ -445,6 +445,7 @@ Hacl_MAC_Poly1305_state_t *Hacl_MAC_Poly1305_malloc(uint8_t *key) uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t)); uint64_t *block_state = r1; + Hacl_MAC_Poly1305_poly1305_init(block_state, key); uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t)); memcpy(k_, key, 32U * sizeof (uint8_t)); uint8_t *k_0 = k_; @@ -453,22 +454,19 @@ Hacl_MAC_Poly1305_state_t *Hacl_MAC_Poly1305_malloc(uint8_t *key) Hacl_MAC_Poly1305_state_t *p = (Hacl_MAC_Poly1305_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_MAC_Poly1305_state_t)); p[0U] = s; - Hacl_MAC_Poly1305_poly1305_init(block_state, key); return p; } void Hacl_MAC_Poly1305_reset(Hacl_MAC_Poly1305_state_t *state, uint8_t *key) { - Hacl_MAC_Poly1305_state_t scrut = *state; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; + uint64_t *block_state = (*state).block_state; + uint8_t *k_ = (*state).p_key; Hacl_MAC_Poly1305_poly1305_init(block_state, key); memcpy(k_, key, 32U * sizeof (uint8_t)); uint8_t *k_1 = k_; - Hacl_MAC_Poly1305_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; + 
state->p_key = k_1; } /** @@ -477,8 +475,8 @@ void Hacl_MAC_Poly1305_reset(Hacl_MAC_Poly1305_state_t *state, uint8_t *key) Hacl_Streaming_Types_error_code Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint32_t chunk_len) { - Hacl_MAC_Poly1305_state_t s = *state; - uint64_t total_len = s.total_len; + uint64_t *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -494,11 +492,9 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 } if (chunk_len <= 16U - sz) { - Hacl_MAC_Poly1305_state_t s1 = *state; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL) { @@ -511,24 +507,14 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_MAC_Poly1305_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2, - .p_key = k_1 - } - ); + state->total_len = total_len2; + state->p_key = k_1; } else if (sz == 0U) { - Hacl_MAC_Poly1305_state_t s1 = *state; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL) { @@ -540,7 +526,7 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 } if (!(sz1 == 0U)) { - poly1305_update(block_state1, 16U, buf); + poly1305_update(block_state, 16U, buf); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)16U == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -556,30 +542,20 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - poly1305_update(block_state1, data1_len, data1); + poly1305_update(block_state, data1_len, data1); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_MAC_Poly1305_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len, - .p_key = k_1 - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; + state->p_key = k_1; } else { uint32_t diff = 16U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_MAC_Poly1305_state_t s1 = *state; - uint64_t *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz10; if (total_len10 % (uint64_t)16U == 0ULL && total_len10 > 0ULL) { @@ -589,24 +565,14 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 { sz10 = (uint32_t)(total_len10 % (uint64_t)16U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - 
(Hacl_MAC_Poly1305_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2, - .p_key = k_1 - } - ); - Hacl_MAC_Poly1305_state_t s10 = *state; - uint64_t *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; - uint8_t *k_10 = s10.p_key; + state->total_len = total_len2; + state->p_key = k_1; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_10 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL) { @@ -618,7 +584,7 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 } if (!(sz1 == 0U)) { - poly1305_update(block_state1, 16U, buf); + poly1305_update(block_state, 16U, buf0); } uint32_t ite; if @@ -635,30 +601,21 @@ Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - poly1305_update(block_state1, data1_len, data1); - uint8_t *dst = buf; + poly1305_update(block_state, data1_len, data1); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_MAC_Poly1305_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff), - .p_key = k_10 - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); + state->p_key = k_10; } return Hacl_Streaming_Types_Success; } void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output) { - Hacl_MAC_Poly1305_state_t scrut = *state; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint8_t *k_ = scrut.p_key; + uint64_t *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; + uint8_t *k_ = (*state).p_key; uint32_t r; if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL) { @@ -672,6 +629,7 @@ void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output) uint64_t r1[25U] = { 0U }; uint64_t *tmp_block_state = r1; memcpy(tmp_block_state, block_state, 25U * sizeof (uint64_t)); + uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 16U == 0U && r > 0U) { @@ -682,7 +640,6 @@ void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output) ite = r % 16U; } uint8_t *buf_last = buf_1 + r - ite; - uint8_t *buf_multi = buf_1; poly1305_update(tmp_block_state, 0U, buf_multi); poly1305_update(tmp_block_state, r, buf_last); uint64_t tmp[25U] = { 0U }; diff --git a/src/Hacl_MAC_Poly1305_Simd128.c b/src/Hacl_MAC_Poly1305_Simd128.c index 17e26978..cbfda978 100644 --- a/src/Hacl_MAC_Poly1305_Simd128.c +++ b/src/Hacl_MAC_Poly1305_Simd128.c @@ -1310,6 +1310,7 @@ Hacl_MAC_Poly1305_Simd128_state_t *Hacl_MAC_Poly1305_Simd128_malloc(uint8_t *key sizeof (Lib_IntVector_Intrinsics_vec128) * 25U); memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec128)); Lib_IntVector_Intrinsics_vec128 *block_state = r1; + Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key); uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t)); memcpy(k_, key, 32U * sizeof (uint8_t)); uint8_t *k_0 = k_; @@ -1321,22 +1322,19 @@ Hacl_MAC_Poly1305_Simd128_state_t *Hacl_MAC_Poly1305_Simd128_malloc(uint8_t *key Hacl_MAC_Poly1305_Simd128_state_t )); p[0U] = s; - Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key); return p; } void Hacl_MAC_Poly1305_Simd128_reset(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t 
*key) { - Hacl_MAC_Poly1305_Simd128_state_t scrut = *state; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; + Lib_IntVector_Intrinsics_vec128 *block_state = (*state).block_state; + uint8_t *k_ = (*state).p_key; Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key); memcpy(k_, key, 32U * sizeof (uint8_t)); uint8_t *k_1 = k_; - Hacl_MAC_Poly1305_Simd128_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; + state->p_key = k_1; } /** @@ -1349,8 +1347,8 @@ Hacl_MAC_Poly1305_Simd128_update( uint32_t chunk_len ) { - Hacl_MAC_Poly1305_Simd128_state_t s = *state; - uint64_t total_len = s.total_len; + Lib_IntVector_Intrinsics_vec128 *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -1366,11 +1364,9 @@ Hacl_MAC_Poly1305_Simd128_update( } if (chunk_len <= 32U - sz) { - Hacl_MAC_Poly1305_Simd128_state_t s1 = *state; - Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL) { @@ -1383,24 +1379,14 @@ Hacl_MAC_Poly1305_Simd128_update( uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_MAC_Poly1305_Simd128_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2, - .p_key = k_1 - } - ); + state->total_len = total_len2; + state->p_key = k_1; } else if (sz == 0U) { - Hacl_MAC_Poly1305_Simd128_state_t s1 = *state; - Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL) { @@ -1412,7 +1398,7 @@ Hacl_MAC_Poly1305_Simd128_update( } if (!(sz1 == 0U)) { - poly1305_update(block_state1, 32U, buf); + poly1305_update(block_state, 32U, buf); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)32U == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -1428,30 +1414,20 @@ Hacl_MAC_Poly1305_Simd128_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - poly1305_update(block_state1, data1_len, data1); + poly1305_update(block_state, data1_len, data1); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_MAC_Poly1305_Simd128_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)chunk_len, - .p_key = k_1 - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; + state->p_key = k_1; } else { uint32_t diff = 32U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_MAC_Poly1305_Simd128_state_t s1 = *state; - Lib_IntVector_Intrinsics_vec128 *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; + uint8_t 
*k_1 = (*state).p_key; uint32_t sz10; if (total_len10 % (uint64_t)32U == 0ULL && total_len10 > 0ULL) { @@ -1461,24 +1437,14 @@ Hacl_MAC_Poly1305_Simd128_update( { sz10 = (uint32_t)(total_len10 % (uint64_t)32U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_MAC_Poly1305_Simd128_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2, - .p_key = k_1 - } - ); - Hacl_MAC_Poly1305_Simd128_state_t s10 = *state; - Lib_IntVector_Intrinsics_vec128 *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; - uint8_t *k_10 = s10.p_key; + state->total_len = total_len2; + state->p_key = k_1; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_10 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL) { @@ -1490,7 +1456,7 @@ Hacl_MAC_Poly1305_Simd128_update( } if (!(sz1 == 0U)) { - poly1305_update(block_state1, 32U, buf); + poly1305_update(block_state, 32U, buf0); } uint32_t ite; if @@ -1507,19 +1473,11 @@ Hacl_MAC_Poly1305_Simd128_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - poly1305_update(block_state1, data1_len, data1); - uint8_t *dst = buf; + poly1305_update(block_state, data1_len, data1); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_MAC_Poly1305_Simd128_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff), - .p_key = k_10 - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); + state->p_key = k_10; } return Hacl_Streaming_Types_Success; } @@ -1527,11 +1485,10 @@ Hacl_MAC_Poly1305_Simd128_update( void Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *output) { - Hacl_MAC_Poly1305_Simd128_state_t scrut = *state; - Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint8_t *k_ = scrut.p_key; + Lib_IntVector_Intrinsics_vec128 *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; + uint8_t *k_ = (*state).p_key; uint32_t r; if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL) { @@ -1545,6 +1502,7 @@ Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8 KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U }; Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1; memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128)); + uint8_t *buf_multi = buf_1; uint32_t ite0; if (r % 16U == 0U && r > 0U) { @@ -1555,7 +1513,6 @@ Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8 ite0 = r % 16U; } uint8_t *buf_last = buf_1 + r - ite0; - uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 16U == 0U && r > 0U) { diff --git a/src/Hacl_MAC_Poly1305_Simd256.c b/src/Hacl_MAC_Poly1305_Simd256.c index f25e8fff..b02880d4 100644 --- a/src/Hacl_MAC_Poly1305_Simd256.c +++ b/src/Hacl_MAC_Poly1305_Simd256.c @@ -1761,6 +1761,7 @@ Hacl_MAC_Poly1305_Simd256_state_t *Hacl_MAC_Poly1305_Simd256_malloc(uint8_t *key sizeof (Lib_IntVector_Intrinsics_vec256) * 25U); memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); Lib_IntVector_Intrinsics_vec256 
*block_state = r1; + Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key); uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t)); memcpy(k_, key, 32U * sizeof (uint8_t)); uint8_t *k_0 = k_; @@ -1772,22 +1773,19 @@ Hacl_MAC_Poly1305_Simd256_state_t *Hacl_MAC_Poly1305_Simd256_malloc(uint8_t *key Hacl_MAC_Poly1305_Simd256_state_t )); p[0U] = s; - Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key); return p; } void Hacl_MAC_Poly1305_Simd256_reset(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *key) { - Hacl_MAC_Poly1305_Simd256_state_t scrut = *state; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; + Lib_IntVector_Intrinsics_vec256 *block_state = (*state).block_state; + uint8_t *k_ = (*state).p_key; Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key); memcpy(k_, key, 32U * sizeof (uint8_t)); uint8_t *k_1 = k_; - Hacl_MAC_Poly1305_Simd256_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 }; - state[0U] = tmp; + uint64_t total_len = (uint64_t)0U; + state->total_len = total_len; + state->p_key = k_1; } /** @@ -1800,8 +1798,8 @@ Hacl_MAC_Poly1305_Simd256_update( uint32_t chunk_len ) { - Hacl_MAC_Poly1305_Simd256_state_t s = *state; - uint64_t total_len = s.total_len; + Lib_IntVector_Intrinsics_vec256 *block_state = (*state).block_state; + uint64_t total_len = (*state).total_len; if ((uint64_t)chunk_len > 0xffffffffULL - total_len) { return Hacl_Streaming_Types_MaximumLengthExceeded; @@ -1817,11 +1815,9 @@ Hacl_MAC_Poly1305_Simd256_update( } if (chunk_len <= 64U - sz) { - Hacl_MAC_Poly1305_Simd256_state_t s1 = *state; - Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1834,24 +1830,14 @@ Hacl_MAC_Poly1305_Simd256_update( uint8_t *buf2 = buf + sz1; memcpy(buf2, chunk, chunk_len * sizeof (uint8_t)); uint64_t total_len2 = total_len1 + (uint64_t)chunk_len; - *state - = - ( - (Hacl_MAC_Poly1305_Simd256_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len2, - .p_key = k_1 - } - ); + state->total_len = total_len2; + state->p_key = k_1; } else if (sz == 0U) { - Hacl_MAC_Poly1305_Simd256_state_t s1 = *state; - Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1863,7 +1849,7 @@ Hacl_MAC_Poly1305_Simd256_update( } if (!(sz1 == 0U)) { - poly1305_update(block_state1, 64U, buf); + poly1305_update(block_state, 64U, buf); } uint32_t ite; if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL) @@ -1879,30 +1865,20 @@ Hacl_MAC_Poly1305_Simd256_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - poly1305_update(block_state1, data1_len, data1); + poly1305_update(block_state, data1_len, data1); uint8_t *dst = buf; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_MAC_Poly1305_Simd256_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = 
total_len1 + (uint64_t)chunk_len, - .p_key = k_1 - } - ); + state->total_len = total_len1 + (uint64_t)chunk_len; + state->p_key = k_1; } else { uint32_t diff = 64U - sz; uint8_t *chunk1 = chunk; uint8_t *chunk2 = chunk + diff; - Hacl_MAC_Poly1305_Simd256_state_t s1 = *state; - Lib_IntVector_Intrinsics_vec256 *block_state10 = s1.block_state; - uint8_t *buf0 = s1.buf; - uint64_t total_len10 = s1.total_len; - uint8_t *k_1 = s1.p_key; + uint8_t *buf = (*state).buf; + uint64_t total_len10 = (*state).total_len; + uint8_t *k_1 = (*state).p_key; uint32_t sz10; if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL) { @@ -1912,24 +1888,14 @@ Hacl_MAC_Poly1305_Simd256_update( { sz10 = (uint32_t)(total_len10 % (uint64_t)64U); } - uint8_t *buf2 = buf0 + sz10; + uint8_t *buf2 = buf + sz10; memcpy(buf2, chunk1, diff * sizeof (uint8_t)); uint64_t total_len2 = total_len10 + (uint64_t)diff; - *state - = - ( - (Hacl_MAC_Poly1305_Simd256_state_t){ - .block_state = block_state10, - .buf = buf0, - .total_len = total_len2, - .p_key = k_1 - } - ); - Hacl_MAC_Poly1305_Simd256_state_t s10 = *state; - Lib_IntVector_Intrinsics_vec256 *block_state1 = s10.block_state; - uint8_t *buf = s10.buf; - uint64_t total_len1 = s10.total_len; - uint8_t *k_10 = s10.p_key; + state->total_len = total_len2; + state->p_key = k_1; + uint8_t *buf0 = (*state).buf; + uint64_t total_len1 = (*state).total_len; + uint8_t *k_10 = (*state).p_key; uint32_t sz1; if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL) { @@ -1941,7 +1907,7 @@ Hacl_MAC_Poly1305_Simd256_update( } if (!(sz1 == 0U)) { - poly1305_update(block_state1, 64U, buf); + poly1305_update(block_state, 64U, buf0); } uint32_t ite; if @@ -1958,19 +1924,11 @@ Hacl_MAC_Poly1305_Simd256_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - poly1305_update(block_state1, data1_len, data1); - uint8_t *dst = buf; + poly1305_update(block_state, data1_len, data1); + uint8_t *dst = buf0; memcpy(dst, data2, data2_len * sizeof (uint8_t)); - *state - = - ( - (Hacl_MAC_Poly1305_Simd256_state_t){ - .block_state = block_state1, - .buf = buf, - .total_len = total_len1 + (uint64_t)(chunk_len - diff), - .p_key = k_10 - } - ); + state->total_len = total_len1 + (uint64_t)(chunk_len - diff); + state->p_key = k_10; } return Hacl_Streaming_Types_Success; } @@ -1978,11 +1936,10 @@ Hacl_MAC_Poly1305_Simd256_update( void Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *output) { - Hacl_MAC_Poly1305_Simd256_state_t scrut = *state; - Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint8_t *k_ = scrut.p_key; + Lib_IntVector_Intrinsics_vec256 *block_state = (*state).block_state; + uint8_t *buf_ = (*state).buf; + uint64_t total_len = (*state).total_len; + uint8_t *k_ = (*state).p_key; uint32_t r; if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL) { @@ -1996,6 +1953,7 @@ Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8 KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U }; Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1; memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + uint8_t *buf_multi = buf_1; uint32_t ite0; if (r % 16U == 0U && r > 0U) { @@ -2006,7 +1964,6 @@ Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8 ite0 = r % 16U; } uint8_t *buf_last = buf_1 + r - 
ite0; - uint8_t *buf_multi = buf_1; uint32_t ite; if (r % 16U == 0U && r > 0U) { diff --git a/src/Hacl_NaCl.c b/src/Hacl_NaCl.c index a1bbd25c..54cf0171 100644 --- a/src/Hacl_NaCl.c +++ b/src/Hacl_NaCl.c @@ -62,8 +62,8 @@ secretbox_detached(uint32_t mlen, uint8_t *c, uint8_t *tag, uint8_t *k, uint8_t memcpy(block0, m0, mlen0 * sizeof (uint8_t)); for (uint32_t i = 0U; i < 32U; i++) { - uint8_t *os = block0; uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i]; + uint8_t *os = block0; os[i] = x; } uint8_t *c0 = c; @@ -117,8 +117,8 @@ secretbox_open_detached( memcpy(block0, c0, mlen0 * sizeof (uint8_t)); for (uint32_t i = 0U; i < 32U; i++) { - uint8_t *os = block0; uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i]; + uint8_t *os = block0; os[i] = x; } uint8_t *m0 = m; diff --git a/src/Hacl_P256.c b/src/Hacl_P256.c index 609fed81..c1db5d68 100644 --- a/src/Hacl_P256.c +++ b/src/Hacl_P256.c @@ -77,9 +77,9 @@ static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_ 0U, 4U, 1U, - uint64_t *os = res; uint64_t uu____0 = x[i]; uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0)); + uint64_t *os = res; os[i] = x1;); } @@ -131,8 +131,8 @@ static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t 0U, 4U, 1U, - uint64_t *os = res; uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x1;); } @@ -210,8 +210,8 @@ static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t 0U, 4U, 1U, - uint64_t *os = res; uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]); + uint64_t *os = res; os[i] = x1;); } @@ -250,8 +250,8 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x) 0U, 4U, 1U, - uint64_t *ab = x; uint64_t a_j = x[i0]; + uint64_t *ab = x; uint64_t *res_j = res + i0; uint64_t c = 0ULL; for (uint32_t i = 0U; i < i0 / 4U; i++) @@ -277,7 +277,12 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x) } uint64_t r = c; res[i0 + i0] = r;); - uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res); + uint64_t a_copy0[8U] = { 0U }; + uint64_t b_copy0[8U] = { 0U }; + memcpy(a_copy0, res, 8U * sizeof (uint64_t)); + memcpy(b_copy0, res, 8U * sizeof (uint64_t)); + uint64_t r = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, a_copy0, b_copy0, res); + uint64_t c0 = r; KRML_MAYBE_UNUSED_VAR(c0); uint64_t tmp[8U] = { 0U }; KRML_MAYBE_FOR4(i, @@ -289,7 +294,12 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x) uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); tmp[2U * i] = lo; tmp[2U * i + 1U] = hi;); - uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res); + uint64_t a_copy[8U] = { 0U }; + uint64_t b_copy[8U] = { 0U }; + memcpy(a_copy, res, 8U * sizeof (uint64_t)); + memcpy(b_copy, tmp, 8U * sizeof (uint64_t)); + uint64_t r0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, a_copy, b_copy, res); + uint64_t c1 = r0; KRML_MAYBE_UNUSED_VAR(c1); } @@ -306,9 +316,9 @@ static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b) 0U, 4U, 1U, - uint64_t *os = res; uint64_t u = load64_be(b + (4U - i - 1U) * 8U); uint64_t x = u; + uint64_t *os = res; os[i] = x;); } @@ -394,8 +404,11 @@ static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f) { uint64_t tmp[4U] = { 0U }; make_prime(tmp); - uint64_t c = bn_sub4(tmp, f, tmp); - return 0ULL - c; + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, tmp, 4U * sizeof (uint64_t)); + uint64_t c = bn_sub4(tmp, f, y_copy); + uint64_t c0 = c; + return 0ULL - c0; } static inline uint64_t feq_mask(uint64_t *a, uint64_t *b) @@ -423,7 +436,9 @@ static inline void 
fnegate_conditional_vartime(uint64_t *f, bool is_negate) uint64_t zero[4U] = { 0U }; if (is_negate) { - fsub0(f, zero, f); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, f, 4U * sizeof (uint64_t)); + fsub0(f, zero, y_copy); } } @@ -455,8 +470,8 @@ static inline void mont_reduction(uint64_t *res, uint64_t *x) } uint64_t r = c; uint64_t c1 = r; - uint64_t *resb = x + 4U + i0; uint64_t res_j = x[4U + i0]; + uint64_t *resb = x + 4U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);); memcpy(res, x + 4U, 4U * sizeof (uint64_t)); uint64_t c00 = c0; @@ -486,8 +501,8 @@ static inline void mont_reduction(uint64_t *res, uint64_t *x) 0U, 4U, 1U, - uint64_t *os = res; uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x1;); } @@ -529,7 +544,9 @@ static inline void fmul_by_b_coeff(uint64_t *res, uint64_t *x) static inline void fcube(uint64_t *res, uint64_t *x) { fsqr0(res, x); - fmul0(res, res, x); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, res, 4U * sizeof (uint64_t)); + fmul0(res, x_copy, x); } static inline void finv(uint64_t *res, uint64_t *a) @@ -541,51 +558,121 @@ static inline void finv(uint64_t *res, uint64_t *a) uint64_t *tmp2 = tmp + 12U; memcpy(x2, a, 4U * sizeof (uint64_t)); { - fsqr0(x2, x2); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x2, 4U * sizeof (uint64_t)); + fsqr0(x2, x_copy); } - fmul0(x2, x2, a); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x2, 4U * sizeof (uint64_t)); + fmul0(x2, x_copy, a); memcpy(x30, x2, 4U * sizeof (uint64_t)); { - fsqr0(x30, x30); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, x30, 4U * sizeof (uint64_t)); + fsqr0(x30, x_copy0); } - fmul0(x30, x30, a); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, x30, 4U * sizeof (uint64_t)); + fmul0(x30, x_copy0, a); memcpy(tmp1, x30, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1);); - fmul0(tmp1, tmp1, x30); + KRML_MAYBE_FOR3(i, + 0U, + 3U, + 1U, + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, tmp1, 4U * sizeof (uint64_t)); + fsqr0(tmp1, x_copy1);); + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, tmp1, 4U * sizeof (uint64_t)); + fmul0(tmp1, x_copy1, x30); memcpy(tmp2, tmp1, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR6(i, 0U, 6U, 1U, fsqr0(tmp2, tmp2);); - fmul0(tmp2, tmp2, tmp1); + KRML_MAYBE_FOR6(i, + 0U, + 6U, + 1U, + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, tmp2, 4U * sizeof (uint64_t)); + fsqr0(tmp2, x_copy2);); + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, tmp2, 4U * sizeof (uint64_t)); + fmul0(tmp2, x_copy2, tmp1); memcpy(tmp1, tmp2, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1);); - fmul0(tmp1, tmp1, x30); + KRML_MAYBE_FOR3(i, + 0U, + 3U, + 1U, + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, tmp1, 4U * sizeof (uint64_t)); + fsqr0(tmp1, x_copy3);); + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, tmp1, 4U * sizeof (uint64_t)); + fmul0(tmp1, x_copy3, x30); memcpy(x30, tmp1, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i, 0U, 15U, 1U, fsqr0(x30, x30);); - fmul0(x30, x30, tmp1); + KRML_MAYBE_FOR15(i, + 0U, + 15U, + 1U, + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, x30, 4U * sizeof (uint64_t)); + fsqr0(x30, x_copy4);); + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, x30, 4U * sizeof (uint64_t)); + fmul0(x30, x_copy4, tmp1); memcpy(tmp1, x30, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp1, tmp1);); - fmul0(tmp1, tmp1, x2); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, tmp1, 4U * sizeof (uint64_t)); 
+ fsqr0(tmp1, x_copy5);); + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, tmp1, 4U * sizeof (uint64_t)); + fmul0(tmp1, x_copy5, x2); memcpy(x2, tmp1, 4U * sizeof (uint64_t)); for (uint32_t i = 0U; i < 32U; i++) { - fsqr0(x2, x2); + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, x2, 4U * sizeof (uint64_t)); + fsqr0(x2, x_copy6); } - fmul0(x2, x2, a); + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, x2, 4U * sizeof (uint64_t)); + fmul0(x2, x_copy6, a); for (uint32_t i = 0U; i < 128U; i++) { - fsqr0(x2, x2); + uint64_t x_copy7[4U] = { 0U }; + memcpy(x_copy7, x2, 4U * sizeof (uint64_t)); + fsqr0(x2, x_copy7); } - fmul0(x2, x2, tmp1); + uint64_t x_copy7[4U] = { 0U }; + memcpy(x_copy7, x2, 4U * sizeof (uint64_t)); + fmul0(x2, x_copy7, tmp1); for (uint32_t i = 0U; i < 32U; i++) { - fsqr0(x2, x2); + uint64_t x_copy8[4U] = { 0U }; + memcpy(x_copy8, x2, 4U * sizeof (uint64_t)); + fsqr0(x2, x_copy8); } - fmul0(x2, x2, tmp1); + uint64_t x_copy8[4U] = { 0U }; + memcpy(x_copy8, x2, 4U * sizeof (uint64_t)); + fmul0(x2, x_copy8, tmp1); for (uint32_t i = 0U; i < 30U; i++) { - fsqr0(x2, x2); + uint64_t x_copy9[4U] = { 0U }; + memcpy(x_copy9, x2, 4U * sizeof (uint64_t)); + fsqr0(x2, x_copy9); } - fmul0(x2, x2, x30); - KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(x2, x2);); + uint64_t x_copy9[4U] = { 0U }; + memcpy(x_copy9, x2, 4U * sizeof (uint64_t)); + fmul0(x2, x_copy9, x30); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t x_copy10[4U] = { 0U }; + memcpy(x_copy10, x2, 4U * sizeof (uint64_t)); + fsqr0(x2, x_copy10);); fmul0(tmp1, x2, a); memcpy(res, tmp1, 4U * sizeof (uint64_t)); } @@ -597,35 +684,81 @@ static inline void fsqrt(uint64_t *res, uint64_t *a) uint64_t *tmp2 = tmp + 4U; memcpy(tmp1, a, 4U * sizeof (uint64_t)); { - fsqr0(tmp1, tmp1); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, tmp1, 4U * sizeof (uint64_t)); + fsqr0(tmp1, x_copy); } - fmul0(tmp1, tmp1, a); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, tmp1, 4U * sizeof (uint64_t)); + fmul0(tmp1, x_copy, a); memcpy(tmp2, tmp1, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp2, tmp2);); - fmul0(tmp2, tmp2, tmp1); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, tmp2, 4U * sizeof (uint64_t)); + fsqr0(tmp2, x_copy0);); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, tmp2, 4U * sizeof (uint64_t)); + fmul0(tmp2, x_copy0, tmp1); memcpy(tmp1, tmp2, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, fsqr0(tmp1, tmp1);); - fmul0(tmp1, tmp1, tmp2); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, tmp1, 4U * sizeof (uint64_t)); + fsqr0(tmp1, x_copy1);); + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, tmp1, 4U * sizeof (uint64_t)); + fmul0(tmp1, x_copy1, tmp2); memcpy(tmp2, tmp1, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR8(i, 0U, 8U, 1U, fsqr0(tmp2, tmp2);); - fmul0(tmp2, tmp2, tmp1); + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, tmp2, 4U * sizeof (uint64_t)); + fsqr0(tmp2, x_copy2);); + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, tmp2, 4U * sizeof (uint64_t)); + fmul0(tmp2, x_copy2, tmp1); memcpy(tmp1, tmp2, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR16(i, 0U, 16U, 1U, fsqr0(tmp1, tmp1);); - fmul0(tmp1, tmp1, tmp2); + KRML_MAYBE_FOR16(i, + 0U, + 16U, + 1U, + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, tmp1, 4U * sizeof (uint64_t)); + fsqr0(tmp1, x_copy3);); + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, tmp1, 4U * sizeof (uint64_t)); + fmul0(tmp1, x_copy3, tmp2); memcpy(tmp2, tmp1, 4U 
* sizeof (uint64_t)); for (uint32_t i = 0U; i < 32U; i++) { - fsqr0(tmp2, tmp2); + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, tmp2, 4U * sizeof (uint64_t)); + fsqr0(tmp2, x_copy4); } - fmul0(tmp2, tmp2, a); + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, tmp2, 4U * sizeof (uint64_t)); + fmul0(tmp2, x_copy4, a); for (uint32_t i = 0U; i < 96U; i++) { - fsqr0(tmp2, tmp2); + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, tmp2, 4U * sizeof (uint64_t)); + fsqr0(tmp2, x_copy5); } - fmul0(tmp2, tmp2, a); + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, tmp2, 4U * sizeof (uint64_t)); + fmul0(tmp2, x_copy5, a); for (uint32_t i = 0U; i < 94U; i++) { - fsqr0(tmp2, tmp2); + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, tmp2, 4U * sizeof (uint64_t)); + fsqr0(tmp2, x_copy6); } memcpy(res, tmp2, 4U * sizeof (uint64_t)); } @@ -667,8 +800,12 @@ static inline void to_aff_point(uint64_t *res, uint64_t *p) finv(zinv, pz); fmul0(x, px, zinv); fmul0(y, py, zinv); - from_mont(x, x); - from_mont(y, y); + uint64_t a_copy[4U] = { 0U }; + memcpy(a_copy, x, 4U * sizeof (uint64_t)); + from_mont(x, a_copy); + uint64_t a_copy0[4U] = { 0U }; + memcpy(a_copy0, y, 4U * sizeof (uint64_t)); + from_mont(y, a_copy0); } static inline void to_aff_point_x(uint64_t *res, uint64_t *p) @@ -678,7 +815,9 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p) uint64_t *pz = p + 8U; finv(zinv, pz); fmul0(res, px, zinv); - from_mont(res, res); + uint64_t a_copy[4U] = { 0U }; + memcpy(a_copy, res, 4U * sizeof (uint64_t)); + from_mont(res, a_copy); } static inline void to_proj_point(uint64_t *res, uint64_t *p) @@ -705,11 +844,19 @@ static inline bool is_on_curve_vartime(uint64_t *p) uint64_t tmp[4U] = { 0U }; fcube(rp, tx); make_a_coeff(tmp); - fmul0(tmp, tmp, tx); - fadd0(rp, tmp, rp); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, tmp, 4U * sizeof (uint64_t)); + fmul0(tmp, x_copy, tx); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, rp, 4U * sizeof (uint64_t)); + fadd0(rp, tmp, y_copy); make_b_coeff(tmp); - fadd0(rp, tmp, rp); - fsqr0(ty, ty); + uint64_t y_copy0[4U] = { 0U }; + memcpy(y_copy0, rp, 4U * sizeof (uint64_t)); + fadd0(rp, tmp, y_copy0); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, ty, 4U * sizeof (uint64_t)); + fsqr0(ty, x_copy0); uint64_t r = feq_mask(ty, rp); bool r0 = r == 0xFFFFFFFFFFFFFFFFULL; return r0; @@ -785,13 +932,21 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_ uint64_t tmp[4U] = { 0U }; fcube(y2M, xM); make_a_coeff(tmp); - fmul0(tmp, tmp, xM); - fadd0(y2M, tmp, y2M); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, tmp, 4U * sizeof (uint64_t)); + fmul0(tmp, x_copy, xM); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, y2M, 4U * sizeof (uint64_t)); + fadd0(y2M, tmp, y_copy); make_b_coeff(tmp); - fadd0(y2M, tmp, y2M); + uint64_t y_copy0[4U] = { 0U }; + memcpy(y_copy0, y2M, 4U * sizeof (uint64_t)); + fadd0(y2M, tmp, y_copy0); fsqrt(yM, y2M); from_mont(y, yM); - fsqr0(yM, yM); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, yM, 4U * sizeof (uint64_t)); + fsqr0(yM, x_copy0); uint64_t r = feq_mask(yM, y2M); bool is_y_valid = r == 0xFFFFFFFFFFFFFFFFULL; bool is_y_valid0 = is_y_valid; @@ -808,8 +963,6 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_ static inline void point_double(uint64_t *res, uint64_t *p) { uint64_t tmp[20U] = { 0U }; - uint64_t *x = p; - uint64_t *z = p + 8U; uint64_t *x3 = res; uint64_t *y3 = res + 4U; uint64_t *z3 = res + 8U; @@ -818,43 +971,85 @@ static inline void point_double(uint64_t 
*res, uint64_t *p) uint64_t *t2 = tmp + 8U; uint64_t *t3 = tmp + 12U; uint64_t *t4 = tmp + 16U; - uint64_t *x1 = p; + uint64_t *x = p; uint64_t *y = p + 4U; - uint64_t *z1 = p + 8U; - fsqr0(t0, x1); + uint64_t *z0 = p + 8U; + fsqr0(t0, x); fsqr0(t1, y); - fsqr0(t2, z1); - fmul0(t3, x1, y); - fadd0(t3, t3, t3); - fmul0(t4, y, z1); - fmul0(z3, x, z); - fadd0(z3, z3, z3); + fsqr0(t2, z0); + fmul0(t3, x, y); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, t3, 4U * sizeof (uint64_t)); + fadd0(t3, x_copy, x_copy); + fmul0(t4, y, z0); + uint64_t *x0 = p; + uint64_t *z = p + 8U; + fmul0(z3, x0, z); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, z3, 4U * sizeof (uint64_t)); + fadd0(z3, x_copy0, x_copy0); fmul_by_b_coeff(y3, t2); - fsub0(y3, y3, z3); + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, y3, 4U * sizeof (uint64_t)); + fsub0(y3, x_copy1, z3); fadd0(x3, y3, y3); - fadd0(y3, x3, y3); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, y3, 4U * sizeof (uint64_t)); + fadd0(y3, x3, y_copy); fsub0(x3, t1, y3); - fadd0(y3, t1, y3); - fmul0(y3, x3, y3); - fmul0(x3, x3, t3); + uint64_t y_copy0[4U] = { 0U }; + memcpy(y_copy0, y3, 4U * sizeof (uint64_t)); + fadd0(y3, t1, y_copy0); + uint64_t y_copy1[4U] = { 0U }; + memcpy(y_copy1, y3, 4U * sizeof (uint64_t)); + fmul0(y3, x3, y_copy1); + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, x3, 4U * sizeof (uint64_t)); + fmul0(x3, x_copy2, t3); fadd0(t3, t2, t2); - fadd0(t2, t2, t3); - fmul_by_b_coeff(z3, z3); - fsub0(z3, z3, t2); - fsub0(z3, z3, t0); + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, t2, 4U * sizeof (uint64_t)); + fadd0(t2, x_copy3, t3); + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, z3, 4U * sizeof (uint64_t)); + fmul_by_b_coeff(z3, x_copy4); + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, z3, 4U * sizeof (uint64_t)); + fsub0(z3, x_copy5, t2); + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, z3, 4U * sizeof (uint64_t)); + fsub0(z3, x_copy6, t0); fadd0(t3, z3, z3); - fadd0(z3, z3, t3); + uint64_t x_copy7[4U] = { 0U }; + memcpy(x_copy7, z3, 4U * sizeof (uint64_t)); + fadd0(z3, x_copy7, t3); fadd0(t3, t0, t0); - fadd0(t0, t3, t0); - fsub0(t0, t0, t2); - fmul0(t0, t0, z3); - fadd0(y3, y3, t0); + uint64_t y_copy2[4U] = { 0U }; + memcpy(y_copy2, t0, 4U * sizeof (uint64_t)); + fadd0(t0, t3, y_copy2); + uint64_t x_copy8[4U] = { 0U }; + memcpy(x_copy8, t0, 4U * sizeof (uint64_t)); + fsub0(t0, x_copy8, t2); + uint64_t x_copy9[4U] = { 0U }; + memcpy(x_copy9, t0, 4U * sizeof (uint64_t)); + fmul0(t0, x_copy9, z3); + uint64_t x_copy10[4U] = { 0U }; + memcpy(x_copy10, y3, 4U * sizeof (uint64_t)); + fadd0(y3, x_copy10, t0); fadd0(t0, t4, t4); - fmul0(z3, t0, z3); - fsub0(x3, x3, z3); + uint64_t y_copy3[4U] = { 0U }; + memcpy(y_copy3, z3, 4U * sizeof (uint64_t)); + fmul0(z3, t0, y_copy3); + uint64_t x_copy11[4U] = { 0U }; + memcpy(x_copy11, x3, 4U * sizeof (uint64_t)); + fsub0(x3, x_copy11, z3); fmul0(z3, t0, t1); - fadd0(z3, z3, z3); - fadd0(z3, z3, z3); + uint64_t x_copy12[4U] = { 0U }; + memcpy(x_copy12, z3, 4U * sizeof (uint64_t)); + fadd0(z3, x_copy12, x_copy12); + uint64_t x_copy13[4U] = { 0U }; + memcpy(x_copy13, z3, 4U * sizeof (uint64_t)); + fadd0(z3, x_copy13, x_copy13); } static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q) @@ -882,52 +1077,92 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q) fmul0(t2, z10, z20); fadd0(t3, x1, y1); fadd0(t4, x20, y20); - fmul0(t3, t3, t4); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, t3, 4U * sizeof (uint64_t)); + fmul0(t3, x_copy0, t4); fadd0(t4, t01, 
t11); uint64_t *y10 = p + 4U; uint64_t *z11 = p + 8U; uint64_t *y2 = q + 4U; uint64_t *z21 = q + 8U; - fsub0(t3, t3, t4); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, t3, 4U * sizeof (uint64_t)); + fsub0(t3, x_copy, t4); fadd0(t4, y10, z11); fadd0(t5, y2, z21); - fmul0(t4, t4, t5); + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, t4, 4U * sizeof (uint64_t)); + fmul0(t4, x_copy1, t5); fadd0(t5, t11, t2); - fsub0(t4, t4, t5); + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, t4, 4U * sizeof (uint64_t)); + fsub0(t4, x_copy2, t5); uint64_t *x10 = p; uint64_t *z1 = p + 8U; uint64_t *x2 = q; uint64_t *z2 = q + 8U; fadd0(x3, x10, z1); fadd0(y3, x2, z2); - fmul0(x3, x3, y3); + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, x3, 4U * sizeof (uint64_t)); + fmul0(x3, x_copy3, y3); fadd0(y3, t01, t2); - fsub0(y3, x3, y3); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, y3, 4U * sizeof (uint64_t)); + fsub0(y3, x3, y_copy); fmul_by_b_coeff(z3, t2); fsub0(x3, y3, z3); fadd0(z3, x3, x3); - fadd0(x3, x3, z3); + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, x3, 4U * sizeof (uint64_t)); + fadd0(x3, x_copy4, z3); fsub0(z3, t11, x3); - fadd0(x3, t11, x3); - fmul_by_b_coeff(y3, y3); + uint64_t y_copy0[4U] = { 0U }; + memcpy(y_copy0, x3, 4U * sizeof (uint64_t)); + fadd0(x3, t11, y_copy0); + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, y3, 4U * sizeof (uint64_t)); + fmul_by_b_coeff(y3, x_copy5); fadd0(t11, t2, t2); - fadd0(t2, t11, t2); - fsub0(y3, y3, t2); - fsub0(y3, y3, t01); + uint64_t y_copy1[4U] = { 0U }; + memcpy(y_copy1, t2, 4U * sizeof (uint64_t)); + fadd0(t2, t11, y_copy1); + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, y3, 4U * sizeof (uint64_t)); + fsub0(y3, x_copy6, t2); + uint64_t x_copy7[4U] = { 0U }; + memcpy(x_copy7, y3, 4U * sizeof (uint64_t)); + fsub0(y3, x_copy7, t01); fadd0(t11, y3, y3); - fadd0(y3, t11, y3); + uint64_t y_copy2[4U] = { 0U }; + memcpy(y_copy2, y3, 4U * sizeof (uint64_t)); + fadd0(y3, t11, y_copy2); fadd0(t11, t01, t01); - fadd0(t01, t11, t01); - fsub0(t01, t01, t2); + uint64_t y_copy3[4U] = { 0U }; + memcpy(y_copy3, t01, 4U * sizeof (uint64_t)); + fadd0(t01, t11, y_copy3); + uint64_t x_copy8[4U] = { 0U }; + memcpy(x_copy8, t01, 4U * sizeof (uint64_t)); + fsub0(t01, x_copy8, t2); fmul0(t11, t4, y3); fmul0(t2, t01, y3); fmul0(y3, x3, z3); - fadd0(y3, y3, t2); - fmul0(x3, t3, x3); - fsub0(x3, x3, t11); - fmul0(z3, t4, z3); + uint64_t x_copy9[4U] = { 0U }; + memcpy(x_copy9, y3, 4U * sizeof (uint64_t)); + fadd0(y3, x_copy9, t2); + uint64_t y_copy4[4U] = { 0U }; + memcpy(y_copy4, x3, 4U * sizeof (uint64_t)); + fmul0(x3, t3, y_copy4); + uint64_t x_copy10[4U] = { 0U }; + memcpy(x_copy10, x3, 4U * sizeof (uint64_t)); + fsub0(x3, x_copy10, t11); + uint64_t y_copy5[4U] = { 0U }; + memcpy(y_copy5, z3, 4U * sizeof (uint64_t)); + fmul0(z3, t4, y_copy5); fmul0(t11, t3, t01); - fadd0(z3, z3, t11); + uint64_t x_copy11[4U] = { 0U }; + memcpy(x_copy11, z3, 4U * sizeof (uint64_t)); + fadd0(z3, x_copy11, t11); memcpy(res, t1, 12U * sizeof (uint64_t)); } @@ -939,23 +1174,35 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p) uint64_t *t1 = table + 12U; make_point_at_inf(t0); memcpy(t1, p, 12U * sizeof (uint64_t)); + KRML_MAYBE_UNUSED_VAR(table); KRML_MAYBE_FOR7(i, 0U, 7U, 1U, uint64_t *t11 = table + (i + 1U) * 12U; - point_double(tmp, t11); + uint64_t p_copy0[12U] = { 0U }; + memcpy(p_copy0, t11, 12U * sizeof (uint64_t)); + point_double(tmp, p_copy0); memcpy(table + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t)); uint64_t *t2 = table + (2U * i + 2U) 
* 12U; - point_add(tmp, p, t2); + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, p, 12U * sizeof (uint64_t)); + point_add(tmp, p_copy, t2); memcpy(table + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t));); make_point_at_inf(res); uint64_t tmp0[12U] = { 0U }; for (uint32_t i0 = 0U; i0 < 64U; i0++) { - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, point_double(res, res);); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, res, 12U * sizeof (uint64_t)); + point_double(res, p_copy);); uint32_t k = 256U - 4U * i0 - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U); + KRML_MAYBE_UNUSED_VAR(table); memcpy(tmp0, (uint64_t *)table, 12U * sizeof (uint64_t)); KRML_MAYBE_FOR15(i1, 0U, @@ -967,10 +1214,12 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p) 0U, 12U, 1U, - uint64_t *os = tmp0; uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + uint64_t *os = tmp0; os[i] = x;);); - point_add(res, res, tmp0); + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy, tmp0); } } @@ -987,8 +1236,8 @@ static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, 0U, 12U, 1U, - uint64_t *os = tmp; uint64_t x = (c & res_j[i]) | (~c & tmp[i]); + uint64_t *os = tmp; os[i] = x;);); } @@ -1030,23 +1279,41 @@ static inline void point_mul_g(uint64_t *res, uint64_t *scalar) 0U, 16U, 1U, - KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, point_double(res, res);); + KRML_MAYBE_FOR4(i0, + 0U, + 4U, + 1U, + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, res, 12U * sizeof (uint64_t)); + point_double(res, p_copy);); uint32_t k = 64U - 4U * i - 4U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U); + KRML_HOST_IGNORE(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4); precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp); - point_add(res, res, tmp); + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy, tmp); uint32_t k0 = 64U - 4U * i - 4U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U); + KRML_HOST_IGNORE(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4); precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp); - point_add(res, res, tmp); + uint64_t p_copy0[12U] = { 0U }; + memcpy(p_copy0, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy0, tmp); uint32_t k1 = 64U - 4U * i - 4U; uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U); + KRML_HOST_IGNORE(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4); precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp); - point_add(res, res, tmp); + uint64_t p_copy1[12U] = { 0U }; + memcpy(p_copy1, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy1, tmp); uint32_t k2 = 64U - 4U * i - 4U; uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U); + KRML_HOST_IGNORE(Hacl_P256_PrecompTable_precomp_basepoint_table_w4); precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp); - point_add(res, res, tmp);); + uint64_t p_copy2[12U] = { 0U }; + memcpy(p_copy2, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy2, tmp);); KRML_MAYBE_UNUSED_VAR(q1); KRML_MAYBE_UNUSED_VAR(q2); KRML_MAYBE_UNUSED_VAR(q3); @@ -1064,15 +1331,20 @@ point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t uint64_t *t1 = table2 + 12U; make_point_at_inf(t0); memcpy(t1, q2, 12U * sizeof (uint64_t)); + 
KRML_MAYBE_UNUSED_VAR(table2); KRML_MAYBE_FOR15(i, 0U, 15U, 1U, uint64_t *t11 = table2 + (i + 1U) * 12U; - point_double(tmp, t11); + uint64_t p_copy0[12U] = { 0U }; + memcpy(p_copy0, t11, 12U * sizeof (uint64_t)); + point_double(tmp, p_copy0); memcpy(table2 + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t)); uint64_t *t2 = table2 + (2U * i + 2U) * 12U; - point_add(tmp, q2, t2); + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, q2, 12U * sizeof (uint64_t)); + point_add(tmp, p_copy, t2); memcpy(table2 + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t));); uint64_t tmp0[12U] = { 0U }; uint32_t i0 = 255U; @@ -1085,25 +1357,39 @@ point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t uint32_t bits_l320 = (uint32_t)bits_c0; const uint64_t *a_bits_l0 = table2 + bits_l320 * 12U; memcpy(tmp0, (uint64_t *)a_bits_l0, 12U * sizeof (uint64_t)); - point_add(res, res, tmp0); + uint64_t p_copy[12U] = { 0U }; + memcpy(p_copy, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy, tmp0); uint64_t tmp1[12U] = { 0U }; for (uint32_t i = 0U; i < 51U; i++) { - KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, point_double(res, res);); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t p_copy0[12U] = { 0U }; + memcpy(p_copy0, res, 12U * sizeof (uint64_t)); + point_double(res, p_copy0);); uint32_t k = 255U - 5U * i - 5U; uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U); + KRML_MAYBE_UNUSED_VAR(table2); uint32_t bits_l321 = (uint32_t)bits_l; const uint64_t *a_bits_l1 = table2 + bits_l321 * 12U; memcpy(tmp1, (uint64_t *)a_bits_l1, 12U * sizeof (uint64_t)); - point_add(res, res, tmp1); + uint64_t p_copy0[12U] = { 0U }; + memcpy(p_copy0, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy0, tmp1); uint32_t k0 = 255U - 5U * i - 5U; uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U); + KRML_HOST_IGNORE(Hacl_P256_PrecompTable_precomp_basepoint_table_w5); uint32_t bits_l322 = (uint32_t)bits_l0; const uint64_t *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 12U; memcpy(tmp1, (uint64_t *)a_bits_l2, 12U * sizeof (uint64_t)); - point_add(res, res, tmp1); + uint64_t p_copy1[12U] = { 0U }; + memcpy(p_copy1, res, 12U * sizeof (uint64_t)); + point_add(res, p_copy1, tmp1); } } @@ -1111,8 +1397,11 @@ static inline uint64_t bn_is_lt_order_mask4(uint64_t *f) { uint64_t tmp[4U] = { 0U }; make_order(tmp); - uint64_t c = bn_sub4(tmp, f, tmp); - return 0ULL - c; + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, tmp, 4U * sizeof (uint64_t)); + uint64_t c = bn_sub4(tmp, f, y_copy); + uint64_t c0 = c; + return 0ULL - c0; } static inline uint64_t bn_is_lt_order_and_gt_zero_mask4(uint64_t *f) @@ -1126,8 +1415,11 @@ static inline void qmod_short(uint64_t *res, uint64_t *x) { uint64_t tmp[4U] = { 0U }; make_order(tmp); - uint64_t c = bn_sub4(tmp, x, tmp); - bn_cmovznz4(res, c, tmp, x); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, tmp, 4U * sizeof (uint64_t)); + uint64_t c = bn_sub4(tmp, x, y_copy); + uint64_t c0 = c; + bn_cmovznz4(res, c0, tmp, x); } static inline void qadd(uint64_t *res, uint64_t *x, uint64_t *y) @@ -1165,8 +1457,8 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x) } uint64_t r = c; uint64_t c1 = r; - uint64_t *resb = x + 4U + i0; uint64_t res_j = x[4U + i0]; + uint64_t *resb = x + 4U + i0; c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);); memcpy(res, x + 4U, 4U * sizeof (uint64_t)); uint64_t c00 = c0; @@ -1196,8 +1488,8 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x) 0U, 4U, 1U, - uint64_t 
*os = res; uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]); + uint64_t *os = res; os[i] = x1;); } @@ -1238,9 +1530,9 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key) 0U, 4U, 1U, - uint64_t *os = sk; uint64_t uu____0 = oneq[i]; uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0)); + uint64_t *os = sk; os[i] = x;); uint64_t is_sk_valid = is_b_valid; point_mul_g(pk, sk); @@ -1270,9 +1562,9 @@ Hacl_Impl_P256_DH_ecp256dh_r( 0U, 4U, 1U, - uint64_t *os = sk; uint64_t uu____0 = oneq[i]; uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0)); + uint64_t *os = sk; os[i] = x;); uint64_t is_sk_valid = is_b_valid; uint64_t ss_proj[12U] = { 0U }; @@ -1296,98 +1588,348 @@ static inline void qinv(uint64_t *res, uint64_t *r) uint64_t *x_101111 = tmp + 24U; memcpy(x6, r, 4U * sizeof (uint64_t)); { - qsqr(x6, x6); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x6, 4U * sizeof (uint64_t)); + qsqr(x6, x_copy); } qmul(x_11, x6, r); qmul(x_101, x6, x_11); qmul(x_111, x6, x_101); memcpy(x6, x_101, 4U * sizeof (uint64_t)); { - qsqr(x6, x6); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x6, 4U * sizeof (uint64_t)); + qsqr(x6, x_copy); } qmul(x_1111, x_101, x6); { - qsqr(x6, x6); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x6, 4U * sizeof (uint64_t)); + qsqr(x6, x_copy); } qmul(x_10101, x6, r); memcpy(x6, x_10101, 4U * sizeof (uint64_t)); { - qsqr(x6, x6); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x6, 4U * sizeof (uint64_t)); + qsqr(x6, x_copy); } qmul(x_101111, x_101, x6); - qmul(x6, x_10101, x6); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, x6, 4U * sizeof (uint64_t)); + qmul(x6, x_10101, y_copy); uint64_t tmp1[4U] = { 0U }; - KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(x6, x6);); - qmul(x6, x6, x_11); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x6, 4U * sizeof (uint64_t)); + qsqr(x6, x_copy);); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x6, 4U * sizeof (uint64_t)); + qmul(x6, x_copy, x_11); memcpy(tmp1, x6, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR8(i, 0U, 8U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x6); + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy0);); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy0, x6); memcpy(x6, tmp1, 4U * sizeof (uint64_t)); - KRML_MAYBE_FOR16(i, 0U, 16U, 1U, qsqr(x6, x6);); - qmul(x6, x6, tmp1); + KRML_MAYBE_FOR16(i, + 0U, + 16U, + 1U, + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, x6, 4U * sizeof (uint64_t)); + qsqr(x6, x_copy1);); + uint64_t x_copy1[4U] = { 0U }; + memcpy(x_copy1, x6, 4U * sizeof (uint64_t)); + qmul(x6, x_copy1, tmp1); memcpy(tmp1, x6, 4U * sizeof (uint64_t)); for (uint32_t i = 0U; i < 64U; i++) { - qsqr(tmp1, tmp1); + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy2); } - qmul(tmp1, tmp1, x6); + uint64_t x_copy2[4U] = { 0U }; + memcpy(x_copy2, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy2, x6); for (uint32_t i = 0U; i < 32U; i++) { - qsqr(tmp1, tmp1); + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy3); } - qmul(tmp1, tmp1, x6); - KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101111); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_111); - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_11); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, 
tmp1);); - qmul(tmp1, tmp1, x_1111); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_10101); - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101); - KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101); - KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_111); - KRML_MAYBE_FOR9(i, 0U, 9U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101111); - KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_1111); - KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, r); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, r); - KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_1111); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_111); - KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_111); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_111); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101); - KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_11); - KRML_MAYBE_FOR10(i, 0U, 10U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_101111); - KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_11); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_11); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_11); - KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, r); - KRML_MAYBE_FOR7(i, 0U, 7U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_10101); - KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1);); - qmul(tmp1, tmp1, x_1111); + uint64_t x_copy3[4U] = { 0U }; + memcpy(x_copy3, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy3, x6); + KRML_MAYBE_FOR6(i, + 0U, + 6U, + 1U, + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy4);); + uint64_t x_copy4[4U] = { 0U }; + memcpy(x_copy4, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy4, x_101111); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy5);); + uint64_t x_copy5[4U] = { 0U }; + memcpy(x_copy5, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy5, x_111); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy6);); + uint64_t x_copy6[4U] = { 0U }; + memcpy(x_copy6, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy6, x_11); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy7[4U] = { 0U }; + memcpy(x_copy7, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy7);); + uint64_t x_copy7[4U] = { 0U }; + memcpy(x_copy7, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy7, x_1111); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy8[4U] = { 0U }; + memcpy(x_copy8, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy8);); + uint64_t x_copy8[4U] = { 0U }; + memcpy(x_copy8, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy8, x_10101); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t x_copy9[4U] = { 0U }; + memcpy(x_copy9, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy9);); + uint64_t x_copy9[4U] = { 0U }; + memcpy(x_copy9, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy9, x_101); + KRML_MAYBE_FOR3(i, + 0U, + 3U, + 1U, + uint64_t x_copy10[4U] = { 0U }; + memcpy(x_copy10, 
tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy10);); + uint64_t x_copy10[4U] = { 0U }; + memcpy(x_copy10, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy10, x_101); + KRML_MAYBE_FOR3(i, + 0U, + 3U, + 1U, + uint64_t x_copy11[4U] = { 0U }; + memcpy(x_copy11, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy11);); + uint64_t x_copy11[4U] = { 0U }; + memcpy(x_copy11, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy11, x_101); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy12[4U] = { 0U }; + memcpy(x_copy12, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy12);); + uint64_t x_copy12[4U] = { 0U }; + memcpy(x_copy12, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy12, x_111); + KRML_MAYBE_FOR9(i, + 0U, + 9U, + 1U, + uint64_t x_copy13[4U] = { 0U }; + memcpy(x_copy13, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy13);); + uint64_t x_copy13[4U] = { 0U }; + memcpy(x_copy13, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy13, x_101111); + KRML_MAYBE_FOR6(i, + 0U, + 6U, + 1U, + uint64_t x_copy14[4U] = { 0U }; + memcpy(x_copy14, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy14);); + uint64_t x_copy14[4U] = { 0U }; + memcpy(x_copy14, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy14, x_1111); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t x_copy15[4U] = { 0U }; + memcpy(x_copy15, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy15);); + uint64_t x_copy15[4U] = { 0U }; + memcpy(x_copy15, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy15, r); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy16[4U] = { 0U }; + memcpy(x_copy16, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy16);); + uint64_t x_copy16[4U] = { 0U }; + memcpy(x_copy16, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy16, r); + KRML_MAYBE_FOR6(i, + 0U, + 6U, + 1U, + uint64_t x_copy17[4U] = { 0U }; + memcpy(x_copy17, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy17);); + uint64_t x_copy17[4U] = { 0U }; + memcpy(x_copy17, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy17, x_1111); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy18[4U] = { 0U }; + memcpy(x_copy18, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy18);); + uint64_t x_copy18[4U] = { 0U }; + memcpy(x_copy18, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy18, x_111); + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t x_copy19[4U] = { 0U }; + memcpy(x_copy19, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy19);); + uint64_t x_copy19[4U] = { 0U }; + memcpy(x_copy19, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy19, x_111); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy20[4U] = { 0U }; + memcpy(x_copy20, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy20);); + uint64_t x_copy20[4U] = { 0U }; + memcpy(x_copy20, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy20, x_111); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy21[4U] = { 0U }; + memcpy(x_copy21, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy21);); + uint64_t x_copy21[4U] = { 0U }; + memcpy(x_copy21, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy21, x_101); + KRML_MAYBE_FOR3(i, + 0U, + 3U, + 1U, + uint64_t x_copy22[4U] = { 0U }; + memcpy(x_copy22, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy22);); + uint64_t x_copy22[4U] = { 0U }; + memcpy(x_copy22, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy22, x_11); + KRML_MAYBE_FOR10(i, + 0U, + 10U, + 1U, + uint64_t x_copy23[4U] = { 0U }; + memcpy(x_copy23, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy23);); + uint64_t x_copy23[4U] = { 0U }; + memcpy(x_copy23, tmp1, 4U * 
sizeof (uint64_t)); + qmul(tmp1, x_copy23, x_101111); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t x_copy24[4U] = { 0U }; + memcpy(x_copy24, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy24);); + uint64_t x_copy24[4U] = { 0U }; + memcpy(x_copy24, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy24, x_11); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy25[4U] = { 0U }; + memcpy(x_copy25, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy25);); + uint64_t x_copy25[4U] = { 0U }; + memcpy(x_copy25, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy25, x_11); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t x_copy26[4U] = { 0U }; + memcpy(x_copy26, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy26);); + uint64_t x_copy26[4U] = { 0U }; + memcpy(x_copy26, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy26, x_11); + KRML_MAYBE_FOR3(i, + 0U, + 3U, + 1U, + uint64_t x_copy27[4U] = { 0U }; + memcpy(x_copy27, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy27);); + uint64_t x_copy27[4U] = { 0U }; + memcpy(x_copy27, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy27, r); + KRML_MAYBE_FOR7(i, + 0U, + 7U, + 1U, + uint64_t x_copy28[4U] = { 0U }; + memcpy(x_copy28, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy28);); + uint64_t x_copy28[4U] = { 0U }; + memcpy(x_copy28, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy28, x_10101); + KRML_MAYBE_FOR6(i, + 0U, + 6U, + 1U, + uint64_t x_copy29[4U] = { 0U }; + memcpy(x_copy29, tmp1, 4U * sizeof (uint64_t)); + qsqr(tmp1, x_copy29);); + uint64_t x_copy29[4U] = { 0U }; + memcpy(x_copy29, tmp1, 4U * sizeof (uint64_t)); + qmul(tmp1, x_copy29, x_1111); memcpy(x6, tmp1, 4U * sizeof (uint64_t)); memcpy(res, x6, 4U * sizeof (uint64_t)); } @@ -1435,7 +1977,9 @@ ecdsa_verify_msg_as_qelem( } uint64_t x[4U] = { 0U }; to_aff_point_x(x, res); - qmod_short(x, x); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, x, 4U * sizeof (uint64_t)); + qmod_short(x, x_copy); bool res1 = bn_is_eq_vartime4(x, r_q); return res1; } @@ -1464,9 +2008,9 @@ ecdsa_sign_msg_as_qelem( 0U, 4U, 1U, - uint64_t *os = d_a; uint64_t uu____0 = oneq0[i]; uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0)); + uint64_t *os = d_a; os[i] = x;); uint64_t is_sk_valid = is_b_valid0; bn_from_bytes_be4(k_q, nonce); @@ -1480,22 +2024,30 @@ ecdsa_sign_msg_as_qelem( 0U, 4U, 1U, - uint64_t *os = k_q; uint64_t uu____1 = oneq[i]; uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1)); + uint64_t *os = k_q; os[i] = x;); uint64_t is_nonce_valid = is_b_valid; uint64_t are_sk_nonce_valid = is_sk_valid & is_nonce_valid; uint64_t p[12U] = { 0U }; point_mul_g(p, k_q); to_aff_point_x(r_q, p); - qmod_short(r_q, r_q); + uint64_t x_copy0[4U] = { 0U }; + memcpy(x_copy0, r_q, 4U * sizeof (uint64_t)); + qmod_short(r_q, x_copy0); uint64_t kinv[4U] = { 0U }; qinv(kinv, k_q); qmul(s_q, r_q, d_a); - from_qmont(m_q, m_q); - qadd(s_q, m_q, s_q); - qmul(s_q, kinv, s_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + from_qmont(m_q, x_copy); + uint64_t y_copy[4U] = { 0U }; + memcpy(y_copy, s_q, 4U * sizeof (uint64_t)); + qadd(s_q, m_q, y_copy); + uint64_t y_copy0[4U] = { 0U }; + memcpy(y_copy0, s_q, 4U * sizeof (uint64_t)); + qmul(s_q, kinv, y_copy0); bn2_to_bytes_be4(signature, r_q, s_q); uint64_t is_r_zero = bn_is_zero_mask4(r_q); uint64_t is_s_zero = bn_is_zero_mask4(s_q); @@ -1551,7 +2103,9 @@ Hacl_P256_ecdsa_sign_p256_sha2( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U 
}; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_sign_msg_as_qelem(signature, m_q, private_key, nonce); return res; } @@ -1584,7 +2138,9 @@ Hacl_P256_ecdsa_sign_p256_sha384( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_sign_msg_as_qelem(signature, m_q, private_key, nonce); return res; } @@ -1617,7 +2173,9 @@ Hacl_P256_ecdsa_sign_p256_sha512( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_sign_msg_as_qelem(signature, m_q, private_key, nonce); return res; } @@ -1660,7 +2218,9 @@ Hacl_P256_ecdsa_sign_p256_without_hash( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_sign_msg_as_qelem(signature, m_q, private_key, nonce); return res; } @@ -1696,7 +2256,9 @@ Hacl_P256_ecdsa_verif_p256_sha2( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_verify_msg_as_qelem(m_q, public_key, signature_r, signature_s); return res; } @@ -1727,7 +2289,9 @@ Hacl_P256_ecdsa_verif_p256_sha384( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_verify_msg_as_qelem(m_q, public_key, signature_r, signature_s); return res; } @@ -1758,7 +2322,9 @@ Hacl_P256_ecdsa_verif_p256_sha512( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_verify_msg_as_qelem(m_q, public_key, signature_r, signature_s); return res; } @@ -1794,7 +2360,9 @@ Hacl_P256_ecdsa_verif_without_hash( KRML_MAYBE_UNUSED_VAR(msg_len); uint8_t *mHash32 = mHash; bn_from_bytes_be4(m_q, mHash32); - qmod_short(m_q, m_q); + uint64_t x_copy[4U] = { 0U }; + memcpy(x_copy, m_q, 4U * sizeof (uint64_t)); + qmod_short(m_q, x_copy); bool res = ecdsa_verify_msg_as_qelem(m_q, public_key, signature_r, signature_s); return res; } @@ -1891,8 +2459,8 @@ bool Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw) { uint64_t xa[4U] = { 0U }; uint64_t ya[4U] = { 0U }; - uint8_t *pk_xb = pk + 1U; bool b = aff_point_decompress_vartime(xa, ya, pk); + uint8_t *pk_xb = pk + 1U; if (b) { memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t)); diff --git a/src/Hacl_RSAPSS.c b/src/Hacl_RSAPSS.c index 71e141d0..fa8244e3 100644 --- a/src/Hacl_RSAPSS.c +++ b/src/Hacl_RSAPSS.c @@ -167,7 +167,7 @@ static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; return res; @@ -189,7 +189,7 @@ static 
inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n) { uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; uint64_t m1 = res; @@ -252,8 +252,8 @@ pss_encode( mgf_hash(a, hLen, m1Hash, dbLen, dbMask); for (uint32_t i = 0U; i < dbLen; i++) { - uint8_t *os = db; uint8_t x = (uint32_t)db[i] ^ (uint32_t)dbMask[i]; + uint8_t *os = db; os[i] = x; } uint32_t msBits = emBits % 8U; @@ -288,11 +288,7 @@ pss_verify( em_0 = 0U; } uint8_t em_last = em[emLen - 1U]; - if (emLen < saltLen + hash_len(a) + 2U) - { - return false; - } - if (!(em_last == 0xbcU && em_0 == 0U)) + if (emLen < saltLen + hash_len(a) + 2U || !(em_last == 0xbcU && em_0 == 0U)) { return false; } @@ -310,8 +306,8 @@ pss_verify( mgf_hash(a, hLen, m1Hash, dbLen, dbMask); for (uint32_t i = 0U; i < dbLen; i++) { - uint8_t *os = dbMask; uint8_t x = (uint32_t)dbMask[i] ^ (uint32_t)maskedDB[i]; + uint8_t *os = dbMask; os[i] = x; } uint32_t msBits1 = emBits % 8U; @@ -486,9 +482,9 @@ Hacl_RSAPSS_rsapss_sign( uint64_t eq_m = mask1; for (uint32_t i = 0U; i < nLen2; i++) { - uint64_t *os = s; uint64_t x = s[i]; uint64_t x0 = eq_m & x; + uint64_t *os = s; os[i] = x0; } bool eq_b = eq_m == 0xFFFFFFFFFFFFFFFFULL; @@ -553,7 +549,7 @@ Hacl_RSAPSS_rsapss_verify( { uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t mask = acc; bool res; @@ -568,10 +564,9 @@ Hacl_RSAPSS_rsapss_verify( eBits, e, m); - bool ite; if (!((modBits - 1U) % 8U == 0U)) { - ite = true; + res = true; } else { @@ -579,15 +574,7 @@ Hacl_RSAPSS_rsapss_verify( uint32_t j = (modBits - 1U) % 64U; uint64_t tmp = m[i]; uint64_t get_bit = tmp >> j & 1ULL; - ite = get_bit == 0ULL; - } - if (ite) - { - res = true; - } - else - { - res = false; + res = get_bit == 0ULL; } } else diff --git a/src/Hacl_SHA2_Vec128.c b/src/Hacl_SHA2_Vec128.c index 02af75b1..e122dd8c 100644 --- a/src/Hacl_SHA2_Vec128.c +++ b/src/Hacl_SHA2_Vec128.c @@ -35,9 +35,9 @@ static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec128 *os = hash; uint32_t hi = Hacl_Hash_SHA2_h224[i]; Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi); + Lib_IntVector_Intrinsics_vec128 *os = hash; os[i] = x;); } @@ -286,9 +286,9 @@ sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec128 *os = hash; Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]); + Lib_IntVector_Intrinsics_vec128 *os = hash; os[i] = x;); } @@ -515,9 +515,9 @@ static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec128 *os = hash; uint32_t hi = Hacl_Hash_SHA2_h256[i]; Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi); + Lib_IntVector_Intrinsics_vec128 *os = hash; os[i] = x;); } @@ -766,9 +766,9 @@ sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec128 *os = hash; Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]); + Lib_IntVector_Intrinsics_vec128 *os = hash; os[i] = x;); } diff --git a/src/Hacl_SHA2_Vec256.c 
b/src/Hacl_SHA2_Vec256.c index c34767f5..2bee1692 100644 --- a/src/Hacl_SHA2_Vec256.c +++ b/src/Hacl_SHA2_Vec256.c @@ -36,9 +36,9 @@ static inline void sha224_init8(Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; uint32_t hi = Hacl_Hash_SHA2_h224[i]; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -371,9 +371,9 @@ sha224_update8(Hacl_Hash_SHA2_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -785,9 +785,9 @@ static inline void sha256_init8(Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; uint32_t hi = Hacl_Hash_SHA2_h256[i]; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -1120,9 +1120,9 @@ sha256_update8(Hacl_Hash_SHA2_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -1534,9 +1534,9 @@ static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; uint64_t hi = Hacl_Hash_SHA2_h384[i]; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -1769,9 +1769,9 @@ sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -1990,9 +1990,9 @@ static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; uint64_t hi = Hacl_Hash_SHA2_h512[i]; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } @@ -2225,9 +2225,9 @@ sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) 0U, 8U, 1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]); + Lib_IntVector_Intrinsics_vec256 *os = hash; os[i] = x;); } diff --git a/src/Hacl_Salsa20.c b/src/Hacl_Salsa20.c index 151df07d..372fd3c5 100644 --- a/src/Hacl_Salsa20.c +++ b/src/Hacl_Salsa20.c @@ -85,8 +85,8 @@ static inline void salsa20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr) 0U, 16U, 1U, - uint32_t *os = k; uint32_t x = k[i] + ctx[i]; + uint32_t *os = k; os[i] = x;); k[8U] = k[8U] + ctr_u32; } @@ -101,21 +101,21 @@ static inline void salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n) 0U, 8U, 1U, - uint32_t *os = k32; uint8_t *bj = key + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = k32; os[i] = x;); KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = n32; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = n32; os[i] = x;); ctx[0U] = 0x61707865U; uint32_t *k0 = k32; @@ -149,21 +149,21 @@ 
salsa20_encrypt( 0U, 8U, 1U, - uint32_t *os = k32; uint8_t *bj = key + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = k32; os[i] = x;); KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = n32; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = n32; os[i] = x;); ctx[0U] = 0x61707865U; uint32_t *k0 = k32; @@ -192,18 +192,18 @@ salsa20_encrypt( 0U, 16U, 1U, - uint32_t *os = bl; uint8_t *bj = uu____1 + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = bl; uint32_t x = bl[i] ^ k1[i]; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]);); } @@ -219,18 +219,18 @@ salsa20_encrypt( 0U, 16U, 1U, - uint32_t *os = bl; uint8_t *bj = plain + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = bl; uint32_t x = bl[i] ^ k1[i]; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]);); memcpy(uu____2, plain, rem * sizeof (uint8_t)); @@ -254,21 +254,21 @@ salsa20_decrypt( 0U, 8U, 1U, - uint32_t *os = k32; uint8_t *bj = key + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = k32; os[i] = x;); KRML_MAYBE_FOR2(i, 0U, 2U, 1U, - uint32_t *os = n32; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = n32; os[i] = x;); ctx[0U] = 0x61707865U; uint32_t *k0 = k32; @@ -297,18 +297,18 @@ salsa20_decrypt( 0U, 16U, 1U, - uint32_t *os = bl; uint8_t *bj = uu____1 + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = bl; uint32_t x = bl[i] ^ k1[i]; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]);); } @@ -324,18 +324,18 @@ salsa20_decrypt( 0U, 16U, 1U, - uint32_t *os = bl; uint8_t *bj = plain + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, - uint32_t *os = bl; uint32_t x = bl[i] ^ k1[i]; + uint32_t *os = bl; os[i] = x;); KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]);); memcpy(uu____2, plain, rem * sizeof (uint8_t)); @@ -351,21 +351,21 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n) 0U, 8U, 1U, - uint32_t *os = k32; uint8_t *bj = key + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = k32; os[i] = x;); KRML_MAYBE_FOR4(i, 0U, 4U, 1U, - uint32_t *os = n32; uint8_t *bj = n + i * 4U; uint32_t u = load32_le(bj); uint32_t r = u; uint32_t x = r; + uint32_t *os = n32; os[i] = x;); uint32_t *k0 = k32; uint32_t *k1 = k32 + 4U; diff --git a/src/Lib_Memzero0.c b/src/Lib_Memzero0.c index 3d8a1e5f..5c269d23 100644 --- a/src/Lib_Memzero0.c +++ b/src/Lib_Memzero0.c @@ -13,7 +13,7 @@ #include #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__NetBSD__) #include #endif @@ -36,7 +36,7 @@ void Lib_Memzero0_memzero0(void *dst, uint64_t len) { size_t len_ = (size_t) len; #ifdef _WIN32 - SecureZeroMemory(dst, len); + SecureZeroMemory(dst, len_); #elif defined(__APPLE__) && defined(__MACH__) memset_s(dst, len_, 0, len_); #elif (defined(__linux__) && !defined(LINUX_NO_EXPLICIT_BZERO)) || defined(__FreeBSD__) diff --git a/src/msvc/EverCrypt_AEAD.c 
b/src/msvc/EverCrypt_AEAD.c index b0fb4826..89965054 100644 --- a/src/msvc/EverCrypt_AEAD.c +++ b/src/msvc/EverCrypt_AEAD.c @@ -538,26 +538,27 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( KRML_MAYBE_UNUSED_VAR(cipher); KRML_MAYBE_UNUSED_VAR(tag); #if HACL_CAN_COMPILE_VALE - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 176U; + uint8_t ek0[480U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 176U; aes128_key_expansion(k, keys_b0); aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 304U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 304U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 176U; uint8_t tmp_iv[16U] = { 0U }; @@ -637,8 +638,9 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; #else KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n", @@ -680,26 +682,27 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( KRML_MAYBE_UNUSED_VAR(cipher); KRML_MAYBE_UNUSED_VAR(tag); #if HACL_CAN_COMPILE_VALE - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 240U; + uint8_t ek0[544U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 240U; aes256_key_expansion(k, keys_b0); aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 368U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 368U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 240U; uint8_t tmp_iv[16U] = { 0U }; @@ -779,8 +782,9 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; #else KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n", @@ -821,26 +825,27 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) { - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 176U; + uint8_t ek0[480U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 176U; aes128_key_expansion(k, keys_b0); 
aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 304U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 304U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 176U; uint8_t tmp_iv[16U] = { 0U }; @@ -920,8 +925,9 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; } return EverCrypt_Error_UnsupportedAlgorithm; @@ -960,26 +966,27 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) { - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + 240U; + uint8_t ek0[544U] = { 0U }; + uint8_t *keys_b0 = ek0; + uint8_t *hkeys_b0 = ek0 + 240U; aes256_key_expansion(k, keys_b0); aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; + EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek0 }; EverCrypt_AEAD_state_s *s = &p; + EverCrypt_Error_error_code r; if (s == NULL) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); + r = EverCrypt_Error_InvalidKey; } else if (iv_len == 0U) { - KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); + r = EverCrypt_Error_InvalidIVLength; } else { - uint8_t *ek0 = (*s).ek; - uint8_t *scratch_b = ek0 + 368U; - uint8_t *ek1 = ek0; + uint8_t *ek = (*s).ek; + uint8_t *scratch_b = ek + 368U; + uint8_t *ek1 = ek; uint8_t *keys_b = ek1; uint8_t *hkeys_b = ek1 + 240U; uint8_t tmp_iv[16U] = { 0U }; @@ -1059,8 +1066,9 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U, inout_b, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t)); - KRML_HOST_IGNORE(EverCrypt_Error_Success); + r = EverCrypt_Error_Success; } + KRML_MAYBE_UNUSED_VAR(r); return EverCrypt_Error_Success; } return EverCrypt_Error_UnsupportedAlgorithm; diff --git a/src/msvc/EverCrypt_HMAC.c b/src/msvc/EverCrypt_HMAC.c index 386cb17f..baa3b864 100644 --- a/src/msvc/EverCrypt_HMAC.c +++ b/src/msvc/EverCrypt_HMAC.c @@ -81,10 +81,8 @@ EverCrypt_HMAC_compute_sha1( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -105,19 +103,17 @@ EverCrypt_HMAC_compute_sha1( { Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i 
= 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -189,10 +185,8 @@ EverCrypt_HMAC_compute_sha2_256( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -213,19 +207,17 @@ EverCrypt_HMAC_compute_sha2_256( { EverCrypt_HMAC_hash_256(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -311,10 +303,8 @@ EverCrypt_HMAC_compute_sha2_384( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -335,19 +325,17 @@ EverCrypt_HMAC_compute_sha2_384( { Hacl_Hash_SHA2_hash_384(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -441,10 +429,8 @@ EverCrypt_HMAC_compute_sha2_512( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -465,19 +451,17 @@ EverCrypt_HMAC_compute_sha2_512( { Hacl_Hash_SHA2_hash_512(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t 
ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -571,10 +555,8 @@ EverCrypt_HMAC_compute_blake2s( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -595,19 +577,17 @@ EverCrypt_HMAC_compute_blake2s( { Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -620,7 +600,7 @@ EverCrypt_HMAC_compute_blake2s( if (data_len == 0U) { uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad); + Hacl_Hash_Blake2s_update_last(64U, wv, s0, false, 0ULL, 64U, ipad); } else { @@ -655,6 +635,7 @@ EverCrypt_HMAC_compute_blake2s( Hacl_Hash_Blake2s_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); @@ -693,6 +674,7 @@ EverCrypt_HMAC_compute_blake2s( Hacl_Hash_Blake2s_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); @@ -708,10 +690,8 @@ EverCrypt_HMAC_compute_blake2b( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -732,19 +712,17 @@ EverCrypt_HMAC_compute_blake2b( { Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = 
key_block[i]; @@ -757,7 +735,13 @@ EverCrypt_HMAC_compute_blake2b( if (data_len == 0U) { uint64_t wv[16U] = { 0U }; - Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad); + Hacl_Hash_Blake2b_update_last(128U, + wv, + s0, + false, + FStar_UInt128_uint64_to_uint128(0ULL), + 128U, + ipad); } else { @@ -792,6 +776,7 @@ EverCrypt_HMAC_compute_blake2b( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, @@ -831,6 +816,7 @@ EverCrypt_HMAC_compute_blake2b( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, diff --git a/src/msvc/EverCrypt_Hash.c b/src/msvc/EverCrypt_Hash.c index bfafa9be..153063cc 100644 --- a/src/msvc/EverCrypt_Hash.c +++ b/src/msvc/EverCrypt_Hash.c @@ -616,7 +616,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ { uint32_t *p1 = scrut.case_Blake2S_s; uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(last_len, wv, p1, prev_len, last_len, last); + Hacl_Hash_Blake2s_update_last(last_len, wv, p1, false, prev_len, last_len, last); return; } if (scrut.tag == Blake2S_128_s) @@ -624,7 +624,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s; #if HACL_CAN_COMPILE_VEC128 KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Hash_Blake2s_Simd128_update_last(last_len, wv, p1, prev_len, last_len, last); + Hacl_Hash_Blake2s_Simd128_update_last(last_len, wv, p1, false, prev_len, last_len, last); return; #else KRML_MAYBE_UNUSED_VAR(p1); @@ -638,6 +638,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ Hacl_Hash_Blake2b_update_last(last_len, wv, p1, + false, FStar_UInt128_uint64_to_uint128(prev_len), last_len, last); @@ -651,6 +652,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_ Hacl_Hash_Blake2b_Simd256_update_last(last_len, wv, p1, + false, FStar_UInt128_uint64_to_uint128(prev_len), last_len, last); diff --git a/src/msvc/Hacl_Bignum.c b/src/msvc/Hacl_Bignum.c index b99423f3..a87f2267 100644 --- a/src/msvc/Hacl_Bignum.c +++ b/src/msvc/Hacl_Bignum.c @@ -832,7 +832,7 @@ uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m1 = acc; return m0 & m1; @@ -1023,7 +1023,7 @@ uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; return m0 & m1; @@ -1415,7 +1415,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m10 = acc0; uint32_t m00 = m0 & m10; @@ -1442,7 +1442,7 @@ 
Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( { uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t res = acc; m1 = res; @@ -1456,7 +1456,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t m = m1 & m2; @@ -1809,7 +1809,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m10 = acc0; uint64_t m00 = m0 & m10; @@ -1836,7 +1836,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; m1 = res; @@ -1850,7 +1850,7 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t m = m1 & m2; diff --git a/src/msvc/Hacl_Bignum256.c b/src/msvc/Hacl_Bignum256.c index a4f00b83..bd67656b 100644 --- a/src/msvc/Hacl_Bignum256.c +++ b/src/msvc/Hacl_Bignum256.c @@ -512,7 +512,7 @@ bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res) 1U, uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t m1 = acc; uint64_t is_valid_m = m0 & m1; uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n); @@ -544,7 +544,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc0 = (beq & acc0) | (~beq & blt);); uint64_t m10 = acc0; uint64_t m00 = m0 & m10; uint32_t bLen; @@ -570,7 +570,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; m1 = res; @@ -586,7 +586,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t m2 = acc; uint64_t m = m1 & m2; return m00 & m; @@ -990,7 +990,7 @@ bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *re 1U, uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - 
acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc0 = (beq & acc0) | (~beq & blt);); uint64_t m1 = acc0; uint64_t m00 = m0 & m1; uint64_t bn_zero[4U] = { 0U }; @@ -1011,7 +1011,7 @@ bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *re 1U, uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t m2 = acc; uint64_t is_valid_m = (m00 & ~m10) & m2; uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n); @@ -1351,7 +1351,7 @@ uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); return acc; } diff --git a/src/msvc/Hacl_Bignum256_32.c b/src/msvc/Hacl_Bignum256_32.c index 29a5a52e..b4490e6c 100644 --- a/src/msvc/Hacl_Bignum256_32.c +++ b/src/msvc/Hacl_Bignum256_32.c @@ -532,7 +532,7 @@ bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res) 1U, uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); uint32_t m1 = acc; uint32_t is_valid_m = m0 & m1; uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n); @@ -564,7 +564,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) 1U, uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc0 = (beq & acc0) | (~beq & blt);); uint32_t m10 = acc0; uint32_t m00 = m0 & m10; uint32_t bLen; @@ -590,7 +590,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t res = acc; m1 = res; @@ -606,7 +606,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) 1U, uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); uint32_t m2 = acc; uint32_t m = m1 & m2; return m00 & m; @@ -1010,7 +1010,7 @@ bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t 1U, uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc0 = (beq & acc0) | (~beq & blt);); uint32_t m1 = acc0; uint32_t m00 = m0 & m1; uint32_t bn_zero[8U] = { 0U }; @@ -1031,7 +1031,7 @@ bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t 1U, uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); uint32_t m2 = acc; uint32_t is_valid_m = (m00 & ~m10) & m2; uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n); @@ -1399,7 +1399,7 @@ uint32_t 
Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b) 1U, uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));); + acc = (beq & acc) | (~beq & blt);); return acc; } diff --git a/src/msvc/Hacl_Bignum32.c b/src/msvc/Hacl_Bignum32.c index 55c3f90c..dcb7b7ec 100644 --- a/src/msvc/Hacl_Bignum32.c +++ b/src/msvc/Hacl_Bignum32.c @@ -46,9 +46,18 @@ of `len` unsigned 32-bit integers, i.e. uint32_t[len]. /** Write `a + b mod 2 ^ (32 * len)` in `res`. - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + This function returns the carry. + + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly equal memory + location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the carry is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_add(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -60,7 +69,16 @@ Write `a - b mod 2 ^ (32 * len)` in `res`. This functions returns the carry. - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[out] res Points to `len` number of limbs where the carry is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. */ uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -70,12 +88,23 @@ uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res /** Write `(a + b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. 
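For illustration, a minimal caller sketch of the overlap contract documented above, assuming the Hacl_Bignum32.h header from this tree and 4-limb operands:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Bignum32.h"

int main(void)
{
  /* 4 limbs = one 128-bit bignum, least-significant limb first. */
  uint32_t a[4U] = { 0xFFFFFFFFU, 0xFFFFFFFFU, 0U, 0U };
  uint32_t b[4U] = { 1U, 0U, 0U, 0U };
  uint32_t res[4U] = { 0U };
  /* Distinct buffers are always fine. */
  uint32_t c0 = Hacl_Bignum32_add(4U, a, b, res);
  /* Exact aliasing of the output with an input is allowed by the
     contract above; partial overlap is not. */
  uint32_t c1 = Hacl_Bignum32_add(4U, a, b, a);
  printf("carry0=%" PRIu32 " carry1=%" PRIu32 "\n", c0, c1);
  return 0;
}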
+ + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -85,12 +114,23 @@ void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, /** Write `(a - b) mod n` in `res`. - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `b` or `res`. May have exactly + equal memory location to `b` or `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not + partially overlap the memory locations of `a` or `res`. May have exactly + equal memory location to `a` or `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `res`. + @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`. + Must not partially overlap the memory locations of `a` or `b`. May have + exactly equal memory location to `a` or `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `a < n` + - `b < n` */ void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -100,8 +140,13 @@ void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, /** Write `a * b` in `res`. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `b` and `res`. + @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory locations of `a` and `b`. */ void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) { @@ -114,8 +159,10 @@ void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) /** Write `a * a` in `res`. - The argument a is meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`. + Must be disjoint from the memory location of `a`. */ void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res) { @@ -149,13 +196,19 @@ bn_slow_precomp( /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. 
Must be + disjoint from the memory location of `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `n`. + + @return `false` if any precondition is violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `1 < n` + - `n % 2 = 1` */ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) { @@ -171,7 +224,7 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m1 = acc; uint32_t is_valid_m = m0 & m1; @@ -195,22 +248,30 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_vartime( @@ -238,22 +299,30 @@ Hacl_Bignum32_mod_exp_vartime( /** Write `a ^ b mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. 
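A minimal usage sketch for the variable-time exponentiation described above, assuming the (len, n, a, bBits, b, res) parameter order of Hacl_Bignum32_mod_exp_vartime as declared in Hacl_Bignum32.h:

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Bignum32.h"

/* res = a ^ b mod n over `len` limbs. When the exact bit length of b is
   unknown, bBits = 32 * len is the safe (slower) default. */
bool exp_demo(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
{
  uint32_t bBits = 32U * len;
  /* Returns false if a documented precondition (n % 2 = 1, 1 < n,
     a < n, b < pow2 bBits) does not hold. */
  return Hacl_Bignum32_mod_exp_vartime(len, n, a, bBits, b, res);
}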
- - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n + This function is constant-time over its argument `b`, at the cost of a slower + execution time than `mod_exp_vartime_*`. + + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a`, `b`, and `n`. + + @return `false` if any preconditions are violated, `true` otherwise. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` + - `b < pow2 bBits` + - `a < n` */ bool Hacl_Bignum32_mod_exp_consttime( @@ -281,18 +350,23 @@ Hacl_Bignum32_mod_exp_consttime( /** Write `a ^ (-1) mod n` in `res`. - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `n` and `res`. + @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a` and `n`. + + @return `false` if any preconditions (except the precondition: `n` is a prime) + are violated, `true` otherwise. 
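A minimal sketch of calling the prime-modulus inverse, whose signature appears just below; the primality of n remains the caller's obligation, as stated above:

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Bignum32.h"

/* res = a ^ (-1) mod n. The return value reports only the checkable
   preconditions; it cannot verify that n is prime. */
bool inverse_checked(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
{
  bool ok = Hacl_Bignum32_mod_inv_prime_vartime(len, n, a, res);
  return ok; /* false: n % 2 = 1, 1 < n, 0 < a or a < n was violated */
}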
+ + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `n % 2 = 1` + - `1 < n` + - `0 < a` + - `a < n` */ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) { @@ -308,7 +382,7 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m1 = acc0; uint32_t m00 = m0 & m1; @@ -329,7 +403,7 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t is_valid_m = (m00 & ~m10) & m2; @@ -393,15 +467,16 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, /** Heap-allocate and initialize a montgomery context. - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n + @param n Points to `len` number of limbs, i.e. `uint32_t[len]`. - The caller will need to call Hacl_Bignum32_mont_ctx_free on the return value - to avoid memory leaks. + @return A pointer to an allocated and initialized Montgomery context is returned. + Clients will need to call `Hacl_Bignum32_mont_ctx_free` on the return value to + avoid memory leaks. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n % 2 = 1` + - `1 < n` */ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum32_mont_ctx_init(uint32_t len, uint32_t *n) @@ -429,7 +504,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 /** Deallocate the memory previously allocated by Hacl_Bignum32_mont_ctx_init. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. */ void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) { @@ -444,9 +519,11 @@ void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) /** Write `a mod n` in `res`. - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. */ void Hacl_Bignum32_mod_precomp( @@ -464,21 +541,25 @@ Hacl_Bignum32_mod_precomp( /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. 
A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + This function is *NOT* constant-time on the argument `b`. See the + `mod_exp_consttime_*` functions for constant-time variants. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_vartime_precomp( @@ -505,21 +586,25 @@ Hacl_Bignum32_mod_exp_vartime_precomp( /** Write `a ^ b mod n` in `res`. - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n + execution time than `mod_exp_vartime_*`. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` */ void Hacl_Bignum32_mod_exp_consttime_precomp( @@ -546,14 +631,17 @@ Hacl_Bignum32_mod_exp_consttime_precomp( /** Write `a ^ (-1) mod n` in `res`. 
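[Editor's illustration, not part of the patch: when the same modulus is reused across many operations, the *_precomp variants above amortize the Montgomery precomputation. A minimal lifecycle sketch, assuming the (k, a, bBits, b, res) parameter order of the precomp functions as declared in Hacl_Bignum32.h.]

#include "Hacl_Bignum32.h"

/* Sketch: build one precomputed context, run one exponentiation, release it.
   n must be odd and > 1; a < n; b < 2^bBits, per the preconditions above. */
static void example_mont_ctx_lifecycle(uint32_t len, uint32_t *n, uint32_t *a,
                                       uint32_t bBits, uint32_t *b, uint32_t *res)
{
  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k = Hacl_Bignum32_mont_ctx_init(len, n);
  Hacl_Bignum32_mod_exp_consttime_precomp(k, a, bBits, b, res);
  Hacl_Bignum32_mont_ctx_free(k); /* required, as documented, to avoid a leak */
}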
- The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n + @param[in] k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `n` is a prime + - `0 < a` + - `a < n` */ void Hacl_Bignum32_mod_inv_prime_vartime_precomp( @@ -623,13 +711,13 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp( /** Load a bid-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) { @@ -664,13 +752,13 @@ uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) /** Load a little-endian bignum from memory. - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. + @param len Size of `b` as number of bytes. + @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. */ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) { @@ -707,8 +795,11 @@ uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) /** Serialize a bignum into big-endian memory. - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res) { @@ -727,8 +818,11 @@ void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res) /** Serialize a bignum into little-endian memory. 
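[Editor's illustration, not part of the patch: a round-trip sketch for the big-endian loader and serializer documented above. The heap allocation returned by the loader must be released with free(3), as documented.]

#include <stdlib.h>
#include <string.h>
#include "Hacl_Bignum32.h"

/* Load 32 big-endian bytes into a bignum (ceil(32/4) = 8 limbs), then serialize back. */
static int example_bytes_be_roundtrip(uint8_t in[32], uint8_t out[32])
{
  uint32_t *bn = Hacl_Bignum32_new_bn_from_bytes_be(32U, in);
  if (bn == NULL) { return -1; }             /* allocation failed */
  Hacl_Bignum32_bn_to_bytes_be(32U, bn, out);
  free(bn);                                  /* caller owns the allocation */
  return memcmp(in, out, 32U) == 0 ? 0 : 1;
}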
- The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. */ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res) { @@ -753,7 +847,11 @@ void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res) /** Returns 2^32 - 1 if a < b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if `a < b`, otherwise, `0`. */ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) { @@ -762,7 +860,7 @@ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } return acc; } @@ -770,7 +868,11 @@ uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) /** Returns 2^32 - 1 if a = b, otherwise returns 0. - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. + @param len Number of limbs. + @param a Points to `len` number of limbs, i.e. `uint32_t[len]`. + @param b Points to `len` number of limbs, i.e. `uint32_t[len]`. + + @return `2^32 - 1` if a = b, otherwise, `0`. */ uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b) { diff --git a/src/msvc/Hacl_Bignum4096.c b/src/msvc/Hacl_Bignum4096.c index 920ae2fb..c7c24306 100644 --- a/src/msvc/Hacl_Bignum4096.c +++ b/src/msvc/Hacl_Bignum4096.c @@ -459,7 +459,7 @@ bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; uint64_t is_valid_m = m0 & m1; @@ -490,7 +490,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m10 = acc0; uint64_t m00 = m0 & m10; @@ -517,7 +517,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; m1 = res; @@ -531,7 +531,7 @@ static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t m = m1 & m2; @@ -930,7 +930,7 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r { uint64_t beq = 
FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m1 = acc0; uint64_t m00 = m0 & m1; @@ -949,7 +949,7 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t is_valid_m = (m00 & ~m10) & m2; @@ -1326,7 +1326,7 @@ uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } return acc; } diff --git a/src/msvc/Hacl_Bignum4096_32.c b/src/msvc/Hacl_Bignum4096_32.c index f3330918..0d54cb21 100644 --- a/src/msvc/Hacl_Bignum4096_32.c +++ b/src/msvc/Hacl_Bignum4096_32.c @@ -451,7 +451,7 @@ bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m1 = acc; uint32_t is_valid_m = m0 & m1; @@ -482,7 +482,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m10 = acc0; uint32_t m00 = m0 & m10; @@ -509,7 +509,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t res = acc; m1 = res; @@ -523,7 +523,7 @@ static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t m = m1 & m2; @@ -922,7 +922,7 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t { uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc0 = (beq & acc0) | (~beq & blt); } uint32_t m1 = acc0; uint32_t m00 = m0 & m1; @@ -941,7 +941,7 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t { uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); } uint32_t m2 = acc; uint32_t is_valid_m = (m00 & ~m10) & m2; @@ -1317,7 +1317,7 @@ uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b) { uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + acc = (beq & acc) | (~beq & blt); 
} return acc; } diff --git a/src/msvc/Hacl_Bignum64.c b/src/msvc/Hacl_Bignum64.c index e64b1a54..499ca740 100644 --- a/src/msvc/Hacl_Bignum64.c +++ b/src/msvc/Hacl_Bignum64.c @@ -170,7 +170,7 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res) { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; uint64_t is_valid_m = m0 & m1; @@ -307,7 +307,7 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a, { uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t m1 = acc0; uint64_t m00 = m0 & m1; @@ -328,7 +328,7 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a, { uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m2 = acc; uint64_t is_valid_m = (m00 & ~m10) & m2; @@ -761,7 +761,7 @@ uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } return acc; } diff --git a/src/msvc/Hacl_Ed25519.c b/src/msvc/Hacl_Ed25519.c index d1f8edf2..61e379d2 100644 --- a/src/msvc/Hacl_Ed25519.c +++ b/src/msvc/Hacl_Ed25519.c @@ -509,11 +509,7 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) Hacl_Bignum25519_reduce_513(t01); reduce(t01); bool z1 = is_0(t01); - if (z1 == false) - { - res = false; - } - else + if (z1) { uint64_t *x32 = tmp + 5U; uint64_t *t0 = tmp + 10U; @@ -534,6 +530,10 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) memcpy(x, x32, 5U * sizeof (uint64_t)); res = true; } + else + { + res = false; + } } } bool res0 = res; @@ -551,11 +551,7 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t * Hacl_Bignum25519_load_51(y, s); bool z0 = recover_x(x, y, sign); bool res; - if (z0 == false) - { - res = false; - } - else + if (z0) { uint64_t *outx = out; uint64_t *outy = out + 5U; @@ -571,6 +567,10 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t * fmul0(outt, x, y); res = true; } + else + { + res = false; + } bool res0 = res; return res0; } @@ -1150,11 +1150,7 @@ static inline bool gte_q(uint64_t *s) { return false; } - if (s3 > 0x00000000000000ULL) - { - return true; - } - if (s2 > 0x000000000014deULL) + if (s3 > 0x00000000000000ULL || s2 > 0x000000000014deULL) { return true; } @@ -1170,11 +1166,7 @@ static inline bool gte_q(uint64_t *s) { return false; } - if (s0 >= 0x12631a5cf5d3edULL) - { - return true; - } - return false; + return s0 >= 0x12631a5cf5d3edULL; } static inline bool eq(uint64_t *a, uint64_t *b) diff --git a/src/msvc/Hacl_FFDHE.c b/src/msvc/Hacl_FFDHE.c index a2cdfa52..9297c8b4 100644 --- a/src/msvc/Hacl_FFDHE.c +++ b/src/msvc/Hacl_FFDHE.c @@ -158,6 +158,7 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui uint64_t *p_n1 = (uint64_t *)alloca(nLen * sizeof (uint64_t)); memset(p_n1, 0U, nLen * sizeof 
(uint64_t)); uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, p_n[0U], 1ULL, p_n1); + uint64_t c1; if (1U < nLen) { uint64_t *a1 = p_n + 1U; @@ -184,13 +185,14 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i); } - uint64_t c1 = c; - KRML_MAYBE_UNUSED_VAR(c1); + uint64_t c10 = c; + c1 = c10; } else { - KRML_MAYBE_UNUSED_VAR(c0); + c1 = c0; } + KRML_MAYBE_UNUSED_VAR(c1); KRML_CHECK_SIZE(sizeof (uint64_t), nLen); uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t)); memset(b2, 0U, nLen * sizeof (uint64_t)); @@ -202,7 +204,7 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui { uint64_t beq = FStar_UInt64_eq_mask(b2[i], pk_n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], pk_n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc0 = (beq & acc0) | (~beq & blt); } uint64_t res = acc0; uint64_t m0 = res; @@ -211,7 +213,7 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui { uint64_t beq = FStar_UInt64_eq_mask(pk_n[i], p_n1[i]); uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i], p_n1[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t m1 = acc; return m0 & m1; diff --git a/src/msvc/Hacl_Frodo_KEM.c b/src/msvc/Hacl_Frodo_KEM.c index e0a65a47..f15d57ac 100644 --- a/src/msvc/Hacl_Frodo_KEM.c +++ b/src/msvc/Hacl_Frodo_KEM.c @@ -30,6 +30,7 @@ void randombytes_(uint32_t len, uint8_t *res) { - Lib_RandomBuffer_System_randombytes(res, len); + bool b = Lib_RandomBuffer_System_randombytes(res, len); + KRML_MAYBE_UNUSED_VAR(b); } diff --git a/src/msvc/Hacl_HMAC.c b/src/msvc/Hacl_HMAC.c index 63ab2032..d3f000b0 100644 --- a/src/msvc/Hacl_HMAC.c +++ b/src/msvc/Hacl_HMAC.c @@ -26,11 +26,123 @@ #include "internal/Hacl_HMAC.h" #include "internal/Hacl_Krmllib.h" +#include "internal/Hacl_Hash_SHA3.h" #include "internal/Hacl_Hash_SHA2.h" #include "internal/Hacl_Hash_SHA1.h" +#include "internal/Hacl_Hash_MD5.h" #include "internal/Hacl_Hash_Blake2s.h" #include "internal/Hacl_Hash_Blake2b.h" +/** +Write the HMAC-MD5 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. +`dst` must point to 16 bytes of memory. 
+*/ +void +Hacl_HMAC_compute_md5( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 16U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_MD5_hash_oneshot(nkey, key, key_len); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U }; + uint8_t *dst1 = ipad; + if (data_len == 0U) + { + Hacl_Hash_MD5_update_last(s, 0ULL, ipad, 64U); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_MD5_update_multi(s, ipad, 1U); + Hacl_Hash_MD5_update_multi(s, full_blocks, n_blocks); + Hacl_Hash_MD5_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + } + Hacl_Hash_MD5_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_MD5_init(s); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 16U / block_len; + uint32_t rem0 = 16U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 16U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_MD5_update_multi(s, opad, 1U); + Hacl_Hash_MD5_update_multi(s, full_blocks, n_blocks); + Hacl_Hash_MD5_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len); + Hacl_Hash_MD5_finish(s, dst); +} + /** Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`. 
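[Editor's illustration, not part of the patch: the newly added one-shot HMAC entry points all share the (dst, key, key_len, data, data_len) calling convention shown above. A caller-side sketch for the MD5 instance, with hypothetical key and message buffers.]

#include <stdint.h>
#include "Hacl_HMAC.h"

/* HMAC-MD5: 16-byte tag; keys longer than 64 bytes are hashed first, shorter keys padded. */
static void example_hmac_md5(void)
{
  uint8_t key[3] = { 'k', 'e', 'y' };
  uint8_t msg[5] = { 'h', 'e', 'l', 'l', 'o' };
  uint8_t tag[16];
  Hacl_HMAC_compute_md5(tag, key, 3U, msg, 5U);
  /* Real callers should verify tags with a constant-time comparison. */
}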
@@ -46,10 +158,8 @@ Hacl_HMAC_compute_sha1( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -70,19 +180,17 @@ Hacl_HMAC_compute_sha1( { Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -145,6 +253,130 @@ Hacl_HMAC_compute_sha1( Hacl_Hash_SHA1_finish(s, dst); } +/** +Write the HMAC-SHA-2-224 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 28 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha2_224( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 28U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA2_hash_224(nkey, key, key_len); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t st[8U] = { 0U }; + KRML_MAYBE_FOR8(i, + 0U, + 8U, + 1U, + uint32_t *os = st; + uint32_t x = Hacl_Hash_SHA2_h224[i]; + os[i] = x;); + uint32_t *s = st; + uint8_t *dst1 = ipad; + if (data_len == 0U) + { + Hacl_Hash_SHA2_sha224_update_last(0ULL + (uint64_t)64U, 64U, ipad, s); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA2_sha224_update_nblocks(64U, ipad, s); + Hacl_Hash_SHA2_sha224_update_nblocks(n_blocks * 64U, full_blocks, s); + Hacl_Hash_SHA2_sha224_update_last((uint64_t)64U + 
(uint64_t)full_blocks_len + (uint64_t)rem_len, + rem_len, + rem, + s); + } + Hacl_Hash_SHA2_sha224_finish(s, dst1); + uint8_t *hash1 = ipad; + Hacl_Hash_SHA2_sha224_init(s); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 28U / block_len; + uint32_t rem0 = 28U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 28U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA2_sha224_update_nblocks(64U, opad, s); + Hacl_Hash_SHA2_sha224_update_nblocks(n_blocks * 64U, full_blocks, s); + Hacl_Hash_SHA2_sha224_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len, + rem_len, + rem, + s); + Hacl_Hash_SHA2_sha224_finish(s, dst); +} + /** Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`. @@ -160,10 +392,8 @@ Hacl_HMAC_compute_sha2_256( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -184,19 +414,17 @@ Hacl_HMAC_compute_sha2_256( { Hacl_Hash_SHA2_hash_256(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -288,10 +516,8 @@ Hacl_HMAC_compute_sha2_384( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -312,19 +538,17 @@ Hacl_HMAC_compute_sha2_384( { Hacl_Hash_SHA2_hash_384(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -424,10 +648,8 
@@ Hacl_HMAC_compute_sha2_512( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -448,19 +670,17 @@ Hacl_HMAC_compute_sha2_512( { Hacl_Hash_SHA2_hash_512(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -546,13 +766,13 @@ Hacl_HMAC_compute_sha2_512( } /** -Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`. +Write the HMAC-SHA-3-224 MAC of a message (`data`) by using a key (`key`) into `dst`. -The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. -`dst` must point to 32 bytes of memory. +The key can be any length and will be hashed if it is longer and padded if it is shorter than 144 bytes. +`dst` must point to 28 bytes of memory. */ void -Hacl_HMAC_compute_blake2s_32( +Hacl_HMAC_compute_sha3_224( uint8_t *dst, uint8_t *key, uint32_t key_len, @@ -560,60 +780,53 @@ Hacl_HMAC_compute_blake2s_32( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[144U]; + memset(key_block, 0U, 144U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; - if (key_len <= 64U) + if (key_len <= 144U) { ite = key_len; } else { - ite = 32U; + ite = 28U; } uint8_t *zeroes = key_block + ite; KRML_MAYBE_UNUSED_VAR(zeroes); - if (key_len <= 64U) + if (key_len <= 144U) { memcpy(nkey, key, key_len * sizeof (uint8_t)); } else { - Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); + Hacl_Hash_SHA3_sha3_224(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[144U]; + memset(ipad, 0x36U, 144U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 144U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[144U]; + memset(opad, 0x5cU, 144U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 144U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } - uint32_t s[16U] = { 0U }; - Hacl_Hash_Blake2s_init(s, 0U, 32U); - uint32_t *s0 = s; + uint64_t s[25U] = { 0U }; uint8_t *dst1 = ipad; if (data_len == 0U) { - uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(64U, 
wv, s0, 0ULL, 64U, ipad); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_224, s, ipad, 144U); } else { - uint32_t block_len = 64U; + uint32_t block_len = 144U; uint32_t n_blocks0 = data_len / block_len; uint32_t rem0 = data_len % block_len; K___uint32_t_uint32_t scrut; @@ -631,34 +844,29 @@ Hacl_HMAC_compute_blake2s_32( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = data; uint8_t *rem = data + full_blocks_len; - uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U); - uint32_t wv0[16U] = { 0U }; - Hacl_Hash_Blake2s_update_multi(n_blocks * 64U, - wv0, - s0, - (uint64_t)block_len, - full_blocks, - n_blocks); - uint32_t wv1[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(rem_len, - wv1, - s0, - (uint64_t)64U + (uint64_t)full_blocks_len, - rem_len, - rem); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_224, s, rem, rem_len); } - Hacl_Hash_Blake2s_finish(32U, dst1, s0); + uint32_t remOut = 28U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 28U - remOut, hbuf0, remOut * sizeof (uint8_t)); uint8_t *hash1 = ipad; - Hacl_Hash_Blake2s_init(s0, 0U, 32U); - uint32_t block_len = 64U; - uint32_t n_blocks0 = 32U / block_len; - uint32_t rem0 = 32U % block_len; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 144U; + uint32_t n_blocks0 = 28U / block_len; + uint32_t rem0 = 28U % block_len; K___uint32_t_uint32_t scrut; if (n_blocks0 > 0U && rem0 == 0U) { uint32_t n_blocks_ = n_blocks0 - 1U; - scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len }); + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 28U - n_blocks_ * block_len }); } else { @@ -669,33 +877,28 @@ Hacl_HMAC_compute_blake2s_32( uint32_t full_blocks_len = n_blocks * block_len; uint8_t *full_blocks = hash1; uint8_t *rem = hash1 + full_blocks_len; - uint32_t wv[16U] = { 0U }; - Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U); - uint32_t wv0[16U] = { 0U }; - Hacl_Hash_Blake2s_update_multi(n_blocks * 64U, - wv0, - s0, - (uint64_t)block_len, - full_blocks, - n_blocks); - uint32_t wv1[16U] = { 0U }; - Hacl_Hash_Blake2s_update_last(rem_len, - wv1, - s0, - (uint64_t)64U + (uint64_t)full_blocks_len, - rem_len, - rem); - Hacl_Hash_Blake2s_finish(32U, dst, s0); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_224, s, rem, rem_len); + uint32_t remOut0 = 28U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 28U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); } /** -Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`. +Write the HMAC-SHA-3-256 MAC of a message (`data`) by using a key (`key`) into `dst`. -The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. -`dst` must point to 64 bytes of memory. 
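[Editor's illustration, not part of the patch: the new HMAC-SHA3-224 instance above uses a 144-byte rate block and writes a 28-byte tag. A caller-side sketch follows.]

#include <stdint.h>
#include "Hacl_HMAC.h"

/* HMAC-SHA3-224: 28-byte output; keys longer than 144 bytes are hashed first. */
static void example_hmac_sha3_224(uint8_t *key, uint32_t key_len, uint8_t *data, uint32_t data_len)
{
  uint8_t tag[28];
  Hacl_HMAC_compute_sha3_224(tag, key, key_len, data, data_len);
  (void)tag; /* use or verify the tag in real code */
}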
+The key can be any length and will be hashed if it is longer and padded if it is shorter than 136 bytes. +`dst` must point to 32 bytes of memory. */ void -Hacl_HMAC_compute_blake2b_32( +Hacl_HMAC_compute_sha3_256( uint8_t *dst, uint8_t *key, uint32_t key_len, @@ -703,56 +906,577 @@ Hacl_HMAC_compute_blake2b_32( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[136U]; + memset(key_block, 0U, 136U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; - if (key_len <= 128U) + if (key_len <= 136U) { ite = key_len; } else { - ite = 64U; + ite = 32U; } uint8_t *zeroes = key_block + ite; KRML_MAYBE_UNUSED_VAR(zeroes); - if (key_len <= 128U) + if (key_len <= 136U) { memcpy(nkey, key, key_len * sizeof (uint8_t)); } else { - Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); + Hacl_Hash_SHA3_sha3_256(nkey, key, key_len); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[136U]; + memset(ipad, 0x36U, 136U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 136U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[136U]; + memset(opad, 0x5cU, 136U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 136U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; opad[i] = (uint32_t)xi ^ (uint32_t)yi; } - uint64_t s[16U] = { 0U }; - Hacl_Hash_Blake2b_init(s, 0U, 64U); - uint64_t *s0 = s; + uint64_t s[25U] = { 0U }; uint8_t *dst1 = ipad; if (data_len == 0U) { - uint64_t wv[16U] = { 0U }; - Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_256, s, ipad, 136U); + } + else + { + uint32_t block_len = 136U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_256, s, rem, rem_len); + } + uint32_t remOut = 32U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 32U - remOut, hbuf0, remOut * sizeof (uint8_t)); + uint8_t *hash1 = ipad; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 136U; + uint32_t n_blocks0 = 32U / block_len; + uint32_t rem0 = 32U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t 
n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_256, s, rem, rem_len); + uint32_t remOut0 = 32U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 32U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); +} + +/** +Write the HMAC-SHA-3-384 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 104 bytes. +`dst` must point to 48 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_384( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[104U]; + memset(key_block, 0U, 104U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 104U) + { + ite = key_len; + } + else + { + ite = 48U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 104U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA3_sha3_384(nkey, key, key_len); + } + uint8_t ipad[104U]; + memset(ipad, 0x36U, 104U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 104U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[104U]; + memset(opad, 0x5cU, 104U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 104U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint64_t s[25U] = { 0U }; + uint8_t *dst1 = ipad; + if (data_len == 0U) + { + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_384, s, ipad, 104U); + } + else + { + uint32_t block_len = 104U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_384, s, rem, rem_len); + } + uint32_t remOut = 48U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + memcpy(dst1 + 48U - remOut, hbuf0, remOut * sizeof (uint8_t)); + uint8_t *hash1 = ipad; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 
104U; + uint32_t n_blocks0 = 48U / block_len; + uint32_t rem0 = 48U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_384, s, rem, rem_len); + uint32_t remOut0 = 48U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 48U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); +} + +/** +Write the HMAC-SHA-3-512 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 72 bytes. +`dst` must point to 64 bytes of memory. +*/ +void +Hacl_HMAC_compute_sha3_512( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[72U]; + memset(key_block, 0U, 72U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 72U) + { + ite = key_len; + } + else + { + ite = 64U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 72U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_SHA3_sha3_512(nkey, key, key_len); + } + uint8_t ipad[72U]; + memset(ipad, 0x36U, 72U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 72U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[72U]; + memset(opad, 0x5cU, 72U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 72U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint64_t s[25U] = { 0U }; + uint8_t *dst1 = ipad; + if (data_len == 0U) + { + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_512, s, ipad, 72U); + } + else + { + uint32_t block_len = 72U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, ipad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_512, s, rem, rem_len); + } + uint32_t remOut = 64U; + uint8_t hbuf0[256U] = { 0U }; + uint64_t ws0[32U] = { 0U }; + memcpy(ws0, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf0 + i * 8U, ws0[i]); + } + 
memcpy(dst1 + 64U - remOut, hbuf0, remOut * sizeof (uint8_t)); + uint8_t *hash1 = ipad; + memset(s, 0U, 25U * sizeof (uint64_t)); + uint32_t block_len = 72U; + uint32_t n_blocks0 = 64U / block_len; + uint32_t rem0 = 64U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, opad, 1U); + Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, s, full_blocks, n_blocks); + Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_SHA3_512, s, rem, rem_len); + uint32_t remOut0 = 64U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 64U - remOut0, hbuf, remOut0 * sizeof (uint8_t)); +} + +/** +Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 32 bytes of memory. +*/ +void +Hacl_HMAC_compute_blake2s_32( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 64U) + { + ite = key_len; + } + else + { + ite = 32U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 64U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); + } + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint32_t s[16U] = { 0U }; + Hacl_Hash_Blake2s_init(s, 0U, 32U); + uint32_t *s0 = s; + uint8_t *dst1 = ipad; + if (data_len == 0U) + { + uint32_t wv[16U] = { 0U }; + Hacl_Hash_Blake2s_update_last(64U, wv, s0, false, 0ULL, 64U, ipad); + } + else + { + uint32_t block_len = 64U; + uint32_t n_blocks0 = data_len / block_len; + uint32_t rem0 = data_len % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = data; + uint8_t *rem = data + full_blocks_len; + uint32_t wv[16U] = { 0U }; + Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U); + uint32_t wv0[16U] = { 0U }; + Hacl_Hash_Blake2s_update_multi(n_blocks * 64U, + wv0, + s0, + (uint64_t)block_len, + full_blocks, + 
n_blocks); + uint32_t wv1[16U] = { 0U }; + Hacl_Hash_Blake2s_update_last(rem_len, + wv1, + s0, + false, + (uint64_t)64U + (uint64_t)full_blocks_len, + rem_len, + rem); + } + Hacl_Hash_Blake2s_finish(32U, dst1, s0); + uint8_t *hash1 = ipad; + Hacl_Hash_Blake2s_init(s0, 0U, 32U); + uint32_t block_len = 64U; + uint32_t n_blocks0 = 32U / block_len; + uint32_t rem0 = 32U % block_len; + K___uint32_t_uint32_t scrut; + if (n_blocks0 > 0U && rem0 == 0U) + { + uint32_t n_blocks_ = n_blocks0 - 1U; + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len }); + } + else + { + scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks0, .snd = rem0 }); + } + uint32_t n_blocks = scrut.fst; + uint32_t rem_len = scrut.snd; + uint32_t full_blocks_len = n_blocks * block_len; + uint8_t *full_blocks = hash1; + uint8_t *rem = hash1 + full_blocks_len; + uint32_t wv[16U] = { 0U }; + Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U); + uint32_t wv0[16U] = { 0U }; + Hacl_Hash_Blake2s_update_multi(n_blocks * 64U, + wv0, + s0, + (uint64_t)block_len, + full_blocks, + n_blocks); + uint32_t wv1[16U] = { 0U }; + Hacl_Hash_Blake2s_update_last(rem_len, + wv1, + s0, + false, + (uint64_t)64U + (uint64_t)full_blocks_len, + rem_len, + rem); + Hacl_Hash_Blake2s_finish(32U, dst, s0); +} + +/** +Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 64 bytes of memory. +*/ +void +Hacl_HMAC_compute_blake2b_32( + uint8_t *dst, + uint8_t *key, + uint32_t key_len, + uint8_t *data, + uint32_t data_len +) +{ + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); + uint8_t *nkey = key_block; + uint32_t ite; + if (key_len <= 128U) + { + ite = key_len; + } + else + { + ite = 64U; + } + uint8_t *zeroes = key_block + ite; + KRML_MAYBE_UNUSED_VAR(zeroes); + if (key_len <= 128U) + { + memcpy(nkey, key, key_len * sizeof (uint8_t)); + } + else + { + Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); + } + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) + { + uint8_t xi = ipad[i]; + uint8_t yi = key_block[i]; + ipad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) + { + uint8_t xi = opad[i]; + uint8_t yi = key_block[i]; + opad[i] = (uint32_t)xi ^ (uint32_t)yi; + } + uint64_t s[16U] = { 0U }; + Hacl_Hash_Blake2b_init(s, 0U, 64U); + uint64_t *s0 = s; + uint8_t *dst1 = ipad; + if (data_len == 0U) + { + uint64_t wv[16U] = { 0U }; + Hacl_Hash_Blake2b_update_last(128U, + wv, + s0, + false, + FStar_UInt128_uint64_to_uint128(0ULL), + 128U, + ipad); } else { @@ -787,6 +1511,7 @@ Hacl_HMAC_compute_blake2b_32( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, @@ -826,6 +1551,7 @@ Hacl_HMAC_compute_blake2b_32( Hacl_Hash_Blake2b_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, diff --git a/src/msvc/Hacl_HMAC_Blake2b_256.c b/src/msvc/Hacl_HMAC_Blake2b_256.c index cd16e65e..ca0ec144 100644 --- a/src/msvc/Hacl_HMAC_Blake2b_256.c +++ b/src/msvc/Hacl_HMAC_Blake2b_256.c @@ -44,10 +44,8 @@ 
Hacl_HMAC_Blake2b_256_compute_blake2b_256( uint32_t data_len ) { - uint32_t l = 128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[128U]; + memset(key_block, 0U, 128U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 128U) @@ -68,19 +66,17 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( { Hacl_Hash_Blake2b_Simd256_hash_with_key(nkey, 64U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[128U]; + memset(ipad, 0x36U, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[128U]; + memset(opad, 0x5cU, 128U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 128U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -96,6 +92,7 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( Hacl_Hash_Blake2b_Simd256_update_last(128U, wv, s0, + false, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad); @@ -138,6 +135,7 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( Hacl_Hash_Blake2b_Simd256_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, @@ -182,6 +180,7 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256( Hacl_Hash_Blake2b_Simd256_update_last(rem_len, wv1, s0, + false, FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U), FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), rem_len, diff --git a/src/msvc/Hacl_HMAC_Blake2s_128.c b/src/msvc/Hacl_HMAC_Blake2s_128.c index bf2033a8..3f0c333d 100644 --- a/src/msvc/Hacl_HMAC_Blake2s_128.c +++ b/src/msvc/Hacl_HMAC_Blake2s_128.c @@ -43,10 +43,8 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( uint32_t data_len ) { - uint32_t l = 64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(key_block, 0U, l * sizeof (uint8_t)); + uint8_t key_block[64U]; + memset(key_block, 0U, 64U * sizeof (uint8_t)); uint8_t *nkey = key_block; uint32_t ite; if (key_len <= 64U) @@ -67,19 +65,17 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( { Hacl_Hash_Blake2s_Simd128_hash_with_key(nkey, 32U, key, key_len, NULL, 0U); } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(ipad, 0x36U, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t ipad[64U]; + memset(ipad, 0x36U, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = ipad[i]; uint8_t yi = key_block[i]; ipad[i] = (uint32_t)xi ^ (uint32_t)yi; } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t)); - memset(opad, 0x5cU, l * sizeof (uint8_t)); - for (uint32_t i = 0U; i < l; i++) + uint8_t opad[64U]; + memset(opad, 0x5cU, 64U * sizeof (uint8_t)); + for (uint32_t i = 0U; i < 64U; i++) { uint8_t xi = opad[i]; uint8_t yi = key_block[i]; @@ -92,7 +88,7 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( if (data_len == 0U) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U }; 
- Hacl_Hash_Blake2s_Simd128_update_last(64U, wv, s0, 0ULL, 64U, ipad); + Hacl_Hash_Blake2s_Simd128_update_last(64U, wv, s0, false, 0ULL, 64U, ipad); } else { @@ -127,6 +123,7 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( Hacl_Hash_Blake2s_Simd128_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); @@ -165,6 +162,7 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128( Hacl_Hash_Blake2s_Simd128_update_last(rem_len, wv1, s0, + false, (uint64_t)64U + (uint64_t)full_blocks_len, rem_len, rem); diff --git a/src/msvc/Hacl_Hash_Blake2b.c b/src/msvc/Hacl_Hash_Blake2b.c index d490a1a5..1bab75e6 100644 --- a/src/msvc/Hacl_Hash_Blake2b.c +++ b/src/msvc/Hacl_Hash_Blake2b.c @@ -29,7 +29,14 @@ #include "lib_memzero0.h" static void -update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totlen, uint8_t *d) +update_block( + uint64_t *wv, + uint64_t *hash, + bool flag, + bool last_node, + FStar_UInt128_uint128 totlen, + uint8_t *d +) { uint64_t m_w[16U] = { 0U }; KRML_MAYBE_FOR16(i, @@ -52,7 +59,15 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl { wv_14 = 0ULL; } - uint64_t wv_15 = 0ULL; + uint64_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFFFFFFFFFULL; + } + else + { + wv_15 = 0ULL; + } mask[0U] = FStar_UInt128_uint128_to_uint64(totlen); mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)); mask[2U] = wv_14; @@ -560,86 +575,6 @@ void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) r1[3U] = iv7_; } -static void init_with_params(uint64_t *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint64_t tmp[8U] = { 0U }; - uint64_t *r0 = hash; - uint64_t *r1 = hash + 4U; - uint64_t *r2 = hash + 8U; - uint64_t *r3 = hash + 12U; - uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - uint8_t kk = p.key_length; - uint8_t nn = p.digest_length; - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - tmp[0U] = - (uint64_t)nn - ^ - ((uint64_t)kk - << 8U - ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); - tmp[1U] = p.node_offset; - tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; - tmp[3U] = 0ULL; - uint64_t tmp0 = tmp[0U]; - uint64_t tmp1 = tmp[1U]; - uint64_t tmp2 = tmp[2U]; - uint64_t tmp3 = tmp[3U]; - uint64_t tmp4 = tmp[4U]; - uint64_t tmp5 = tmp[5U]; - uint64_t tmp6 = tmp[6U]; - uint64_t tmp7 = tmp[7U]; - uint64_t iv0_ = iv0 ^ tmp0; - uint64_t iv1_ = iv1 ^ tmp1; - uint64_t iv2_ = iv2 ^ tmp2; - uint64_t iv3_ = iv3 ^ tmp3; - uint64_t iv4_ = iv4 ^ tmp4; - uint64_t iv5_ = iv5 ^ tmp5; - uint64_t iv6_ = iv6 ^ tmp6; - uint64_t iv7_ = iv7 ^ tmp7; - r0[0U] = iv0_; - r0[1U] = iv1_; - r0[2U] = iv2_; - r0[3U] = iv3_; - r1[0U] = iv4_; - r1[1U] = iv5_; - r1[2U] 
= iv6_; - r1[3U] = iv7_; -} - static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) { FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U); @@ -647,11 +582,11 @@ static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, ui memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -674,7 +609,7 @@ Hacl_Hash_Blake2b_update_multi( FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U))); uint8_t *b = blocks + i * 128U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -683,6 +618,7 @@ Hacl_Hash_Blake2b_update_last( uint32_t len, uint64_t *wv, uint64_t *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d @@ -693,7 +629,7 @@ Hacl_Hash_Blake2b_update_last( memcpy(b, last, rem * sizeof (uint8_t)); FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -727,7 +663,7 @@ update_blocks( rem = rem0; } Hacl_Hash_Blake2b_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2b_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2b_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -762,16 +698,19 @@ void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash) } static Hacl_Hash_Blake2b_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); Hacl_Hash_Blake2b_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { + .fst = kk.key_length, + .snd = kk.digest_length, + .thd = kk.last_node, + .f3 = { .fst = wv, .snd = b } + }; uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -790,17 +729,94 @@ static Hacl_Hash_Blake2b_state_t Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; uint8_t kk1 = p1->key_length; uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i.key_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint64_t *h = block_state.f3.snd; + uint32_t kk20 = (uint32_t)i.key_length; uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) + if (!(kk20 == 0U)) { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_1, kk20 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = h; + uint64_t *r1 = h + 4U; + uint64_t *r2 = h + 8U; + uint64_t *r3 = h + 12U; + uint64_t iv0 = 
Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + os[i0] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; return p; } @@ -820,14 +836,16 @@ The caller must satisfy the following requirements. */ Hacl_Hash_Blake2b_state_t -*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +*Hacl_Hash_Blake2b_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** @@ -844,7 +862,7 @@ The caller must satisfy the following requirements. 
Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t kk) { uint8_t nn = 64U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; uint8_t salt[16U] = { 0U }; uint8_t personal[16U] = { 0U }; Hacl_Hash_Blake2b_blake2_params @@ -855,7 +873,7 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - Hacl_Hash_Blake2b_state_t *s = Hacl_Hash_Blake2b_malloc_with_params_and_key(&p0, k); + Hacl_Hash_Blake2b_state_t *s = Hacl_Hash_Blake2b_malloc_with_params_and_key(&p0, false, k); return s; } @@ -872,38 +890,116 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_state_t *s) { Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } -static void -reset_raw( - Hacl_Hash_Blake2b_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +static void reset_raw(Hacl_Hash_Blake2b_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + Hacl_Hash_Blake2b_index + i = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; KRML_MAYBE_UNUSED_VAR(i); Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i1.key_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint64_t *h = block_state.f3.snd; + uint32_t kk20 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) + if (!(kk20 == 0U)) { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_1, kk20 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = h; + uint64_t *r1 = h + 4U; + uint64_t *r2 = h + 8U; + uint64_t *r3 = h + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = pv.salt + 
i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i0] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; uint8_t kk11 = i.key_length; uint32_t ite; if (kk11 != 0U) @@ -915,13 +1011,13 @@ reset_raw( ite = 0U; } Hacl_Hash_Blake2b_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + tmp8 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; + state[0U] = tmp8; } /** General-purpose re-initialization function with parameters and -key. You cannot change digest_length or key_length, meaning those values in +key. You cannot change digest_length, key_length, or last_node, meaning those values in the parameters object must be the same as originally decided via one of the malloc functions. All other values of the parameter can be changed. The behavior is unspecified if you violate this precondition. 
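The hunks in src/msvc/Hacl_Hash_Blake2b.c change the public streaming API: `malloc_with_params_and_key` gains a `last_node` flag, `reset_with_key_and_params` keeps `last_node` fixed along with the key and digest lengths, and `digest` now returns the number of bytes written (`digest_length`). A minimal usage sketch follows, as an editorial illustration only and not part of the generated sources; it assumes the declarations from Hacl_Hash_Blake2b.h as shown in these hunks, and that the plain Blake2b `update` returns the same 0-on-success error code documented for the Simd256 variant further down.

  #include "Hacl_Hash_Blake2b.h"

  /* Sketch: hash `msg` with the default parameters chosen by Hacl_Hash_Blake2b_malloc()
     (no key, 64-byte digest, last_node = false). */
  static int example_blake2b_hash(uint8_t *msg, uint32_t msg_len, uint8_t out[64U])
  {
    Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
    /* update: 0 = success, 1 = maximum input length exceeded */
    if (Hacl_Hash_Blake2b_update(st, msg, msg_len) != 0U)
    {
      Hacl_Hash_Blake2b_free(st);
      return 1;
    }
    /* digest now returns digest_length; 64 here, since malloc() picked the defaults */
    uint8_t written = Hacl_Hash_Blake2b_digest(st, out);
    Hacl_Hash_Blake2b_free(st);
    return written == 64U ? 0 : 1;
  }
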
@@ -933,8 +1029,9 @@ Hacl_Hash_Blake2b_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** @@ -957,7 +1054,7 @@ void Hacl_Hash_Blake2b_reset_with_key(Hacl_Hash_Blake2b_state_t *s, uint8_t *k) .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** @@ -1040,7 +1137,7 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint64_t___uint64_t_ acc = block_state1.thd; + K____uint64_t___uint64_t_ acc = block_state1.f3; uint64_t *wv = acc.fst; uint64_t *hash = acc.snd; uint32_t nb = 1U; @@ -1065,7 +1162,7 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____uint64_t___uint64_t_ acc = block_state1.thd; + K____uint64_t___uint64_t_ acc = block_state1.f3; uint64_t *wv = acc.fst; uint64_t *hash = acc.snd; uint32_t nb = data1_len / 128U; @@ -1133,7 +1230,7 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint64_t___uint64_t_ acc = block_state1.thd; + K____uint64_t___uint64_t_ acc = block_state1.f3; uint64_t *wv = acc.fst; uint64_t *hash = acc.snd; uint32_t nb = 1U; @@ -1159,7 +1256,7 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____uint64_t___uint64_t_ acc = block_state1.thd; + K____uint64_t___uint64_t_ acc = block_state1.f3; uint64_t *wv = acc.fst; uint64_t *hash = acc.snd; uint32_t nb = data1_len / 128U; @@ -1190,16 +1287,20 @@ at least `digest_length` bytes, where `digest_length` was determined by your choice of `malloc` function. Concretely, if you used `malloc` or `malloc_with_key`, then the expected length is 32 for S, or 64 for B (default digest length). If you used `malloc_with_params_and_key`, then the expected -length is whatever you chose for the `digest_length` field of your -parameters. +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. 
*/ -void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2b_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2b_state_t scrut = *state; + Hacl_Hash_Blake2b_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2b_state_t scrut = *s; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; uint64_t total_len = scrut.total_len; @@ -1217,9 +1318,14 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) uint64_t b[16U] = { 0U }; Hacl_Hash_Blake2b_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - uint64_t *src_b = block_state.thd.snd; - uint64_t *dst_b = tmp_block_state.thd.snd; + { + .fst = i1.key_length, + .snd = i1.digest_length, + .thd = i1.last_node, + .f3 = { .fst = wv0, .snd = b } + }; + uint64_t *src_b = block_state.f3.snd; + uint64_t *dst_b = tmp_block_state.f3.snd; memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -1233,7 +1339,7 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - K____uint64_t___uint64_t_ acc0 = tmp_block_state.thd; + K____uint64_t___uint64_t_ acc0 = tmp_block_state.f3; uint64_t *wv1 = acc0.fst; uint64_t *hash0 = acc0.snd; uint32_t nb = 0U; @@ -1244,17 +1350,35 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - K____uint64_t___uint64_t_ acc = tmp_block_state.thd; + K____uint64_t___uint64_t_ acc = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; uint64_t *wv = acc.fst; uint64_t *hash = acc.snd; Hacl_Hash_Blake2b_update_last(r, wv, hash, + last_node1, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2b_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + uint8_t nn1 = tmp_block_state.snd; + Hacl_Hash_Blake2b_finish((uint32_t)nn1, dst, tmp_block_state.f3.snd); + Hacl_Hash_Blake2b_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_info(Hacl_Hash_Blake2b_state_t *s) +{ + Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1265,8 +1389,8 @@ void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state) Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - uint64_t *b = block_state.thd.snd; - uint64_t *wv = block_state.thd.fst; + uint64_t *b = 
block_state.f3.snd; + uint64_t *wv = block_state.f3.fst; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); @@ -1282,17 +1406,24 @@ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_copy(Hacl_Hash_Blake2b_state_t *sta Hacl_Hash_Blake2b_block_state_t block_state0 = scrut.block_state; uint8_t *buf0 = scrut.buf; uint64_t total_len0 = scrut.total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); memcpy(buf, buf0, 128U * sizeof (uint8_t)); uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); Hacl_Hash_Blake2b_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - uint64_t *src_b = block_state0.thd.snd; - uint64_t *dst_b = block_state.thd.snd; + block_state = + { + .fst = i.key_length, + .snd = i.digest_length, + .thd = i.last_node, + .f3 = { .fst = wv, .snd = b } + }; + uint64_t *src_b = block_state0.f3.snd; + uint64_t *dst_b = block_state.f3.snd; memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); Hacl_Hash_Blake2b_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1335,10 +1466,10 @@ Hacl_Hash_Blake2b_hash_with_key( Write the BLAKE2b digest of message `input` using key `key` and parameters `params` into `output`. The `key` array must be of length `params.key_length`. The `output` array must be of length -`params.digest_length`. +`params.digest_length`. */ void -Hacl_Hash_Blake2b_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/src/msvc/Hacl_Hash_Blake2b_Simd256.c b/src/msvc/Hacl_Hash_Blake2b_Simd256.c index 0afd93bc..19234ab9 100644 --- a/src/msvc/Hacl_Hash_Blake2b_Simd256.c +++ b/src/msvc/Hacl_Hash_Blake2b_Simd256.c @@ -34,6 +34,7 @@ update_block( Lib_IntVector_Intrinsics_vec256 *wv, Lib_IntVector_Intrinsics_vec256 *hash, bool flag, + bool last_node, FStar_UInt128_uint128 totlen, uint8_t *d ) @@ -59,7 +60,15 @@ update_block( { wv_14 = 0ULL; } - uint64_t wv_15 = 0ULL; + uint64_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFFFFFFFFFULL; + } + else + { + wv_15 = 0ULL; + } mask = Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen), FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)), @@ -289,75 +298,6 @@ Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t k r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); } -static void -init_with_params(Lib_IntVector_Intrinsics_vec256 *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint64_t tmp[8U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 *r0 = hash; - Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; - Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; - Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U; - uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; - r2[0U] = 
Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - uint8_t kk = p.key_length; - uint8_t nn = p.digest_length; - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint64_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - tmp[0U] = - (uint64_t)nn - ^ - ((uint64_t)kk - << 8U - ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); - tmp[1U] = p.node_offset; - tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; - tmp[3U] = 0ULL; - uint64_t tmp0 = tmp[0U]; - uint64_t tmp1 = tmp[1U]; - uint64_t tmp2 = tmp[2U]; - uint64_t tmp3 = tmp[3U]; - uint64_t tmp4 = tmp[4U]; - uint64_t tmp5 = tmp[5U]; - uint64_t tmp6 = tmp[6U]; - uint64_t tmp7 = tmp[7U]; - uint64_t iv0_ = iv0 ^ tmp0; - uint64_t iv1_ = iv1 ^ tmp1; - uint64_t iv2_ = iv2 ^ tmp2; - uint64_t iv3_ = iv3 ^ tmp3; - uint64_t iv4_ = iv4 ^ tmp4; - uint64_t iv5_ = iv5 ^ tmp5; - uint64_t iv6_ = iv6 ^ tmp6; - uint64_t iv7_ = iv7 ^ tmp7; - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); -} - static void update_key( Lib_IntVector_Intrinsics_vec256 *wv, @@ -372,11 +312,11 @@ update_key( memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -399,7 +339,7 @@ Hacl_Hash_Blake2b_Simd256_update_multi( FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U))); uint8_t *b = blocks + i * 128U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -408,6 +348,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec256 *wv, Lib_IntVector_Intrinsics_vec256 *hash, + bool last_node, FStar_UInt128_uint128 prev, uint32_t rem, uint8_t *d @@ -418,7 +359,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( memcpy(b, last, rem * sizeof (uint8_t)); FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } @@ -452,7 +393,7 @@ update_blocks( rem = rem0; } Hacl_Hash_Blake2b_Simd256_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2b_Simd256_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2b_Simd256_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -593,10 +534,7 @@ Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void) } static Hacl_Hash_Blake2b_Simd256_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec256 @@ -610,7 +548,13 @@ static Hacl_Hash_Blake2b_Simd256_state_t sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); memset(b, 0U, 4U * sizeof 
(Lib_IntVector_Intrinsics_vec256)); Hacl_Hash_Blake2b_Simd256_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { + .fst = kk.key_length, + .snd = kk.digest_length, + .thd = kk.last_node, + .f3 = { .fst = wv, .snd = b } + }; uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -632,52 +576,131 @@ static Hacl_Hash_Blake2b_Simd256_state_t Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; uint8_t kk1 = p1->key_length; uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i.key_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec256 *h = block_state.f3.snd; + uint32_t kk20 = (uint32_t)i.key_length; uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) + if (!(kk20 == 0U)) { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); + memcpy(buf, k_1, kk20 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = h; + Lib_IntVector_Intrinsics_vec256 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = h + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r4 = u; + uint64_t x = r4; + os[i0] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); return p; } /** - State allocation function when there are parameters and a key. 
The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (256 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 256 for S, 64 for B. +- The digest_length must not exceed 256 for S, 64 for B. + */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (256 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 256 for S, 64 for B. 
+ */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk) { uint8_t nn = 64U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; - uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); - uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; Hacl_Hash_Blake2b_blake2_params p = { @@ -685,21 +708,16 @@ Hacl_Hash_Blake2b_Simd256_state_t .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; - Hacl_Hash_Blake2b_blake2_params - *p0 = - (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); - p0[0U] = p; + Hacl_Hash_Blake2b_blake2_params p0 = p; Hacl_Hash_Blake2b_Simd256_state_t - *s = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(p0, k); - Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; - KRML_HOST_FREE(p1.salt); - KRML_HOST_FREE(p1.personal); - KRML_HOST_FREE(p0); + *s = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(&p0, false, k); return s; } /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void) { @@ -709,38 +727,105 @@ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_Simd256_state_t *s) { Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } static void -reset_raw( - Hacl_Hash_Blake2b_Simd256_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +reset_raw(Hacl_Hash_Blake2b_Simd256_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + Hacl_Hash_Blake2b_index + i = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; KRML_MAYBE_UNUSED_VAR(i); Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; - uint32_t kk2 = (uint32_t)i1.key_length; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec256 *h = block_state.f3.snd; + uint32_t kk20 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; - if (!(kk2 == 0U)) + if (!(kk20 == 0U)) { - uint8_t *sub_b = buf + kk2; - memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); - memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + uint8_t *sub_b = buf + kk20; + memset(sub_b, 0U, (128U - kk20) * sizeof (uint8_t)); 
+ memcpy(buf, k_1, kk20 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = h; + Lib_IntVector_Intrinsics_vec256 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = h + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk2 = pv.key_length; + uint8_t nn1 = pv.digest_length; + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i0] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk2 + << 8U + ^ ((uint64_t)pv.fanout << 16U ^ ((uint64_t)pv.depth << 24U ^ (uint64_t)pv.leaf_length << 32U))); + tmp[1U] = pv.node_offset; + tmp[2U] = (uint64_t)pv.node_depth ^ (uint64_t)pv.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); uint8_t kk11 = i.key_length; uint32_t ite; if (kk11 != 0U) @@ -752,14 +837,16 @@ reset_raw( ite = 0U; } Hacl_Hash_Blake2b_Simd256_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + tmp8 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; + state[0U] = tmp8; } /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. 
*/ void Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( @@ -768,15 +855,17 @@ Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k) { @@ -791,11 +880,16 @@ void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. 
*/ void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s) { @@ -803,7 +897,7 @@ void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s) } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_Simd256_update( @@ -873,8 +967,7 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = 1U; @@ -899,7 +992,7 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = data1_len / 128U; @@ -967,8 +1060,7 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = 1U; @@ -994,7 +1086,7 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = data1_len / 128U; @@ -1020,16 +1112,25 @@ Hacl_Hash_Blake2b_Simd256_update( } /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 256 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2B_256_OUT_BYTES, then use the return value +to see how many bytes were actually written. 
*/ -void -Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2b_Simd256_state_t scrut = *s; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; uint64_t total_len = scrut.total_len; @@ -1047,9 +1148,14 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; Hacl_Hash_Blake2b_Simd256_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - Lib_IntVector_Intrinsics_vec256 *src_b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.thd.snd; + { + .fst = i1.key_length, + .snd = i1.digest_length, + .thd = i1.last_node, + .f3 = { .fst = wv0, .snd = b } + }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state.f3.snd; + Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.f3.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -1064,7 +1170,7 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc0 = tmp_block_state.thd; + acc0 = tmp_block_state.f3; Lib_IntVector_Intrinsics_vec256 *wv1 = acc0.fst; Lib_IntVector_Intrinsics_vec256 *hash0 = acc0.snd; uint32_t nb = 0U; @@ -1076,17 +1182,35 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 nb); uint64_t prev_len_last = total_len - (uint64_t)r; K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ - acc = tmp_block_state.thd; + acc = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; Hacl_Hash_Blake2b_Simd256_update_last(r, wv, hash, + last_node1, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2b_Simd256_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + uint8_t nn1 = tmp_block_state.snd; + Hacl_Hash_Blake2b_Simd256_finish((uint32_t)nn1, dst, tmp_block_state.f3.snd); + Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2b_Simd256_info(Hacl_Hash_Blake2b_Simd256_state_t *s) +{ + Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = 
block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1097,8 +1221,8 @@ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state) Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec256 *b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec256 *wv = block_state.thd.fst; + Lib_IntVector_Intrinsics_vec256 *b = block_state.f3.snd; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.f3.fst; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); @@ -1106,7 +1230,7 @@ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state) } /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state) @@ -1115,9 +1239,10 @@ Hacl_Hash_Blake2b_Simd256_state_t Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = scrut.block_state; uint8_t *buf0 = scrut.buf; uint64_t total_len0 = scrut.total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); memcpy(buf, buf0, 128U * sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec256 @@ -1131,9 +1256,15 @@ Hacl_Hash_Blake2b_Simd256_state_t sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); Hacl_Hash_Blake2b_Simd256_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - Lib_IntVector_Intrinsics_vec256 *src_b = block_state0.thd.snd; - Lib_IntVector_Intrinsics_vec256 *dst_b = block_state.thd.snd; + block_state = + { + .fst = i.key_length, + .snd = i.digest_length, + .thd = i.last_node, + .f3 = { .fst = wv, .snd = b } + }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state0.f3.snd; + Lib_IntVector_Intrinsics_vec256 *dst_b = block_state.f3.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); Hacl_Hash_Blake2b_Simd256_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1175,8 +1306,14 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256, void *); } +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
+*/ void -Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/src/msvc/Hacl_Hash_Blake2s.c b/src/msvc/Hacl_Hash_Blake2s.c index 6e19d83d..ceb73850 100644 --- a/src/msvc/Hacl_Hash_Blake2s.c +++ b/src/msvc/Hacl_Hash_Blake2s.c @@ -30,7 +30,14 @@ #include "lib_memzero0.h" static inline void -update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d) +update_block( + uint32_t *wv, + uint32_t *hash, + bool flag, + bool last_node, + uint64_t totlen, + uint8_t *d +) { uint32_t m_w[16U] = { 0U }; KRML_MAYBE_FOR16(i, @@ -53,7 +60,15 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * { wv_14 = 0U; } - uint32_t wv_15 = 0U; + uint32_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFU; + } + else + { + wv_15 = 0U; + } mask[0U] = (uint32_t)totlen; mask[1U] = (uint32_t)(totlen >> 32U); mask[2U] = wv_14; @@ -558,83 +573,6 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) r1[3U] = iv7_; } -static void init_with_params(uint32_t *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint32_t tmp[8U] = { 0U }; - uint32_t *r0 = hash; - uint32_t *r1 = hash + 4U; - uint32_t *r2 = hash + 8U; - uint32_t *r3 = hash + 12U; - uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - tmp[0U] = - (uint32_t)p.digest_length - ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); - tmp[1U] = p.leaf_length; - tmp[2U] = (uint32_t)p.node_offset; - tmp[3U] = - (uint32_t)(p.node_offset >> 32U) - ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); - uint32_t tmp0 = tmp[0U]; - uint32_t tmp1 = tmp[1U]; - uint32_t tmp2 = tmp[2U]; - uint32_t tmp3 = tmp[3U]; - uint32_t tmp4 = tmp[4U]; - uint32_t tmp5 = tmp[5U]; - uint32_t tmp6 = tmp[6U]; - uint32_t tmp7 = tmp[7U]; - uint32_t iv0_ = iv0 ^ tmp0; - uint32_t iv1_ = iv1 ^ tmp1; - uint32_t iv2_ = iv2 ^ tmp2; - uint32_t iv3_ = iv3 ^ tmp3; - uint32_t iv4_ = iv4 ^ tmp4; - uint32_t iv5_ = iv5 ^ tmp5; - uint32_t iv6_ = iv6 ^ tmp6; - uint32_t iv7_ = iv7 ^ tmp7; - r0[0U] = iv0_; - r0[1U] = iv1_; - r0[2U] = iv2_; - r0[3U] = iv3_; - r1[0U] = iv4_; - r1[1U] = iv5_; - r1[2U] = iv6_; - r1[3U] = iv7_; -} - static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) { uint64_t lb = (uint64_t)64U; @@ -642,11 +580,11 @@ static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, ui memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, 
false, lb, b); } Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -666,7 +604,7 @@ Hacl_Hash_Blake2s_update_multi( { uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U); uint8_t *b = blocks + i * 64U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -675,6 +613,7 @@ Hacl_Hash_Blake2s_update_last( uint32_t len, uint32_t *wv, uint32_t *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d @@ -684,7 +623,7 @@ Hacl_Hash_Blake2s_update_last( uint8_t *last = d + len - rem; memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -712,7 +651,7 @@ update_blocks(uint32_t len, uint32_t *wv, uint32_t *hash, uint64_t prev, uint8_t rem = rem0; } Hacl_Hash_Blake2s_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2s_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2s_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -747,16 +686,19 @@ void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash) } static Hacl_Hash_Blake2s_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); Hacl_Hash_Blake2s_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { + .fst = kk.key_length, + .snd = kk.digest_length, + .thd = kk.last_node, + .f3 = { .fst = wv, .snd = b } + }; uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -775,7 +717,9 @@ static Hacl_Hash_Blake2s_state_t Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; uint8_t kk1 = p1->key_length; uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint32_t *h = block_state.f3.snd; uint32_t kk2 = (uint32_t)i.key_length; uint8_t *k_1 = key.snd; if (!(kk2 == 0U)) @@ -785,38 +729,127 @@ static Hacl_Hash_Blake2s_state_t memcpy(buf, k_1, kk2 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 = h; + uint32_t *r1 = h + 4U; + uint32_t *r2 = h + 8U; + uint32_t *r3 = h + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = 
pv.personal + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + os[i0] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; return p; } /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (32 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B. + */ Hacl_Hash_Blake2s_state_t -*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +*Hacl_Hash_Blake2s_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, + uint8_t *k +) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (32 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. 
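For illustration, a minimal keyed-hashing sequence against this streaming API
might look as follows. This is an editor's sketch, not part of the patch: the
wrapper name, the 16-byte key, the message arguments, and the inclusion of the
public Hacl_Hash_Blake2s.h header are assumptions; the HACL* function
signatures themselves are the ones defined in this file.

  #include "Hacl_Hash_Blake2s.h"   // assumed public header name

  static void example_blake2s_keyed(uint8_t *msg, uint32_t msg_len, uint8_t *out32)
  {
    uint8_t key[16U] = { 0U };  // any key of length kk <= 32
    Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc_with_key(key, 16U);
    Hacl_Streaming_Types_error_code err = Hacl_Hash_Blake2s_update(st, msg, msg_len);
    if (err == 0U)  // 0 = success, 1 = max length exceeded (see the update function below)
    {
      // With malloc_with_key the digest length is the default 32 for S;
      // digest returns that length for convenience.
      uint8_t written = Hacl_Hash_Blake2s_digest(st, out32);
      (void)written;
    }
    Hacl_Hash_Blake2s_free(st);
  }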
+ */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk) { uint8_t nn = 32U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; - uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); - uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; Hacl_Hash_Blake2b_blake2_params p = { @@ -824,20 +857,15 @@ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; - Hacl_Hash_Blake2b_blake2_params - *p0 = - (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); - p0[0U] = p; - Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_params_and_key(p0, k); - Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; - KRML_HOST_FREE(p1.salt); - KRML_HOST_FREE(p1.personal); - KRML_HOST_FREE(p0); + Hacl_Hash_Blake2b_blake2_params p0 = p; + Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_params_and_key(&p0, false, k); return s; } /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) { @@ -847,28 +875,31 @@ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_state_t *s) { Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } -static void -reset_raw( - Hacl_Hash_Blake2s_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +static void reset_raw(Hacl_Hash_Blake2s_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { Hacl_Hash_Blake2s_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + Hacl_Hash_Blake2b_index + i = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; KRML_MAYBE_UNUSED_VAR(i); Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + uint32_t *h = block_state.f3.snd; uint32_t kk2 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; if (!(kk2 == 0U)) @@ -878,7 +909,79 @@ reset_raw( memcpy(buf, k_1, kk2 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 = h; + uint32_t *r1 = h + 4U; + uint32_t *r2 = h + 8U; + uint32_t *r3 = h + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + 
uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i0] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; uint8_t kk11 = i.key_length; uint32_t ite; if (kk11 != 0U) @@ -890,14 +993,16 @@ reset_raw( ite = 0U; } Hacl_Hash_Blake2s_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + tmp8 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; + state[0U] = tmp8; } /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key_and_params( @@ -906,15 +1011,17 @@ Hacl_Hash_Blake2s_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. 
The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k) { @@ -929,11 +1036,16 @@ void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k) .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s) { @@ -941,7 +1053,7 @@ void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s) } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len) @@ -1007,7 +1119,7 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint32_t___uint32_t_ acc = block_state1.thd; + K____uint32_t___uint32_t_ acc = block_state1.f3; uint32_t *wv = acc.fst; uint32_t *hash = acc.snd; uint32_t nb = 1U; @@ -1027,7 +1139,7 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____uint32_t___uint32_t_ acc = block_state1.thd; + K____uint32_t___uint32_t_ acc = block_state1.f3; uint32_t *wv = acc.fst; uint32_t *hash = acc.snd; uint32_t nb = data1_len / 64U; @@ -1090,7 +1202,7 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____uint32_t___uint32_t_ acc = block_state1.thd; + K____uint32_t___uint32_t_ acc = block_state1.f3; uint32_t *wv = acc.fst; uint32_t *hash = acc.snd; uint32_t nb = 1U; @@ -1111,7 +1223,7 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____uint32_t___uint32_t_ acc = block_state1.thd; + K____uint32_t___uint32_t_ acc = block_state1.f3; uint32_t *wv = acc.fst; uint32_t *hash = acc.snd; uint32_t nb = data1_len / 64U; @@ -1132,15 +1244,25 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 } /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. 
+For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_32_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2s_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2s_state_t scrut = *state; + Hacl_Hash_Blake2s_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2s_state_t scrut = *s; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; uint64_t total_len = scrut.total_len; @@ -1158,9 +1280,14 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) uint32_t b[16U] = { 0U }; Hacl_Hash_Blake2s_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - uint32_t *src_b = block_state.thd.snd; - uint32_t *dst_b = tmp_block_state.thd.snd; + { + .fst = i1.key_length, + .snd = i1.digest_length, + .thd = i1.last_node, + .f3 = { .fst = wv0, .snd = b } + }; + uint32_t *src_b = block_state.f3.snd; + uint32_t *dst_b = tmp_block_state.f3.snd; memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -1174,18 +1301,35 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - K____uint32_t___uint32_t_ acc0 = tmp_block_state.thd; + K____uint32_t___uint32_t_ acc0 = tmp_block_state.f3; uint32_t *wv1 = acc0.fst; uint32_t *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - K____uint32_t___uint32_t_ acc = tmp_block_state.thd; + K____uint32_t___uint32_t_ acc = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; uint32_t *wv = acc.fst; uint32_t *hash = acc.snd; - Hacl_Hash_Blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2s_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + Hacl_Hash_Blake2s_update_last(r, wv, hash, last_node1, prev_len_last, r, buf_last); + uint8_t nn1 = tmp_block_state.snd; + Hacl_Hash_Blake2s_finish((uint32_t)nn1, dst, tmp_block_state.f3.snd); + Hacl_Hash_Blake2s_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_info(Hacl_Hash_Blake2s_state_t *s) +{ + Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1196,8 +1340,8 @@ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state) Hacl_Hash_Blake2s_state_t 
scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - uint32_t *b = block_state.thd.snd; - uint32_t *wv = block_state.thd.fst; + uint32_t *b = block_state.f3.snd; + uint32_t *wv = block_state.f3.fst; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); @@ -1205,7 +1349,7 @@ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state) } /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state) { @@ -1213,17 +1357,24 @@ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *sta Hacl_Hash_Blake2s_block_state_t block_state0 = scrut.block_state; uint8_t *buf0 = scrut.buf; uint64_t total_len0 = scrut.total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); Hacl_Hash_Blake2s_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - uint32_t *src_b = block_state0.thd.snd; - uint32_t *dst_b = block_state.thd.snd; + block_state = + { + .fst = i.key_length, + .snd = i.digest_length, + .thd = i.last_node, + .f3 = { .fst = wv, .snd = b } + }; + uint32_t *src_b = block_state0.f3.snd; + uint32_t *dst_b = block_state.f3.snd; memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); Hacl_Hash_Blake2s_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1262,8 +1413,14 @@ Hacl_Hash_Blake2s_hash_with_key( Lib_Memzero0_memzero(b, 16U, uint32_t, void *); } +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
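As an illustration (editor's sketch, not part of the patch), a one-shot call
could be set up as below. The trailing arguments of the function, the
parameters structure passed by value followed by the key pointer, are not
visible in this hunk and are inferred from the comment above; the wrapper
name, the message arguments, and the choice of fanout = 1, depth = 1 (the
conventional sequential-mode values) are likewise assumptions.

  static void example_blake2s_one_shot(uint8_t *msg, uint32_t msg_len, uint8_t *out24)
  {
    uint8_t key[16U] = { 0U };
    uint8_t salt[8U] = { 0U };      // BLAKE2s salt is 8 bytes
    uint8_t personal[8U] = { 0U };  // BLAKE2s personalization is 8 bytes
    Hacl_Hash_Blake2b_blake2_params params =
      {
        .digest_length = 24U, .key_length = 16U, .fanout = 1U, .depth = 1U,
        .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
        .inner_length = 0U, .salt = salt, .personal = personal
      };
    // out24 must hold params.digest_length (24) bytes; key must hold
    // params.key_length (16) bytes.
    Hacl_Hash_Blake2s_hash_with_key_and_params(out24, msg, msg_len, params, key);
  }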
+*/ void -Hacl_Hash_Blake2s_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/src/msvc/Hacl_Hash_Blake2s_Simd128.c b/src/msvc/Hacl_Hash_Blake2s_Simd128.c index c02da8fa..3b68783b 100644 --- a/src/msvc/Hacl_Hash_Blake2s_Simd128.c +++ b/src/msvc/Hacl_Hash_Blake2s_Simd128.c @@ -34,6 +34,7 @@ update_block( Lib_IntVector_Intrinsics_vec128 *wv, Lib_IntVector_Intrinsics_vec128 *hash, bool flag, + bool last_node, uint64_t totlen, uint8_t *d ) @@ -59,7 +60,15 @@ update_block( { wv_14 = 0U; } - uint32_t wv_15 = 0U; + uint32_t wv_15; + if (last_node) + { + wv_15 = 0xFFFFFFFFU; + } + else + { + wv_15 = 0U; + } mask = Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen, (uint32_t)(totlen >> 32U), @@ -286,72 +295,6 @@ Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t k r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); } -static void -init_with_params(Lib_IntVector_Intrinsics_vec128 *hash, Hacl_Hash_Blake2b_blake2_params p) -{ - uint32_t tmp[8U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 *r0 = hash; - Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; - Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; - Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; - uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; - r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 4U; - uint8_t *bj = p.salt + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - 0U, - 2U, - 1U, - uint32_t *os = tmp + 6U; - uint8_t *bj = p.personal + i * 4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - tmp[0U] = - (uint32_t)p.digest_length - ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); - tmp[1U] = p.leaf_length; - tmp[2U] = (uint32_t)p.node_offset; - tmp[3U] = - (uint32_t)(p.node_offset >> 32U) - ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); - uint32_t tmp0 = tmp[0U]; - uint32_t tmp1 = tmp[1U]; - uint32_t tmp2 = tmp[2U]; - uint32_t tmp3 = tmp[3U]; - uint32_t tmp4 = tmp[4U]; - uint32_t tmp5 = tmp[5U]; - uint32_t tmp6 = tmp[6U]; - uint32_t tmp7 = tmp[7U]; - uint32_t iv0_ = iv0 ^ tmp0; - uint32_t iv1_ = iv1 ^ tmp1; - uint32_t iv2_ = iv2 ^ tmp2; - uint32_t iv3_ = iv3 ^ tmp3; - uint32_t iv4_ = iv4 ^ tmp4; - uint32_t iv5_ = iv5 ^ tmp5; - uint32_t iv6_ = iv6 ^ tmp6; - uint32_t iv7_ = iv7 ^ tmp7; - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); -} - static void update_key( Lib_IntVector_Intrinsics_vec128 *wv, @@ -366,11 +309,11 @@ update_key( memcpy(b, k, kk * sizeof (uint8_t)); if (ll == 0U) { - update_block(wv, hash, true, lb, b); + update_block(wv, hash, true, false, lb, b); } else { - update_block(wv, hash, false, lb, b); + update_block(wv, hash, false, false, lb, b); } Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -390,7 +333,7 @@ 
Hacl_Hash_Blake2s_Simd128_update_multi( { uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U); uint8_t *b = blocks + i * 64U; - update_block(wv, hash, false, totlen, b); + update_block(wv, hash, false, false, totlen, b); } } @@ -399,6 +342,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( uint32_t len, Lib_IntVector_Intrinsics_vec128 *wv, Lib_IntVector_Intrinsics_vec128 *hash, + bool last_node, uint64_t prev, uint32_t rem, uint8_t *d @@ -408,7 +352,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( uint8_t *last = d + len - rem; memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; - update_block(wv, hash, true, totlen, b); + update_block(wv, hash, true, last_node, totlen, b); Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } @@ -442,7 +386,7 @@ update_blocks( rem = rem0; } Hacl_Hash_Blake2s_Simd128_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Hash_Blake2s_Simd128_update_last(len, wv, hash, prev, rem, blocks); + Hacl_Hash_Blake2s_Simd128_update_last(len, wv, hash, false, prev, rem, blocks); } static inline void @@ -583,10 +527,7 @@ Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void) } static Hacl_Hash_Blake2s_Simd128_state_t -*malloc_raw( - Hacl_Hash_Blake2b_index kk, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +*malloc_raw(Hacl_Hash_Blake2b_index kk, Hacl_Hash_Blake2b_params_and_key key) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec128 @@ -600,7 +541,13 @@ static Hacl_Hash_Blake2s_Simd128_state_t sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); Hacl_Hash_Blake2s_Simd128_block_state_t - block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + block_state = + { + .fst = kk.key_length, + .snd = kk.digest_length, + .thd = kk.last_node, + .f3 = { .fst = wv, .snd = b } + }; uint8_t kk10 = kk.key_length; uint32_t ite; if (kk10 != 0U) @@ -622,7 +569,9 @@ static Hacl_Hash_Blake2s_Simd128_state_t Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; uint8_t kk1 = p1->key_length; uint8_t nn = p1->digest_length; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec128 *h = block_state.f3.snd; uint32_t kk2 = (uint32_t)i.key_length; uint8_t *k_1 = key.snd; if (!(kk2 == 0U)) @@ -632,42 +581,116 @@ static Hacl_Hash_Blake2s_Simd128_state_t memcpy(buf, k_1, kk2 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; - init_with_params(block_state.thd.snd, pv); + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = h; + Lib_IntVector_Intrinsics_vec128 *r1 = h + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = h + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 
4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r4 = u; + uint32_t x = r4; + os[i0] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); return p; } /** - State allocation function when there are parameters and a key. The -length of the key k MUST match the value of the field key_length in the -parameters. Furthermore, there is a static (not dynamically checked) requirement -that key_length does not exceed max_key (128 for S, 64 for B).) + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 128 for S, 64 for B. +- The digest_length must not exceed 128 for S, 64 for B. + */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( Hacl_Hash_Blake2b_blake2_params *p, + bool last_node, uint8_t *k ) { Hacl_Hash_Blake2b_blake2_params pv = p[0U]; Hacl_Hash_Blake2b_index - i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; - return - malloc_raw(i1, - ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length, .last_node = last_node }; + return malloc_raw(i1, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - State allocation function when there is just a custom key. All -other parameters are set to their respective default values, meaning the output -length is the maximum allowed output (128 for S, 64 for B). + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 128 for S, 64 for B. 
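For illustration of the general-purpose allocator documented above (editor's
sketch, not part of the patch): the tree-mode parameter values below are only
an example, the argument order of Hacl_Hash_Blake2s_Simd128_update is assumed
to mirror the scalar variant (state, chunk, chunk_len), and passing NULL for
the key relies on the allocator only reading the key when key_length is
non-zero, as in the code above.

  static void example_blake2s_128_last_node(uint8_t *chunk, uint32_t chunk_len, uint8_t *out32)
  {
    uint8_t salt[8U] = { 0U };
    uint8_t personal[8U] = { 0U };
    Hacl_Hash_Blake2b_blake2_params p =
      {
        .digest_length = 32U, .key_length = 0U, .fanout = 2U, .depth = 2U,
        .leaf_length = 4096U, .node_offset = 0ULL, .node_depth = 0U,
        .inner_length = 32U, .salt = salt, .personal = personal
      };
    Hacl_Hash_Blake2s_Simd128_state_t
    *st = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(&p, true, NULL);  // last_node = true
    Hacl_Hash_Blake2s_Simd128_update(st, chunk, chunk_len);
    uint8_t written = Hacl_Hash_Blake2s_Simd128_digest(st, out32);  // returns digest_length
    (void)written;
    Hacl_Hash_Blake2s_Simd128_free(st);
  }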
+ */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk) { uint8_t nn = 32U; - Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; - uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); - uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn, .last_node = false }; + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; Hacl_Hash_Blake2b_blake2_params p = { @@ -675,21 +698,16 @@ Hacl_Hash_Blake2s_Simd128_state_t .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; - Hacl_Hash_Blake2b_blake2_params - *p0 = - (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); - p0[0U] = p; + Hacl_Hash_Blake2b_blake2_params p0 = p; Hacl_Hash_Blake2s_Simd128_state_t - *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(p0, k); - Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; - KRML_HOST_FREE(p1.salt); - KRML_HOST_FREE(p1.personal); - KRML_HOST_FREE(p0); + *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(&p0, false, k); return s; } /** - State allocation function when there is no key + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) { @@ -699,28 +717,32 @@ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_Simd128_state_t *s) { Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; uint8_t nn = block_state.snd; uint8_t kk1 = block_state.fst; - return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn, .last_node = last_node }); } static void -reset_raw( - Hacl_Hash_Blake2s_Simd128_state_t *state, - K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key -) +reset_raw(Hacl_Hash_Blake2s_Simd128_state_t *state, Hacl_Hash_Blake2b_params_and_key key) { Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; + bool last_node0 = block_state.thd; uint8_t nn0 = block_state.snd; uint8_t kk10 = block_state.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + Hacl_Hash_Blake2b_index + i = { .key_length = kk10, .digest_length = nn0, .last_node = last_node0 }; KRML_MAYBE_UNUSED_VAR(i); Hacl_Hash_Blake2b_blake2_params *p = key.fst; uint8_t kk1 = p->key_length; uint8_t nn = p->digest_length; - Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + bool last_node = block_state.thd; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; + Lib_IntVector_Intrinsics_vec128 *h = block_state.f3.snd; uint32_t kk2 = (uint32_t)i1.key_length; uint8_t *k_1 = key.snd; if (!(kk2 == 0U)) @@ -730,7 +752,67 @@ reset_raw( memcpy(buf, k_1, kk2 * sizeof (uint8_t)); } Hacl_Hash_Blake2b_blake2_params pv = p[0U]; - init_with_params(block_state.thd.snd, pv); + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = h; + Lib_IntVector_Intrinsics_vec128 *r1 = h + 1U; + 
Lib_IntVector_Intrinsics_vec128 *r2 = h + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = h + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = pv.salt + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i0] = x;); + KRML_MAYBE_FOR2(i0, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = pv.personal + i0 * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i0] = x;); + tmp[0U] = + (uint32_t)pv.digest_length + ^ ((uint32_t)pv.key_length << 8U ^ ((uint32_t)pv.fanout << 16U ^ (uint32_t)pv.depth << 24U)); + tmp[1U] = pv.leaf_length; + tmp[2U] = (uint32_t)pv.node_offset; + tmp[3U] = + (uint32_t)(pv.node_offset >> 32U) + ^ ((uint32_t)pv.node_depth << 16U ^ (uint32_t)pv.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); uint8_t kk11 = i.key_length; uint32_t ite; if (kk11 != 0U) @@ -742,14 +824,16 @@ reset_raw( ite = 0U; } Hacl_Hash_Blake2s_Simd128_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; - state[0U] = tmp; + tmp8 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; + state[0U] = tmp8; } /** - Re-initialization function. The reinitialization API is tricky -- -you MUST reuse the same original parameters for digest (output) length and key -length. + General-purpose re-initialization function with parameters and +key. You cannot change digest_length, key_length, or last_node, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( @@ -758,15 +842,17 @@ Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( uint8_t *k ) { - index_of_state(s); - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); + Hacl_Hash_Blake2b_index i1 = index_of_state(s); + KRML_MAYBE_UNUSED_VAR(i1); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = p, .snd = k })); } /** - Re-initialization function when there is a key. Note that the key -size is not allowed to change, which is why this function does not take a key -length -- the key has to be same key size that was originally passed to -`malloc_with_key` + Specialized-purpose re-initialization function with no parameters, +and a key. 
The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k) { @@ -781,11 +867,16 @@ void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t .personal = personal }; Hacl_Hash_Blake2b_blake2_params p0 = p; - reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); + reset_raw(s, ((Hacl_Hash_Blake2b_params_and_key){ .fst = &p0, .snd = k })); } /** - Re-initialization function when there is no key + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. */ void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s) { @@ -793,7 +884,7 @@ void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s) } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2s_Simd128_update( @@ -863,8 +954,7 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = 1U; @@ -884,7 +974,7 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = data1_len / 64U; @@ -947,8 +1037,7 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = 1U; @@ -969,7 +1058,7 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.f3; Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = data1_len / 64U; @@ -990,16 +1079,25 @@ Hacl_Hash_Blake2s_Simd128_update( } /** - Finish function when there is no key + Digest 
function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 128 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your parameters. +For convenience, this function returns `digest_length`. When in doubt, callers +can pass an array of size HACL_BLAKE2S_128_OUT_BYTES, then use the return value +to see how many bytes were actually written. */ -void -Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output) +uint8_t Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *dst) { - Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*state).block_state; - uint8_t nn = block_state0.snd; - uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; - Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*s).block_state; + bool last_node0 = block_state0.thd; + uint8_t nn0 = block_state0.snd; + uint8_t kk0 = block_state0.fst; + Hacl_Hash_Blake2b_index + i1 = { .key_length = kk0, .digest_length = nn0, .last_node = last_node0 }; + Hacl_Hash_Blake2s_Simd128_state_t scrut = *s; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; uint64_t total_len = scrut.total_len; @@ -1017,9 +1115,14 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; Hacl_Hash_Blake2s_Simd128_block_state_t tmp_block_state = - { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; - Lib_IntVector_Intrinsics_vec128 *src_b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.thd.snd; + { + .fst = i1.key_length, + .snd = i1.digest_length, + .thd = i1.last_node, + .f3 = { .fst = wv0, .snd = b } + }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state.f3.snd; + Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.f3.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -1034,19 +1137,36 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc0 = tmp_block_state.thd; + acc0 = tmp_block_state.f3; Lib_IntVector_Intrinsics_vec128 *wv1 = acc0.fst; Lib_IntVector_Intrinsics_vec128 *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2s_Simd128_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ - acc = tmp_block_state.thd; + acc = tmp_block_state.f3; + bool last_node1 = tmp_block_state.thd; Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; - Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, prev_len_last, r, buf_last); - uint8_t nn0 = tmp_block_state.snd; - Hacl_Hash_Blake2s_Simd128_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); + Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, last_node1, prev_len_last, r, buf_last); + uint8_t nn1 = 
tmp_block_state.snd; + Hacl_Hash_Blake2s_Simd128_finish((uint32_t)nn1, dst, tmp_block_state.f3.snd); + Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = (*s).block_state; + bool last_node = block_state1.thd; + uint8_t nn = block_state1.snd; + uint8_t kk = block_state1.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }).digest_length; +} + +Hacl_Hash_Blake2b_index Hacl_Hash_Blake2s_Simd128_info(Hacl_Hash_Blake2s_Simd128_state_t *s) +{ + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + bool last_node = block_state.thd; + uint8_t nn = block_state.snd; + uint8_t kk = block_state.fst; + return + ((Hacl_Hash_Blake2b_index){ .key_length = kk, .digest_length = nn, .last_node = last_node }); } /** @@ -1057,8 +1177,8 @@ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state) Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec128 *b = block_state.thd.snd; - Lib_IntVector_Intrinsics_vec128 *wv = block_state.thd.fst; + Lib_IntVector_Intrinsics_vec128 *b = block_state.f3.snd; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.f3.fst; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); @@ -1066,7 +1186,7 @@ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state) } /** - Copying. The key length (or absence thereof) must match between source and destination. + Copying. This preserves all parameters. */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state) @@ -1075,9 +1195,10 @@ Hacl_Hash_Blake2s_Simd128_state_t Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = scrut.block_state; uint8_t *buf0 = scrut.buf; uint64_t total_len0 = scrut.total_len; + bool last_node = block_state0.thd; uint8_t nn = block_state0.snd; uint8_t kk1 = block_state0.fst; - Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn, .last_node = last_node }; uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); memcpy(buf, buf0, 64U * sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec128 @@ -1091,9 +1212,15 @@ Hacl_Hash_Blake2s_Simd128_state_t sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); Hacl_Hash_Blake2s_Simd128_block_state_t - block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; - Lib_IntVector_Intrinsics_vec128 *src_b = block_state0.thd.snd; - Lib_IntVector_Intrinsics_vec128 *dst_b = block_state.thd.snd; + block_state = + { + .fst = i.key_length, + .snd = i.digest_length, + .thd = i.last_node, + .f3 = { .fst = wv, .snd = b } + }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state0.f3.snd; + Lib_IntVector_Intrinsics_vec128 *dst_b = block_state.f3.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); Hacl_Hash_Blake2s_Simd128_state_t s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; @@ -1135,8 +1262,14 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); } +/** +Write the BLAKE2s digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
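For illustration (editor's sketch, not part of the patch), an unkeyed
one-shot call with a truncated 28-byte digest could look like the snippet
below. The trailing arguments, the parameters structure by value followed by
the key pointer, are inferred from the comment above; a dummy key array is
passed rather than NULL to avoid assumptions about NULL handling, and note
that a BLAKE2s digest never exceeds 32 bytes regardless of the 128-bit
vectorization.

  static void example_blake2s_128_one_shot(uint8_t *msg, uint32_t msg_len, uint8_t *out28)
  {
    uint8_t dummy_key[1U] = { 0U };  // ignored because key_length is 0
    uint8_t salt[8U] = { 0U };
    uint8_t personal[8U] = { 0U };
    Hacl_Hash_Blake2b_blake2_params params =
      {
        .digest_length = 28U, .key_length = 0U, .fanout = 1U, .depth = 1U,
        .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
        .inner_length = 0U, .salt = salt, .personal = personal
      };
    Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params(out28, msg, msg_len, params, dummy_key);
  }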
+*/ void -Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_params( uint8_t *output, uint8_t *input, uint32_t input_len, diff --git a/src/msvc/Hacl_Hash_SHA2.c b/src/msvc/Hacl_Hash_SHA2.c index 995fe707..a9a6f452 100644 --- a/src/msvc/Hacl_Hash_SHA2.c +++ b/src/msvc/Hacl_Hash_SHA2.c @@ -211,7 +211,7 @@ void Hacl_Hash_SHA2_sha224_init(uint32_t *hash) os[i] = x;); } -static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) +void Hacl_Hash_SHA2_sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) { Hacl_Hash_SHA2_sha256_update_nblocks(len, b, st); } @@ -825,7 +825,7 @@ void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *outpu } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - sha224_update_nblocks(0U, buf_multi, tmp_block_state); + Hacl_Hash_SHA2_sha224_update_nblocks(0U, buf_multi, tmp_block_state); uint64_t prev_len_last = total_len - (uint64_t)r; Hacl_Hash_SHA2_sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state); Hacl_Hash_SHA2_sha224_finish(tmp_block_state, output); @@ -847,7 +847,7 @@ void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len Hacl_Hash_SHA2_sha224_init(st); uint32_t rem = input_len % 64U; uint64_t len_ = (uint64_t)input_len; - sha224_update_nblocks(input_len, ib, st); + Hacl_Hash_SHA2_sha224_update_nblocks(input_len, ib, st); uint32_t rem1 = input_len % 64U; uint8_t *b0 = ib; uint8_t *lb = b0 + input_len - rem1; diff --git a/src/msvc/Hacl_Hash_SHA3.c b/src/msvc/Hacl_Hash_SHA3.c index 89bb0491..b964e1d9 100644 --- a/src/msvc/Hacl_Hash_SHA3.c +++ b/src/msvc/Hacl_Hash_SHA3.c @@ -251,7 +251,8 @@ Hacl_Hash_SHA3_update_multi_sha3( uint8_t *bl0 = b_; uint8_t *uu____0 = b0 + i * block_len(a); memcpy(bl0, uu____0, block_len(a) * sizeof (uint8_t)); - block_len(a); + uint32_t unused = block_len(a); + KRML_MAYBE_UNUSED_VAR(unused); absorb_inner_32(b_, s); } } @@ -2166,7 +2167,7 @@ void Hacl_Hash_SHA3_state_free(uint64_t *s) Absorb number of input blocks and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses an input of multiple of 168-bytes (SHAKE128 block size), + It processes an input of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block are ignored. The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] @@ -2191,14 +2192,14 @@ Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t Absorb a final partial block of input and write the output state This function is intended to receive a hash state and input buffer. - It prcoesses a sequence of bytes at end of input buffer that is less + It processes a sequence of bytes at end of input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffer are ignored. 
The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] The argument `input` (IN) points to `inputByteLen` bytes of valid memory, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffer must be passed to `inputByteLen` including the number of full-block bytes at start of input buffer that are ignored */ diff --git a/src/msvc/Hacl_Hash_SHA3_Simd256.c b/src/msvc/Hacl_Hash_SHA3_Simd256.c index 131c34e6..e0bb7e0b 100644 --- a/src/msvc/Hacl_Hash_SHA3_Simd256.c +++ b/src/msvc/Hacl_Hash_SHA3_Simd256.c @@ -5992,12 +5992,12 @@ void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s) Absorb number of blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses an inputs of multiple of 168-bytes (SHAKE128 block size), + It processes an inputs of multiple of 168-bytes (SHAKE128 block size), any additional bytes of final partial block for each buffer are ignored. The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void @@ -6038,15 +6038,15 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( Absorb a final partial blocks of 4 input buffers and write the output states This function is intended to receive a quadruple hash state and 4 input buffers. - It prcoesses a sequence of bytes at end of each input buffer that is less + It processes a sequence of bytes at end of each input buffer that is less than 168-bytes (SHAKE128 block size), any bytes of full blocks at start of input buffers are ignored. 
The argument `state` (IN/OUT) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] - + Note: Full size of input buffers must be passed to `inputByteLen` including the number of full-block bytes at start of each input buffer that are ignored */ @@ -6378,7 +6378,7 @@ Squeeze a quadruple hash state to 4 output buffers The argument `state` (IN) points to quadruple hash state, i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes of valid memory for each buffer, i.e., uint8_t[inputByteLen] */ void diff --git a/src/msvc/Hacl_K256_ECDSA.c b/src/msvc/Hacl_K256_ECDSA.c index 0aaab085..6f7bb632 100644 --- a/src/msvc/Hacl_K256_ECDSA.c +++ b/src/msvc/Hacl_K256_ECDSA.c @@ -351,7 +351,7 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b) 1U, uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));); + acc = (beq & acc) | (~beq & blt);); uint64_t is_lt_q = acc; return ~is_zero & is_lt_q; } @@ -372,11 +372,7 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b) uint64_t a2 = f[2U]; uint64_t a3 = f[3U]; bool is_lt_q_b; - if (a3 < 0xffffffffffffffffULL) - { - is_lt_q_b = true; - } - else if (a2 < 0xfffffffffffffffeULL) + if (a3 < 0xffffffffffffffffULL || a2 < 0xfffffffffffffffeULL) { is_lt_q_b = true; } @@ -510,12 +506,14 @@ static inline void modq(uint64_t *out, uint64_t *a) uint64_t *t01 = tmp; uint64_t m[7U] = { 0U }; uint64_t p[5U] = { 0U }; - mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m); - mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p); + uint64_t c0 = mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m); + KRML_MAYBE_UNUSED_VAR(c0); + uint64_t c10 = mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p); + KRML_MAYBE_UNUSED_VAR(c10); uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p + 4U, p, r); - uint64_t c0 = c2; + uint64_t c00 = c2; uint64_t c1 = add4(r, tmp, out); - uint64_t mask = 0ULL - (c0 + c1); + uint64_t mask = 0ULL - (c00 + c1); KRML_MAYBE_FOR4(i, 0U, 4U, @@ -567,11 +565,7 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f) { return false; } - if (a2 < 0xffffffffffffffffULL) - { - return true; - } - if (a1 < 0x5d576e7357a4501dULL) + if (a2 < 0xffffffffffffffffULL || a1 < 0x5d576e7357a4501dULL) { return true; } diff --git a/src/msvc/Hacl_RSAPSS.c b/src/msvc/Hacl_RSAPSS.c index cd19195d..7b004455 100644 --- a/src/msvc/Hacl_RSAPSS.c +++ b/src/msvc/Hacl_RSAPSS.c @@ -167,7 +167,7 @@ static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b) { uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; return res; @@ -189,7 +189,7 @@ static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n) { uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t res = acc; 
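  // Note on the mask selection used just above: FStar_UInt64_eq_mask and
  // FStar_UInt64_gte_mask return all-ones or all-zeros, so
  // blt = ~FStar_UInt64_gte_mask(...) is itself a full 64-bit mask. Hence
  // (blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL) collapses to blt, and the
  // shorter (beq & acc) | (~beq & blt) keeps acc for equal limbs and takes
  // blt otherwise, preserving the constant-time comparison with fewer
  // operations.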
uint64_t m1 = res; @@ -288,11 +288,7 @@ pss_verify( em_0 = 0U; } uint8_t em_last = em[emLen - 1U]; - if (emLen < saltLen + hash_len(a) + 2U) - { - return false; - } - if (!(em_last == 0xbcU && em_0 == 0U)) + if (emLen < saltLen + hash_len(a) + 2U || !(em_last == 0xbcU && em_0 == 0U)) { return false; } @@ -553,7 +549,7 @@ Hacl_RSAPSS_rsapss_verify( { uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]); uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + acc = (beq & acc) | (~beq & blt); } uint64_t mask = acc; bool res; @@ -568,10 +564,9 @@ Hacl_RSAPSS_rsapss_verify( eBits, e, m); - bool ite; if (!((modBits - 1U) % 8U == 0U)) { - ite = true; + res = true; } else { @@ -579,15 +574,7 @@ Hacl_RSAPSS_rsapss_verify( uint32_t j = (modBits - 1U) % 64U; uint64_t tmp = m[i]; uint64_t get_bit = tmp >> j & 1ULL; - ite = get_bit == 0ULL; - } - if (ite) - { - res = true; - } - else - { - res = false; + res = get_bit == 0ULL; } } else diff --git a/src/msvc/Lib_Memzero0.c b/src/msvc/Lib_Memzero0.c index 3d8a1e5f..5c269d23 100644 --- a/src/msvc/Lib_Memzero0.c +++ b/src/msvc/Lib_Memzero0.c @@ -13,7 +13,7 @@ #include #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__NetBSD__) #include #endif @@ -36,7 +36,7 @@ void Lib_Memzero0_memzero0(void *dst, uint64_t len) { size_t len_ = (size_t) len; #ifdef _WIN32 - SecureZeroMemory(dst, len); + SecureZeroMemory(dst, len_); #elif defined(__APPLE__) && defined(__MACH__) memset_s(dst, len_, 0, len_); #elif (defined(__linux__) && !defined(LINUX_NO_EXPLICIT_BZERO)) || defined(__FreeBSD__) diff --git a/src/wasm/EverCrypt_Hash.wasm b/src/wasm/EverCrypt_Hash.wasm index 1447feb38afaaca8ed624e0dc44b1238d670b653..c6ecc136716d17679ee78fbc5a0b782b728c03df 100644 GIT binary patch delta 149 zcmaEIl=;q4W-f-r;#@`s1_s8h6S+PKvDX7ZU41=ceFB(SUq4xpQE8$->%=&j&HjvC ze2gD9-{LRjW4y9CK+c4N$&q1mi<&kgh+eO*V8hwh*wDbh#8k&TxgbSl@}K0e&0#5g t{EQPOrxxooN^hQ893r{-ea{~!&R!ruHFW?0 delta 133 zcmca}l=;a~W-f-r;#@`s1_s8>6S+Q3+#oX1RdTWcBkN`Z#?^d`zc%0DFXdyrusKN1 zgky7unl|I+t?CLkoRbR2YRZw91@^6(L&fI6fk4+)_?=D*Q5BSUH`#7KPE;^pIF?gX_uBg3X2UmDR2tQTCxSu<6@<_Z(U8KEkHJrazdM$=!jB602N>x$Gn<6(s2&o6 z?iPtVdX0$U&wzez1&V64?i==y*b$UP!fTSR8;TdFm%E^73*lK3!jfpMG2B9Ni{@&{ zS{h*?(K4@=n0asYQ)*lEY609?=YTJW%qu1dU7KSTAdelc2x2*_=F3yx{Y4G#QiLB6 z#WR*Dme_-?XxO6H$$9bG#}iCl88)0i&RNwUI)andQ*i70&Y|ES)=}Zb`Zm0b&aofj zx7qtJ%f!1Zn*}yHjiI~ar!iRWXxd8DcWx#<)v}E3*%YO$1((ON#pzbX9?30G{K4gp z9L42k_IGfQP$}-d| zDv9ZNgqVR_X?4vJ!hIa8xJOfU89mplKEg6_M@>{F=NOdJR$`{t5;MP+{70>X9(9C~96zq3G7;OU%<1F2Oy>?NV?9a1?I(i}dp`?D zeB%`1CXNL=N$)#Vhgg`q3~?Zrn6vB2|Gtf|gzGmrdhNQ09>0c6EVH{|K7CEwnS8`8 zXK^3dZ)YhO8s{L+<(S5?;0yBHZLCMEY$E2^W@1J(li$LzoMZcL@&upjNA%uHWo&!R zR0i^>Oko}c59U$ut1U#0|B|qj>$f?Yex))U`-u5^J~205Bu;)lVISA0Tq1qVB`UML zmCCfW-T}wZp7|;beG9*Cam68;(LuwPR7^wER7`qq`!eQJM1wfAmQvKE=p%Wl?7fod z%34ZMEc?4eqvSE}KabgL(f@lX1U1I|+l5GF*UBjW(3;br%f;oMv~SM_95>_-gDUZ{2HJ1M+jGcEdOYdw@yc>~?=`R+?7!R4nuOU5E1U+)LiEseL92dAK8C(I+Mq2F(7t|QW7GE7Nt#yM% zN^+^e4HMh{Lr4*$fli7~Rav7u9K+2S{(m8Tr*URZ^8ky&tn~mbF`Kl253hs=hZ`~+ zSGMAL@Wm~$n>)NgkDFr8`tor^Opo7)W_V@%Fu31J|bQidx!QvxdsK{H4` zz&rV=k!a~3O@QJj0pSB*r1$_NBOnkv8YteMizxyydLXfsa#`_J#3d+zVt-?{gm z`|Y;Zs&@aZ%74xm%wSG{48e@ZT^W>SbJ=UZf_mX6A1z%TA2LM&EJx6Tz!nPLtREaz zFHcq|*=`{KRIFYY3l8!`aEQW@^$CIC#HI^=;Jir{B)zHhbQ1ROlM>ad0YGZglnZDI zT(ik|jJioiaKAyO&r_{J^VgU=eB`TA;%G>}mCSnoEHb+B9!_YUpH%69reg|UB{4La z<{GA&DDSNZEsbI$O=Oy994BVs8+T~wg6O3J7H8=90ARYg$rxHQcNX&4fhvEtcF9a> z;=DQ~IEf8vAc|`YQA{!AjyNcY{#YtZ&_2At#6_XQ3rK61oK@M9lb2C)+p;c8vOnvr 
z(y@V+6ENtr3ioEM!zdB2T`~&nnN)`1&L2mkTutgC^f#^{KgATswx+!<&jnS+u(#9m zDcb5V)((xj2!wXP#%unb8d$yV(?yY_YaUg3PV!(k{5aT&! zeop>r?!&{=$s^R9AT)6N>;%a~t|OTV=6U*o9TbQ>dHXERY| z%h{QGj_mh(N`{7T#0ZY59CvJ{$Y8^1#Dk5*^f^z=$nz98ajfKcaSKI) zn(iRFZ6le2ZAOxTLXz28NXZWhDf!iAq9%MpSjPQZ90PtMna=IRoK{54)h)y+Dk2=< z{)?^TC$*Byf;N)rXzK%8)1Lb-6=Sn^H9KMx1$R>OC6`bWHI|T{e=&|dT}+K=nOjlZ zE&C&RDeV2yN%Gu^;u!W%sajsgZ_h8dT0!(rHM&$%=}PDJ zq`~nJo;3KQ4gMjU-BBiY=E$z4Pp7rBAMRKq3+ZzAuF1>HR0u*wg4v)LuTc28=oNly zz1q$CHOEi0xLp8e*4mYX7ck>elh$BPF_`r)tTHH4Rx;1-Sky@A_Qc8D?mdWMmz#$v zb1W2H&nyhT+hfgKYzYTkNyJRcZ`Zs*qwKd0();A^K$?GB05Q)6v`o6XS7xMLli8cD zr_1E8u50n-v;2GXva4?-SozZAo38ZixRmr>e=o5fr;Gam$vAY$#!b2_tFpekHreby z>}0>cXOq?c*=Un>{(Haem$vEtmsn!_gO?CDJm`?iv5Sk-0BWIKbPyoG_BmTo94i(H zfFDv(r-Tc-D06lcUd0ztitL>1oKWW7Jyvv3K{#d@RDgRB=c`~J8jSSPb}sWo1I|!u%HGE*bIIey;`qfjh*3QofAxuixD->KsUwvt0LDKj@Z7t zVuBirwEpi9uc*N!Gff(x0~5F*NE!sM%!0s@P-z<>VkdYsI@xWiCPc&Xuj41 U-m6%rhhBLjkIt2g5#vDrFZoM?UjP6A diff --git a/src/wasm/Hacl_Bignum256.wasm b/src/wasm/Hacl_Bignum256.wasm index b28b276b9798c53b558f6b13fb71d5309e76a94b..ed099987e4df827b6f3c61b135a8925484242efc 100644 GIT binary patch delta 821 zcmdUtF>4f25XWbB_s-qy+uOa{m%HRV;!F|H!c<};)jf-VM3DFmY%DH^_zCU;i6*2y zWU;Ucb}7V%1RFyXL;|9TZ9v4zN<{1|oM+DSNNZ&`^JeD%pZW2gJr2HX2T%6P-opp> zRaw2nr}q7u&uI4!K6v=t4&Hvm?cO`jZXLEzt*y@k(6Ye}FAr|9wns-dXK=&Lemmn0 z_73Lk?eE&N_kSGcYIA*c6@0H(>wCT8ZiH9PHVF)_nDCB_8|<2}oB144copW0nG{=b z*zHhw0YE&8w!-EDg&a|2U@_=ZRHBF{XoBN08CE7F)#Zc|u23Z_H4U7X4U^)L!q%mf zQUeQ;SHzerep>=q3fd=4se2a7^dgKk6^B*S*Hg|2xk?!&1S@V^>Es##=c1C2snjHs ztDI1E6b=<#h4aDwQhia&m?m}Yk9o|KBCbfws4#K*PD8HGfa_G}22GNdLWdfTxpvI; z@8!<IiV)}8j+LE}$@hHR_q;xj?vF$5>0|B5IIn!a zAHB+(FZuK6HQrrIxKBA!-oc6yiUDyV4ww-VCz}{Rt_vIK?!ZD%+CzOve2R2M+Y2q!L@UE zHj#EhZt`l^15dEE*vC0=r4N@B7RU#EGasHTkq_?zOPkN9&7*P8+gt<>E5NY|6>xy< z%D<#`TmptQlB&%}Em1_4=KNXs$KsRp{Z`^9 Kcl5i&fAYU_$I|Zr diff --git a/src/wasm/Hacl_Bignum256_32.wasm b/src/wasm/Hacl_Bignum256_32.wasm index 5fcc70ae85583e1b2246b79018731dba44111943..31c7d5a7e207648cc2ad6b706ed66117ffd87494 100644 GIT binary patch delta 792 zcmdUtze{655XX0)`!vaW$xF~9fFkN53UY|V>e@RF2{8y-7=J4$$~A|w zt!QCmDeOOBcf}QY{UKs^NP4|%oW)-`TPwSnotf{E}xXXMvwu1kYmrh7W! 
zn2FM%AKuyC++>ckE_GkE??Vol;xkIjQmx_@B`Wa-3_(b-f<9Vq4T(w_LLH3h%UoVx zEsFw#6&n*95OyJq5s08yr+~f^;8!pTQH((>?&cJuVuc0>%2tjZsUCH4Kx% zK8oRooWMR)x1f{NCFx)7$q5KB4jdB@hbXWaJ11pgGcZh&O0rI+nLOIZ3MHP+M@1XK zq~_IBR#KS+fBdPrhb<)TgVYxbGMj=FrlAES;V47evXz#twA19l^E_x&3?fiWOQ^7sD{A&37yA?cJRn@U86&>R5X`u`a*tVa$zcdG2WP^~#u)_NS zX1Nt%B;j}(VH`spA!NV=N8sOngh+33gy;xl&4;pPwO0$z0~Il%5(+9~2@43T+^r8# z1%avzs}8^_TyZ+hEB7D!;KS(>RWQILklKn)B$kDA!TeT=usK^q9)y~A diff --git a/src/wasm/Hacl_Bignum32.wasm b/src/wasm/Hacl_Bignum32.wasm index c2102b8189b2a5ab8d663a53be33e8f1e9a552e4..b34c2c74659e85c8d7aceaf8a53b0e371f28afe7 100644 GIT binary patch delta 1252 zcmd6mOH30{6o&7ePHBhfW73-G}^e37$ag-a68^aXa|#*NA3fbIWaY(ZOcbyrbZ_VZ>2YOY2_o%IeWRIu;N^jxC2+5*}@0) zniR9g4A#b&Ap^@9JZH$c(m1O*Fz>;?EjM{@Q8g)FkmTNZ&s4Vp1D35*>eaqP7q2fY zFiD!vm?Z<@1SS+vw5mkjGmPINc@oq7MO^nAl%)epHH-DY4Vt;2C+QN)`e9CkRQNeQ z3@y?`D(vC>6ZSPqGXY3`#`%OihjUT0KHNItOHm{NPp&g3N zWsdRw@>=t*5O6;Q^0UwRKyz^jXcPW*bb%!VWUnyBHH*@EWWKPib*K&t<{e&R7p) z;2~Av!Hrh^gjZ<5g%31v0~;%AWrd#*2W%M#7_48;l(fN5BPzzy@zv6bptT|bBpB$U z0(3g5gX?(pq$+}>?*g4p(``!Sk_;8BaS?)$2tyEbTF62ezqd3ugQ}{~@W)luOBIHp z|Bhj%#_%7w9jkHMYIz+%i%_*eM2+wfxUh&oL_})IV`5G@~1eQPS7vCkS^j#=MZgs-Pz861L1xJO#lD@ delta 1630 zcmd^9O=uHA6rS1aHoHxmuGutcw`qK@`KfJfQ~!#ng)TxxiYSPP^iYe`pO~h9dXeH5 z@!$`LbuNNf(StpB(kMuav?$_1K}5ZH5JVA=dh+7Ix4TJfi}dD6*qxm>@6GqVdEeW4 zdi3kDt7bK~xqaAY+s=niH`Y%-4~Y!d?)Oh{agQ764P8tM92}e&-hX;@bY%RLv1C3l zJT!b}XnchE#4TQ5IhZ?fEI-h-naSb{2d)T3YT*^)wiM@nk&)U83(~wIeS9w_)ov~q z=G0k<%VJK=7G4D|NTzC9Y#n2k3@m5yoF(U6JFCkg9~7&(8)~j_RdlYYCAh_!Bc)Ja zz_Kfly7dqBGZ&|(m?WL;vE2ly0z(Zny#$dD_KSCQc>>czZ>ubO%(5XhVX?7s5i#3^ zZiLB|GG@y;KO2{6WhC4|r9Ib2)6#943g@ z<^vkfHIxYTd4vwccG2?9*dH?ejZcY2Jg)hwR1=tYeq9lx>-UH$tXRHy4Ik6?zNVKn zMiaze^d=tB_$#rM^xKlW!Tp6d$vZq+6bG~w^}Lk+NZC#_-<7n*^Iln#qONu8KGi9! z#ZOvt-r_|Sbj7K&#kxpkhLzwaKl*7J@b!?v`s5y05zxqzb6L=+AH3iyF^DGVh1H;8 z5P}x#m2C(r5X2g&L7hBnmgK__wNWUC1{fHIu&!dQhio|f>2W%ZI*Yc+sDKO#rNIb9 zEVj)h7lp7xE=&P+M2@~hF79DO93QcZw6kJ#y&EllwADHkO^9FvaEw6%7{E??DYMu} zeHJG$f+?T0kt`~b0~ae#%eZUZC8P~YXjXBB2LiMN^djLQgm#xIXY1HRX+wYnO2u~% zOXzf5onrzLn1m)kx4KEMv?(%q7V+;%(x3+d*bL23u`D3)q$qc~_!m`B`B|?>wYL4Q z+8?I^Ez$np=`HTJhYxu|J4%-Ni_|5VN#G==Acg6&&}mQT=2eAW=L+2dEh5yhgD#)F t9nN*SkeL*pI$o6Q!a~N!`9txnGfB>Gou%6-=G{i~8xGNJP`k06{{|*}HJ<3w6JE zXNWU(vhpbQcGnDbxZW((?MA6ZglcEsHp2VGZuNAKHI#Mq;yY_gb8}!Cv&o_{8@`|S z2aO0)OiF7?io6sBX%)O^nCcwq#h8S_ND*UEj8PrdkzLhEjT%JPFu-Yo(UdTA9KdiS_u1p><3b+YE~sh>A4{On}a7+6@@(g=2jx-V1t56SRyg8)1hMI4?yCcf20vBBZK94@4oMI=ll9?KGnz1^qqQMdp?XB z`Q1BWFFLq;APU`J&<(J0uM2~%Xgv6?i67DS{rxP_j1w=0542Rdnka`4tC{3xva|hY zR1>eG+pp|Qnn1(x;7s)L&0My7C6Rx4`_pqiZ*6RVp{*|j+PWTu?{+U-B@;o4HEG_G zq9;XPnteA%7Fu064V0Mz{+Gf zAuq&RR8!<@Oh$f)353O!Y$`6*xlR#@DLhwIO5u1$F3=(_QUmKq@!%TCh?^XW9+9G+ zV&W3IwoNT;;Sr7u%AtX|a{%_o)ISXL|HZ!*p1_oU|LEBbrX6~A_~ZX&o^mI(O*zKa vv1tuzVw1he%v(y#YfYI~VEl4yUK`ud&fdNd@53jf-|1a0Cs*mI|u2m^RdA?b1mc?CtC@!`Kqh#>$i! 
z1SWZ1nro7Wk~gH;@U7@IR}CN`*^Idj2@4X~3%zJ}b)q9pa2l9|B)Z`GJ%f^sG6uVouBMr&(MUngt>bp=xdOjRU9L6ZWU%GdQarit%>plN!yzCvSYd!J(lGMI%l2r7h- z4d@Mm#GugYTnQmbk^)cVLH8O&l1 cayZa2Iq=hDj7+NY`ZMPj(f(>n9j+bo5Al+Y#Q*>R delta 1162 zcmd^9L2J}N6rPFMZPTpTxEr%;w_V?Bv%9TeDOGCgX$PUGJr%`DZ>1nKjfHKGg4u&7 z5f+(?XMce(h$qF1h$rtnC(!_Gz6 zIoYi`>e@=LV7$E7?J`Zf>&b7o<~1CY)9cQ>0fu1INiZCO0l_zLfR^U5HU(@^Zwod- zG7O6Jzz2hZq_`kE>%*6+^f(x3fhE|29)gtv3yV-hn|4kBBR-*TMmkDDhf;uLD5C?8 zJ%}e0=Ca^{;PPX&61w0Db}^Px0Vk5;5KEP$&^wdjMq4DsJ9;5Fa{Wp)>}vh+n|`{g$Fljf^C}Ef)?ceC@`P* z&29d#=KgEwem}z_8M=3m|8h36F1=7>C;3W_df0#lHZy0NWb7Got97We6S1?CFsZ)y apE-Xj-vql>@-?nF;;A1Ow^Fr_!2;jRCZ$gNa(jvUckzN zJ)WGvto*c!(Vg`a({7`_ z`=UiX@5t2O`;ivrblLMt(Oy2(qfTKVbb9AJ5z~*eUHw?TF0VU91fjJ=;y`FkDdzX9 zmVhmziz13gNkMj1?_i_qejN>Ubqe5UwQlq^?#T?NWamSdvokk7>2X24?s zI@{le+Y~a~!GK5kv5K!4O8{x09OfvGAqL!NV!p;N2$YTIa)@Z#HpT^zK_(h#t^wE= zV~2h>_HP`DABTVN));yF`4PVaDnny+${Ooqae11-87fQ!7N!LnN?;VM40QYEboBM& X-K?zF%JQT7pr4(~VQnWW)ULHJvCW7f delta 1029 zcmd^;zfRjw6vof>AFxSqOzb#?gm4ZxB&kG)NCniP;$}dls6&S`bg5X4g*s7wwMx{!9f)3p(wa0c6Lf4L!Z26vhj=y{E_q+Gq(Wm39_vX~)*NY3N z#lOvk^ z_bb+y0Sjx&`BJ2ut5N*=!R8(r2vQtK<4}q~iclJ%t&)jm4>kZK&k2~V;xFau3aYx=742$Qp;`mj!@`CFZ z(4!g(s$&fugo8{!3}_L78m!fr&|2h(jY;L|y(jLg&pRtq))(Z*+46}LOw1dWfuCm*-Z{bK$NQ9XT?>t3k7On3#T~JNJkCzb%4U> zAxVtX|BvJjdoe-sAL+ehwOe|(m*FprDW}nKVRj6W@%hxmCQ}+=T+K1ICNLIRzMWz0 gU?(~4UMb=_?mYZu<=45MUC;g?Zf`tIj@LhmKRw&JwEzGB diff --git a/src/wasm/Hacl_Bignum_Base.wasm b/src/wasm/Hacl_Bignum_Base.wasm index e407cd7801c73d69dfcdfc15c6db86c600fcf01f..9e75139e78655daed16185b0b7c989c0559e0df4 100644 GIT binary patch delta 1271 zcmZXSTS(J!6vy{-n=Z_pbDKJK?xuOS2rqb_mr|!wx7pNOVrJl)X6;S z5wBY7Egtm#JiwSx%M2_)m|=P*_Rbm%l3-_BUzdHLr>DEOU-3`f+STUqbhnG7LLf_G zfx;1{WkEtOOYtqT3C1Kot2kTAB!X4k>H8{sCgB}l2;&cgM~4K?%1{UAOsJ7_B{Y+B zpO`L3eU^AjzQAi+7?O9xklYBvQ>Fm3iCx4yiXGk=4Zp-S`{7B@7*x%hsny31UXsV~RB88*yvMjKt6Lq^gV z^LV{YRGU!Sh!ds_UN^@h`6M36op?NrN&p*(UgBngpLbRcLsOdA4UI1e8hbK^A0%V= zZ8F9bDWH`&PFziaW;zwgy;LMsX~@(QuM^ja(sXEkq(d{Dag%F&%A>sj{A=u(J3u4T zm_>;qk&b`dX67+VJoMWw=WEOQ%v1YWJ!1aVnM>DmJ`eBvcOI7I$j7n<^RcXX;s>Ip z0G+x5tW{Qsq^l6g(L(eWh+l}tB6OOHb~v+(vDW!wTI)MAG@cR+FPC8WV+o?srC>dA zjQF$^nt?JTUzH)bTZW9O9Bd{|5m(EhnL7?m^a;|GS3t8>f#LK@yhVE@u3>_>PLx!k zQ&jbWQ)eBT#vOLv?1xXAvtM7e}fogM_o!BvHN-5?||mJgRZCJBs5YdhTEMOzUsulb8xy5 zWj=AR5tgzhmt0~N>81KWom3Z?B@*fPgo_pxfAq2mVR189L>wkAG(&pt6qgEVEvx?> ZkL>U9_(EJ(Ty)E6tVh|3i5so&{{Y>NQ7-@h delta 1294 zcmZXSSxA&&6o%)U#WBnr_uOY3HFuG4A(wGnN*&jkF_$#6GH}u~mr#r*Z!)CR@h^x1 zy-4ULq?pQX!lg1zyy&7xLApu0DU+B$f~fPJ3khQ2`Okm8_x)eyJI6IIT=QZ)B23tY z{gk*Ow-dSCgTiiiWM7uFVxJIFywD1{V=7W8B$J5KDm`jjdO9q9U0t2sy_$dO=HBj> zE}6sm7nsw`4W=dtG220iX$|I>u@J=QLu-fw zp;5%i&?Cfkre7H9O6Ew|GS!ywM&hGzDBp+UR1*Q3nAeyK5kIK&M0=HD(sV`mH-nK= z#1MTGaX`;8`Yft9m;q6!&CJoLRjM1Jp?n?<ont{aNNWrrv-~yk%zI)0>tzcLix53%I!jEqKd!<<~Vby2r<(~5fgHZV~UCqvtA5yatUtH zQi5$5Wv(!lrRW$+UlUa(Y|m*El*1rB?ySqm&jbKH CG$BC% diff --git a/src/wasm/Hacl_GenericField32.wasm b/src/wasm/Hacl_GenericField32.wasm index 52efafdf1d60e8bcb44b3c33f4fd7f2a6e19ba25..12c62b4019f36019bd9ace8bb2b0126fe4197507 100644 GIT binary patch delta 42 ycmaDId_Q=@5?1z=#~T_L_&PVQWIXmc5RrV;=_3J$RV diff --git a/src/wasm/Hacl_GenericField64.wasm b/src/wasm/Hacl_GenericField64.wasm index a475b2db329ea3d4bdae0fec0cd095ed0aa465d5..fa41a05bb098b2bfbd2acb540cb777c86b52b64f 100644 GIT binary patch delta 402 zcmX>Wy(@ZxloWG)eLZ7+0wb8HW2%QS>lrtih%!#jVH9O#n!J=zfsuLgF-A>BmdX4~ z+KjA|Z5fpq*?^?v#?Fv@MVV#;D>Z#m!4z`!?g@@iIDaaJiu1`P%V1qMgP z97m2EFegiZee!+QlRzyi*{)9RVo&2?m4Yb{V4nPkJs4FWnqvo0tqA88Ao-G04@gRI zfkZBHSpc2H#XXymWAid@b7n@?NivFz?34J916?N|FTu#Z`60g^Bajjhlwg93ISH#U 
z0y%yPKn0TPOT$rnH^SPYb6oGdH%32vIEJd$bY3JBBgDYyb% b1U1@9#fk|Y7M!SIvAJ6<7Uam6>UUWIuR32t delta 510 zcmdlLeJpx{6fa{vV|@a1eLaX`oG2@~(Seh3vKpf>i=i0<(_~vlMMmbyKN*!6StiFb zDloE6ZeY}8WScC*B*(};c@?AfW-F#FX7&l^8yXn+=1yMCDl5(*<;bAHz@Wh3$e82E zkpt#r39wAQ&w3JQ$V#@WJWln^PW1}PtxgQhEtBim(|9k)vD|K==IPn$5_*c^0=hGb6|3FVc!Y2{D=DK=wTu2}YL9-({p2 zC%+dE0Ww(xC79siX2L3rQc$Nl3Mg>rpt-h0BmwRdZdAF=uSGana7uDZz5v;R%_Tet p>t*HfTOX}}+xkliuFN>C*Hf`#f=hCuIlNvimJt{bch&E*0syIVZ`}X@ diff --git a/src/wasm/Hacl_HKDF_Blake2s_128.wasm b/src/wasm/Hacl_HKDF_Blake2s_128.wasm index 03362c9f65beb7722af8eb9cbff0a8f177f1f292..d3975181e198953ec1263118652b776e2c8f0329 100644 GIT binary patch delta 103 zcmeys^?_@GloWG)eLZ7+0wb8HW2%QS>lr7S^iO;y%E&ZXo>763d9nkeCL_z_PDX7; y)`^AkEQV$bY!eG5xe{|S)AQoZOc>ZFXEDlc-onVk$jCbRAd@sB`{uh$lbHbhnH&rN delta 86 zcmeys^?_@G6fa{vV|@a1eLaX`oG2?f(V=-_tuTwB83WVA?TU=d6AL9L_b_rYvP`_G pz{ooBmnI|I$D7_~QVVdPc4zc4igOkXi)9X^A&TkuSmf5fl zA4uRdqx~{A+394=4==Qe8uBP^G>w#p{Ny(_EJC4_NH`XTVy1K1V z$(SijHD>GSM0c~5pNu*onQ3k9$;R&5+NNTwV6>mIyV_YiIT=ir^uZKlLjb0&HDw*0 zf;%yVu`j~6k}*pRS;4hLiBM8fYQ<6zjhd2(ie(d{K9qtgl5#Jp?P{t6T9UvIr;yCZ zNYYl1F5J>(jY**j7QU?98k>SRnD9Uymx3B3d|7#t@Z(cRf`s=;T;V6APz4IVWLc>- zF$HlD;Td&O3TlAxKuZ$7Foh&Qc!R_hesT&`pzvjjOROm=h=T~vsE?$e1_%$dB;lu~ zkOT;CkhsE6OQu?qEc_DgpG#`FBU1|Ew4`3S#1usmSt5mif(hg-p(u)mpislArLm!* zuBEo4InicK|JP7-ws$vJGyYW+ZO0p|nJE;(HH6VJD-0s{%-LZOxfjfN$gi`iL|sc` zNtaRWIz~-Pk2iNW)wVRAv*xDLj8EfI+uYXNZOuzZeIy;#oJ;4YQ%oI{Le2jR(n#vl z%21zDhK1=gX=SKaWq33lHLVQwX6LjPrBkGp!J|-RD0a6cqp$?l)pm8)b#~|WTe;5; zV9!#pxMWFbS^3gs%PUr_yzfblvimK^%D?}=Jc00o<5UH0b=KB4w6}Jg#v0msyv$mx zOJv4lur0Pq^kSBhP$Ea+l0g&E0ZRu<6!s*%5R&_v|9Y_LSmbNOJ2 zoM*+9^oprhoxB!1t`&nO*K_j92UJsF6O?O@dh6XwXoxqSL4D`s^f~>GHOI2V1I{f= zjG|Go`wL|RK*vt-A1B9SwquF=MU?KRXgSdHDc`XwEQ>}tc8eI5TZw?RIZ`RHDW9Vg zamrt48AwSS3hX>PA6!HM6^I8Z2l;J{^ixir6U!H)seneS?Bn8FUZO2x^q_c%a>T>P z@L?mvhXC0vGS8S7)R}XN0J0&z%G|N?=s~h6C-y*oAs2rPjR{%&u{72x=i-lH=3^ku z7(<$|9F5|S)eDTbpdSwbYTWu82M`QEE+(KVJYXW}6q-O2yPSWqPq7S0 zj*IaD?72*nXi~^B6;h$|kFvQYaiJzbs7Xel3ITDU3U#3rQb4X$C?+F%3h1Ypg_;ad z7wQod$^)j7PN7F=s#ho^$HkNY_NsSAz{-gX(mAKmH0RdJ4GHxuRw&iENfAI*3HGXco<6X=Z`XOe#vWKpqrsHq8=qXii=T7ISGX zO=lK!X@(-6M{~t|nx8Kg&;rNeyivV8mKP?$5%DN3M3!O^<%(j;r9~_$P{m?G$sIeE zUx+|4EtCNi`X(*9*+Yxz(by;q3`>-tDNCG)*F8xrA;B?ne&9~AL_(Z7OIX5fR{}zR z!qCHgoG3+f8Tgf%{1{5ao&WLwKL6jnu>u8YUS-UyEPxj`PPr4K@;0%QA01*DE#<~p zrWfW}Zkr9L%ME5- zij8D(jMp|BCFHiv2CmNyLG@|T14+^OKY3p*5r1Xhacjib+rLIUm|Y_*aaXMoc|4Sc zX^n9H=dXVEMo+mF8+5rCeeW+9m>$$pV7p5JPr^&EDZ%{j&Nrfzh!>$Uendn$KcZqO zKMF8K;}OHG{Ci7a+VyrOzn7cM!P;sCt#IVRQo&Pt1!lYoO!V$%3DY#M9<)NQ7z!zA zD$}da3b7K=t3baB0=Q)W5UasrUW$M>Yl_vh8vJ6pJc;MgN~Gnsa5h}DN(o0j*pcvP z53Z7M%!AibB_{bQTHDpt<=EXOH0?}hKIVPq&t+IA*ASkfR|OORQR^Z6dQ*7RCl&!! 
z*Bez`SF5_NQFXlu%-6e;Z$@w|7j`36pK|c%qMB1(dCqKFUxTETHN30gH2l<@qH0d< z%HtTS>FUbQ7h5y~=hgr9)PKs$12hNhoE!h}U;pVohE_84kN@|-|DUsY&VRo2^xya7 zgJ>&OpuBGUOg_^|TjI1SPFv%&IWD#l(KaqJ(FWe25p8s)cH<}KOegQsh^l#qMpVNi zZ#~z``rvxmo?A)V&3ai0koOd_-)v{f?E#crMV8pXkBHdGkEqzij{>oqA2G29x$mH| zxRP=wl_SK0?xLj#v532A8A2@O9$Jo2Z1DXLB-oolRhl302pvVBX{o zpf&{M0E$x!51@D#CDZ_FszH((K%7Q#Bv1>dVgMnA5^?~wYX|9zbz_0O6p)T{FS`vWe@ZDY#xbQ7@flz0?Bq z22dwcb_PtkH z4@R}Mb~2tG2W-M$Kj$`JgH=EExM!4$oFtyG=?Oc`IpvZ~`0Hg(c=Dkbf?wuHamA)9 zcD{JhrYE86F%B$$kP;ISS8YOMPeMFp(|PfS_@Sp{0r*h)G;4~Uu~CE1AkJ+Ru8e1G zob>VYj68nI6XPF1X|!C?R8tvizzX3ZhXU~<$}He zjfcfDqR-+t{e+IFH@SRoCY4XN(Oa@DBI1U8M#bCmSs>n#&zShW%m{?Yh}-lBHgl1a zB}m@3dAht|chNhxnlRtAYZyUInF<5xoD5`%g^5$f1CVp)yB-j{Vlz)=&_4V$M@{Cb;_w~@lr)H`^_Say)& zH?S5z;9C43sTR@ph~Rldc1wgokKc59H7Gum6&n#hl+UR6k$e`2oAMbGAK^(rdfvvQ zX(0^6YaHh370BP_Fi)@`xXxjg9Yim1I3|Z@93wR#e?)z3=T_3kJkZ@<&O`W*Y{GEI z@5lZa{zQsniBIJ-B0iJPsQ9sb7Koq7XH0x9$M8>W`f1oP{4<+=2JyIXKb2$nrz{kL z{ul-dVD8#KbH}iY8c;4EiJxbXnMK=|3^2l&7j^RIZX}O?(Ci^L5MDM|GdhbEh zFS&fbOe&uo!@rVk5fQ(Zr~d=`i=>p1;3VK7{A^a{tZ%i9DIRijAaKYegkXqFS!=~GN~5P_K5Jt zFv6h6Z#umi!+#|!HX{C7KBMAquZ}B7`ead6fLKujjahS(2kblf!9>XB` z35QvB5PioHWhh7uN>l%cVVPcXJ25T$*zG6^BsV^3<7qxpX(YT#5?3Jo2`f z(M%dd3`DgyP2=t5Gz_b0#==B^c(&$gmq)fj3K%^Ye*)1(pf7Tw-l!B}x;}0^BxcYI zr0FSnCQVm>dZARr!?GxduMo5FUTd~~uT^F7bkKtzsOx6M9+uk5ITWEeZDKAzIt1P) zp#U^bA5Z3TvF1A#3Xu7Dg4a(ppUb!aeCE>}_iTdKPE8Dt}z$yj`Yi|rEXEj6w(re6I70JkKwU+ zlgcnDmhwMZ%9Vi;O3+O%^BSHM7g_kUci+niZoTsPWoS6Oa`M!X%Q+eizkH#3 z_pOkE8fdwwK=caGuP_?E0+2ru&ytWo4X@zlhBh?))f&I6nhmw#6tNl{R~r(o0;FG# zuk?aiI!+NlPUkmHtU+`ocvTwG%@U5+Evul?taQ8OT4*>WKx?@jD_PFU0KTYxvCf%7 z>)N2o!=nSrOX&1ALp!M6sY1)wA~uHTcGXZyZsfeG!Dl0_bGuy)Rf|ouDNMK9Oi+k2 z3uLdl-4@ymb>-H4s3x}=-OLi(li&z+hTD-PbcSo7FkC}B-EOy=yWM2j)1ZOmZnw+t zc01AKcJMza4bknm*X^M_;dy}Df$Qb7IF zUa=3+`$50oXxDv!%wETk-|O~sYwi!?whwAbOB_V6dkh>OGbB0)NcXw}UQkQNDFVo8 zz0SFdwi2*ix!rauW3yX{yXPI`9k`VkHxgTk@!y=S!~}CIF(GIxG5%iKO3-cEN{oXM z%)fanL24_3$FOWA@Cx#d+e)C@`CAFy?UJ_=t{j%RL(@U; z;?gZsbjy6R-3-kL`!ScULeW)-6|i(hjrvVTs3#xl;Wl3gb&wwyg~rH$dZ5Hwz;?tw z(G^5M;p0d2vN@U$Ub%e5S#$F$ct>d=8w^7#sbee5LrAQGQfyTl?8)%xfHfIwW(cYk zX+SR3FX{-NS;ywEdWAI=4*0-+%~f_(kpt7=wPBPU>u4RVW)|ycjZ${3r*+W2Y{-Yn zFcdDvD}N|f3^*d7QbCr`s^p5z(6McDl^t8$qE0S^7G}UsK#?*lHn}iFt3$~G0yzk` zs@B^opm7)iq>m?0&G z;zmonSCL5#%#dXJp^?!Qv|rKf7Y9OQcaRS1><%ijgRE&8GP^@`NRu5>WQSm6HZ&P* z&@|Z*MRr6S4MA25n<~|cwTi4(9D||^)oHXN^i)11hwd&y4R~=;);IZ35ADSN5Za5f zt{?*9_z}IZ4KqM<=N$l@?rQ{{{+vbbmoL6)F|CQB%?glG*x)<$ibtWA-%iFTO# zCD$uT>({dE4#^l%*vI)8(H>X~9w!Lx6J0?B#PK6~soez4?$9VYsDo-~3nsKerFJ_- zr#w~dExZ$v2Ae!Y(vAb2u$_~-+Ec9PJO$0@Da`t=uC@~p>uNi-HdG-6ZL~T?7oxjC z-)-n>yBKnlf(tOz-ek5*aI^$36N~Ob1dh%0KS;@U@%uqy=|h8 zA048fP3ATej%ISv>|=?rdDpWO+F`H_??s#NHz17yF4|y*mAThlI-lS~0W-Kup-ks4 z+k`)u%vt<|F&@9Ag3;V%TiMLzi$7p%8T2RrL#(SQF%h(_cR*ufXpvGNJ zpRrMP`SJkk=~V_2{OUgBiu$Y#<2umD+38tZ4Wwsn@tjvv88%Zez{B4kD&prg@$)tt z{P&aZ0XS8eg#FvKw6!=JAA?`h)i+2XY@#IOOxpC*1?6TfbY zH^LCV$;Nap@td0XOq+17Z^yU{{%1^;{8tOUO=?iWFrFNgoM|`1fu8d>0FtWp*4p|VO< z{#5&$rc|B!jZms$sUKmj>h+HY2CRuWaLk(1XTvJ>e)-Ik;fDav42U1?S*>16wpzu@ zx(u(Whu>=TnrpQh^qs9r95Jg^!ln|%H?3AP7k?zIR<8}e)hfzlTCHX-(}-BD8ig7z ztJO(K-`TQf*+F08)9_iX!ejydTwz84{{jz?fb-L9c1+}Pw_yjb)7zkD#iIizt25Z* zV5YV>n8B}nX1VXfGRQic;L(*$1n}7{EHkFNwgz)xcmW5lVa)VkoyrbgVVlYy__C3~ zeAuVLDls2^U14Ktm>0mWs{u#g;Pp{tiBD&9@#$88#(8Qc$+A zIE7I&+`!`7+DT$1_`p-3$7cl~yfb4~6<$#DRGNj+`t?`1o(m4fR@0xlN z*AKR+0-q{pqARJBCty_SHGy=bIam|GtW-6@TIRnNjMf?gtpmjMwoVsQAq5BlQ`Gf{ z-T-m6pW6+RUhwz9aSnnJMlNuYpixQCx00Z-E_4O|ab0iVx(@b%D>g!ajY$G* z1h$jcq4O5ikZGe6F`}s2v2Q+!UswTOz9MQ+j7{LO$>1WGCvAf3AFhlfh|5zW#e!LF 
zTx<%e8y11Ti8i#L+NGtK3x{i)L(EKDV9BfL@3Gf-Sj)N1#U8syb{2BDF7E}o5xs`VFV;dI(wz_G> zK-69cwAZXL%xrmZis-goRX+&_ZcB`)D~QQSin(+J&JN$!Y1Y=)s)~*G_i%zYc{}fU{#suSIEIM^m=jEGm(SZ%Hn(^v5 z+d5=hXJlIshzEH+Due4$Aq8d2ro%!Urv|92Cy5i_bHd;QZS0h|x9uaZyhW9eBikkuH z<)+CC>gwhc0pxVv<3tO}n?TuHt1Qt5o_M+I@oWX8%iYHH$B+O*8XsQ&+96_lo9N(2 zhd4zYV8phz(oVF4DPf4K?59wS!IrQtIH-ibS&O>Br<2; z_0oAa*hd#)f;Vmbs68D1-1U;j57e;(zFkx15psKGW^!tP)L zfYFBE^sY=6+XFvgvq3Nd{pLOoM#1Rl&vJG!@AbupB^zKp`atIadi+TkbOG5R>N(~I z%U*NKCvAYeCd;_#gIVx3-DJgQRSSn>GN3#Tt<@KE5giICCw1ZEZI+j z2U5Xiz-U4g2p%)x36%q|G zO;<8C3|rHfO6n9;rsiE@GBwO;)0j%?6jY{8DIg?=^>Q*(SWPItSI1LA_JW1(}0|RNH_L%RhF@YrX3I0AIeN6Oo z5?02v6u2t-0ACs82x&F#O?R2s$sZ~cWL9u|h)i2Rj1GsOKi>g`lQ2#F&|N28)Bq`f zgqH*k!OR+#%$^h(_np$%TCI~er4Tn+2n2m0fC8B3%8%Sh*F_ByLRo4HHjn<;hD|fp z$xoT*rzTI>NdwmFBrrVygNmC{!k`Pt08yVYzt7Bd5-&yo`|BiZ#U<>nlK{|k_)Tv+ zyi|eRGg;WCeIQppu5UJY{=kOCGctnNvtJpA2dq~}ka>V!AtemDfQ+0pW9P4@mcN>; zmglX7wD0CwIqmDQ;aAIFb=C4gKC4vXh^gggLAi+Hn`-&Y#UBZ^{MF%C%cD%DT0V1` zMno-d6zbkl%SSQCk3)xHwLGf$-d4-2!pZ|42en$BgITpa-wt`VsO7J!*_;=nO767| zmLkVki5SxBP*yFEZ38;^hV;YO2445GYWb{M-q-r$RFYN8>+1OTQp;0TEpJTacY<1; z?r60<*1vX#lv*B-VNuJ|ouroM{h3zFb5N`0Iha+;vzi|25mw%(X29)A=v0C2P z=Rqxxj-FM^XVvo9w7wMB-Cw{^$g1U8)oZ-)NYz?P^>m6Zn$^>$b{QvztXkgJ_1~Ln zdCIEgEp60w*Q({|E>p{YWhMD|{>r+&YWc6M3_hM!;)to`XG1NI;+tyu%*7uGwft9} z;mWZ2ct)8_wS49>jfh&_DAc{9mLG-F%{X)zR?DM`?`^fbDy&q?b5N`0Iha+;-%V=y zuPphJRjK9CYqM&3t$H`Dq&6fONoCdYQCM+`thIcKoxNc(u1;fFwfw)cTD~}|mcPT) z^2K+wS|00PJ3JsuYk53|MJ-=^C#mImf2P&)9Mo!g4rbNz_qJNz*ylklkB**I%V*W{ zS+#uDTHgIsBx@~?gWtc+TD~}|mcQ%O^2N7ZE&tuG<;3R8WVQU4w^=R!rK^??bQP{r zM@%n22YPvw-_*-zF8@gA<-Z($y*$cf>g6++X+-q$MxpKdsnzclso=(SnBd{!@?^x`C}z){Bk%{^73kVsp#ND(3Gn z#r!Snj@Ha$1#D-p2U0c9&tcKcgV7!2%7!;-S~<@_t(@mzRylufE9Z^P9+dOw>sjS| zRym(l&S#bLS>-%`3H`NP**cDtR~Tz&;U=5uoTTHzoG9%#e^*nTEi z!e_n>b@(0B+;Ua!K@MtE;z(%R9U_}@@JW&tn+v@=D#6sdXRd^i(7WFne!V-&Wa`~B zmuW=w?na@8OYa^N(`cIKG%LnmZA9QAG{~6}5jhq+=A6dgrA~umj%mi}lmNkj9T7-< zq@ZtC@!2ZAO0yh_%_~H>2(ILzY=@^&xRQ^-ZTyUI;>={9@|=@&DL(_^%rL}ZxAQE{ zOf8N=3KC}~+|l2%;9|bWv8pT%j*HEQL(n|##O!>2cAJ>Pj}G{qpMxxDuJ(#LhwEWp z2!DX{36AGk)cN2uk7m1GQ5VpB_Cz1XD=Kd1q&dvuQJSl~qQXTz{L>fb!$JLG)TZ;8~#^;+bWo-}Y9-eD^nXYYx6ja;r zL&H_PoQkSY(eTK+pb%nIP(`>#UqLIB7!_Ri6=?JdqtRCYVlh@|F%(je7%Rj|DuEyg zT1iDT`zDo9i4tWMtqLc~YFe#Cfxr0{?V#TGn6?^j=vkE2S`>v8B+68qDoXwrBOK!M?)e<`)pH4K@vHl zimKqYzRHnZf{p&yQIS{=5hH4XP|jsGA_w?y_uRIwWyt+bK>2o0aF66}yOBTO4f4AV z0jj~L+TgQ}A&<`n3Auc#nNM|))K#K}YQ!d*4kzv;wt(jrqx72rX}>|6yr8ZLP7y#B zsTU8R2FQuTqJ@~Ew#o+LIg96oZ4S}4HnE)_9byM(C+*^nc_`^?+E6dM$=R3Tkk6Bz{oN3k{eTw%w$|bWn+MoQ`{= zFrz4q)Tl%`&Y~QLD8~&^8Ue8=jan3i6eLQcI6;rm32$CFgn8jHI>Pe;iIa5FlP2Sw z&_qpYPB_WZoP;!RQtnku6Cj=wn)FCeNI}vx!E1T5h*PsaC$wNrNSJd%D+t?=Lz^*{ z;|zIYIUyl;EVuDkZVSi_QSIQW}T3jr(2LB%M4g>?q9xDgn%Rm++ za1I~-1Hpjnu`kVI+;P9(sodlvO0&fr z^m28Qz)1nYaj`}6B!PZ17hC_iZDJljI`|?gOhhza?-=IsY_q`464S$A1;lBA7VvVr z5PTNUTzAKS`>Swg)uJ#v2AmnxVWgPms~y8)Di$SFl8=)DZWA$15~WFS1jmO`WGTui z7YB!2DpwO(Tr9(mVL2`LRwTJT4ps#0^;NAc=Rz+>8!bmwxJxxIj)DNV9ZI*6LJDf5 z3S2NXbQ`Utl`1#+h|)^j426bSfz^GLJbx7P`2&0TaKf*lHHzJ87JfB^$05KI9yd!# z;n!&46;hD!YvA2-XyLKa(A=BcRUOVXO1v{Kh5QMsL;$1u*ZJ%2@luddJj| zBV+@dQx4d&1RGQdHt-=}NV;mOrgBMEt;njexd)*WlUr0&FX3(|ALBGnQ&>A-*S9%@}VB?>zfH@ zCoe8aD3}lR&?fv3p}i>U3L+qmAJK~~ya8Uhe1)%$0PiSm#;Kr557q|Oo!E`8fT^!aR{@(A!8OeERRxgI;^*n3Mpt7I4q9fDxjlu z1Or`W7Z5gd5T;i}f-@6CuFv;+-mKII=Zhm?)*oDKR0UgSol5&-)9Kl4~T{D*TO5LAmRH3 zHrhkatrz)5CTf<;$VGX@P81QiQ;E7!iUjUrLOB3EPTgt|y~I}|g)0hfQ5uPhmO_*Y z*DF1VOE=V_`lK$_lk#e&p{wL7-_1m4WR+aiwD7(7lufwV$(S;U?^g0B)E?Za6hgG8 z!yw_ZB^a-0`Jd4;Jj4DUL*(!rUt*%=e@@drXA8b9X~-4&8eg>JW~{DUin@0t4M}{R 
zZ%)z~U)LF5x8+4aL(<|}BmAjKe^JxEXbZl_Xh_An`R)UTl;vM>4=3zF!M}Tn-Nji!q{DJU^-zmlT<> zxgI}l!+L{G6P%eAITr3mLijAN$yiwAy-410%)BDMX7ja3Qu+No=vP`mjOhhIe{uv0 zCt-$p&AnB|MGcx{UbEHhO9FQ;y^c#)-02#3E*0^8OU4p?1KHrJBV_nCE^+eI;UEO; zudg7Iu)D$nz!Hn!bR`p_-eG?4nEa44V4vR&4ZHl12!8lY=Z6vq+=ujiTi{+J3)d5I u{l3RJyk}Mst}Ozr7jIyCfL^{O47%VPz>`<-RK-~SlLOF<+17L`-}?V{U4v%; literal 28160 zcmeHQX>eV|mA)-K*$+Pf4`a(9V|tZ)J#oHX1>$ichP%S(w&-w35m*2 zx9{nDPWL(8eY#ICr|r~qG%Lq()D!ofIpa({s(MxLQPrpV!jImfzJ9QLF7E3K1&O|1 z4QfjJNxvr$AHwVR318xU8X=frT!}Cl_J1otG7$lN2_ket|C^VhuRMsdnKW{9O?ypq zeN%CBeRGkMPVGn0?e1!8Yi;iw6ME0o)_1lx)mP1ToB>g4ni@~EWPqZ=D5JxvG}kmW zwbnUBVT6cm=FN4A!zkm!X?UDu+G^@@^~an<7&9Hl>};=Tb4tRWBl4P4<%|fUObDk? z*HB-V%hYxqKVILS>1aG%?~DxNULVFq0iLdJZ*`K_Y^Qz1=(47C|8!-PWA0vT%iRmIJP6a_N)6h?-L zUZ*??%3qV|=&Wh)EbeoPA2@>^dj#>Mipf((+%JIYb6h7i@>^%|KGO2M)m@XRYi(}p zLcal}oa2z~RPsK|&4^m_Kp#bICJppanq|dJrYge8)|yt6K?gp0z|XbvdCFBrHuR~* zYSedsdKQws^E$*RD(d~Y>T{jxj>A{ER~#Niqc}yW^vH4@NuylVBtOB(rj_gPNKVp7 zN>+iQluBJ^iQ`bpRXI)-ufmJ7IkC!+mP&L}mP&7RLP-5mC{-mYm7-F*yLDO+|b?GQ2*N;b=fAr}Mndf3=y*7Y1^OeUrKC zl+ZP#s3?6^svN?Pp)v7fqoo~ zs`d)6jK&E)sQCtzgAd$D_6yxWH+H!HtxgIH_?+c&QK^sOaw?A}(0Cf}{#MsixkywF zj?f7F>KqS9BpPoeGDuN*j^~>YJpuF+LK59X6FiA-hD1Ivk?a?`nI`%Y;d7QJM5M0U zmRo|P+{6IQl8D^1A3gTX5on`$J^!0`f8Sf>B=GwC_y6(3Q&mn9ufKc!$7dDBWW4^% z2OmG*UFD>Sn5m2>QNnfBqZ#sQhsw|-)1yj6H7Y1U6)ikjf;OH)lTkP{1*j*1J6Vcs zs$otEbIAdin?}=UsxUSU=}e`HrW{yqqWVadAbJ> zv<1YHYLx^DUMNA57fDdcizP_&689GlFQuhkn*%7B3{+RkXqoGXEK8*Tmx6_*p*FV+ zPy%gk8FCGfB3cD4860s0ll(USo{x*D~!059Oc3cHe4BE57mucB2Yv>e}xbCQjS z=4v?U!>cuXjSsKUaN390l1r;;4Xy3y=x|kM7@Bxzdy3a-e34?wWCC0Xj4Z=D#crE0`AlQdj8iXC4iS=X@e*q(?<8^PCSb4Y!?k= zsut~I+9aB{Udm;Cbh+$6x$Fp+%W{AyQPXaA2<46lN+}|TcS?}pT@oaDw*;lUM}jo( zMeaMPDr>mhMGNsJjP9mIcoP=)&|9isMu(S0=AYkZb!w{8^(2OAbtQBZ-vR|Xrr|(=>NOlF&~Xh1 z3Uq>wQ9T`}6IOvjkXE1ujW04ZhyXQG{xuN-AgU3}H-?!hTfpXL&3Y6=hW}`rj)%au-NWM`HAE-&bLV*xN&02xlYyzGFHTF{=Ng+ak zPEuABD618yElVv~I+>-`EVrZ8K~o$Awq52#fpUQYHJPT_EbX#U%B3;7TslxL9pQ30 z3eZ;|bX!^Oh@g}ra=2511b0c0GF=q@^h zH&LLw=`h~Xv`@0z7nvOt<}y`Y?xwkPTY6Lq#s{AQ>!;ijPo*$GpQcHO@!KMu){ykw z>0;sIl%#(uD*ZCP2hrU~u{)e1ASsJ~{;zlcttXu_5K@!0x+Sgdh_pntd)zV9)55(H zv~eHxLhT5qv^EENMH$bi7=!s)Mfjb8DdOM-iJei@;~53_%18c;qR*%}hQWP`@Pn=6 zNZ4m=gL4we_ba+zrT76w4}hzXh49(Dm;|3!gvg#I{+yzF_(44AbK0a43*hsh<_iji zcmZ)5?%v)6)1qE<9I^Z`t(pEY7|KPq{#M^B=nRpnK;j>;;&?P?SMFqX$^NK(VTzq?8*{(4!Mt6^2 z@@PfSFX>hU9?@*^jGFB5C6Vt^KlyYWy4Zsnb`Lq@w#v zki4X%$9z$B(96p7nU_?xAedg$VBoua2C~LN->KsP=^d0E%H#J6}!{1dlD$wD- z@uC79zT!m%I{ZCFpam|zeb3Y3ycn&+-}h)m(C_PZ3LepV@GNXSi26X}`=FnET8DpN z>*zyUM<3Zb`k}3(AL)!hXm$9b4}43dwu4u1$8{-M?351_+9Fgk272mw9=Sz|$m zbvyvA!$0v;^mG{XVI9`VMdQZP7^w??C*zyo(M4`5k!|9R|U>@-DoC=o|8$h8jx#ct+(9g}sb{ z8RpXG6`fbKC2*;Nls* za=7JPFYips#R~zDiEw$#%MNHI@;l;G6A_r_y8tm=(eb(a8$8P$g*+Ps2jxbh}Ww z+L6AX8xJhkO-o;+S1V^wf@ZYvObOZ;3xp7WX4&bN7MkHr zOt7wqwZz-vOiC)Ll4eR0l{5<-AHydTjAKz|@quloUoTb)2bt2-dIc_1pa{aB`S_#gzi z11a7S;^-hC+v;xjUoCg?g@Ihq>f8c5;^QyFjz|~vPV7^?Bk(sFf1~j?4u50p`}Mw! zaYp@rZ%&?3c5|{a!p1Nk|7e?&>0uk=Q)*)jv=PX%zqk+`)POcdQf!PFP6#PNtzh6! 
zLiE5k#&y=lKx#;fjg28mPnFCoV%iv9r(HDg&L(y*jX*j`(hP>pfu$lB^Rt1eX2B>q zqb>_CvxGcJv-lRVR@}4<)50^sQVF1L#un$0$uk zIkJ5^Bu6lIOl~?z(mb9YgK7aS@N!#Va$CSvG3XZ3LXU2tpV!oc;gi?V^_5BN(tK7*ir;zNH5xJg9(O>sY+Oh z1F7z6F^I&rSdDaCTH%=|YiKpErL}R)lXbL?R!S1&rcX22nzoMIg{hxXskwHinHf zWSiarOKGzv+hNFdz<3hlD$w1XVW^vT#YlB`44gf*$K!a9$$AgNXd04Zm|B+OeTGci zOhb|#fDL5}=zyU+z_-W9?jRks*&Q@w2gNoTGP^@`$dVm0WQVXVJT%!6I%3I=7_uXr zi9vRhj#{##hU_TUz_dkihUyWPWdLc_-AyO~KQ1X2VgPm1F8l-2ew1?#5nv~X=%Jc0 z7*^fgTpNRNY(LW`-Tdkd<8^$DKp12}$HGuI*T+cpcnq8qbi(F*!sL8{8)A?(Qllkn zG-Qp8?SdhjLk^qFM$Vie%dt6KH2BckOwE?8*^o7JON`uFsnwFT8nRYy!v=l-a)q=( zDeLCYA0vq!?EoX$jXnN)0@DG}IYfY+Afkub!eF>L9OsjCk~Yy+bZDb7qrc){d%WGB zTwQ^UG4Pze1H0XNcGWIZlXlEy+TCDw)d5ImR~>f#VvwQ?tq$%)bQkEmLbIz*fxPn_ zIMgMO7%gdSwsnbl)fJUmC-%~(^IdKw-;EUS4smoBAUnId(|@(x$rlE4!R!jlL8lOX z8fl#laW{?cv2*(CxSP7=n=u1>u&b*9>XBjiwD5U4B3<-)*y66F-WKkYpp7w}VotUh z`_y*W>=T~O0y6D*ka6=~Mfjb?+Ps4oBzG3O)!yKVUG94o-zU4(aR$(Hitxi8w&>HLshQV%Z zu6s2xNb--yL3&I!6FmVQvphVeu-6_VhbLr*-V@*nOKdmUhwSj4();MP=$^8~PbuuU z$H-al#d|qFV~L+p{KYuL*mB3umix1o_*uoz#UaM7J${z>c}x7f;xEM^zN830OMJ-^ zUsC*nZ13u$4WUYxqY442`(GeQ@%XVxoFE8;-PkdfpGe{(VNgbt@E1h&V2?Zuhdbg& z6~Bm&LslTR`0>N;lyojHSz%sM{IZfQ_fTfoZ4ZOp{7RhUUyXzGn!-r{kB8SR53ebH zJr4033MU3U;x{bu8;WH^e@N-yRI+hzrGL{Bzp1d*AA|U^lHGkveAyCTRyY?BgBbh& z_*vqwTH>!N>>=VLM1IjrK?K#V8&N(|QqD94NEpXoQSuW>0@8C5CHw_ZJ+gTjg2RpI zRV614@I&W&LAuk?dPILq(ObdUfx*XgoJM#X=`TU+dJElkj^9!k`S2PH>_CxE7~0=f zcJ%a6BgQ!(;b;Rsydw#{qe5dSPCN)48sjw>4DtXmdg2)|EPq|e2?YHC3edsaaITuo_-ejWw_iFRJ=$gu{!fL$AaQFRIRP zoZ&@PajwGeFKSV>>@Q+b6{Gzhc~$j4pTsFu+>_JdpF~BTe=67He6rpyE|H=lEbBYz z8-|xpKMl*LX{M0ROIkLzaTHv}&4R@{CjLYDywD`H5lY`!PLptJP8s9M9Gn$^*I!ze zPW@vWgR(P~rW$sp$o9?@Y|2atZSG73BrZ5pAxV|v01~9A)!eB(4aY@rFrz}d$xvCk z91A$wMF##eG`)qzQ)YS_JY{Ae137uB4+GAS6yYypHv0xS)PQ?;#IXgZRFTdsn(m!v zfa46V-k zP*z_fB-Lh+qDo{XuYwPgxPOUjlDH+!r-ix&YQH3xT%R8t10VPd6Hc6(-A-5daZy8V zsD{KnM>aZF+g(G06vfYKhG!42r8Tau2@vr*s^IltFkw1}EW=xeHzJ3PZo+q2TPu*i z1G-*A;zJ_RZ+PUbBx z@m8d{H6;BOKz0Xpv;S&KLcTDN3wT2bz=WEcLqXQWcI)hTj z=}o^^I0{I5g`>6=7^J9@KFT%HE7a&-;U=$Fz#T&I*uWuA!$z&JQ42O|Lu}Lm5;p2A z8wM$g4LKWnQ1=SQ&?{`eOo!>9=@oD^F&-<&2}_w$BlW^cJy@v^v2q-cuyWk8VvwR( zInF2O4xAu0y}}{%3U}bXCyYzP4bT|I4N+}Kr!ACD zlR$ntEgJIDX_IttD6~*}vvMF5XFwg?TtwW7G;uc5m%am#?Gf7jS6dSDMIYo0?y?gBWph@#%L@JL&UzX|?407oPLpXUL-N7^TRfSIU1B&*37x%GPo@sg}aK%Yah%*%!h#yP^3_k--1++UGR=R}g!KVV&o*(3B z>~xxLa??Q)hqPzl46IBbFhRgeS^8!@fq?dlbN$Q~IM*X+gV!3S3!CW{Ybwl=!t-6# z1~C;dNuXP0RGE!*Zl#%?serpWF%PJWV=7>(VCEom#9Ln`5OZmcxYwsJK@bP4z*K-w zeF#oq*02Csihn)k3&rpnvlEELFcshp?+-?LbQq|HVB(uHTOvX)L3g|aMd3LxF9jqu zwA9v+L5k|u;WOsVxCW^qcy*cFbdaRwaE(XBEJF>g(9@2IGVOo~ACLJ}w8|u>{lD@( z?8Qe+m|ta?H%L*;uM($tjQ#GQ{*K~1Hd7yqT`jpLNlbF_@oOR%0<6KU1q;$z0CrA=X|n>WYcW=jm5t(X;9j<=euxAL|abhszLa=hJ;ZO5!&=mOe_ z*@De(r=i;!Oc@3#Ael0l0@`iJcJrPX*h+ckkz;!_mZ02_8YSOatpwa zWSDtavfB;W?YPBs3)~08^@vgmYE_>r-hx{cg197{_d-xN;Z6s+0wEwehX}9}MD)OR z4}sz1#9J`ksUl2aVMSNrq7Uhw&6YOY=zI;j0=?8`rpQ>Z_;pxZ)#NlWrb;45)`m1lQ7Uv;Zf?aC>j-Q~+mRln3dY>WSkq8M@oeKdC|e-OXrxRb1WhqaEvJsAIJRqgv@~RCle=R zGLh4q9rs^{oLC$|Qr--^kt$Go+75tfz~}9XL)jM2>9CEPaH}asPM9Nll8vl#-0U^R zK}OCNY9ZWy>cEc-2{u#K8#-I51wLzSafVKDEW;Jt4g%b#Dnn;GW)|=nOX2F%PH-Iz zon8Il1RS`!kR>idZNyEejdYh+YbUn5bdj95wC9!hwrT&jz#02^7RacCcrTQR+iAF;%bDE>kmV)#tqXEJ`&(!#|FUT;H7 ztgoxJw2xcb#}&)9v_qEGlln4Mlkbz3_DRK0#i7-=&f0vRwzN+xxq%k0iBjA_1;d3f z4C;{>NK!ODQc`^ULSQ#s(Bvm__(*UQLzJ)=MD^gt+b|p+l0Prrc62AM9EL9Hw^K4K zV{Fm>b-pqVJ}BevTX9yCVfk6Djn9fU;x*7lpvWg^<8z*qkB1tijn668K6>!TgQs3F zMqdJ-;%FBdrC&rg{E}q&60T147s5dZI2d92N{6(?U{rW$|aVngY^FM>5gs%Vq diff --git a/src/wasm/Hacl_HMAC_Blake2b_256.wasm b/src/wasm/Hacl_HMAC_Blake2b_256.wasm index 
9ee78af8198d6c3580fe5bb2bbc547ce9ddfe612..38740139c0b53779e66fe2c4f77c20ef5e44baff 100644 GIT binary patch delta 666 zcmZ`$J5Iwu5S?`#<+Dz3V&`MW2{WPqBxqAmWjhe!1c(bb6d-z>h7)G?=$6nd@E_eM_ToeHF8#l=` zYeQ#5+%;+~-Np(Rx^!2?5Nlh$O@>{_ zY3Z}aeNb2y*{ixreD2ABolJ(19M&WUKx$p6`qj0Rw;jRoA9+Jf?m>_hB*>88(XkC2 za8L`p^-&~CeO1upcW&n}D_ua|Nbcc6h?m2lK7&tq|I&jCTcr>4>9$P3LR3pT!)T_u@YyYdC_JsAf0@vWDYw vYulk6VjRm1a7gZSaQ%QF48&~%8gXZfG-S=b_ delta 551 zcmZ8dyH3ME5WI72q4=CQ4?mJ~oXkoHQ9uV2RJjrmUw{yOLWAf@8ot0kl21UQr=x-z ziLZb?$FU5}>2_y!XJ)_Em)hJoM<37YwUF=jb(|-?GU8AOYMZqrA3iCl-kvU-@QXWl zauOgg;#>$QBbTIFbN+}ccfREWwtc@qa4Z_6V&g`)0d>%!E*#!{?XKW>=0zw9(Lu;F zc8q#8Dyfey4dBp_C58=&0atQas`Tq`@z%n=5gxj{6q45nBYJJ6IYWf#Uxpa5IkOoe zn;~L>s38#BHfM;d3|o@Ta2Zo#1f@KX!oS#sPLSFGrqhVi)WsMTf)U1#=0`qQO@|a~Vz8T+Rb|!`uY<&fJvE)x``I Uf+=RTIffUM7onF(6bIhxFG}88lmGw# diff --git a/src/wasm/Hacl_HMAC_Blake2s_128.wasm b/src/wasm/Hacl_HMAC_Blake2s_128.wasm index 22fce826a4f1426f89cde12cf1b464f42c1a80a2..f9259a47caad4406353632bdc264c25ed036d2fe 100644 GIT binary patch delta 666 zcmZ`$J5Iwu5S?{wC9$30#LmZ#6J{zvf;I(Jwn7MTf}8+{0z{9~Z~|Y1BOq}Hs+33^ z1Kv8uvP5WB`(|d}XZF+k@?O{0+3sb#CG}xW%X*G;O{YZC)o&y&yFoiC3xLJiPIHev zO_xOMYPAu!wZesN>{U6$o+;mF>|Ib}NRzas!)@*?iG1ZK4=m!dFFm$Ed~4@yIU2P- z``ibEA(5lHsU+Z@4A_!<2*qJTaR8(?rD#xJD|OQm4F6HrMDNP~h>hVGUgCz~1jraps5C^MR)mce1BtBlZcD$u7H`~Pe2+I3ygHbn Ws$hy)jVI+5=b{%mE~7YVz5W9EoLOQ3 diff --git a/src/wasm/Hacl_HMAC_DRBG.wasm b/src/wasm/Hacl_HMAC_DRBG.wasm index f536237d257cab3f5b8333ade11525929c819beb..aac0b09efe70e5f8d343717df015674f0846e0b2 100644 GIT binary patch delta 6631 zcmZu!d30CB75?3qAR#1@Kmf@};9d~n1jG;_K(xT?2?WFqNFV{hjWxmGf+#8^Yzh`b zitJR9QU@Ic_)Kf*Yn{~szN39}Swe5H3&71d9`cE?7%$<9```tS;@AvpY zdTt->txs?F%HG9|sn)MHHa4a_Z&6yXF#Vjz_NU)nlDXz^IJM0SE*VtX>V4w(DOgvX zKk5Tw&rkU|b9|ap+dGvm$68Kg$cMz~erim5gMT2!>9z6^@kb;d6MsxOt@8a;?^FsC zYX%qm>=WWo$WN7F;$!lu4n}Fib%@==LqnM8(wtgYo|#IT2AB8fnEwgMr^KIy4X_=0LBQ<2`lt>g=}1Sz;vkMbpm%2yilt7J$_15=%eiaie4 z_y)^Yh@ThiDb1P#pWrndk@*9Sj3CG_Co3O}FsU=g^sml<-9Ob#zSbG}MuUHo4E{AK zw!vZQKo~rjTUl_r&B)4oocK7&x5VGZ@*X$xo*+J9xTUbB@_wt~FpTAei7r9j6Op{u zROPKaLGm5(cO>5%dH+THJ;@J-`j6`C$E2?xNU^03QwOYW2Fs7?xX{()Cv~5w$-hyP zU}t4X@NQ+-;L8D-4Ok^r7YssmLFjm-?)NR#1-V8~)m`kVx=TDzT;e6tUF<2g6k+Os zrD!nr`6TwzZ)XOrDmwK{X?Qhj!w|czH z8y)p7${$j8ap}SH&tW45f;VCwB3u_rWBUEeo8Z0Zl;J%`~ z4EUs4@K8{(%mm=H(dc~84Pg}Tc28ULWNyFk_;ycLAv?RFE;v+G68xu1OVOs#He8d( zO`fdwc(v!wbCZYjJha*4X7JbZyjpp~2q|G0HyliK2@Tim*(4%UB$=9 zD1paoJh{v0vsRt0O**^FQ*6r#QwOYPgJrEzq}sLI-ReKla`$++2JTho_a>cV8rV9= z9tW&*gRw90&+TYB6;91P0Op~zNTkWL_rMxWA-)HY)Gtk^t~05v*XZk$(bsv3ZFHDA zV51u>y2W+suA13;?*k%D!2W%Dw3*H!%J}%Jx>OWDS z2ed%j)%o_Mb4&wU=h)+bb#5^BxdOqdxk|u1ED#de5EW=k%K{;(2Tg$kLS0QMf=?Bw$4L>g3h_04a{7PJl>iU6k#5wNG&8>_PSJ_m3`}hxE1Gp{{o%U1J(J(lG%d z9gXW9(QDfPmpEm`Aj=y-79o&m4z6V=vP~y8;*Vlvvr}zkm*(`a27Wjhco!+Qfnn)@ z4Qw#>#RWu}vVyC7oYLK%G9&bE;@u>V5I+(Nz1uv$dx-ZKvMH>oK948|4C6xv6J5d~ z+Y>#%)>NhJvWH|Z@m`WgO^JR*{3yv|NGIraQAq|rZoEF;QuSZ!hFJ9{RP`q{@RP~F zmR(QoR+vwK7}0!cP;Y`Nj3JpZIB#XLQT68t+-+{p22Dw1w%Ds4dXy&k;XEF!PJu z3;evYJ)pY}B-uVsiY;4g>+F!FYp@*9610XxyuZ)txM9CK4_{xq*Y9kz%V9EFG{)8H{}=by>@^eT4W3$s5FP#JU|Zx*a7xYRIOrrnL}iHq4W`(r2E;6LT{2xr!$PRQar6vRuKHawYgqgZpU-_g4a0_gA=~<*O?) 
zP5IhiY@wvndAiKt8BWsaD(Os~8A>`sH_bql8Hp$}fmG6&)^lj2o^xl)ES@E^A!#ko zmO7t$pL3{$P;#sjs^$=+;{LfAJuLrVOT;+x~mafR$=reH2ENy${*+#2b zJU!HEcC6KGxf)ts1Ffz_lxq`Ft^u-9u69EkMOWln8>Qv%#q01;alOpr>*RVIg8A?| zpXa$Js>(32g{qPp_UvLHd^Mj%V%1~;@sbVX=^YJ19jeUj$uaP~a7 z3}2+{SjSPz-GoNbu5s~i=9}aeyhCbKCkqjIVIuM^KsNHtZfGOxiY&Aew8;sZWf3ou z25vyJ1lgDM=+j8th?_uPoCY;-fd2+RK6_ZlJUXS3f~WiPc>I^e#EVIm5L#p@@e-0{ z1WlF`wk}UzM9WCAWd}r z-LeD(BE`UGCD!G3%ms}dh6Y8a=SH81<_20tyo%%w;ydDJWR-bFnuwdM7l4e!sPU2>gHL6aao`R&OHKf?814{?2ItEM7Qk}ch zd92P_Ri|T5qfQAH%t=CMRR=s)Q7BJyLLLyhN9DO!JQ0_6g!Jh?&U*;1Y>?YX^l;B3j` z4${%jlTNVjl(6pzWJSnxLo0!?2>SJ8jy9^*$u5j zU6C%))pJIB%>%?Z%g^KX+%Bh0K4)_WfNVccK?lj_tYAS+aj?54zpK6fd?QUE7fM&| z>ZB=DX}WQ@P@1k9s4JLtO)%>Qq|$T?W(|<+Zo;+G~ z5}wWgvN`s2Lz| zoQeCgAIS9MzV5!vHmV=2$^HC1ccB%d=>4&!e}aE8kmY}t8(RLlBGlj9m;Lm< z>}T%FzHk}dmwi~rQ45ryVp={I|7xvAF?#s?$ zolCh?2Js+Q=Td!K2lL>t&VzKzAjBAyh%p#Q>pa-{4UII5?7=dGhe#QUeh!xfRo%;i z`-^gix#k|qLwwiV!%+C)aE-4Z_bD_C$kHF`hC!dRSntQ8Az?q489|0{X;|yBSe-KQ zIZJ--=LPdRmul$fJHoVdIhV@_9^q0g*HlOH$S~Crn(7Gf8Ij;K5=c`WX;Tf2G}ZEv zQo$863aO6fQF0!-U<@{oNo+n3$R;z|4Xqou{xvVBB*mQn2v?xu?fc)16juxxuIpMD>61ZZ=Fun U>vSBS%H!fs=TzI^yemHaFRF#Zc>n+a delta 8644 zcmZ8ld7M<$m9G2VBY^<135_GB9nVQ7%a36-4j{%zMJ5IVA%+-}xWO=sfx#-?Vzb0Q>z9t%f*Ejkfj`cNB?sx6l*4Y}$ z)-d-`_uIQ|3nj>AvPI8k&9l9Kn2B`z&5F9ZBa|qc`9)Nk&FIcBTa}Thx9$pMSE#!~ z*&P)*Rkh1Ax1H;q{KE+|D zgfmn0)FZae2cuG{qi-+~N`*Tk=$ly6x0vOvbQY)~yDaeWg3t12r-U=Jlf7REcHWlhdKlB|ICEHjO0HB{m4fGhxg_^qU{DkhfE&g4a>IvI|lLU7%4emrp z*@1&8g{HP49^HG+L{D0DE%j8=17JCwaD66}Goij8%KPN{1LPprXJW3;hH@5~nClN| zgOo5vfR6}?Po8TSa~)it&2#;6-YMq#6Nc1tp_~i#yyg1SP|k;X!4`UvQ7@*WUI;0> zLSgC!U*{#K)QgVm%SoN!`U(@Jxc-b>UuBl7=`2t~c3I%#1)t@bQ^Gms`tw4rzp$xH z%=L8^<+w(%XUO#p%k}x2xc<@>|CQysU&L`QivX_7Zc4X_`bCr-IG9o>;Q9q;0`C}I ze*{RTF!|O2kr*8q2@&+w@<;|oItYe3m^lYWQXX|qb}ptep44P2heX_%Ln9d!!4wg= zdF=ifW^oT^jNvivAH`ENETX~zho={OS4TLd4!5`)=tvvf^aiB3kWoa%Y!Y_H?gjc- zOp=c#tOBTGEYz`)jE*#ia-B;b9Y>7g(!IqpWCsx*UWg%n6U}+iY2i&s?R9)qu*)Z~ zhZ7@rdQOUDQlyh3nH*t9y@WOHs7aAMJ*Px61)6wAO{EQz!x#ak5gecVj)F1Qv7@F$ zzGc(%PK5KM&R|HL8Oh8@XW7bUM=~qY3QNlzMxB$6S`krpCBxJUKE+(8)H$}|o;ok$ zTOvL^UuL4zj(P=aI-gnQr?Wr}*=2!`7krilP6_9j>V<`UU1U?a({nM4azvxpNfCBb zr6qqB3-zbx5?lOIn(8v!g5?CaJPmGHMA?CZDTRU^wZfU`a*M8|u1tCWs#Yajua0DO zq-!EsL$22%2f1DybG6W zigdH(x+;>*kyhJ6w=n9KbkypIvMUs(UhphzbxPghxZal3iPLjC6Q#J`L9TZ)%g%Hb zs3E&7@bQArvdbyq9CN+9kn7iMDtCJBVNs526uX*S@3mZSzKQGCZSngo*EO~U`w4D; z8eC08*@1&8g#xY*I1}A((RI>;2;fi6+DLYUpj8j?K@P#ccUyb=IgH2p4Z`|2U9#%x z(m<95ynl6F7TmNH>GGQub#+CMzkjU^vTb<%S{2BuKvxH{I>1n^3F4tzm3#R9JG!h5 zWGxi&Os%5{N`ozC>eT?H^U2Q?jJb}PS{ryO)_bSi(tB@xTsJVHZVY5&pqm0&9_Z#k zHU(N0WLs-BV^*hQRt1z@J7MVspJ0nqYBdu)rCV)@@!_$JX|ipS!+SfHbO&?nNauhW zvdaM%FZdigol=cnyH2UcV}4I0CE~z1$uxO>-y^@LnB!DB2h@;V4!C&1=Q!<@ zdMf7kOd-GT+eA1n$nOU%DCQS+){@_|mfxc(em}HTe`NXnn6-aGT%V+IeH>7BxL}zG zDSpp6t$q>%I}iWlq)5K?(?E<~2qet5?yDCAxe(|j*y&|vyd22Iz+ba4gmy075MHqa zeHO^20EXGt{*HB(h_BKAS_*Lsxf)R6Aj8iKzLlRlrCuZA`uc^9j2qVFxPt4%gG}-L z3aG7=?9dywk7ld&9Exljj%GKfD`24jgkr#$|#G9;8C(8OD3C~c4%#_%6TY<%)t z2gY2-))|uju`)dGL^Mz62!_;=p^OZ5RLHY&bSR@j9Rp@{EQ5|s2OSeqb_K)I3%=l- zQ^J`Udg!>+Z;)-P35N(^>rt0-KQrHa(>5fWa~mn&eN%nNACDB5R?ulJc_| zo$W}j2&E#_IibuU$#ao{Bv-^F&kJQ9G%?99(*_w~4F6Y%h)OY))=$*HgJmM5NZ#bM@Fucmx)}ld*;p0Iih#GNYCcFcg1Ot&mN56}LrtP- zMyBO@X20Eax)B_8sXwn~+^ndpGfn>TG|Oa_m#5iAW*e(Mm?EOh;R6D7&`7(hENH5~tM4+|mxk-$9C{wz~M7(_M97 zw$kMO*r|m7->otQm!Q?w3k3^S*ASmf2R?*CkBYvbYY7}$sAk{;nsv5_S81o~S;_{M zvLRi{dPCWj0?Syq6`k_6Y;;=Pz*<@&`KF{6Ja2YPRT-%=T5Y77Ol?69?(-@z)n%)Z zt&n4;w$Xx2;WMM#35ZXgDHwAdOl{3GwZl6R!kuv3$(Xv!$S$M1En}}4*==+W%FK~`42wY(-W-ggpuRMUsW(Fc6!{XoU}cB&&az5hFS4fl~WM(H2qKe 
zzO(|frwkRYG4S((Z_F8|)YEJX33=bf#%1LMH4j1a0Wl$4d|d%@DRZj$o?h zIw~m#4H+F|mB;#+K*j_*HjuIW&6Gne?Ee`1n`vAi6rk3rj5#$Ob4ozjbrY6e@ZFr|lsc6O zp3><_i8xkfFioDZnPhAhbIeNTfEu#P0T(a$9J8HLXE8^qRunQe$0l-z%3Kx{Gln|H z1Ym5QWo$BPOc;CFR{e@)Y(8sWKwJybxaJ3x9WGcVLh3iuLZ{UQ1lCL!!QUT1ivyWy zz@=95K`If;-3OKgx!UfJeLJJKWSejVilj&zNh8qzt(g5H{-fHlu{2J&FP26?qK!a& z6X3+Rr@b+kKzd`}u3Mck?RNzt?MJq9a6%GB{VA0zvOiz zej(o}w`H3or;gxlTyb}EEb4c1HFuZXtKThn6aN*=F`&h;_?|SC2H9e{O^S-@>G!3f zG=(8WtKA0`mpCkRxQ|TXs znn}hp`PN_HHQ7bNY;j-hDqZxKFz<#u-K493KQ^@JVj_Qq{yi+ely2E#|6cqxuyuz| z_Y}5YLvq+2c8`ry7pdP^opxt$>ggj0hgV^@xKqECt_U8#2D{kjWE>wwYdB#sl-~i$ z@8wbb1Kxe~=%bGT#A7KCe}Lpb{N6wM20d<73I{Z(p463k`17+tZdAvwx6~&J*y7Fj zsdzqnNDu8PJu#nu7td!8yIXrnuVg;|UV0*}_Jq$r01>|ZeD>lJ=ChYa@sG}!wHNC@ zNiTg`p4MkjTW@(r`(R9mceP{s&%B8A(PwE`qJ8sY`bT*>Kc@eJ9b41P?%03LU2j(M zfc{%SDgO?0w`2c9p0WLWT6$pc{uAAO#&`F>ko~_gc>ewendjwsEtOJqrk}~(==R-?Wk$+yM~L5FFBmDMJ1_M84f=uy-xpY75sfL!UmgaS z+{3@R{Rx;4WE?FwQf_pRL2CvZ8Dw;b!Qc&LsG;eb$Ph!>@eWHb_?{1QN*$V9G=`^c z!y}j{-_eo04Ub}$QRysDLv~r<;su{&v{ULRW?|W53U6X#Z7O#R4xLdKy~ zs5~Vv+N#IHMklcJi3Bw<4Qhg+>_EZL3oSB@91WA4RwoiwTcDbZ2>v*D$;dbP(wc&A anSxmEoi!DY^&4dB>yb!~M7c*EKl}elBV>61 diff --git a/src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm b/src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm index 37798d12fa6459f580f26a327b62e117016dfd1c..655c805848ffdbfcfc6bcc13c4e24b0e6ef6232d 100644 GIT binary patch literal 19078 zcmbVU37l6|6@K^4z^uTqFEV&8AOeO71g4hyP*I~2qteV8M;Mb~SY!rEGX_ObSAR%ZJnF8x;~>rgRL{`0BfV$){aJHf4P-R zUs292f@`HWHO3V|Q?2R7GZ138vI+IG>!&v~l}vA#o+dR_M_>e#=FFNkb9PIQf?RiO zL(9yjhOu8sk}ohBJt|ROU@~%Sk`_(67l9DR$qln-CM89SHlfrvP5sG?I)qYMH0jpn zBK1v8Gbbk zFuSgK>d6gBdACFACpXm{FlY9O4Wq`48M#;8n303FZ>no924EJSutc3|BB$JbBB z)%TbKbNDo_*4+mZbCl=|2S%MgK0ItCdsgTk7kYF`kx^)}#bH zo|lx?bnVu?;{uARHOZ2yvfH=?I}EksnKx)P z8zv<+J8Jr!r0b9`x@vtBFPe1w0>hcJ8fF;7?iF@SAW(^_T(kI8jA+q)7(klNU7NOM z$}7Y^*8G5?!t24x|V-M-dsJcSziOBF7BJ0jBMAqF#28&*~aXvm99eR69cijh{eGyAP>4#W$ z+JRX5OMf3rt@L9o9NGZHg3l03e|903{vno~bK^)XBK@_NK9WjFO{E@)D-N=;C@$0I zt<+!{oPDOSRIuMg2J0dC+!c}Rn#rMlz4|~yB#1)~6`d=?WT*^lhWiP6c-AVzn={pD zwB2O5>^9qoUy?6nWhWYOgba}p7%deUf~%cC$wKeWt2XLgfgTy!myNmJL-v5jczS^W zsslEf9w{UBC>do*u|CnGWi(ddD|Y1TllF!_M#lJM7%QW>45IL1CEd^kUbDAv3VmpaA{rmVfe)JuK#s)IWg@@5A&*|igH z3gj2jUIMuR@;i`uA|D6&Uy!j=^iLpfgWM7F@lvm+;n|Gs4uKkQ+y)cDBsW&C;w}q$PU-3>Kz<4` zr}jk1oN|u!r;s`29P7^@kFY~M3F7-`=1@J&Ryx6~WF_)EyW+f)c>ac8ze3gDq4tQNU1XmT5IrdJ)X< zRiAaT$Z>isRHs2%lJ#9k(?#a&NUioPTHPlv{#^UHP1I1 z=}qchU^F6>LAx;cl|%cL(YBzk0@_7JBSIy#i-RTZMD!&_dk=lMPtliB(@PoTWh(n? 
zqL(RIru1?p%N6#;3OjN%wKw$3m0S)(JMt^&is{ESaM3HdJd^?#V9b7Ok;_$Bk*nG` zMYhRGLg=fNT#ZG~KKrcXgqn08Gt<8|ZK)^4DkZB_*h$wI&F`gajpldLbw>01>3XBN z9d(0I{GPheXl_@nR@hc+3}%JLyh&ka-OQ2RTpa05in=2rT!IpYeT!>^Gv+EX>|2#A zReG&~>+IpysjQQ}jc~UW;npeY4h}9s32?W&MmWde?ojyVzSE%XLPcRZb2olbSA7ai!n6L{xT<>@oOe4|4NI$^yW`mLqc@*v56Kqtn z(FL)=1hIiZK+J=H3A+%)MjwPXjRmn$>EEiXQa`BV!90jcyHxRG=^-T#xgZ`iK|IJH zAm%~9gk1>YAs>V{jRok3>@8Skr8dGw{iu>hT{QR< zoQ~}7=(`_N@|f$!CvMG6ce(HWos!?VZhV%Qvz6yEAF22+`nCOgCBIkZwjiOv+9yo>_R@Nx>gPKEEPgrFZMJ)ThVM7}cZ(&I@bPrA`O zVOHh|t_;L{WnjWCtjv>sWxQ!@Wu8>}DYG(9tE|8NgOWce{fv@lLO^I}Z>lya+2kU6 z#zgcCBZ8Pm1QT{4qD?*`ZyJkelhV&Bc~DS656frP?a;J0bn3L$07+Ym}9aKn^R;I8OEfxDt31#@%ayUHQx z&D<3)D|tDP5Z~YJY}YMHwjgqz+%I$V@WsSH%sB`Xc7cOiJO{mLY%aDa{ff%^Nok{g zRmrPLZ&k82M1zL*!g)=}Yc8CvCY-Gd2Vx!$OxT5RUi0C2(^xpKDgC;V*Oh)l$s2x4 zK(Jc^g544j?3RFFwgf)ymVn^5#NTgOF&KR5!->@YR8p${MT~zHG0+U% zVZbFQ4#Vj1i($Otb_C}BRqhCRK>|inDRl<|m!LQhqr)!-@>|@qXM*_x z1;Q6gi3Nb z1WPsuhHUt>WP{+zcAfzU&yMGlY!@)_3yOiCpHg>A1ec(=Q5YS5u~95a^@Ro`yeaur znw98_2y#&o`@2dL&LqLslLo`Z&@nK zQoTHtcHEY(*VkiflPlY6TOvZ@I3>y)|!flE*vh|%E}0|EE0F(83^*Bb6!N0jS|DA%Uc z9SVGc;!upP>9XD17b$WtvE0J~ffhOU+I{lAK9%d!>}A6d2%8}&w(uU=bdilEY_T`g z5O~wwfi1k(?nr?xWfb`3Tuy;r6%h0dY4$>*Z%pOJRIg5Dbxz$I9cSXyU6aZh44Bki z&AEXyrVV0FU6`;7)LrAL>rG=bwI_rT%J990)aqrN$nn>}?QSn5Ks)P-QF z3&BtqpO(51Jaum|0D-!|Eh#9jq{h0Re!v9p2>$_<@4bcndnNX^k37F z2f8v{E@e{Ad^i&4mTcH8({v?=Q6obZRSmYt>!fZ47Z;glL z#`)DkdBt!UqIZ+s{N8HRs3y=OWCVAB-W>p6mM@!V!NomfH$4)>7zIp}0|d;G(A)vC zM-T)4Xw#(OpdVozLSAH;>v z5Z7Mp!d&bXV)#n)voy)RNM`^kQYV}BdR_G$3kQ)AkE9OGm^#<3sbH~?|r zGsKax3vpy2jstV!{AwYNgHUcTUdHQ#DmwFb#=qR219@-6ghH zD_I?0q~aY)dyGUG!5Vmik?=u~dNt#RnDY=O>;ezhI35PmNVG80Hz7onAfP~D$L=dp zws4CJ!qhZGY9C<2E(CFl55k+qg1ANLTbaCAt7L5+#P*NZIg|7{CF@)eYt2lo+7Hn9zXU-5XRvw6A_ z3O2C~!6vp{S4Vc)#5M$;+U5Q_6~3zyg8mKZagUOF@|9`#Ch%S*_qx&CV^-!Kt_;L{ zWnjWCtjxWBWxQ!@W$slPwIEoT`$Uu{4z zuQo6!RC8=Bq#qvQMtGP3K3oj&AyW(CjwQID(<=5@vO4@?(f&rUSOXv-{qQIg+m8|C zu_DN$rU1i%zy+OcK#UH*7|8ufqd)^7A+h~?+C5H?$BQ7S$bjxZ;1U!!5~H)6V*9fd zuP00u4`8t5A^+gMu;PpVgejun9!OYCVH4XBY+@UNO>9H3@8KZW#5M%(ijMTMiEYQYGn`E(oJ|Y|Vjd1m*oAPO_2GEaSUAtRDi0QF{K;<# z2zEc_z01Mv%W1L7rFC9SB^4;y{cJzZgiVe!*ab#P*9yO7u&_c&UhiX6OzB zEI$nC%G z6Wb78#RQlZo7hH+O>DcaQX1RDwyDNIVjIN~e^+{&l5P1m`n#LBIOLnAoWjiXHjV|( z7&F9t8^MHK*hX*qZRAZO!-NqlF8-nPTT0&Y+X#Z)MiA^af?&511hb9sX}1vszm4AJ zh4O9m4!6-i8Q?#Q0iu!ux?2>u1jQ|i(cu@1_6>!43xb4g^e%3D^m_z(uL$z4qV7Q8 z5)=nwboj+U@XhjX0}>M3@6+x-1o=-9N@WGPx$pn=5pM=Y*D;e>pglj2F#=cUx$62T=XZi$Qzzt|`~R2oGX010nG<}q< zc=i!Mf+u`dbnn!~hPv2Qb+qB$l0tUS)^S+wp`gPabPz1}AXx4} zFx(_yonF7#$TFF_fgh zy+jQ6P<8`a6vJ zxuTFZiER{dxG);EEd#OCMLme6E(A+m2$s4K40Z8osSCkV7c~XAVxaC7r0$gr{K{hB zSESV4dc!3sZoQ2Tzt|`)O*Kk701~LXk|0+T}u&W7REKWaWDsQ z7mP}G!FcgQ?T}iifkwjBxZznNgppstz6_QcB$cb%B$ZuCp+NWCN&lZo<@^c`mPus} z+CCwa7dKaK%cQd{66mRWVRCv)?;V>QCWFo7;NEST?v2UmU6`Cc&^S4L+~ioJ*yNa0 zvW+rr%04ej{19EbVomVTV=Ri)`$?~{c&*h>`i@PirJvqO%8_JFLoDTa2bXl(w9j` z=^&+Ta1z6tt6{IK-I6B+)}1D3CAfe_u?UwP=}2w{yd`uqwGwwvz$dB;L-i$5sQ!yXGgbWcnU zDoz4zw|_@m@Wc6Gqr{!gG~EkPqEscEnZ2Qrw!K}H)+iQbZ~hI6__0tx>x)v68XgOM zr8|#>zS5J&LSN~_W1%nqeuX6NQ0R(70;I8r0?vmpo_1#)r-vO6UVcu~{V|^Yh4Iuv z<9KS_c&t%uJhgfN%2$eq0xDieZ2_<1$1q*%ev~KtGny|(Z=GA!_g43N@A$lu z%0a&8dH$WFs;a!k=lUc35$F0N{gLsXaO6nWj(bN0Bh8q`K(IAtwP3Ar+*?~ij_1WK zJ(g}K7S6SvHPyy-9r8y83&$bEMs@m?RFn)VEe{8k4hp=&{Ijrv{= zUg@wQ<)yuPc;0WAbnoW*zhTm~mlvc>iqb@gm)Gc|k^$w##}2Q!q_kVlo?VY9?%B2N-d)9&r2{HThZPSSQap6{dF2-k z@akqbw$5@KP*Pq}F=%-CFt1)+J$_QZ<4$PlTE(URM$iL>6<1zZ(yh2#&t6`BqdyI~ zwEwV@isIu3pMO!w;K8MpmGvrz4XG&Y+6!0KD@dkkk)CE~$>8&oS?b3W(ecr7#m8P) zG61u8Ic-S!WnH^>>AAml%N!~$9Xz1ovZ2Ggeg2>R=VPPj-@xW-z4J?kl@wQEJ;TD0 
zUWh!d@^Xa@8x_?a&!#`pi@f|gb6JA5+Q;iTt>5wS73tZv_}J6BcPs9HQiPqsMHn`G z=rJXgrTx9a-)s68uVK62nzf{ylX{JQ!*IyZ(!tuWsBXL_Bv9zr+lu4E%WYqT1q8u} zak-I^*WU9~y+~BP-V0U?$6m<4dM%890@N4<``BI?ji>LOHeIzKX3u4tk8bsa+j7b$DpoY@0 zFi$o59T62tk&XzCjin)@YKUeNM21%vSrLa2Sy3DrESe_9$@=2xFxpItRCBzxKrAh# zC1Tn4cf`_4TG?1yOH0PWrR|4U@ake|#UaGf%Ei(qG0uoZq?J#6-QW7{|u zpO@*^uG9f?K(xolrGou|a)4@w*MktrL6MwnuUB&*M1nX}dr=AKAnm0?CEPDo9ix%b zu`=?@H0mTBrBj7&9xMk(buQ7(&eBdgW3>d*4zpd%R!n^eXDw3)fjwhshiY@xMY_P_ z1bQI@{8}h9)m6HxZqhAIius=EF5Phzdc-TA@71*Qp3>7^hF;R0m!Ui855wZ|>Mlc1 z4&gHNbeG}q#5g0?Bc!M5EuQKly;a{x)RCC)$kcp&fv^XhvX5)JxsTFa3JzKO@CWH_ z>#j^4Ek~R1pR2cqLrM8^UdqGMds5go6)6dbaMPLSs6L=2sTc~6#;v7>*C zceKW;?@ai_SABV8|elC_jC&n2YdaCqOr$x2Y>2f;a_)8o| zt(q=uf9Y@II73cn9H%3WGZ6<~T^#*6ggE-UIR2U#XT))q^jBxg+3Iidw`eKPEzJ3M zv=^c6qx}!G-$Xk=`y4qt3gZyV)Ia5)Hk|)KQ)2UCOm%L&m^w8rrbJ4j4TkPKwAUKC z0j8gH=cD~G`a`r!(cX$SPvi^G-i9_RMg0ryO=#Cf`$8#E7vZ%m>aGULKvgc~*xErl z8ewkoU*^F+SO(j*3=x>Y7xN6o*5cJ|?O+aJYX`fny*M$>*xCvitSY5a4U=KfYlx9s zI~?t0Xme{XL7Q97wO)!gx14MJH`<-!rCx^S4s>&=m!sV^Ug`g0m#>h@$ZT<$`VTT` z1i~GW3im%irc$nOO&9t|-KF4=RVh`zT%<<%peTs8XsJ)m2hC7wjotr~rcF+8WJ+lbUu z26?+5t@hL%zTDxfJAJv+M_t?%uN+-9Eq$6V(_k2{{BF8p`!NS5y@!{FEtd;0<~T}Z zn(wa2z15sD%49kz)P26(hl?KV*%Mw|81&IQeZTGtY*IYn%LBfvqz5&!wKPK`TTKsX zWb5f+jZ8&7qJgccM>R54HPc5~&C;B8E$3rCDr+`ZIy=46$9!Uwh;VVhE&FlP31{q8 z#it zr{pI#vDgtzMLfHqH0bV zS7Bp^N$>4J1B&Kh}n| zt>GuWeBun(YJfKZT-~SIFwYvU^JSegT(3bG8*ZTWM*Lw4@(msTWj-`~=F4ZkekkEx z;d706f^&i|G~$B^?+RaPWFKWVY4iogcvtvJBYRi)+FA0huvsJCX?R!Iq7m;jyen+g zh z>`1cXhiXn4*|Cch>PMX&yE*Yse$-0+?90!-+T+U}##)6v!|tQ2rtKdU$S4@b!Hy2# z%3$%=Q)2=I%a#ik#vDhmqXKt`jjiUC5$rfpsPTb}57dM}CIs&0bEQT$m9Nssrt(CM zY${)^k-7Oy(!kz)uF=Tcd?p7hm}|+EJl0JKP~g{bHP>OOZZ%T^Vsk#j#R0dp>rE$| zu^$;ryCIP6zPeG<@xuY`jk+m_^3~0xyE#pFQ$TF!;NpOzyTx?EIh*d*Kz94;Hci*c z(oLoD?WDUsO*b_lHgs@tz|q}dI^mp6cV{3-G3FPx6M2LOy}Mw_k~Vkt?2?`q$h070 zX?mFWZjCkrF{$p+Xd@6w@6~7vPzW?#BNjMI`aX?VwtUdLUn7<+OZowg?1SEe&XOfP zgGfCTM4}!Jbr9V~(Q|9!)CYv1(2k z=`x!X>Tyo_L=d%bXFk3)J(0JfGC;3mVy)eNiJ@voC36Yj(ayre+st zU~BeejZDoh)HVAGxssY)#F|~q)htf0W>G+FvKlT9xU7EFbix_?k+HPb0(mGQjyQR*?+rG_r3-*E>s|Q5!VkTScBx8;R6s zboxAqnyD`W`65tX2J$6qds7@1y5cR!j_a#HzJg(VoxY|kHUo2D&duB`wp?w)nB%DJ zuaer{Qq3u&0Jf4seG|wxf!Y?xwxqVVYh-KtTa9dOf2WbH?HwAK+WuYxTiZJ|GPV7K zuI*jqN^1K@*7k0$W_NluKO%A6rH6|H?$ZBcI^m4{$XMFXfqWLIJ(|wdc2yYFR-;1D zjS5qnR}~VQOoNL9j&8KEfOC{dHzwr$cx=ekHM5C5j>h9jH$F`_E+jT|aB;xVO)#Bs z&ZfIEl(+GQFcR<~jZoWH!IG(tiGTfeF)@^hq5GKdYK?Z9RG*~LR+H-25UI(ezc!3k z;iH#OriALcP_7G+!Pm!Z=&FhChEQ&RVVuD?(iN-59GLVb?h0EjgJH~ZWbh54+lZU1 zIb~etTS%d94dvD_s{UEWZMu&&!AaHBP^O0Nar1VKYOSn49wNQ(<5KTSFLioIY!U!24!CvSZ#vn-LNlI=DFC=pHtmaL%TCB;=jrQBBv>(#;I{DZ?!C%}Vpl42caN zd>n9mkC{$5XYCBjNbb$K&*S0)Kup<>!PlC)A&4dvJMD zBYV2f)#wM4@=p<|r#bU8I^~}Y<=Ifp3uPWt{<)Y9UGe%g_g>#1~%ni*T|&&5}oo(HK|MaH<Sa5xrpD!X z3mH$o@oBzsIby>H9|s)Y1k(xUY`!aV)ovlVxSnvCk#Gg5r>m-u=c$P~nV37qI}A1E zsjGANIK3AB%U6?ff`9xNFxkZXg^R1S?q#CYz0{3F)suQP=w9lmJgFmjy*!Ts8J*LZ z71>-5tH<JW^f%3*zv+M(JI1m2n1=s&(cVj-5F$Yw>QG2-9Oxp4N|#uF(-r!gu9a4Q(@nZc zw^)AzJ&e`g^pMWd1FNNeMml4*l>VkCXDw5QfISJ$UG`quTpcEd!MD2p=5RS&9U(_J z{Y`J_jjPZnUio~lrlt3lzVYN15lTw@~0+}=F1lM%@C+jX~V>0*Z zkJ3g#2BZFjc~6m3uo-{;9VvOLoND*tG&zOKI|Xx`j=1pZ_Tp3yVJ}W~vHT@5&e)6o za;iEbs-@19GZDvM<2dlG;;*OUS#p+*<7_#Tah!=b{)Ra4>f$(yLx|%n7suZdk*sTzdW!H~`j zks)fR48_)7tOJlCHEnH$RM@pt%200YP|Pz7TZ>n>wG|w~)>gQ!9iA9xZ0#jd0ZGiI zkiz`iy$QYyZTtLkIokH=<$uw(&n{P>ZJu2IgQk6M8G*KWY8i=LuJYy5Xu6r70r06XeN+sLYfR(7GOgL4+k9C zm8KKU*uugQ122vfr29 ztPwMvS$vB|_RAHhVT>Oi_MwIWvMQRX8d(+1?aq=a8pvc|X;n0k%S`dLSVm3LDw?}} zx!YIw_;L@wtGqW}Il5{plIgxohhcm~KoM~dcN}wo?S5V!wp^{inB%CH>B$$X(7vRc 
zl3jH-Gst14`)Y=d8)Pm~ef2Eqpm0ex7D^UiLkAZJ9Nj$A3FmCO=X~5-p>4s2Iu#9cE%Whl z3F#;WGmP)67r+$vx>!Z?ax8+0RWu8I zzBzftmsgmWi(to+Tu|e~3M4LzVHhXot8_(-m;>Rw#s#tE76fCCBQX~ziTQdpr;IDK zgcNEiqKEoL-e7ERn%HW5)PKvDw_IrW^!N=O8vc6Fs~|MCTxc-nI6`|X32k{br;N~6 zkV3sf70tW8yvxwuGokS<4gY06-LCXyr3-BpJ-KW6>p}5?(AaXJ!I)YF|MOvnHunh+%+i#X=1OWGfbO7$94*pU^wP`_i~7*7~y6Mg6G;5!_q&*xf#VpKF=a4){ZAEsb_JaP73LJhMiQPDsR z1H==Y6MU@^pHX;M*sPI#s@bBERnb5V13!CLfEotK-W9fKWK}fVHR7FycLm5}3?DxN z`p#MM?eh*I^*s}Jr!PC19Y2^A)tI=ueAxxNxTYXO*r~Gve?7IE*};~}4j6MB*|96h zj-RSIWn>5RGP|^12H8=?y@3*@m4XOnbO7;-jN!L-W6_V@M^{bT4~fe-7{&WK$V(7>wCe zh8_mUrt-BKnVS#fFBq~npX)R-Hy`L;uq6<;OiX=icO$>GyNRoTz$IA?lq|sJe1?ky z?sjmq>7+0w^X>!P%lE#5xCM0XTf5t+qM1s%scAYWS%3{4TpVz8x0_BlXVXFVQWYqO zTR`VjG!(X|X{3X6C0X5FT3TZ0;DW(l+nc}OFcEg$qA*2q5S zJ>o1`(vK3UnL#8_zsM|>!eb`t8p{?Em)Wq3D*;l3S==y8fhnKh?y}`70md9hCCpAL zVNNxtj7or5W_F;ZG72)7mJ*gJh+v)$Abyc&80fQcpy;ZpW+8Ez=OTTMu8b6aJt$rf zDO)a57;_wv&PyVN6ei`A5h>I$^8y7`%u9iS8fJb{vyj68*_wqM2FTXzLXB+ALga!m zQ?rXSur<3_BU7``y`W|xZh0wHv#+yemvA*p(yM`z1=wUYTpVykwA6IM8T*m3v^N5o z87PQbK<8@qEvjhVCf(ap5#vDg&Z%t}@do`zw0)RSZYoMTt`7TgU!|X_E8*&&RTiZJ|vb7C8490A2@6yQB zHsmiDvbDWiBU9Ty>Dq?4<-1gE?_q6Mg;-5hm|D#qByMOgJzN}cmwr?jcfuL_k+HPV z_--svV?xro+8#?4%{bDHOVdHg0&Fr3E)F=l@um~b*>upo;C>8o3+SAR<|-ObBptLX z$+fyl>s1ULTrd~|M|ZX9gmX6Cq);H)ZxRXkkVdHOYhcM#$6UYuZe=pH7w!|oYc;Ye znkgD>MZPh^AeVtk<9gEHpd|$m!Q2=^{318;E6|%`HgwfQ2Z_rqFpM)85{R2%JrHwX z(%ZNzY`F}EF~^a?w_(%DBvs!`u=o$YAaWqw1e^Kn#PvAdat0K@D?P@)hVb zjcj^D4udh9-uGx^)B9eHOnOh(z^3Ye~Tq9M<{6Fb=?<9;#;l44av7*JD3(#r>y(EG=7kXA7kQB>|5D6`u9{LF z5|{Zfj8h&Gh!?pLm;;l(%#C8pr96x|j+CFDr2N8aP8pXTa+vv{f(&NSFH?T8?sF** zHO#9?%0mtVWK$k;7$BSS(8B=PlwYcmNqNX$Fl1ByO^r;-L-&G|hqwiSxRhVUlwZ!J zE>ACYSx9Vh0xk}?oLFHx;f&47ST}Sp&*51~(-qmWevc`?l5`NbBs&Ws3$USsivx~s zmFa|YHXURyNO@>musWCWkg_cB)N1lUt&;G4p!Fk$4?Y--f#Zu6EyxA>Wi1aPmvqW| zIN*`;xXeg6ru;|nW470;DgB^^`8ZM0Kn(-57TM^76KWVBtD=D#28flwc-CoT-)}(; zgE6b3*`SeC(QI^MRMC8m@AjM}7CG!Vgj6+-+XU$dSz$Lm2? 
zO$*u*$`%;L^|Y0)*jUVg`M=>+V9V7Lj5&^a+LF{0v@a>AjJ$#zW=p8P4dvTks%RjF zL7%H9s9|;__4K_)ww@q|!I-V5A2hP{1d$8I>=Tj(ww`utWS@|9Jwe<8QdduVSWi`y znLyx@EEP%?V3Tlgalj?qs9f9$XKYr+x}kgdGE@+^fX>y^SgL5okq!cvgbqR$U_%EN z2OQma(@9}WCf$S_>j~Nx(6zSZdzDtvOeEjLG#?}`z=jV#4mdu@Ty!VIFWG!hzNG6Z z7uORmGZKy}nro_$=c&m#nVhpKnrks@E&JK}l$_unb2{0^{MWCns`6?FBlaEXk0=@$ zjIeT=eIntHxYQu0;cvbaNc~VY)&)CM}GY`c2a)O-ssAgW{k2KqHLCmvN^jEW%D>Cj8RsU&8>b1UwxHF zzZNC*JJJx>1TT$yc{#i^?d9d+rA05V04kh)@zX#1iYGqwFJWP!(!X&Nf8Ej=d%vIT zheHig9iGuOZoY`v15L?(T-tt!dVeerukP9lhp;?x8Q(TBPBzJX>34t#e%=N@&f}!V zd8(bX>wqLX2-CMmh#jN@LNuD48bhRJ$DFwbO9zJ70dsUlhbWAPnqh5X*1&JC3T_nsVnYVj>F|JIUEYN+EPn$)wn!4uy7y9f71MU3KAghYai)z z%*fOA5A~qsU!nT`!tO{pQu-c-e1PbOKjrfW`2)`sb+l-aS}L~uHNQAbYT*ZDwfE-;Uzy@%1O*J-^`!q?hwU8E4$kU{S z6!A1^Ax(Iiw2{-$Uvv< diff --git a/src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm b/src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm index 2b7c24967ae0945a078cddc58b2499b275d0e152..a4c5a62df47acdec5aa45ae77cc58b97b26a8ebe 100644 GIT binary patch literal 19206 zcmbVU37l3{`M&4Qz^uTqF9N;?5JAHP0z*r^sHjniQE6t4BaF$%2*?bU{*3`qR0MM& z7erhz#06ZyH5bHO%Pq0hQUtd!wKQ`}|Ic%_dnb+~zkX8peb0H{`@H9z=bZ0;_kF)f z8k$c~Ns_3W=g*s$^g2e(RdbI~t*X`jUs-EwkZtSSwAJ+)9U5$%SqE4f-L`f#D*Ho6 z-c)Hjjd@YdErL_EH#NppK~t^ic?%JBkBW&6vl~umY$`jU@q{#~t2qLLnmlLLteLZ0 zdKBdPV;fs$HZ_j@Qj&a*$>>pu`W%yyW0SOK(!B_TI8JGtJu@jQTC@qJp=sKWX4E5; z@}fz%HaBT#YMMDE=}c@;6ynR!BbK{iRjV<*pGwWx~ zIksuql%y)+I3VUYrJ<=|_6c*ET9WE2(|^ds11IzgQhno35cHIm`sU*sM%9lRGd8K| z_Vt-Rp4`$fyZ)dVQ>Qh|n9T#QvC z2ifHvGqV1GNux*APd+5Wn6RQPb7qZiXl|UGblXwWZzbJ_e%@Uhnt0Qs$LAQ%oYgqP z81}5PV*(q?RP~dK=VL^d-iHCC>D+Z`YnBX45?!5%u92F$%6>-ABvVcN$Ien+d6wu( zT_u%LH5SITQkx}vCyCT#YPzl|+aE$Qv3!4HEwyZJDoO2#q+rzM4z;R-s;QA$-ANMN zSvpHvR|R0*MY?3kgmT2B>kv>^=~`E*yX`i3#PKZ~M~C(ncCt+NaeP}2XB>wkj_)82Ttgg_*@ZYJhd92Q z8%N?eLMH1YktRR0M@)M9#$VbbOS(y#7q<V3^kAPMGyGU)3e4b(`^>`B;u@xQ2D>n|Gs4uK zm>Wl?cDBsW&C;w}q$PV4F>-3>KzxTJpyCe1~-a|_}A6q?&9rv@>E-fEgO9AZ0Vo|0*Lz5+z8owt%c zO=T5&fda?^1Ro~lG)3Lvz#}LD>2%i!=Q!9IN)FZw3D(_%ovE-z&m!1aMX)m!bq5BI zpaih9T_c?1V2hMY5-?PnWqMtWJ_ly_sLwiC>%?pf1dXu^r8jT1Q(3S?jN@zbf+7|RxLA%IkM5u;# zaj?Xbh`z*V@1hUSDf&`sdKrVfOl5yg^l~N3mA+ib^g&jGX+8g>6O0IyR9r=}X z#q{GIxan0~9!h}=FlIlt$Q3HA$klC}BHLsoA@ns$uEC;bpMF|)VqLnAndx7cw$zhi zm6BB|?4)ar=J(QSqxs!*ozeV$y54ARN8Ml)zo%|An%h-t6t>k`gIVP-?=)yTd9=G!wjt4X6YlOJ++B*ggM&{{0^BcMBb?)K_bBNklQO}gl3`NU zN#6@oe8AQ4fBHTp_d(|~?|h~2S8_i%>7K#olSBL$J*zh;*#J*45Yl8t#78`^!$dQi!OE{u&PjExKfVjc!e*o812^kI0@SQrl~ z{VSDK>xYy)lt)qB-dsGa)unTMRlwTWf8e5yElz!T*%`+&No|5yz04~n`wn&1)?do)=c zez9nuQ2I{>gx`iaNd0G()#&F5@_Z5G&x*POflE*vh|%E}19?X2zZj7I9^|jIdx0P? z6hZ!~s5=n21jT_E9eyzo&;T4rFkx8oy3X$0bRSyro${iR7ws-cD7*=Vo0eA~q-=Q$ zLKy{anQ{u;7ab^YUv#8k9#DK&B?P_M?2DI_ycAf7&lEfKcpzJpY(eZizhC0`^-G)@ zh&c;k!Y;6Ii)W!Xjm^durMIfApOlyM%Sv8W`V}RwgmBQ%UO2BRdDVsUiV5cxh66DV z2PW)7IIsF}ylE_)SCxKE$!ki#uH<#UDInNQ0l{tx2zFCIFq;BbyD1>}P4PG0DBl#@ zxGCOXfNvB7+@`2ID#0Zvj!H&{Uo6_qO8?z}giZ0L%Bu8R1bM3n@}{EhK;RM-2V!*i z#Xvgfw+#kg{cs}nKa`Z~e-h)LMGQ1UcNlO9io-BE{9+he-JZbQzsx-$4=DYPl6PQC zu05^vy9#X?{SE(rJp#RXw>O_%K+>tw6a|28NOxIIwP?;Ds8 z2=hS^=6yxo!N4gf4#w!3F5AJFjpaXBFsHJ7VXX^$k|1Zvr_5~1TH~wAV!B@4CGgMc+Uj$1qy^umof_&U;9O=EK2PQ9SZLo zxc@07z$2^tj?Ut!;)ITcc2Xk z)`P^@kkoZ?4>x7)v1C~)%Tn_mFri>iI|v^b`YlWKWrlvsQ(2zs%Tu{Lr{8k7>~UVN zNM!|LCjBnw@ZgM*L(J(16Lx`qD?I(YX>717Qhh}#SEO0tRR>iUMoC|p%9Wmd5G?y3 zSoT4%?1Ny~hpS~D1kb*!3_xJt)nwmF2EMWw_|++Ow`6b$id!era?!ts! 
z;O<(_U2htjs}|SSnfiw+=XDd3&C&~SIb=pp1Zdg zfWX~b$=!7fd|ff{TT|+8!Qm1Vx8O#HUu+aNq!tfhpUh<0kyq`D`}G5Gze0w+ht2f^R_`RebEUuRB>ioMeE>4-1Dbt?eV`1Gfi}aw zv+V3M>>_;y%(EHxzPOvuu=9>3Q)Q3`7h|E zpvU6Ad&ypyj4#{CXm_)ZlW~47_LjXk7kl9z`yeh{LtNw7g}E3PV);sL9Es(tGEVP{ zgv)-iAL7{G#<6`;JonU?HXp}$*^hDThd2&E9Jq!!GIk-3EW~kOZk*pO6u=yW;)4k? zK_4s!o6jyT`5}<`@rCCF6b~F6lHy;J30aw?O-Z+@6`1Ig;@`lnzG(;JGPXvs^+h?n zK1>d?-=P383G!wKcerckXTY~0o0r;htMw|^9-_WLFPr1smP`_Xc=pV}YDQJmVNaGxJyYH$ z(vW}0pAIH9Azb7RLb%gS#@<9+3StQTggl=g34Mkfp=Sc1*01)gtU{j%fD;PK2>>S*0Omk*04+fb06#WO8V)glpUA!fB1(gN z54rD?EhV>4edl9G(ZMO^L6$q<{~-Ic_o=FCfw>G+_j3jgM&{{ z0^D`3QPVFry6ctH$-bFjbYvLQ+&929q`6UFa3ge==3b*@O?aD%_b7J71~2x7et!kfl| zSg%khfienxyOP^|js}AL#tOk~F?N~V#e$1+yILiEhmt#7G`I@C?V~%Sx$jhRr|ZVm zq`A=@(%h)6xXX3p+FqKwFadX)atnUcCoIOWX>JJokjJ+=o3ksUVAI?XY?|A3b!3-K zb3@>{UFq*r<-4jO=wBLw+@s{4d}-Pz$h}JLb>q3mtj#@K8;JSZz=T~`n|uA*c+=S0 z+^aOIL$EgYGtIq0$p+SEAa&y@iw3(E@tw2LlwFv)dcZ{V03(8!M+6ggA)<{wB5xXt zXrs~(vfKmp8T^v{sJS-;%N7WhEf6eQAei?XxZ3v{5X}1x3<}j94;Jze4|6L#!T=vB z2KcZkop47JT+nG1do)=cez9nOsaU)LkdTLXjA`!23G#Rm#9VHe#-2NrxntOo`s+DW$_uC zj$|B9bNndXz|=s@SqKw$frZa_7JAdzY#{aXTc)|oO1A1lZ(==%569MRa9`wwGKCWa zl>*NJo8fFS;cQ|!5c6Hy;;d-v~h9oQ>dpvTZaE^ntO|(?p6k|wHU+}McoAfpP;xPjIQaj-GdK` zPFQIHln30yLkTq(*bw?v!n|69dBxOMI2bsg(<%G`4AOQ=Wk|H>xH6rt~%?+wz_CH#c{2$Tv)Vg_-Pa91EN=W{CMt zf(g5@liu(<$(u%o2_u$Z{9WlcmAvV95(K-GAlRJ*!R{moW+&llcM=4@liuQu@}2ZH zchWx?;6I81qO=0KTNJni#Vv}_;TMbcb%g>9f`py)4xWDWy99Z+2=b1i?m*xY6bE8- z_{Bi*@$xSN64Ko7(eB>_`F9cIJw@Gtz$GXS#OUygf!vCR_e?MpP#}D|l-Yd~U;Fo! zpqzs&ODNbhHv~SJrD$Qf2GZOgveW`)6!K9{st-vB{)-;ZA2Zc%cQC3iK&p>8QiwUJ zV8Skt>SIqTZyFn%FW%tCRQR&V>WlC_^Al6AVIO8b@oa-&*#^O~4T51Cu9j^OJljwq zfIEh7k@J{GnV%x^`Dr2Yc`0?bMsNv=TO*^xFE)%1ltxVkK*Bqb1xz%aPLR`!APZ9J z4g@YiaUe#AUkn6CC~7hQ5)zFIO|s@pqMTVoS(s9HDDVl2LoqrkG-4=tg;Gd!<2%p> z7RrDr5JIN0tUZ>XrUTU;+YJ36*wYSzO>>`>vYZ3-GsPIFxR7)5RN*4G>~Z>;x(rJ{ z6l#Ee=Wyf@bNa!AU7+7$pDOgGvB55OWgc1KRR_y7To@&73p*_PP~lba~V_@HTWZ(G={QP3zsOEs~mJBXIaZ6@&_{BzXcB)aM0g%AH3(3Bv z1X)@HxiF>fK;RM-2V!*i#X!J5)Mx-CuW{g^DdU%?*KcEMtp(vW8&O+z@P<+qG2C#-_O; z(5nh9tPMe$+f;I(TthC)xx37fCeB?`)M2@cDi3h?GR_;soVze#7r49JbJv^3rfRvX z5owd=Mootcqoi$Bh~+NILo9b8Snfiw+=XDci>u`>1kYVm7~qb9yH}FCS26Ibih*C5 zQg;gum!P->H#+=cqqsEHsPF(t;OJ9`hL2)2PhhGc?+(m^4Kth^( zmErESM7g$zvMQzSP~a03hhlWpd&E$Z0(Z+DcTvRyUYIm@vAa^E7G$+ebLa2jguk51 zxm>gKEF`)+rgM9?s=3vz>D)5>@)|FRb>~d@TWtTG3WwK!e`fQi+qKeZER5^$^k9zS zE*O^Xf&t@4+#%6W2aSxYbK|o{2qV9POI6k(#a!Da#q1Ic1-|D_`hQF@=XY?hOfhrN z_M1Xw@xdgwWeVDs4D{5!_<-MAdWYG;lSMnz!{o4;9z4QL*S#@4y$jRR2O6iRkDDHA z6q_ECTDEbfP1$F~iJz%USF8#yJ;tJZy`S_Ni|IR{H6kq!LNzM3nG{3RT0G@vjdYW8Ys1FYVm0CoG zgHamMAV0lh(U>GQA4CT+nn8$WP$8PZY{vW!cF|a)STut*e=J{am$D?fjuj#>+y!^s zC94fTmmdNRcf|el&^FNbpWng3cGKNI>tM)j@#iLb*n^>m z?#Y7zB`ATnk%OV&i6g|uiRYc^x)hDFXk(8BoDg9^?an*S4?7;b3Z1U|V?6x} z;~4;r;~C(_V~t|t8K4KEprv>$pazEI7O+5LWBT7mwi^x34;zhUG#b(9LNtS*F`7Xx z8fz4b##YIMV?q3}Fa&oTQaBa{LvzQ%E^Wdu$1&k*VE~3RVxYB@U1ZmAEWo#yjA70lLwKTigQki0Bdxu0@yU#cz_}4hhVS!S# R!gi^tv40vT8JyH4{|`dJW~~4K literal 21448 zcmbVU2bdK_*6lFBz<>@6S&&U*pEy?1;;P2*8v z5Cq}e=BB2g`yt_kaKa(s#BgHzC!RRbx6|GU(L_6@7zno3tPQLcr@d_za@-GOUFGSZ zn&;b@g?H-UOx3u(N8!Y1`V>UnwcUuChMJ>l>*J$pkBWlIiUYBzk;jZ1H@0DX*Ic{$ z(Ax21>uZN^83ey!GIU4~{)Wlm;X#x)smc=}juUDd#s+cTq9l}>`Xm1}rW&D?M6*$!N89Iz_TuvTOA&N7Rf$s9Wqiw*L6RL$?^Vp2ko(R9!n}RKxM(#s`)EPyfTP z>-6vJ62HS?HREfl8?ia#Hc`WfZ1Jljfcf@Y# znNY!70ywyd25yE7(JiGVsw=gM-^<#$9>B~=;`9J@+qM%5Y3Zio!8KC&tfA+oA8 zGFWuajI;Hn(P6ZQRGFT5?S)u+OK-%o&hLn&kMwb|^p)O>g-cr(vEbFm(uYHcrH_xL zUuIkoi%1`1q^Cp@S9UP#VaEPxEFmw`uU)D2W&LDzh)V_g4P<>Y0IwS&k_{8t%U!RY zK!^l!n1Nz4u#pUujT+&8wAnbBC>u8>VV$B)WMkQ+L7SV(rb)YFwK+%z$RMniKn7s8 
zquENSH{+~zW<#(SENyc&H(SUS@YtPR$bhg73e60b!DfgINt2=_FhgZ1uELh-%3Ffg zmOe~|xyvwIhVn8D1^rf7JYM}}7{(!7hGG6PY@Hbw#JY_PGuujFwv%nm_KBDsFy9Wj z`L+kb9&pO-eACapqjniM6z#(wWLsBvb!I2o$s`h+oh325fM{0`?V2On1;`TZ?3OZLK!{xRLrR;zx5jBp$JC)o?+W-rXKH{!yp-_Q{p!sE#J*^hDThd2&E9C-C{jN}mF80q8q zYi3*!$AL1^93%&szscW{MLf4K=ikwO4DAr@f1v#|+7a3Z%Rxz;hFE9*DgSig{12Lv znpb10L(;{xYi%(#Qj@%I=?+DEsihlb`$=~g+HavhM!Od66=?HBJ{;{Y(MF}1f1$k$ z?Y3wiAvNYmyw)W{&1e~I>ZKlAdz3~a%uW6)JlMy`7`K+O0yFqxo^jY(y!x#j!y#<# z7{9egXT}9v+aP01qcocFGCp|%F>-5loNf^hdxoe3>=Cor74snO>+o} zj_JPDnUlh#otYGZWD0$B}t^+UO8Hzpv zBE^}Cn7+J>XDMQG#z4~*G0mB_XDfOSh*$F*MNDs|?zxJ{&<^N4?^h0VzM^+A)*k2r zMP%pzbfLGzlZd%U(bpKmbBei`$jo4nmxRe{fw?r4OG9&6D3^t(i_6oMqpP*0&kSWI z4AYfgL04=)=D?&^^763da{Y#HpZ6W|6{N9m>_X=*jBU@zIshcDmEo zXkX-#;@VKI4SgkDr^waP^@?0I-JrsJ#vDgtKAa`yV-0{_(v{SiH$r*CNByP(NzdQ6)Ud5Hd^?o4z2Q;?accOE8kRf5cSCvC8@{K&8orNi z^8x-a3Hg?e{|a;P!%#j9^|6Hag^v{R4Ce$NE8?RG?+c$Oat|}h6n%^_-WNVq#4gkl$9_{ z>*`y&GC%M)FyAph*z)-SV~!&~R%ZF}eTh>+e*8cR^P}d+Do*@UnDj9}hw^i1R)?~h z!8T#nu>a_4?E*AM(hS2i*pni-GFbc#%%ljxvgLz?F~<>XbL0=QlS`Zmf}KnXGbNHK zk(nCF)X3j}PEq8N`BX(NnWrgo$$Xk3djmRMfx7{np~&8V&Wu z#RE@wq3wioG2KOxtP0J=O4rBH&7komq`M?fHzOjpba3&&(_Lyi;ap62StLj?<`=dT zc~rTUxg4e}YI}DtF6x<)%#0EiXMl;XQ1m$vlj=%E9{`c`Dn-kIVxU=ySmG?|s}-?u z`LK75A{H)-`dUTqVedL`$)diV$lMU|#(ZNWH%6$1n{3#v7w*lG+zi7s?Adh1{$UPG zI*0qqmd_p-a~!pBb5;wtlsFY6%v@5KTRG)zQPNw=>dftt+#Z=bBDsT+&chI^_IA$A ze}%{9osrxL!!**n=*md(H!ycIQnq}gFy=TSy)%pSo)V{mNavHn+#AWgk-0CD`?9LN zUy-ZY2Nb!geNd6B+7?B&Y9CVIs`g<;wrU^Is(qAPS=Bzqs$IaU%Pcr;u(+o^EnXZ0X?Qfv1~lJKAE|**)czFm_xofdA`{(vE_r02cGX1+X?4lzPYi4Nsv_5^V$szIwIY1 zeQ}kUaxFpS+U8bz-iANFnf15Fa(k?wZU=C=Ly!7$DG`{|0Uz#N$L0d5srKI>u3ab*2{S=K*T;#6?) zTS#FZishj=DgCJLVeRut|41y4WJ&+1BA4`!DRN1_K#@!O#}(P6e?oyv`X?3Hq+h5> zzeq`a(m%zdf0|2uI=|GXVq%*WaPh!r#WS`O&e*Jibw3-+U9nlLbUx{qFzKHo-E(=m zB{8w3gNp~A?s?k@=VH1SH0fVdI-m3}Y0|$;zL)cSFJUV@AACIUe6QF}L%*Ws{kJsf z1Do`?&d4}H&c6yrrhBWOOuZJ%YjN_9O~BU`Ed>fe`i3IDW#Vn~O+_pP2K1I9_r~jO zMSR1?=b5F7_!fr71TG1TtTf=w#g?v-;e3Q$FEgyV5@O)EkC!CA< zPAMz7ikrj^OGvYmsiP-KBQtqdQox50lE=JYbbKYk>b?PA3e ztAd&4R50xmF&(5sD=L_Frd--d`S2j1Tt<@`vm%{4VELHd?{)>dk;N71YON9Yvl*-k zrhMZyUWwcofL|qUOUVTKB9705u ze*dd7<7_sse(5TerW@wzE}`jxjp->p5l^q*5f8L7E}nIyC*z@trVrx5tBF;7$Ukqb0m}3LPf>$3)e-0s*{yvrgnQ=iZ z8%lqOW%@yl(*tUpjWFZJX)Gb8;Xhuq^vNRgHk-&MP`9M#;HI*P83e!05XojaHBLVu zM1nZX<`Cd`u!U?cTcm27!BFE2Zggs#Au?Eoq-q?fW1JdiOBp0vVzpG$$RNy?Q{xQd ztaWBHuxG*f%RXGq%~rA%d`s0hTg%pF8`;LIakiChaTT^pSKhMbH$mC#W_#J*U4|VX zbHQcU7W6w}@p$!@VS5hYGHmZJ12r+$IGfGKx|3{gP#Uf}>Y$d{+3X^_u$ol-vYYIt z(ISO+hmd9uq|Tn;+%w0y2ar9acK1!szn6AFDO0#tf0TX(q8Rfh%)7Vjjm`M;??}mg zWFNN|`^w&2-rksFKg5MszZd&(2z#-QkL53!alu}Ulzq(pNgH#39Dq3fn#O_e7=Jw- z50nF490$n(jN<^r@i)YQS0BfL96}ri`Z)fc85hLy4>=G5nS)gq!+R+YVhUq;Ck@fA zM%xK#4v~YCI1RDR)JTmB=TI~yH6Mkk4oer)uC>L~O09omd^p-mE#1FtKcAP5K-+!y zbR^n*v&=hB9okMvGaBt>=;w1%z0{hc@Hz%Unz1t0jFWNL+M_iZ8QWSk4btG&(kSD& zwc{|)cx)|R{nj>c2wU6WxAvIKxL|9Kl?Di8j)O4f-~Ns8@o2jTm=n-;k1zj=wtILv z5pDbE@*gzagUbZ8?PJSC>~d2m$7P@FH1iAplR`uc!All$b4YCK1s)#wB}}rN6v`A9 z4RkO^8E9RO%QQ7psHvGsvZ;A8C|iIn89Y4jWT)6pI2V&a2Q$JIP3=p5wnZeM;0J-l@FHpoxXBJ}B@Ku%LL zLy^6pUKK3+&QS1P?JlL`Lm&4IqL-wTxrcPnyJQ;+T???K zgNp~AZoch=b1~h$A?~eEx?n@SrUojP7JOhrItpC;b-Z5%FP08I82m+Bjhv)=Ls%|Ex#Zba~z4eAWO_AOPmU>&_Ys}MY>5(h4K`Gd)fxq((04cGod`= zgTsf&Pib)Y8$i8+;MnrP!I*FNE>}V|&rY#y2&;x^FLq z@{$kjWqNYo@Hc?|1);I!LxVBL5!y>xXb{HaoC-pFl@#W+P+kiSv@x$|H4AABkgHi} zV}M-ELL39+YW8hBPl32yM)=Oj84G3=q$7PVku`KBe%!@VO%QT(ew})6_s4 z13!0PfHnrm-50)8%G4fIMP^R1@tccFa8^!VN`srA(T zA(S6rnAR0U3Eydc;BR17F+bSy`2k~&BR_t~^5drxr-J-|dgcdJ&mcdVxI4`eG&PXK zoD@L{Ba`^`-O1?3{-dk43jo2(WEiHwLOC%B)}t{8=A6neyx8)=!kFU-c5?Rh9fUDC 
[base85-encoded GIT binary patch data omitted: blocks "literal 22942", "literal 11362", "literal 21136"]
ztji+0saqzmhcZ+z{mM{-40MK?WS}$DB9l-nCx1A+qz#|7bW9a0AsuF7#=#nn#CkzROm&v2lcT7c!(7(Z7-%SAJ4XoYA- z4V@xdlctq2uxVN)b!l2Hb!j@)nC^_Cp-#W_CYWwAakyei9i9=GZ?%8SJBmfcF@1_} zt@s#1y>|Ff#Ep(@_&F{sA_{>SV_88JINh(siGHFia@Xrv4LR~y7)aQbVz5-PQM`<1 z()Tter$|&tVU%jK=}HZF21s|}egJxx%%(~gcz=*fF&LgChDQa%gL@2S*+i8#OjZyU zN_h|&IeI;Y0>dulFj&=+Lf8=_2kTDVl(FbyaamxA*$Pj*JSK{+Tv6_~nAw!YJXwUo?eOSR&=!W*+9Mib4-QSihpZ!?-^h-K?K zNwm)>U{aG!Q)RZH(nJC%qDK1OQNhZ!7SoPP@tlXp)(BF?@*VhEbbD?aQEO^5!M~iw6Iykgj|zM z)N3uN8s)$bF$SM{hq0Jpo(1`3mC`3>xL!*^vj7FYXtwYZh|!>2e~9N$HFi(EY<58r zjk&Ug(Xj@Lm#`QNoPaEwo2YZ4HxG>*?02wxJEpOkCI@NKEk z>Ip`F0%MA$G(12!x0o5}Z!t=5!r|RdfEwHYX^X2NrB@m71J%efG?j{b+bu@irchj; zPFUPfL{R||D;0X5AaxRYAL+@oHwneLrGx1(jbf&Q#QUT|?-P;N-bct!>wScLT|oYX zyw7p0U%U?n->SLpe=$l=5xk@1N%X4%FHA&|vL;UgXbtRMzmUohKh=yE{8YR6DaQ%G zLk0H~yR8!dE>BIy*O<*bprCiSvzQkrR;J6;$k_FemzBGVa5S^v3-j4ZuT&Kt52A8K zpbGGczL#wwyt#st5?|rN)8D++XiRUYF{0p(>dK1Q=&c^gJeXmrB;#hhqd1+KA{a}u z6HsZn=#`ea&QOhLoGdnFK_5C9PgXU6&A14c`XU(jOHY6cl224GTrfGpMUqGubvx&h zP;$8@F}4`=V3B|=c<9<%#I-dIb1Su2e-7VmU0WMGdEl1Xg`YtBK53~d!jyHuygI3g zyWk#i-_z3Q1QgKdu*E=>2xx<;M6lU5FG){JqF7d$E*sp$3+ml6{Nacm}>&(=u6f=X3PMq_Z=ktQkxt(%V+M zYXOP-t_?KWG>X#4cxCs^}@CuY_ayVY~hSG+ibwrG>T&@{`%Ps0RBU^ zE!LED)F?*jsL^PmhJICu&ym?}G*JVPL=F9F0KGbi8jX^6YKM3K>sGE&GznL(_&&6i z6Ir>6mx@hP=*IE+j%TC=+fdba@Cn`%lN-7rPfH(Z33e?=u+5b`BAZSc;g>J8pS!FC|P zK9PokC&Z7}44*4IL$jH3Y)EfN1L_j@YV}N;2qfGjB8BvEP-5vDEa+nsuGLL64!awL zok|uFCsFnbg+_@iW)<6Di$+7Cni7Mc$`bt}2NSMj5%ufP>+7R=D_!G6-`G&q?zro4 z>jC2o-F5laBcc&x1lr{ACo5`z4GskBpf>$u1{1_eJa$*!*@jS8=&=#R%r6{Dse@i{ z{DQ=Q*`Rw!$Rb2JvxDC{Sk0E%>2+Y49U`)`L(c3NEM|u#j0B;bh>zZYjf(7Z^bR$T zYxu~(&H@}QLF5Mt5uD^XzT&x}L9?;>=tJz9o2|!&L&x7R3}btdIBErfdPaH@;33Ka zaZDE-suzCUc>tyNnjn$+Q%s z(of4E=5lPNR?uQ(Ggly$xrkR)tfy6uqg)9nP{~tl(W>NXjbeNdr;?|JF620>xCWoD z1<$pPDgu|MwRzpcVCAqjRF<`(ENf%<;g=D;qKa$KHQ0es$J1yHpN_`sz+s)U&(;fw zb$LQv=oHt59M%bkbuk=(3Vu6LiJ^qM;PsdlAG5MPoRtl59~)^Ss74|9Obr`%Aqu+5 zfki>_y9tmp?rF2576uEgIEyynrv$-f`fT!f3!TkdX+3X4ueLdJwG~h>S6gh+&eb^@ z#rPm@uFj>+v>xu`yc3wS?I5uOymvTr23($YB6GUB6DmpJobu$Yfaaqm2I7?;qca2S`-MPLy*jOXJnbL1dAzPt!d@)d9xSBef@PFK)n zbTM5COB^V17+1ma*AF{BLm)?i@@cnkk>s7bg>X>OF^`y5W|lq=wi{u z33N>lZQw#CX+r6-pY zxwDc=98P1!Qy;8)8vbY3_b?lMT;#YhS*V9S6*pGrz+yI7m|L@{chuBip`y4qf~B-9 zd>ZWvK1ea^c`n;H;AfvE>x|X%FgIfS8y&@J0wiaYO<1MM64W%p07vW7V2#qm&0yO? zEhoknXT^ps^18!8A~@POV5hIy;yhWiHI3rfwlbDfW(j?qWH%0?i#86}0$>GQgu-4z z0o5$Y?soH2({?PhWantMTQF}IdF!O>e?2rt2HvsB-s@rS9ObjGtURo&%)+V{ zODim_Is@kON=V`48eNYTX$y+rBob#hj&|-7eBBOM2>4;F8U2~ z9cBjkRI9s^O%>g(4`#zO!fZsW0iRh+k35~XkA@EZxg_H%-o6;6y?v+o!vb(pa z;&+)jCNV z$%vF~pjLh2IQn9E5mQ!HVNBI>Vk!rQ(TVA7d@u(+n&T|PfXmYy+>P=>dj}M7!M+@S zKp>9GY#buaEzLf?JIJNB2Butt0dz0zmM>PKOBiOkB)&B*!EGUs} z?DPp%-f*1281=y%qliwMJ0uu|24@tI%hb-LF_@p}FPb3TiIWgXcM77b6(q zNP0`4z5i=}3&!3bnMQk~%U?QksugqElU>o?vU?>Fg}`V|fE(id9YYJH*AMK~8)T|&WyRu>gsXf46{@^^)bnCc6yE|2I!E3Q0a(Jwcj z|4-jV$8IfC)Fp+$f?r*kz=B-qaRN&;Ka4*+V^*yj#bh^Dtf!5Bv-<6O*aJG2oMhJhxZ~w$0fpYtFMc)E%dQs%BwEg$<|1aiq B*0KNq diff --git a/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm b/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm index b1a26f75e6b90a9bd26ab9ade9ca6c22708f4a85..dc99dd8a8aa0d1590b1eba5e5d1da1422c3f3a48 100644 GIT binary patch literal 10398 zcmd5?O>kVr5uW}3dArh+R+44?*gZA|8L+`ZmO+N#Cn1oKjKMMn^W$YLukHQOPgbi? 
zrh?=^k^{Ho!XZ?u!X=f14!Ka3T;P;jDu+~Z$uUI^x#YwlsZf=#XXd>(yIRQ(l?tx1 z^f)GQS@nx#fFwk={G-+{jeGFc~=@9K7Y zdJBF10|y2V4h@ft9!ma#A11ej_l0oqmx4cOzEgib&pN3;|CnVQnky~Wi*u!oTgB;m zsa77}C@xi2uAdrzvRGNIG)4Boy%{#wVO6=fvRQ9dMDEyK8SABurpOpj#F3q2Rjx|m%UYJ=ZPG7k+yA+EY-J9>itCtK{bRVvT*(-)C_Q2k#ykrFjeRMMi8CORLw5weoGzbNt?3W8Pl68?Pul zu{TSjyismyBOpy#pp6mzWB2-&7&v?vR&ih-D>XjE;65zZD>usk#ld}Hcwm^R z#)Obr&y@~2ZfZ!;p6f;GUc9b|svGr$Ov?UDj7V!1S6_Oli?WlV z510HT5}-?VW0Eept-fuEedrn4Lp?h7KuArx@U3h4wg(i)zQ@E~Gj*C|kL)EO3yA+u zBMJbi7B1)K+_vMV+%eN;jTAy_FU;WXJz z(Onbo#>68vg{!XM)IPpI4Vm}N%A}l zH06yWH4{t-C79sk5tt+#m*5caI|x!YK-~qrCS)(<_F8g#K$&2xh$p z{|d@o5sFcPy4YP^n6ZiuyDT)!`6ovQ6*grelObO9tYYqbU1tO%`s~q#?*ao^=uJ@8 zf%EKfuqMcH;3$BEqEH)$b8|zQL_}hgr?{JM$Htee9g*Sm(UIY_*@4z56l%{O%yZ}92%FbJ^`$lNCT8!TxL->K8dQjk!;~|er?Q2`;CJDWaR;* z7_hNdIYrN43q4ZsIU!I?Mca@nvl&ulmRke&p8bU)#-w{7MY$YvbhC(`%agbIjr02- zYOwSgtCJo{uyj@s`h>m8_Mp>IikpD)ag^9eX&@~dq~Y$yWl2#`WGM_nnMG_Em45l) zh;tS?CN`R>#p@$$3r|(TIcQBRwc~+Z;S`>poEh?>2+hx>hrcP?PgBe$~_e*P% z=y04Q3;@i=1h{ob1sK(u`>49aa8Z5K)qNDylz0FZ+(%uB#;60fj}=^5 z3YclZ8wu(&#;R06(+uDT%FMce$}J0>X5B0MsLvB5+#cCa{nVBRpPKR$Gl)uXiFbPm zR~eIWE+j}Q=hGi);E@kJqzjTauw+cQ+)6>KNmXI0RLd*RHGYh8HOF(Gj-bZaG}?jh zm-h6tq65%Sjv<&Qhk=}@EJuOsIx3F|8obMOJunVIM2)v;fqHK%k8&x$YZety%UxDp zpdMCUpk7v9fQLh2o?11Ii8#CszXv!Ha-hY;xI91uK|M|5-gKlvltUV*$%8Z~vC5Xi zfWbiv@I#}ru5v2Gi4ah@~ambP}3d+bB32qIP_=yDbE_t5tOZ^a< z=3%5T5w)gp%WBXHnYv3rRrNoa zs_ukU9U%xIsk%!5!6+cgwLFAmwuQzLX)jb*D_yK&7>m3v0v02bxD#*2}1E z6yZ5{>B^59MS{BBzV^!h8PkagE`wff{oUR`g>61DNT6 z)ip3yrGcudMS9T!%FG%yrz%l|cBML!?0~PY9le&12WgNFFnR|W#Bmg?gFx>=p!aTF z28&za4zzcfPkw?f)2DYTyjw}&e+vI`X1g`gM*E47LMPH`t!H;!3#rn>#|?otHJ$HlfbYC3#~%3cKDlw|BFOk74LoO+E-rVD=u`FPNNuj ze(~NXKGS%820CY~_+YF`GgW#Db2jmr@jW=h9-Qf3BE-+@!(@V5Kb9AAl1kmZ4RRx%`%!me0Vam%${L?jh zS(k%X=oJX9%ZqySVofg58F`7$&_y3EueMownJ%|j`dSMem#^bht_WA?BE3eh(>z_G zD_jxg3W~4l-io#X-4tPsYP-+KWsY-a}1Fz&OD6xW42L+yR z9P|aJeHH$pIc?5iG2-u-lf0I(5c>aJiK9fR!%@osJ`wTf$|VZRkAUVB;e!dkMB%_9 zSg+_&;IaqXyIwD@PTl_y@3hbQtJ!B|?~mOGe1NFuy;!jJh%l`3_!R{+*O?5;Ko}=P zIL=1Y!KaWjNqjCF5wzJ`lwCO^4iLJrCw@Q z)>f(9Xsk7;x!G7PU#I38H8xk9m6bBxC{^m4jdJ#ReH}ZpQrXz3tlp&N?e#KMR;gaP zy|&q;mD2hYEmp3boL_BTE-yFN8hqENtZr16H%@-9ybSe6j*DFu@uw8uo+V4oM!B?t z>9lyse5TMSy?eh|raxV{4-L~_E8K7H;_n~6p-q=AJa=J=Dl6;t@=AG?@x8iPsuMmx pT5H^9s&rF5@G;#PN&J1qH#FdXkY}B3YWbw&zBbPKisW0d^FITpFWCS9 literal 10194 zcmd5?O>A7%6~1qNJU?&7^E|d=$NBNO4nGMYfrQ{tr%m$=goFSIH6-EJOzcTAzcw@D z&>)Bdw4!QvNL8h_3&f^srEI$BqKhh9q%K%?*-aO%s?rVWqOIER+Jv3*Arcu)%?a8H67_=6RbG)xA(CsH+81eDmBXW zc%$5i3D3QNMK7$auCBB?W7hrRrE+JbUY;@LVntmvTMn~3+eOxDfihYQ|F zadd2a+r;GdsU16a?H>A&KXlfG|EcSy{`mt8yxpftOZCN5rS{du`Fg2VKHOehs5F)j z9e!-F(yVku_P)&p)>dJ=yx3T)cPb)(?;Qo}rFKUQAK6?^r(^N0O6Mxl3&gY+OU>oQ zTKT#t?%Ud9EIUefk`-f*Y%bC&x62*v1hgp+v@>Gc-mN|*CZ_KoDkcYbsUs`44-i?d zTqz@*Qv(usxm7NU9RmcGR#vZzo%e6<=~c#sN(aYv*MOjPNk%a%wZ-m?MksI{cd|eF zJ>&Bi%yqHYjiGg4>=i<0d{1U6>!qgD*!O&=?kDPMQ1xQIkSUp_6s2b&?2_vX+)&nc zYBC!?fg5oqaY9Lxi&3IpB^P=lp)L(MC+WD*B6E9$FRETrP^L@3{m%#5zk=~E<)>jk z9r~AM|L{LMul-BPAxhE^`gI>#|uFW)Y?^EIEwWY|fa~LjBC!Mc`^1YJW}^-+&YXzpOPnWX zMS_Os#Zi&gBA&jC&@g3Z#TYL6S=7L=EMSoydEJZb%6$|WIYJ{k_drNZdRU}qEph}S z&i#nVy%y@X$Prm2AxDw_j~oSnRF{okx0AL7CrYJSQ$%IT&f|3jXAVoWwVkc8@ z657K|0LRM7v~f}k!F;5_03Z!037LhHzl5BJic4fx7AUqMI|XFNsVNi=hqnffVWu|dO!(qtVx0szzC&5c zdzqvfp!|8@PEj6<^%GvYw}}7E-45YNAUuxABbY*$1|?4ij1lM3s6t8o);ltXB)WXB z<5k6q9E$mZa%^o%iAoffkY8UCzr- zg~L20yy0F{{Do~JD_k^DR&K#2vV2A(O)K+qQ^e@g08A*gE+xG}(*OLwzx(1{mkXY8 zDQ@vBHy%4M?vFQc2#V5>2oS4M;l(vuFq3Y|7R=jelYxP3;U^SZ{5IZ?Eq-I%Ne7e$ zZUHZWg?yA&57S3n#QrgV`J|w8K!E4b+PbDx2LSB0pbcG~bYCWiTSOLt&!EMPP!G9% 
z$lFt3WHfNsr!Bi1xX6u(-AV_W3)8m|b+w7$m2A3+;HbMM>RJftjpAZ(4^TpV9(c#z zB~s7rQMLCxS4p7#NbvkcN$VE!0mvw?~tr-ew%R!tIk`&dnNXO^UjwREof| z+@#edj*Dv2o^H|@%?T*t3;HPKxJi3d;3n;%=FM&Zp)11CQLJdx-x+c!=BhM`&Km*+ zBeS|uu&W&Hx4NPnqcIX*QI6A?+=fQ?$3#Eqeq#6{Zm^Jq72%B%t}-qYT>0Q(_s>7n zkeLo3!w9#4x>tBWGhN3ML&W zFy>o~!wLfT+G8OR^*h!fwmwu~E65I8;Vlfp9wr@0>9U$Ya1nJwUuoPM@*f9r(r8U? zqj5Qb2u>oG+bncWU}PG=B-`VWguq7dkIC(*uPGXpJD_8SrDF;sqhot`Yw*CINCrJZnInLeI1Ttu~()p!vKJtOeGv2iXK_)-D_5$Ve8rWEg5pa(8|T zB*>gDaEURs=K*`154>?MIqP`ieq!o$JgU>KCFq)sYEsv1Fy_M{a-!`wECG{SA8$gHnk(!7b@uJhhU=sIKin88Pl6#ixK4{tnw8iZPF8 zYtQnYRqto4zNWUyMmzVtyvnqWC0pZ;6zIn>J=Cvfp&!Qs1vf?`dBv--s)T-N!)_~Wp4iw!2~BJwEbbM`kg3my*P=U30u{qMb~IwRhqIx z$fA!9LaG*JWoI|HEGs+H)|j(HL}7Nw%8s5zb|kkE)7pvTt#{=Nq38KA-p5z{bXD%5 zJs9g9A#XwM=^7{Gy}l#wV+7qp_tFe{*L_}I-fuiN2OPN<*$Ev1JR_W#dAPC4PxgS&eYw|&$<^VagKiGL3?D6p-dZ^3Chr7lJc?gfP-ovz?9;QQx$^kkI zHy=TZJwgZNqjW^h(u49S=;NqGl(QHaNIn|ghLC(rO-Ve^50c*sC}aQ>=kVw;*gR%I z5pz{KR#huBJBMQdSdKARj`gueb2QCRoWs&k2O5swraAc?2p@-qbpTw6^$cm4woCvZq5BhkV9*0)7WVJ1RLX912qD zlNK!WEKu?~BAgY9!x&M;S{b5(i*Yd1MN7ew?NNAz4DyL6VG(3LKiuXc?SvVupM zKdQ7uSE)i5sZ3RPU84)KPM2ka>at18as`=Nu`t=h$Y8P&-iDZ5RZ|iV^uy$vTfkxq zkG5g6ZDA2}Rccq&3Jn(90b1IOmUbU|)S?TF#TJ&PLTf0bLoK-m`D-w8trwEls)Wx1 zNL~w!Tw^2G`WV5~Tc#nn^80+cx=z=7_w;|~V+mfiy4djq$Mm6|qsdpauSdD4H>DPR z@diCuA=B5eHRf``h8)1qyQ_i)Pdy80;@b*rp%b!*cM^X}_IoGsg?T3tyiQBPjUnDb zn!vKzlp05@YM;%*BP}+5(FrY>)vx{kF{CW_yyp0;dF?-hP`l;`6H~r0-6bD*ZVO$xcxl8D$4P;|FT!?k7`Vh6 zknm)~??d=NRWN%uEd=02rp)`c0#cLKdr>wt3PspkhKdGkMj>Z5qPf~s-E9^roK z^0%-DgYV$+{rf@l_4+0JLZLQCODm1lO1;#ntTd_IYOS=Wv({>sm#MQtt+i&S(kRpA zQl-AuDrZmX>)x40rQNPHuTbauYMClcs+X>>taYeSTAib3DwhtPZg$R>mpUsgzH3#Q z?aEU7;I;A+%+Kr-8$9A)1blNmv41D<^?9|>X_ZS21i34B=9dhu(p%q9&w?-Y@6x>& z&OUK=jw+4Sdbv?`v3p{ diff --git a/src/wasm/Hacl_Hash_MD5.wasm b/src/wasm/Hacl_Hash_MD5.wasm index 0efb5420242247c42c88e32497204c5fdc092aad..b6fa9371313cfa61cfe5e49e2ac661c7a5f07b77 100644 GIT binary patch delta 1432 zcma)+%WqOa6vk&R&{WWdR1l%B@2D+8d6ZVbTUuqPeIUlfn7A>~#>9m{G_G`^-bQyO zCX&0?bfYVy35jdB`VY8r;mW0t#Kwh2&s?|!Sh=`&=9@EfX6E;u%*VSw?mRsdeqdPE zwzTBHuXQWjDnGT0a!Sh9ZnX7jpEO!uAU&8FWM@YNjR#&!I-if*5&lB_x&1MFFSBQV z%f>8>!FY9#LP{JLLP=#b)o9ZZ^`M~y`Dg?_j1+{RFv8f+9c~U3gqNF8QH&x?hj?DZ zQ$|DutKPD+7($6UB6V;~1)EAXwMq-I%mz|WYF(ZU;n)Qc|Q>2 zo9o4PE;ngS!N635OTI7+hs z%`t~LC2B5*Syr;)-2Ygw+vT7m#FH7a=3tjHvks2A^h}qYsp*-nTVuLfm$BTNs6mwm zw3k-;v<9_`)gGwTK2V)e26YOjdevIO%22f;*>DQhsdmfIvM-6Fo%EdOG}msjfzzFe zE~CtWCaKP-YET6!T0;uTb>$37SYz2O>J?hY8QQ=)ZK6V3jA^USrA=1TU7I$z-|2R3 z(vr4xmFa@MK2*?ER8i7)RI#G%sAAPQRHvq^bd~|vc)QxLE44Z}=DJdIU8!kTYI?Vk nKp>_>zDi5tfza#Rrg+ghe;G=`p0{JJT6U<6`FC@7#OO?fHJE^W*;K2hUquOljuizt>~U zWL>)3Qc658Z8X+jDc-(yXDv1&q_tZ@YNb~JJu)n6N+0_+m*g)gzinn}_H;|Rxz<;b zJbdpTlv1-Oc$wep!JL%Vm*B9}Hk(6nUbkiko_0Nbw!SW!w8y$B?Pn*Cb^ILQ$DOxX zTLW9VA*k8-|v4u zsztpaPU|KF9mGNJ6z8J$ZCa;}3f(8_D1d9W*QWuB13e*irJ&@?)VpKiUi+1#| z8Al$5i5Jnj7 z3PKni3PKnGMa%&)gfI>B$q3&YsRj|rgV$UT5f?;6frz*uM#><#Ak-Hq%Js&$-l$ve zD5Dv}n3qSPiEq<7jaO)eW&#tO&OtL#fyRk4nxh!UQA18SmL5Zlj^h}eK$H?(2se{w z;0ab`1CM)C&p^WtQ9>1?x_J^4h;lcd{9iXGkwjb>nZ&p3YhubZF{MmQxh77rNz<72_9>HQwrQQx6(a9SJw7@M%|x`Y;bqd4s>o(*dzCZJaZg7Jm~%RP9`n3Vqy;CsP@qMO(h^3o;KE==QjLhWND3A<@oMgHLF}6yymjXxvX-ERZd;o!Y8kk|N1_QEd=23xTH-zRn zRt0=4$BI)yoM3b84o7u}HqEkXU@LG`oC~l)emYFoIo3U)G+8{J#{0gAz=b(_h>NoH zDOQAwso444I<&FQT1drp*3x7s4L$76z7yr^>3YQ0;gMho5-f3)mmpku0@W7L1S*mA zBtxD^g|air5Mq`g!Z*ke<4H2uT@8a zT95epO(jJu`})mvBi%&Pr_s%+RLW63T6MLFz1>lst`|t!(@ParqvL~%Hd|xtfYe>MODdE*nvv}OtlSoh1!Nk zRgz|jH5HZ_s!FAz(KOF?BRGTq*!o>K46Oq3oc&k+_QOcgYDN3&fBydObz$f7rfrw{ zqR}YdN;8~r4<5z6-84H+x5VkzI912_HY%jsxikrq;d% 
zzE6e_&zB*>_sbCD1v1$DfO9Rl(MXLRH=3x)0MaPjXv8LsS=?xXCPmhSxN>;oB&emS zi5DWRW~^w=M!DH=qXDx4Hx|(%L;yDy)8a5Kge~C460HZ^SgQ4a8xLwd;KnjqLQ84- zgS5=$hN!Nlx$%%*AZb4&IXBCTXqJ*lw;|}w7#jOS}T?Y|fZtz-R@Ol`rT0Cw% z0#S3Ljg|{H+ETQ_aHFLGCc_P}Q5y`jT&&;*W@v@s#!5GW$BmX8Zb*o$aN|*G6>hX@ zZmf*c!*O~vPLIU-F_hb5rY2iWLAD7u+5&DYH#NCJ3KGWv5G?{kX*+;u&jw;4RG%B| zf>^t}WtSVP3_`1Ai16bEp(hMNPa=s`bRTM7IJ%nVLlSO0PWM9+ZahH?APF~~qz53` zR4*IU%LWb=ah&ev1jV_7?&LK{Y)w`Hc0f}J{b@@0qWg&w3$HGDxrOKTC%d=%GaeH6_sK8ow8m)3bZJWbtP^9()B z&r*{6kbYklJkLUN;d#avU3kR8eFAcX)>0Bzg!OVoSkh1Z^ps%KPrZUsKdl#h`Z*1J z1{8J}P{R=#P;r9Ug9xg-30wivfRb(Gg0Ml+2IV^n8XJergm_cVj3BaE(PrgivRP$; z;?4t==M+7M(Eep%i=?^*`+F^Mk@Uo$C_;QZx9IzVR1W`C(MBb?Y&AlNpEp8;w;3VE zFBrk*7ZrHH+ZAnBZjYS1L(vZGESPP_ehFByHN@Z&0T<}fb9Ts1*{%kf9g1JV0^TW0 zcgh}K+M?;}1k!{UG|iV4y)41dHZzJcDoihFkDzt_Ou17*_k6{h3L5CEiWZxdK@)vV z5f;N4#@CeFNQcenX8O7pPtNwbl7k^hyz`y%&c~$(@7pfH475Kt_=WfvMu_k)jS%B^ zj9~MxL?C!>RcN`EDDZw>6rCkjY!f9S7OZ+f6rCehz9@>V$rWc%%;GSQq;O_v)F`w< z_cDV!OEPiD@j98-vWOd4!QFS2_NI5ytmHy-K+ypqdpfS)`s0c}k+e_9(i1uF8JcE2(u5eW z{!>Mtx~%_9(Pv=&C)y)m{cn{!6|nwy-c-Q)&%t`LGGP5lMOX}H7*BevA2y>||AiM% z&h~{aaU_XCJYiUWO3rgC=RBq~PkUIO@vuJYVSUcS`b!-gt6kQAr6iPQJyxC7tUm+R zpL1D%3ap2NVZHVM>oLL(Z|w%wYXcO``tyEBkM#(U&3Ya0Dy;un(P_;93ll!^ zA;bMb4)K3)APn&zj1b|AMu_pZMzHyhA`m=}2#GcE;r*c~I#I0nNEA(ctU4-+P7^CX z7R3gOWN$nJ;zyhCUFA!P-dFscqDzYZr06}xm*pUrvq^z@=d}98;QQcT~YjJ$!SRT9QwaY$`Ia>*u7f{bazYm;ygoo%aPG& Nkz+-VR8Bn|`5%p^0p9=s delta 4995 zcmZWt37AyH5q|xc!HK)*L=XsaR0+69AV$SS4ahz`;wp+qViG`LRn*LGNOlKNlVxW) zL{!9MKv`Jj5abd;kd;G_Rb$NIm0$!x&|H#BlcQudb@Ds_v@p zWpUNV6V~@>=datp@sxJ0$CvGTva!)i`wfYlvA<(5rtrJl;1hwFCSTg|P)XovC+QfN zYVDYRqeJH}ohZkNzqVdG=SQBis+ZDU(ks~Mt(`X9yHa`$_pYf;9S)!x#BEBE1$(7qf}dJ2b9E#r{ZelT@v@Vck{rGNSu{c3)E z?>-d+WI*5{Nthcb{h7}GAistcF5 z3}A0a!!mS3iGD9XwrlU) zDDh=fH7aDHj+W6FYjg})W2*I9F*;U^jEPES97xKIhw&;zUM1tBgUbrLPR7c0QWYQD z>lv4)H*iSk1P&>^kwdB8#38LW2giB(eU?~8GAOZXTVgd*6P4HxB*PLzLUkfm#2A-Y z4G@oM4Hj`Z`(Y?-GyWr~(ORh0L9MQMi|Sm7T@P-d#Illw8I;&0nUs`T%oR%P7K;ld zHre7riQQ^(p~P;JTV(Pexm9ipOAIhVw#06?3iSPU=lGAY9yYDcI{p(lT@R=0i=9Tc zJe}e)+2AsnbD3;3E|pUatky?`HqC&xmDV6RR$4<=8jUNp`bx~XQsbmFa7J==KARi> zOxjl4^vFz9ZuM=-t)AsJafXp8thXt)-lm(3OvP^oe$x=t9q>=?FfP!U1XJ9ZzLRtV z>#m_tceBXMxr2Qd=|*VsT;EMkV6L7y8hDQZYqmj_W}D(P)e^_`2O1i)^R2x*v47nQ zdradTJ0|o#JEru0JC^DLc1-Jo7zIK%prqL5^TZII0L)^XOAMtOfmx38h@pHFFbnb_ zVi>s@n65u;q$){Is_6UG^NsY>CL`l?fsrPo3yoZ>&5WfPQN_m_Y+zH(%Ss>_19!i| z?js>nn;BseoJUP`m*23+2>$gz1RQ(9bB?`u`SqOoR6+{ z;P7sTEW%kDCH65Bb(Wo;99yl+3_8bhd}J8}*$*=MxDiiR7+Fy;$h}y36eppp+HiRJ zuQsyUpuxE7f3+#P|5uxv?F3!`Ym7j3O}yX}MwT00Yvc)|Ptp@z$1v9w=NNq8Fvk#$ zL7e0DAyd~eSnlRi^akeea@r85`}&PW@NXrLIf+#u5+pVlm+hxHubY_d7P`^$f7xz9 zwmBbV8#v51WRdNbDBI64lbelfE(kM|TaMx+bZZ+9Gr7&kwn8Si6=yQ!BazPykSCC;d=OW#ENc+Yd!r< z#4FqqyNo~?!kF8IZfhT7M{(?J@n)pUAm0od>2IwcI2Nx)5x&LPb~Co!ZDMmfvM1V+ zx1$}|8|}!xXh+_$3qczTA!yxiXv$t9(AsNwjl6BLvM;A8@<#xxQU;?;6=-c%|UWH{1c##yj}Ei{E|({T^MG_rhHK z&Z0dmIi%qC7QJnFvEUoO=>A~&dm+Uh{YQ)T0kNk4WZi;cc!PIS{-xf>^L73{(Ltv4 zV4IZU5P%V1GoYb*FxrSe(=_w<7e^%YuZ~FR-yBh@e|JP$KOiBH+-Y!I@V5ETa-h;m z4kP-9h zsP_d^dxWVyV&seHFE#sz+JB5d5#J+)&hlS|$DB6$mPqJSN2GL`BT99;BhosMMAe zjE;DPzB1PrC_^~!%UnO~D@So`Uzr!_GQ@eli|--p2M)~~-^TYaBb(32=C_H=?M73y z8w;Y{SQza_bF>?e*kzy{c9lncnzCI5S_^%4l?8rQntk^=o$pstz`ai|b|MhqJfy7{ zx{Bok*siiDHW_sl7%1*4^L;*4bBldx^4V7KWm{R~XXFw57UTD*pV1}sS(b#USZYxd z5R33Jix&87EBMB3WtrtKgcQqgxkb%DtiZ>uUq~k0VOx2#!wILYAi0tWUD+m~H~?V8 zJq0xMR=Q1CMXNmhs~wThHI7K>6OJg=wT?*ZlOzO^jXoNIht<|u4pgR+!(i514s@rH z!;bQl<-o{vau~`6%Yn5SCy>Q>Hu|VZ`gMdIWfVA1`*;QDCSN``+TzP5U!U>iGozas z;AVsxR}=Wa;ZqBuF&Jq7y9lTR9H1;B+hN!4{G1>DkzArXAzh-azO?!q_m#bv F`X5o|o|XUr diff --git 
diff --git a/src/wasm/Hacl_Hash_SHA2.wasm b/src/wasm/Hacl_Hash_SHA2.wasm
index 09296bccb1535738a9002e66ee7d83a4b77c435f..31d08a715329054c19dde3acfd1a560e819467b5 100644
GIT binary patch
delta 3521
[base85-encoded binary patch data omitted]

delta 3667
[base85-encoded binary patch data omitted]
za0pk+%{Q1q9KU%Rxwd_pNlqqveI~VRxcwMi1$RiOi+6TWo#)+?I(~Eiq3Lfg C?9hAw delta 2985 zcmbtWdr(wW7{BMT3+N&uAS$7L2L%>+D9XzR$rT}29twyG$^s%NcbA8v_>4t^M4fz{ zG@Ef8tI?<#RNOIZD$7ERsVr-zscEMA&p({TX_{=zDf_;2cVS`Dai%}^-t#@q@B4j^ zv*#YV==W})-;sz=7jsp!p+~#wI%=QTm#s44WD(KKvZ|)Fv2<%oOY`O}p+4)X#u}%y zxmHm{HJcCb>!mTx/ErJ#o}^fq&UFoSSO;-Z(H912G!7Tt&ILCK1ZjII^4sKOwYqSn%(Ovf#fsE!ysM#=_+RSD#=K61Pg!sEoal;R*O#DTSU?;@ob;zUzQ zPg*=G;(PbLY2h`YJYYZtyaJ84B2hAqcY#DwI`B+jxg*gsLZQ^MpXQx9Nq&s;Asxurs z6BwTh3^K4zR4P7i7&C>>A10gl0?BbcaV*P%`LF=yLIz|J$7dmyFNAqK8y0d6GWjBG z%py(6_x4mni8*9L$Y3mu#$X$Bq>3-1=2R?xim}PXO?en1&u45#%%q=uz^t!A>=^QyX$Bp_tlG32TiOR6!-LMz|V;s~OpX8XeH)Z9$C( zPS{_M%)~A0u(Y*MJFK+pr94#HIv<%zTQ5ty0qS9cQQCTWemA0GqbzL$Gz^~K^@FL< z`PIQ!HgN|*F{eBmx;(#)po0VIzzL0XHkwA9-)7nn-hzj)z|jjWo+C^GaTvSFW7s5l zY$oU6R;h!{4rp!STS%3&6}G@uCvP(>Z4SN-iuiUYf;R6!>=;H=;X8-PCcaBj-bs|F z?uMPP3${ZW>?X?3Lmh929oz-&yo13u5xgxbn*L2)u!oI2y?dFV*&M$S_=1|Yk4qEPlkKwpf|O^HP8XP$pt3qOF)7JiVyLFUfPxy102yt}xO?#*1f zL=Wel_Cq1r)`r4an-y?8+B@r@!b(9PF1?1`DkKb0SowJUM6k^6PWP^wfx#)xCMwz`?6`$&F~9A zzoDDfa>N7YFVd@0MN&wB_{E+jvTufudgwtQOD$={TUAGp#k^|yEfl_9Ch~sWed6A- zy+Q;vWTH8@0SPl<)9Jf}?HVBZW127E^TTG!FSVtE61%)fZeQE_9c}-ui%f5~U8k)r z+mE5}zi_rO@$c`d)x zr;7c@W6*rzIGImhHZzg%%2kP3Ar_ycTpx!!aQcb3xo1b<-Z|&NJ=XJhSSsOvQxA jF={fhOm1M5V`QCpR*8{q@_a^ZM)rvx6gQt@+{Fw4L)I8r diff --git a/src/wasm/Hacl_K256_ECDSA.wasm b/src/wasm/Hacl_K256_ECDSA.wasm index 5022a27e5630c9183d0da04577ce3008c7d63a43..a68d42d952a597450abe46406760f881c76990c9 100644 GIT binary patch delta 2583 zcmZuzZE#fO6~1Rb!iEjaGEK;4Lz3rMm?7V>Ah9h-?vf7yBLpJy$0@ctg(c(*2!h$& z*z78zfOx^J3W7-Zl&D?k;zvwo6#Zdqzox@<9DekNQ>Q;V)2VixVQhQOy_Yp~I&*iQ z^FHT&&wZYA@B8ll>EHeM^IMxw?D@4yb;i2>e)o!FJ;VTCyF#%>)whG9IG;yps{Npo^9_L?g$ z?c>L>0|RLui6$|Uj?xInF@kuC6G+(kq$D1XQap{Md}C-V6{RsAB@AMR%J0J{DY{ZA zc1!IOl$RSbUBy2qNAegYA^9<~Wv2)!;xq}xPZA_}lG6N?8^w7aK`!N`mENGB56KAqWS>Yeh!jEI4Ell5e_*spCh&PEZM%EQxGkVv%>^YnX*0g z;qXjvp1il`^WKDWy;-ieBLqnwajYFB93_51Db5fi$cVgO5R!Hn=NAcbDftpX_?JZI z%l5>Vg*!$#<|sa{g``TFUr}$bINnZ>L^vU9P7+Q!GhfyGamSj(V(o})?G!;o9Iz^;o5!09NmcbP$ zmj*`MmW_$>^0X|@6DgjL3TKGeumkF~yi6TiBCQB0Dr+duCEv`os-~Z6HBhNHG*H!I1O%#EjF13YjIaQ=NN7mA_zr~te@`LA3lze<5Qg>n zbXNrmO>c+!9~$HuEHaG_U(z)Erk`t3Yc*f3uzt5VxTFqsQA2N4YtlBZl^)fa#+qAa zcb$yn;6|O?>)T1zo!^uc)^WX*YCuE2R3jRlQVm+FK}t2uDAgz5&>$#*HlSG{z;`Ny`1=ZBUaSCKf+n4EmqLJ-DulR2 zAd~sQ>S5}0s7F+vLp@gN za}YhDq6kRfI|?Kt=}a5i@||f%yVIFA?M$0=rfo)N+GV7jX?Hv0OtL%E&K>BGieg&= zy^R<%xKkm(_bP;Vl|q-MANB-Gf+#`{jEz z*2?!9tm_`+Zglf{td|#Nq5Z~ywBQD8kRQz`b#srH>_JUus95$xFc{1he6p;DKDAbU z`EfBq(c>o%PhR=-vx_&3o*}F9tD0=*SLZ#&=)67Q{kkUG@b%i&^w(TGN=tpYsSf(P zH~02x8lFuPx#kCGf(lR-P4o<9zSu~endS%SlLC3el<;q&&Ro|f`e@cH$PsF69LiK} zrU!Gwn`x1Uev>=#6Iw5;OYO-we=4h2K0*%_BXXODjDHK&~;C%I=9k2 zqN&W*bSNg8uE(_g{M$7x-$pg_xf&5v4>EvT+ZKj&-4j|K%>3nHI+tncrP9KKPdEx@ zWx9IlA#3_<#48UZ_s3rPt&hIQy*5Z^y_t=>sGYL8W4q`*Z_!vv-{u8+j4iMRcb8;R Ud+0Ozb8hoq`n@-|JV9alFY=O=5&!@I delta 3235 zcmZ`*X>e256@EucEiBmxY;4)}a!!C?K^xX!AhhO*fFVg63{}29+hEKl>ltK$ zGtrqh`wLqnHoX6k-d_erJG#v|*T z^PT(7J>PflySk^obbtCU_aU$E;veH(M(veESC=s|;qz9F$+IX**5Ih)E3C`Loff5L zdD~qM*_aq#7D(I}?6o%WCJxschG|F(Z<1EnHs=g(V}l(VOxLElt{J9LV;GEp+nP;J zb2w-ivYDH8Oc_YFaI1pbc{8_1LPkWk@>c2K&9aR?Y(oL(~~(xYWt*vF;GXWDoHc z-l}QDh+BCN>7bjkjZ-+=dz^C3hew$Vsv@HCic2w`I;TPkz?u1v3xVf5}3{Mr#8!8 zVx)UD%M-*WNKPt9-Xezc78?DT?RCn}iSYquNs1VF3LU?lNxzMOr-)DK-rrG$P$e#D z#Uia)o+fyF8qd5-{H`8(M&;kpEa8J?ug&s3Vl;We=JmcJ`JNq|wTRnH3QHT_lsxfp z9(yJmhL7Cj^#VFa**C{@HA^4iUFy@&f8Hb@*}eD+=%Yegn)9|a{lv)hYiTYJUm&@t zAh`tn;avi~Uy}AZ8O;Nw5rTK{9@ke;( zI`MTq@`lQPsHK4qY5HwxZazkgE-%<}+)~=yw1bZ+LnRE_|6iP6X?kuBd*u`2Pbjsf 
z_}KToe~EW>8E!|^Z@Chqcg7M|Pi{*MK2l3%<)+hga{>EJm#q-{xyWB#o>*}-P+H8z zGK>eYj}3pRl!Oc(#wC%?gZSY3u|;4X%^tLc zh;YhsA&w(`hz+p^C+-ZCG!ErLDZ*pLc&ylT<#tsOw8#hIX;xBEb685a1SR~2lya#A zH~>Y4Wd?+~OcyRkh^<_#3R_?2qM#9w3T6CKsfK>0W{yzUaIQA}IPga>%SaZvpYIcE6c3jg z9w9-j+DP5sXt?*+8XnLc19w!d;fMPTxHYv#0PZ*825Sw5`v4oN)3M5UWSla78Lv!0 zCMd%)k;jHk{$pU^^MQf9#XK&@sG*I=haS)JLp0&u)I>X|YiN^6Jjpa>%Y!_~bj~&h zs7)sGgFsxCX}Hlut?8C2JO#@=HF5q(AaQeocg8fH7P1%eA)caEU2TBrSU%X?s!v0t zR()D-)xTvAR*O^gZJs7|cx(nHFxkWeq3i@xXY$PK1P`;RCNME_7AAnrO)wLYnqX#b zg4u~b4OCk7JX7ZIoKT*Yd@fvct0aF!yGrs$wW}n5OuI_*?{FPny#o0ztn$3_IM=B~ zn5T?K8kF(Nd}RW%KpB=so}-2=RK_EVEM@)Dq)b4bP=@76p2rI#vX~oqQAD2N`P>wd zr+EQC5s@X_$WKOODKC~~{FE%`r)32%k(InuR-ub!n#O9l%QcNPa98M^vli}3y>r&V zU4@-98a$qXU+4G9dM4OH3BwZN4ahY^@Qu6?f^W(SKBI+OvVynrMkV-0j8xYq*qq=k zh?L+hIl(vEgEOz&7H+|HsT~s);Iae{6Lu(x4BVMuJh(H#_;F`~3E<8I!?-iGt056( zJc0|58WL3|fa?+rtLxG>jx^wo)rq~lgQNJ~$riqM;U0xuyqkFUL!>TEdvH+e&C8_9 z#)xA$?qeZpkZy>;_?lerA1rbFM0vel!3}3GrT%<%dXRpT82!cNBJ6x}vp;n3(DP?c zUH|Qyw~fXh+zqB`?!Mu$bM^P;62JdjFg57!^B$pr^kS1HyL=%_9nI6@AtTnEer^_h z=Cq+!4MpctM?RfR|8)VKpnN@DRVdn+e0?F+CqG|E*KF_s1)g6-_36AOx}4|Dypy;z zgp5VWPnS|tddxBkI_THwg)3-&w$d>5(7+m0`eG$5wL31qx8vmURTND3tfDHS%gI~o zs3?7AHP!uZfmv&*@hb}?Z?2`lpwnIW)o?hX?&O`d#4gF_{N#{z)JMO|6eee4>1S4Q z^)u9Gb1GHs1=pj!mi4*n75~-V$LlGW{O5YAbcwJeOT%<_(EiV!m0rzCJ~1PC^7dv~HEPO7g2ps}`IVcuzZLI&dawQK>xdIXciHhf6(f zYZbzXRyC^)MdKLnzs=vE_>F8)!k1HOh$ehFscf3Wgtjj~RMZPhRMhDktF7%&ckNSz zp5iGN>eh;D8wq3z()P14?RjWB@I23QW2lnR%Ie(;gv-loUcn!pjs= zh>L_25+Wgm3DG1XQkWDGDSE~t(p)EfTSU4A-ZLW7Mnqafq>Tt>GU8H(Z?woa%9tB1 zv6*Eo_m7L2l^fZjRbLh}Szi`2YsebB*#TW|jp43i^#JRp-Ub#ahYi|9hPL=JTRv!; zoVuWp>rZ>kWQVrA2F@bG4ZLmGo#v6pim=OLRoLaRE!^^06K;8W&fE%Is&JcIp#|PE z+zN(ULAVv9R9n#wcKWXfo8paZQOTEuP1%=)jYiq9DG%6qYs{vCO22E1Y^cOtJ7`+8 zGwz^q!C~-)phWpHe`S}uHREE}>)<`?Ne5TEYgLzOSfYI_p=uNypol8=rHM;a#{nIp zPDiNGF+)1``8XnuJt2qvX-|mk(6PUXUpLS|QA9LQ5)loQy?@c|;|8?&XwV7!XmY9M fZ6D1Rc+dD~8XryZ(UeL@%&e_~)>f2AZ`|R}Ompk3m{p0h| zkucq#jH1i>>UdROTV+*yUuo9&*N}pmO0uQ3fj4(?1Mc!iYA$_g>)P_chlZ#Syfj9I z!Ao;Y)bVmNDonoJ0w1;V>aR(_FYnA52`NH+T}Y9KKMy{Wz60qxP-;MkF+TH4SDyT( zTeY`~oY^M-{QG`>=s~>{vqlku+R^T0@mmzQqz&q*(!HXNP^Ck{Bo0Wkd^{~yfN7C= zZxp5Jv14Do)QL_mWY=m5NE03Ab?U18guQs_Ug<+CirsLdyCBVX)Po*r9#D`2!2*Tg zqF%TVFYLtwD@2?? z9=cX)u5DCk9x4)hVT2J-B*N%WB*K8=q-caeHjPIJ(=ca5xN77ojYuARrfEbpjfkQV z(KMpk2ov=MigBYO+-OWUI?8y)Fy=g?@Qh#522E6Hg(qI66`o1NHJTORh$F^hJ!_415=o3JGn1H5 zW+pMKj7(zEHu5QwG{;#fu9>=OWGW9n(?+JWktt6=S1Tu;L7In&)_t$M2biuXyOugZ!K)HdZL>XDo5?lyk<{zBi>lmdCj3T4SY$A;eHn`>%7HJ!sw1aKh#X9ZP3@5Nx1!|X71=NnS zbwItaLbRuhQ`7!Z0m^2PMOp#MVo?FgV#)5r8u%=hZBDO{rPrL4V@|oNoN{^andX$! 
doN|g&PL*ro%r_OMy!2KIFYROhj>IbvH37 z#EV21i5Cw_FJcQqY(t^qp?XlH*h_N~(X)3I!8)5z@hy*?`OnO6XZF26_rm+_@XK48 z^5IACwwBt6@7%4|nWnh9r?|>+hg{x!zM10w`>l8J*X>bQ^FnUS( z&hR*lNfjd9Q)jv1hqhAwpe72yL;|A_b-V=>pvzEI9`a?3fsm}KdXj~uisLZuKe&Nk}@Z#2A>8Uqv%iA%k5|E0PsR zOp>QvTY_Yf{g>oFtr;ST79^;#tVOg5m>7QLo53%71|$Rp)6Po3O*@CQ$|a_eNf1G= zR=r-Yl$Z%=dJ-aKfkB2L%%JMa?}vNOzW4VojjB6eJ8uWuji2k@2yyVawf*K%d;iJ^ z^{Vq^?bB}K@#O$Fq?iGQh{CpjgC#P7XNk1sFX!y*8OS(9VvxbCWC%1cNcH^Dz!rHl zFpY~yxPQ(hAWW{J?Ra3XI(!AEz=o+7<{&4_8*R)(7V-|i16e9<27Hiw2_g7Z=K>9< zVHyiiz#NrhdOcmq$_D VW8L*KU+mhFTOkVfPgnRy^)DQ^oR|Or delta 922 zcmX|=OK1~87{_NeN!eyw*`{r}X;b|^(!{hHP2(fhSF&48+oZ2(-?j%2B2+KEh(gV# zFKEF-agm51f_M=u3RPoKP_XSuioGZZB6{|2L9l+C*DlMP^wTQu|>xoo1qtD%XXSkb5jCVfE=NH@Bh-T*k)hjmB?FS1mDhmrr30 zRI4mz)m3JrDYdE0?4X<`0Q3^k2RsDvC_qQ`DVLr_<3e zQ%IhMFx_6+jb|u@l4J$XlI6$WyIYTcXSYW3c@Y{~{<*yVb!}$nnVt`g(pcA*@AGT# z?=5a$$)~F{_Hy~*lg}S#?vLjQS}C_30+jE;bHE`W@5S?^19BtA=sP-$DX&NGgiejp z&6VzgPK;BVpn4=!YgciZ1Ei=PR1NH= diff --git a/src/wasm/shell.js b/src/wasm/shell.js index 28a02c3e..462cd120 100644 --- a/src/wasm/shell.js +++ b/src/wasm/shell.js @@ -1,7 +1,7 @@ // To be loaded by main.js var my_js_files = ["./test.js"]; -var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2b", "Hacl_Hash_Blake2s", "Hacl_Hash_Blake2b_Simd256", "Hacl_Hash_Blake2s_Simd128", "Hacl_Hash_Base", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "Hacl_HMAC", "Hacl_HMAC_Blake2s_128", "Hacl_HMAC_Blake2b_256", "Hacl_Hash_SHA3", "Hacl_Hash_SHA3_Simd256", "Hacl_Hash_MD5", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"]; +var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2b", "Hacl_Hash_Blake2s", "Hacl_Hash_Blake2b_Simd256", "Hacl_Hash_Blake2s_Simd128", "Hacl_Hash_Base", "Hacl_Hash_MD5", "Hacl_Hash_SHA1", "Hacl_Hash_SHA3", "Hacl_Hash_SHA2", "Hacl_HMAC", "Hacl_HMAC_Blake2s_128", "Hacl_HMAC_Blake2b_256", "Hacl_Hash_SHA3_Simd256", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", 
"Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"]; var my_debug = false; if (typeof module !== "undefined")