curve25519_dalek/scalar.rs

// -*- mode: rust; -*-
//
// This file is part of curve25519-dalek.
// Copyright (c) 2016-2021 isis lovecruft
// Copyright (c) 2016-2019 Henry de Valence
// Portions Copyright 2017 Brian Smith
// See LICENSE for licensing information.
//
// Authors:
// - Isis Agora Lovecruft <isis@patternsinthevoid.net>
// - Henry de Valence <hdevalence@hdevalence.ca>
// - Brian Smith <brian@briansmith.org>

//! Arithmetic on scalars (integers mod the group order).
//!
//! Both the Ristretto group and the Ed25519 basepoint have prime order
//! \\( \ell = 2\^{252} + 27742317777372353535851937790883648493 \\).
//!
//! This code is intended to be useful with both the Ristretto group
//! (where everything is done modulo \\( \ell \\)), and the X/Ed25519
//! setting, which mandates specific bit-twiddles that are not
//! well-defined modulo \\( \ell \\).
//!
//! All arithmetic on `Scalars` is done modulo \\( \ell \\).
//!
//! # Constructing a scalar
//!
//! To create a [`Scalar`](struct.Scalar.html) from a supposedly canonical encoding, use
//! [`Scalar::from_canonical_bytes`](struct.Scalar.html#method.from_canonical_bytes).
//!
//! This function does input validation, ensuring that the input bytes
//! are the canonical encoding of a `Scalar`.
//! If they are, we'll get
//! `Some(Scalar)` in return:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let one_as_bytes: [u8; 32] = Scalar::ONE.to_bytes();
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(one_as_bytes).into();
//!
//! assert!(a.is_some());
//! ```
//!
//! However, if we give it bytes representing a scalar larger than \\( \ell \\)
//! (in this case, \\( \ell + 2 \\)), we'll get `None` back:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(l_plus_two_bytes).into();
//!
//! assert!(a.is_none());
//! ```
//!
//! Another way to create a `Scalar` is by reducing a \\(256\\)-bit integer mod
//! \\( \ell \\), for which one may use the
//! [`Scalar::from_bytes_mod_order`](struct.Scalar.html#method.from_bytes_mod_order)
//! method.  In the case of the second example above, this would reduce the
//! resultant scalar \\( \mod \ell \\), producing \\( 2 \\):
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Scalar = Scalar::from_bytes_mod_order(l_plus_two_bytes);
//!
//! let two: Scalar = Scalar::ONE + Scalar::ONE;
//!
//! assert!(a == two);
//! ```
//!
//! There is also a constructor that reduces a \\(512\\)-bit integer,
//! [`Scalar::from_bytes_mod_order_wide`].
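//!
//! For instance (a small sketch reusing the \\( \ell + 2 \\) bytes from above,
//! zero-extended to 64 bytes), the wide reduction also produces \\( 2 \\):
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let mut wide = [0u8; 64];
//! wide[..32].copy_from_slice(&l_plus_two_bytes);
//!
//! assert_eq!(Scalar::from_bytes_mod_order_wide(&wide), Scalar::ONE + Scalar::ONE);
//! ```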
//!
//! To construct a `Scalar` as the hash of some input data, use
//! [`Scalar::hash_from_bytes`], which takes a buffer, or
//! [`Scalar::from_hash`], which allows an IUF API.
//!
#![cfg_attr(feature = "digest", doc = "```")]
#![cfg_attr(not(feature = "digest"), doc = "```ignore")]
//! # fn main() {
//! use sha2::{Digest, Sha512};
//! use curve25519_dalek::scalar::Scalar;
//!
//! // Hashing a single byte slice
//! let a = Scalar::hash_from_bytes::<Sha512>(b"Abolish ICE");
//!
//! // Streaming data into a hash object
//! let mut hasher = Sha512::default();
//! hasher.update(b"Abolish ");
//! hasher.update(b"ICE");
//! let a2 = Scalar::from_hash(hasher);
//!
//! assert_eq!(a, a2);
//! # }
//! ```
//!
//! `Scalar::hash_from_bytes` and `Scalar::from_hash` both reduce a
//! \\(512\\)-bit integer modulo \\( \ell \\); they are only available if the
//! optional `digest` feature has been enabled.

use core::borrow::Borrow;
use core::fmt::Debug;
use core::iter::{Product, Sum};
use core::ops::Index;
use core::ops::Neg;
use core::ops::{Add, AddAssign};
use core::ops::{Mul, MulAssign};
use core::ops::{Sub, SubAssign};

use cfg_if::cfg_if;

#[cfg(feature = "group")]
use group::ff::{Field, FromUniformBytes, PrimeField};
#[cfg(feature = "group-bits")]
use group::ff::{FieldBits, PrimeFieldBits};

#[cfg(any(test, feature = "group"))]
use rand_core::RngCore;

#[cfg(any(test, feature = "rand_core"))]
use rand_core::CryptoRngCore;

#[cfg(feature = "digest")]
use digest::generic_array::typenum::U64;
#[cfg(feature = "digest")]
use digest::Digest;

use subtle::Choice;
use subtle::ConditionallySelectable;
use subtle::ConstantTimeEq;
use subtle::CtOption;

#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

#[cfg(feature = "alloc")]
use alloc::vec;

use crate::backend;
use crate::constants;

cfg_if! {
    if #[cfg(curve25519_dalek_backend = "fiat")] {
        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg(curve25519_dalek_bits = "32")]
        #[cfg_attr(
            docsrs,
            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "32")))
        )]
        type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;

        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg(curve25519_dalek_bits = "64")]
        #[cfg_attr(
            docsrs,
            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "64")))
        )]
        type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;
    } else if #[cfg(curve25519_dalek_bits = "64")] {
        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
        type UnpackedScalar = backend::serial::u64::scalar::Scalar52;
    } else {
        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "32")))]
        type UnpackedScalar = backend::serial::u32::scalar::Scalar29;
    }
}

/// The `Scalar` struct holds an element of \\(\mathbb Z / \ell\mathbb Z \\).
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    /// `bytes` is a little-endian byte encoding of an integer representing a scalar modulo the
    /// group order.
    ///
    /// # Invariant #1
    ///
    /// The integer representing this scalar is less than \\(2\^{255}\\). That is, the most
    /// significant bit of `bytes[31]` is 0.
    ///
    /// This is required for `EdwardsPoint` variable- and fixed-base multiplication, because most
    /// integers above 2^255 are unrepresentable in our radix-16 NAF (see [`Self::as_radix_16`]).
    /// The invariant is also required because our `MontgomeryPoint` multiplication assumes the MSB
    /// is 0 (see `MontgomeryPoint::mul`).
    ///
    /// # Invariant #2 (weak)
    ///
    /// The integer representing this scalar is less than \\( \ell \\), i.e., it is the
    /// canonical representative of an element of \\( \mathbb Z / \ell\mathbb Z \\). This is
    /// stronger than invariant #1. It also sometimes has to be broken.
    ///
    /// This invariant is deliberately broken in the implementation of `EdwardsPoint::{mul_clamped,
    /// mul_base_clamped}`, `MontgomeryPoint::{mul_clamped, mul_base_clamped}`, and
    /// `BasepointTable::mul_base_clamped`. This is not an issue though. As mentioned above,
    /// scalar-point multiplication is defined for any choice of `bytes` that satisfies invariant
    /// #1. Since clamping guarantees invariant #1 is satisfied, these operations are well defined.
    ///
    /// Note: Scalar-point mult is the _only_ thing you can do safely with an unreduced scalar.
    /// Scalar-scalar addition and subtraction are NOT correct when using unreduced scalars.
    /// Multiplication is correct, but this is only due to a quirk of our implementation, and not
    /// guaranteed to hold in general in the future.
    ///
    /// Note: It is not possible to construct an unreduced `Scalar` from the public API unless the
    /// `legacy_compatibility` feature is enabled (thus making `Scalar::from_bits` public). Thus,
    /// for all public non-legacy uses, invariant #2 always holds.
    ///
    pub(crate) bytes: [u8; 32],
}

impl Scalar {
    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order \\( \ell \\).
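    ///
    /// # Example
    ///
    /// A small sanity check (illustrative): reducing \\( 2^{256} - 1 \\) produces a
    /// canonical representative, so the high bit of the result is clear.
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s = Scalar::from_bytes_mod_order([0xff; 32]);
    /// assert_eq!(s.to_bytes()[31] >> 7, 0);
    /// ```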
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        // Temporarily allow s_unreduced.bytes > 2^255 ...
        let s_unreduced = Scalar { bytes };

        // Then reduce mod the group order and return the reduced representative.
        let s = s_unreduced.reduce();
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        UnpackedScalar::from_bytes_wide(input).pack()
    }

    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// # Return
    ///
    /// - `Some(s)`, where `s` is the `Scalar` corresponding to `bytes`,
    ///   if `bytes` is a canonical byte representation modulo the group order \\( \ell \\);
    /// - `None` if `bytes` is not a canonical byte representation.
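    ///
    /// # Example
    ///
    /// A minimal sketch (the module-level docs show the non-canonical case):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Option<Scalar> = Scalar::from_canonical_bytes(Scalar::ONE.to_bytes()).into();
    /// assert_eq!(s, Some(Scalar::ONE));
    /// ```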
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption<Scalar> {
        let high_bit_unset = (bytes[31] >> 7).ct_eq(&0);
        let candidate = Scalar { bytes };
        CtOption::new(candidate, high_bit_unset & candidate.is_canonical())
    }

    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer. This breaks the invariant
    /// that scalars are always reduced. Scalar-scalar arithmetic, i.e., addition, subtraction,
    /// multiplication, **does not work** on scalars produced from this function. You may only use
    /// the output of this function for `EdwardsPoint::mul`, `MontgomeryPoint::mul`, and
    /// `EdwardsPoint::vartime_double_scalar_mul_basepoint`. **Do not use this function** unless
    /// you absolutely have to.
    #[cfg(feature = "legacy_compatibility")]
    #[deprecated(
        since = "4.0.0",
        note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs."
    )]
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar { bytes };
        // Ensure invariant #1 holds. That is, make s < 2^255 by masking the high bit.
        s.bytes[31] &= 0b0111_1111;

        s
    }
}

impl Debug for Scalar {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
    }
}

impl Eq for Scalar {}
impl PartialEq for Scalar {
    fn eq(&self, other: &Self) -> bool {
        self.ct_eq(other).into()
    }
}

impl ConstantTimeEq for Scalar {
    fn ct_eq(&self, other: &Self) -> Choice {
        self.bytes.ct_eq(&other.bytes)
    }
}

impl Index<usize> for Scalar {
    type Output = u8;

    /// Index the bytes of the representative for this `Scalar`.  Mutation is not permitted.
    fn index(&self, _index: usize) -> &u8 {
        &(self.bytes[_index])
    }
}

impl<'b> MulAssign<&'b Scalar> for Scalar {
    fn mul_assign(&mut self, _rhs: &'b Scalar) {
        *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
    }
}

define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    fn mul(self, _rhs: &'b Scalar) -> Scalar {
        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'b> AddAssign<&'b Scalar> for Scalar {
    fn add_assign(&mut self, _rhs: &'b Scalar) {
        *self = *self + _rhs;
    }
}

define_add_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn add(self, _rhs: &'b Scalar) -> Scalar {
        // The UnpackedScalar::add function produces reduced outputs if the inputs are reduced. By
        // Scalar invariant #2, this is always the case.
        UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'b> SubAssign<&'b Scalar> for Scalar {
    fn sub_assign(&mut self, _rhs: &'b Scalar) {
        *self = *self - _rhs;
    }
}

define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn sub(self, rhs: &'b Scalar) -> Scalar {
        // The UnpackedScalar::sub function produces reduced outputs if the inputs are reduced. By
        // Scalar invariant #2, this is always the case.
        UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack()
    }
}

define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> Neg for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
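        // Reduce self mod l first: multiplying by R and then Montgomery-reducing
        // divides the R back out. The subtraction below then sees reduced inputs.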
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        UnpackedScalar::sub(&UnpackedScalar::ZERO, &self_mod_l).pack()
    }
}

impl Neg for Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        -&self
    }
}

impl ConditionallySelectable for Scalar {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        let mut bytes = [0u8; 32];
        #[allow(clippy::needless_range_loop)]
        for i in 0..32 {
            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
        }
        Scalar { bytes }
    }
}

#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl Serialize for Scalar {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        use serde::ser::SerializeTuple;
        let mut tup = serializer.serialize_tuple(32)?;
        for byte in self.as_bytes().iter() {
            tup.serialize_element(byte)?;
        }
        tup.end()
    }
}

#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de> Deserialize<'de> for Scalar {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                formatter.write_str(
                    "a sequence of 32 bytes whose little-endian interpretation is less than the \
                    basepoint order ℓ",
                )
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let mut bytes = [0u8; 32];
                #[allow(clippy::needless_range_loop)]
                for i in 0..32 {
                    bytes[i] = seq
                        .next_element()?
                        .ok_or_else(|| serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                Option::from(Scalar::from_canonical_bytes(bytes))
                    .ok_or_else(|| serde::de::Error::custom("scalar was not canonically encoded"))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}

impl<T> Product<T> for Scalar
where
    T: Borrow<Scalar>,
{
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Scalar::ONE, |acc, item| acc * item.borrow())
    }
}

impl<T> Sum<T> for Scalar
where
    T: Borrow<Scalar>,
{
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Scalar::ZERO, |acc, item| acc + item.borrow())
    }
}

impl Default for Scalar {
    fn default() -> Scalar {
        Scalar::ZERO
    }
}

impl From<u8> for Scalar {
    fn from(x: u8) -> Scalar {
        let mut s_bytes = [0u8; 32];
        s_bytes[0] = x;
        Scalar { bytes: s_bytes }
    }
}

impl From<u16> for Scalar {
    fn from(x: u16) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u32> for Scalar {
    fn from(x: u32) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u64> for Scalar {
    /// Construct a scalar from the given `u64`.
    ///
    /// # Inputs
    ///
    /// A `u64` to convert to a `Scalar`.
    ///
    /// # Returns
    ///
    /// A `Scalar` corresponding to the input `u64`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let fortytwo = Scalar::from(42u64);
    /// let six = Scalar::from(6u64);
    /// let seven = Scalar::from(7u64);
    ///
    /// assert!(fortytwo == six * seven);
    /// ```
    fn from(x: u64) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u128> for Scalar {
    fn from(x: u128) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        self.bytes.zeroize();
    }
}

impl Scalar {
    /// The scalar \\( 0 \\).
    pub const ZERO: Self = Self { bytes: [0u8; 32] };

    /// The scalar \\( 1 \\).
    pub const ONE: Self = Self {
        bytes: [
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };

    #[cfg(any(test, feature = "rand_core"))]
    /// Return a `Scalar` chosen uniformly at random using a user-provided RNG.
    ///
    /// # Inputs
    ///
    /// * `rng`: any RNG which implements the `CryptoRngCore`
    ///   (i.e. `CryptoRng` + `RngCore`) interface.
    ///
    /// # Returns
    ///
    /// A random scalar within \\(\mathbb{Z} / \ell\mathbb{Z}\\).
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() {
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// use rand_core::OsRng;
    ///
    /// let mut csprng = OsRng;
    /// let a: Scalar = Scalar::random(&mut csprng);
    /// # }
    /// ```
    pub fn random<R: CryptoRngCore + ?Sized>(rng: &mut R) -> Self {
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Scalar::from_bytes_mod_order_wide(&scalar_bytes)
    }

    #[cfg(feature = "digest")]
    /// Hash a slice of bytes into a scalar.
    ///
    /// Takes a type parameter `D`, which is any `Digest` producing 64
    /// bytes (512 bits) of output.
    ///
    /// Convenience wrapper around `from_hash`.
    ///
    /// # Example
    ///
    #[cfg_attr(feature = "digest", doc = "```")]
    #[cfg_attr(not(feature = "digest"), doc = "```ignore")]
    /// # use curve25519_dalek::scalar::Scalar;
    /// use sha2::Sha512;
    ///
    /// # // Need fn main() here in comment so the doctest compiles
    /// # // See https://doc.rust-lang.org/book/documentation.html#documentation-as-tests
    /// # fn main() {
    /// let msg = "To really appreciate architecture, you may even need to commit a murder";
    /// let s = Scalar::hash_from_bytes::<Sha512>(msg.as_bytes());
    /// # }
    /// ```
    pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
    where
        D: Digest<OutputSize = U64> + Default,
    {
        let mut hash = D::default();
        hash.update(input);
        Scalar::from_hash(hash)
    }

    #[cfg(feature = "digest")]
    /// Construct a scalar from an existing `Digest` instance.
    ///
    /// Use this instead of `hash_from_bytes` if it is more convenient
    /// to stream data into the `Digest` than to pass a single byte
    /// slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use curve25519_dalek::scalar::Scalar;
    /// use curve25519_dalek::digest::Update;
    ///
    /// use sha2::Digest;
    /// use sha2::Sha512;
    ///
    /// # fn main() {
    /// let mut h = Sha512::new()
    ///     .chain("To really appreciate architecture, you may even need to commit a murder.")
    ///     .chain("While the programs used for The Manhattan Transcripts are of the most extreme")
    ///     .chain("nature, they also parallel the most common formula plot: the archetype of")
    ///     .chain("murder. Other phantasms were occasionally used to underline the fact that")
    ///     .chain("perhaps all architecture, rather than being about functional standards, is")
    ///     .chain("about love and death.");
    ///
    /// let s = Scalar::from_hash(h);
    ///
    /// println!("{:?}", s.to_bytes());
    /// assert_eq!(
    ///     s.to_bytes(),
    ///     [  21,  88, 208, 252,  63, 122, 210, 152,
    ///       154,  38,  15,  23,  16, 167,  80, 150,
    ///       192, 221,  77, 226,  62,  25, 224, 148,
    ///       239,  48, 176,  10, 185,  69, 168,  11, ],
    /// );
    /// # }
    /// ```
    pub fn from_hash<D>(hash: D) -> Scalar
    where
        D: Digest<OutputSize = U64>,
    {
        let mut output = [0u8; 64];
        output.copy_from_slice(hash.finalize().as_slice());
        Scalar::from_bytes_mod_order_wide(&output)
    }

    /// Convert this `Scalar` to its underlying sequence of bytes.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::ZERO;
    ///
    /// assert!(s.to_bytes() == [0u8; 32]);
    /// ```
    pub const fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }

    /// View the little-endian byte encoding of the integer representing this Scalar.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::ZERO;
    ///
    /// assert!(s.as_bytes() == &[0u8; 32]);
    /// ```
    pub const fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }

    /// Given a nonzero `Scalar`, compute its multiplicative inverse.
    ///
    /// # Warning
    ///
    /// `self` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Returns
    ///
720    /// The multiplicative inverse of the this `Scalar`.
721    ///
722    /// # Example
723    ///
724    /// ```
725    /// use curve25519_dalek::scalar::Scalar;
726    ///
727    /// // x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
728    /// let X: Scalar = Scalar::from_bytes_mod_order([
729    ///         0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
730    ///         0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
731    ///         0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
732    ///         0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
733    ///     ]);
734    /// // 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
735    /// let XINV: Scalar = Scalar::from_bytes_mod_order([
736    ///         0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
737    ///         0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
738    ///         0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
739    ///         0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
740    ///     ]);
741    ///
742    /// let inv_X: Scalar = X.invert();
743    /// assert!(XINV == inv_X);
744    /// let should_be_one: Scalar = &inv_X * &X;
745    /// assert!(should_be_one == Scalar::ONE);
746    /// ```
747    pub fn invert(&self) -> Scalar {
748        self.unpack().invert().pack()
749    }
750
751    /// Given a slice of nonzero (possibly secret) `Scalar`s,
752    /// compute their inverses in a batch.
753    ///
754    /// # Return
755    ///
756    /// Each element of `inputs` is replaced by its inverse.
757    ///
758    /// The product of all inverses is returned.
759    ///
760    /// # Warning
761    ///
762    /// All input `Scalars` **MUST** be nonzero.  If you cannot
763    /// *prove* that this is the case, you **SHOULD NOT USE THIS
764    /// FUNCTION**.
765    ///
766    /// # Example
767    ///
768    /// ```
769    /// # use curve25519_dalek::scalar::Scalar;
770    /// # fn main() {
771    /// let mut scalars = [
772    ///     Scalar::from(3u64),
773    ///     Scalar::from(5u64),
774    ///     Scalar::from(7u64),
775    ///     Scalar::from(11u64),
776    /// ];
777    ///
778    /// let allinv = Scalar::batch_invert(&mut scalars);
779    ///
780    /// assert_eq!(allinv, Scalar::from(3*5*7*11u64).invert());
781    /// assert_eq!(scalars[0], Scalar::from(3u64).invert());
782    /// assert_eq!(scalars[1], Scalar::from(5u64).invert());
783    /// assert_eq!(scalars[2], Scalar::from(7u64).invert());
784    /// assert_eq!(scalars[3], Scalar::from(11u64).invert());
785    /// # }
786    /// ```
787    #[cfg(feature = "alloc")]
788    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
789        // This code is essentially identical to the FieldElement
790        // implementation, and is documented there.  Unfortunately,
791        // it's not easy to write it generically, since here we want
792        // to use `UnpackedScalar`s internally, and `Scalar`s
793        // externally, but there's no corresponding distinction for
794        // field elements.
795
796        let n = inputs.len();
797        let one: UnpackedScalar = Scalar::ONE.unpack().as_montgomery();
798
799        let mut scratch = vec![one; n];
800
801        // Keep an accumulator of all of the previous products
802        let mut acc = Scalar::ONE.unpack().as_montgomery();
803
804        // Pass through the input vector, recording the previous
805        // products in the scratch space
806        for (input, scratch) in inputs.iter_mut().zip(scratch.iter_mut()) {
807            *scratch = acc;
808
809            // Avoid unnecessary Montgomery multiplication in second pass by
810            // keeping inputs in Montgomery form
811            let tmp = input.unpack().as_montgomery();
812            *input = tmp.pack();
813            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
814        }
815
816        // acc is nonzero iff all inputs are nonzero
817        debug_assert!(acc.pack() != Scalar::ZERO);
818
819        // Compute the inverse of all products
820        acc = acc.montgomery_invert().from_montgomery();
821
822        // We need to return the product of all inverses later
823        let ret = acc.pack();
824
825        // Pass through the vector backwards to compute the inverses
826        // in place
827        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
828            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
829            *input = UnpackedScalar::montgomery_mul(&acc, scratch).pack();
830            acc = tmp;
831        }
832
833        #[cfg(feature = "zeroize")]
834        Zeroize::zeroize(&mut scratch);
835
836        ret
837    }
838
839    /// Get the bits of the scalar, in little-endian order
840    pub(crate) fn bits_le(&self) -> impl DoubleEndedIterator<Item = bool> + '_ {
841        (0..256).map(|i| {
842            // As i runs from 0..256, the bottom 3 bits index the bit, while the upper bits index
843            // the byte. Since self.bytes is little-endian at the byte level, this iterator is
844            // little-endian on the bit level
845            ((self.bytes[i >> 3] >> (i & 7)) & 1u8) == 1
846        })
847    }
848
849    /// Compute a width-\\(w\\) "Non-Adjacent Form" of this scalar.
850    ///
851    /// A width-\\(w\\) NAF of a positive integer \\(k\\) is an expression
852    /// $$
853    /// k = \sum_{i=0}\^m n\_i 2\^i,
854    /// $$
855    /// where each nonzero
856    /// coefficient \\(n\_i\\) is odd and bounded by \\(|n\_i| < 2\^{w-1}\\),
857    /// \\(n\_{m-1}\\) is nonzero, and at most one of any \\(w\\) consecutive
858    /// coefficients is nonzero.  (Hankerson, Menezes, Vanstone; def 3.32).
859    ///
860    /// The length of the NAF is at most one more than the length of
861    /// the binary representation of \\(k\\).  This is why the
862    /// `Scalar` type maintains an invariant (invariant #1) that the top bit is
863    /// \\(0\\), so that the NAF of a scalar has at most 256 digits.
864    ///
865    /// Intuitively, this is like a binary expansion, except that we
866    /// allow some coefficients to grow in magnitude up to
867    /// \\(2\^{w-1}\\) so that the nonzero coefficients are as sparse
868    /// as possible.
869    ///
870    /// When doing scalar multiplication, we can then use a lookup
871    /// table of precomputed multiples of a point to add the nonzero
872    /// terms \\( k_i P \\).  Using signed digits cuts the table size
873    /// in half, and using odd digits cuts the table size in half
874    /// again.
875    ///
876    /// To compute a \\(w\\)-NAF, we use a modification of Algorithm 3.35 of HMV:
877    ///
878    /// 1. \\( i \gets 0 \\)
879    /// 2. While \\( k \ge 1 \\):
880    ///     1. If \\(k\\) is odd, \\( n_i \gets k \operatorname{mods} 2^w \\), \\( k \gets k - n_i \\).
881    ///     2. If \\(k\\) is even, \\( n_i \gets 0 \\).
882    ///     3. \\( k \gets k / 2 \\), \\( i \gets i + 1 \\).
883    /// 3. Return \\( n_0, n_1, ... , \\)
884    ///
885    /// Here \\( \bar x = x \operatorname{mods} 2^w \\) means the
886    /// \\( \bar x \\) with \\( \bar x \equiv x \pmod{2^w} \\) and
887    /// \\( -2^{w-1} \leq \bar x < 2^{w-1} \\).
888    ///
889    /// We implement this by scanning across the bits of \\(k\\) from
890    /// least-significant bit to most-significant-bit.
891    /// Write the bits of \\(k\\) as
892    /// $$
893    /// k = \sum\_{i=0}\^m k\_i 2^i,
894    /// $$
895    /// and split the sum as
896    /// $$
897    /// k = \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
898    /// $$
899    /// where the first part is \\( k \mod 2^w \\).
900    ///
901    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w < 2^{w-1} \\), then we emit
902    /// \\( n_0 = k \mod 2^w \\).  Instead of computing
903    /// \\( k - n_0 \\), we just advance \\(w\\) bits and reindex.
904    ///
905    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w \ge 2^{w-1} \\), then
906    /// \\( n_0 = k \operatorname{mods} 2^w = k \mod 2^w - 2^w \\).
907    /// The quantity \\( k - n_0 \\) is
908    /// $$
909    /// \begin{aligned}
910    /// k - n_0 &= \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
911    ///          - \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \\\\
912    /// &= 2^w + 2^w \sum\_{i=0} k\_{i+w} 2^i
913    /// \end{aligned}
914    /// $$
915    /// so instead of computing the subtraction, we can set a carry
916    /// bit, advance \\(w\\) bits, and reindex.
917    ///
918    /// If \\( k \mod 2^w\\) is even, we emit \\(0\\), advance 1 bit
919    /// and reindex.  In fact, by setting all digits to \\(0\\)
920    /// initially, we don't need to emit anything.
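    ///
    /// Worked example (illustrative, \\( w = 4 \\)): for \\( k = 23 \\), we have
    /// \\( k \mod 16 = 7 \\), which is odd and \\( < 8 \\), so we emit \\( n_0 = 7 \\)
    /// and advance 4 bits; the remaining value is \\( 1 \\), giving \\( n_4 = 1 \\)
    /// and \\( 23 = 7 + 1 \cdot 2^4 \\).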
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        // required by the NAF definition
        debug_assert!(w >= 2);
        // required so that the NAF digits fit in i8
        debug_assert!(w <= 8);

        let mut naf = [0i8; 256];

        let mut x_u64 = [0u64; 5];
        read_le_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            // Construct a buffer of bits of the scalar, starting at bit `pos`
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            let bit_buf: u64 = if bit_idx < 64 - w {
                // This window's bits are contained in a single u64
                x_u64[u64_idx] >> bit_idx
            } else {
                // Combine the current u64's bits with the bits from the next u64
                (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
            };

            // Add the carry into the current window
            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                // If the window value is even, preserve the carry and continue.
                // Why is the carry preserved?
                // If carry == 0 and window & 1 == 0, then the next carry should be 0
                // If carry == 1 and window & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
                pos += 1;
                continue;
            }

            if window < width / 2 {
                carry = 0;
                naf[pos] = window as i8;
            } else {
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            pos += w;
        }

        naf
    }

    /// Write this scalar in radix 16, with coefficients in \\([-8,8)\\),
    /// i.e., compute \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 16\^1 + \cdots + a_{63} 16\^{63},
    /// $$
    /// with \\(-8 \leq a_i < 8\\) for \\(0 \leq i < 63\\) and \\(-8 \leq a_{63} \leq 8\\).
    ///
    /// The largest value that can be decomposed like this is just over \\(2^{255}\\). Thus, in
    /// order to not error, the top bit MUST NOT be set, i.e., `Self` MUST be less than
    /// \\(2^{255}\\).
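    ///
    /// For example (illustrative), the byte `0xf1` splits into nibbles \\( (1, 15) \\);
    /// recentering rewrites \\( 15 = -1 + 16 \\), emitting the coefficient \\( -1 \\)
    /// and carrying \\( 1 \\) into the next coefficient.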
    pub(crate) fn as_radix_16(&self) -> [i8; 64] {
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        // Step 1: change radix.
        // Convert from radix 256 (bytes) to radix 16 (nibbles)
        #[allow(clippy::identity_op)]
        #[inline(always)]
        fn bot_half(x: u8) -> u8 {
            (x >> 0) & 15
        }
        #[inline(always)]
        fn top_half(x: u8) -> u8 {
            (x >> 4) & 15
        }

        for i in 0..32 {
            output[2 * i] = bot_half(self[i]) as i8;
            output[2 * i + 1] = top_half(self[i]) as i8;
        }
        // Precondition note: since self[31] <= 127, output[63] <= 7

        // Step 2: recenter coefficients from [0,16) to [-8,8)
        for i in 0..63 {
            let carry = (output[i] + 8) >> 4;
            output[i] -= carry << 4;
            output[i + 1] += carry;
        }
        // Precondition note: output[63] is not recentered.  It
        // increases by carry <= 1.  Thus output[63] <= 8.

        output
    }

    /// Returns a size hint indicating how many entries of the return
    /// value of `to_radix_2w` are nonzero.
    #[cfg(any(feature = "alloc", all(test, feature = "precomputed-tables")))]
    pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        let digits_count = match w {
            4..=7 => (256 + w - 1) / w,
            // See comment in to_radix_2w on handling the terminal carry.
            8 => (256 + w - 1) / w + 1_usize,
            _ => panic!("invalid radix parameter"),
        };

        debug_assert!(digits_count <= 64);
        digits_count
    }

    /// Creates a representation of a Scalar in radix \\( 2^w \\) with \\(w = 4, 5, 6, 7, 8\\) for
    /// use with the Pippenger algorithm. Higher radixes are not supported to save cache space.
    /// Radix 256 is near-optimal even for very large inputs.
    ///
    /// Radix below 16 or above 256 is prohibited.
    /// This method returns digits in a fixed-size array; excess digits are zeroes.
    ///
    /// For radix 16, `Self` must be less than \\(2^{255}\\). This is because most integers larger
    /// than \\(2^{255}\\) are unrepresentable in the form described below for \\(w = 4\\). This
    /// would be true for \\(w = 8\\) as well, but it is compensated for by increasing the size
    /// hint by 1.
    ///
    /// ## Scalar representation
    ///
    /// Radix \\(2\^w\\), with \\(n = \lceil 256/w \rceil\\) coefficients in \\([-(2\^w)/2,(2\^w)/2)\\),
    /// i.e., scalar is represented using digits \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 2\^w + \cdots + a\_{n-1} 2\^{w(n-1)},
    /// $$
    /// with \\(-2\^w/2 \leq a\_i < 2\^w/2\\) for \\(0 \leq i < (n-1)\\) and \\(-2\^w/2 \leq a\_{n-1} \leq 2\^w/2\\).
    ///
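    /// For example (illustrative), \\( w = 5 \\) gives \\( n = \lceil 256/5 \rceil = 52 \\)
    /// digits in \\( [-16, 16) \\), stored in the first 52 entries of the returned array.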
    #[cfg(any(feature = "alloc", feature = "precomputed-tables"))]
    pub(crate) fn as_radix_2w(&self, w: usize) -> [i8; 64] {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        if w == 4 {
            return self.as_radix_16();
        }

        // Scalar formatted as four `u64`s with carry bit packed into the highest bit.
        let mut scalar64x4 = [0u64; 4];
        read_le_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 64];
        let digits_count = (256 + w - 1) / w;
        #[allow(clippy::needless_range_loop)]
        for i in 0..digits_count {
            // Construct a buffer of bits of the scalar, starting at `bit_offset`.
            let bit_offset = i * w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            // Read the bits from the scalar
            let bit_buf: u64 = if bit_idx < 64 - w || u64_idx == 3 {
                // This window's bits are contained in a single u64,
                // or it's the last u64 anyway.
                scalar64x4[u64_idx] >> bit_idx
            } else {
                // Combine the current u64's bits with the bits from the next u64
                (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1 + u64_idx] << (64 - bit_idx))
            };

            // Read the actual coefficient value from the window
            let coef = carry + (bit_buf & window_mask); // coef = [0, 2^w]

            // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2)
            carry = (coef + (radix / 2)) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        // When 4 < w < 8, we can fold the final carry onto the last digit d,
        // because d < 2^w/2 so d + carry*2^w = d + 1*2^w < 2^(w+1) < 2^8.
        //
        // When w = 8, we can't fit carry*2^w into an i8.  This should
        // not happen anyways, because the final carry will be 0 for
        // reduced scalars, but Scalar invariant #1 allows 255-bit scalars.
        // To handle this, we expand the size_hint by 1 when w=8,
        // and accumulate the final carry onto another digit.
        match w {
            8 => digits[digits_count] += carry as i8,
            _ => digits[digits_count - 1] += (carry << w) as i8,
        }

        digits
    }

    /// Unpack this `Scalar` to an `UnpackedScalar` for faster arithmetic.
    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }

    /// Reduce this `Scalar` modulo \\(\ell\\).
    #[allow(non_snake_case)]
    fn reduce(&self) -> Scalar {
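        // Multiplying by R and then Montgomery-reducing divides the R back out,
        // so the net effect is reduction of the unpacked scalar mod l.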
        let x = self.unpack();
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }

    /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\). This is not
    /// public because any `Scalar` that is publicly observed is reduced, by scalar invariant #2.
    fn is_canonical(&self) -> Choice {
        self.ct_eq(&self.reduce())
    }
}

impl UnpackedScalar {
    /// Pack the limbs of this `UnpackedScalar` into a `Scalar`.
    fn pack(&self) -> Scalar {
        Scalar {
            bytes: self.as_bytes(),
        }
    }

    /// Inverts an UnpackedScalar in Montgomery form.
    #[rustfmt::skip] // keep alignment of addition chain and squarings
    #[allow(clippy::just_underscores_and_digits)]
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        // Uses the addition chain from
        // https://briansmith.org/ecc-inversion-addition-chains-01#curve25519_scalar_inversion
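        // The variable names encode binary exponents: e.g. `_1011` below is
        // self^(0b1011) = self^11, computed in Montgomery form.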
        let    _1 = *self;
        let   _10 = _1.montgomery_square();
        let  _100 = _10.montgomery_square();
        let   _11 = UnpackedScalar::montgomery_mul(&_10,     &_1);
        let  _101 = UnpackedScalar::montgomery_mul(&_10,    &_11);
        let  _111 = UnpackedScalar::montgomery_mul(&_10,   &_101);
        let _1001 = UnpackedScalar::montgomery_mul(&_10,   &_111);
        let _1011 = UnpackedScalar::montgomery_mul(&_10,  &_1001);
        let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);

        // _10000
        let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);

        #[inline]
        fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
            for _ in 0..squarings {
                *y = y.montgomery_square();
            }
            *y = UnpackedScalar::montgomery_mul(y, x);
        }

        square_multiply(&mut y, 123 + 3, &_101);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,       4, &_1001);
        square_multiply(&mut y,       2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 3, &_101);
        square_multiply(&mut y,   3 + 3, &_101);
        square_multiply(&mut y,       3, &_111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   2 + 3, &_111);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,   2 + 4, &_1011);
        square_multiply(&mut y,   6 + 4, &_1001);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1001);
        square_multiply(&mut y,   1 + 3, &_111);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   1 + 2, &_11);

        y
    }

    /// Inverts an UnpackedScalar not in Montgomery form.
    pub fn invert(&self) -> UnpackedScalar {
        self.as_montgomery().montgomery_invert().from_montgomery()
    }
}

#[cfg(feature = "group")]
impl Field for Scalar {
    const ZERO: Self = Self::ZERO;
    const ONE: Self = Self::ONE;

    fn random(mut rng: impl RngCore) -> Self {
        // NOTE: this is duplicated due to different `rng` bounds
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Self::from_bytes_mod_order_wide(&scalar_bytes)
    }

    fn square(&self) -> Self {
        self * self
    }

    fn double(&self) -> Self {
        self + self
    }

    fn invert(&self) -> CtOption<Self> {
        CtOption::new(self.invert(), !self.is_zero())
    }

    fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_ratio_generic(num, div)
    }

    fn sqrt(&self) -> CtOption<Self> {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_tonelli_shanks(
            self,
            [
                0xcb02_4c63_4b9e_ba7d,
                0x029b_df3b_d45e_f39a,
                0x0000_0000_0000_0000,
                0x0200_0000_0000_0000,
            ],
        )
    }
}

#[cfg(feature = "group")]
impl PrimeField for Scalar {
    type Repr = [u8; 32];

    fn from_repr(repr: Self::Repr) -> CtOption<Self> {
        Self::from_canonical_bytes(repr)
    }

    fn from_repr_vartime(repr: Self::Repr) -> Option<Self> {
        // Check that the high bit is not set
        if (repr[31] >> 7) != 0u8 {
            return None;
        }

        let candidate = Scalar { bytes: repr };

        if candidate == candidate.reduce() {
            Some(candidate)
        } else {
            None
        }
    }

    fn to_repr(&self) -> Self::Repr {
        self.to_bytes()
    }

    fn is_odd(&self) -> Choice {
        Choice::from(self.as_bytes()[0] & 1)
    }

    const MODULUS: &'static str =
        "0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed";
    const NUM_BITS: u32 = 253;
    const CAPACITY: u32 = 252;

    const TWO_INV: Self = Self {
        bytes: [
            0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c, 0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c,
            0x6f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x08,
        ],
    };
    const MULTIPLICATIVE_GENERATOR: Self = Self {
        bytes: [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
    const S: u32 = 2;
    const ROOT_OF_UNITY: Self = Self {
        bytes: [
            0xd4, 0x07, 0xbe, 0xeb, 0xdf, 0x75, 0x87, 0xbe, 0xfe, 0x83, 0xce, 0x42, 0x53, 0x56,
            0xf0, 0x0e, 0x7a, 0xc2, 0xc1, 0xab, 0x60, 0x6d, 0x3d, 0x7d, 0xe7, 0x81, 0x79, 0xe0,
            0x10, 0x73, 0x4a, 0x09,
        ],
    };
    const ROOT_OF_UNITY_INV: Self = Self {
        bytes: [
            0x19, 0xcc, 0x37, 0x71, 0x3a, 0xed, 0x8a, 0x99, 0xd7, 0x18, 0x29, 0x60, 0x8b, 0xa3,
            0xee, 0x05, 0x86, 0x3d, 0x3e, 0x54, 0x9f, 0x92, 0xc2, 0x82, 0x18, 0x7e, 0x86, 0x1f,
            0xef, 0x8c, 0xb5, 0x06,
        ],
    };
    const DELTA: Self = Self {
        bytes: [
            16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
}

#[cfg(feature = "group-bits")]
impl PrimeFieldBits for Scalar {
    type ReprBits = [u8; 32];

    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        self.to_repr().into()
    }

    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        constants::BASEPOINT_ORDER_PRIVATE.to_bytes().into()
    }
}

#[cfg(feature = "group")]
impl FromUniformBytes<64> for Scalar {
    fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
        Scalar::from_bytes_mod_order_wide(bytes)
    }
}

/// Read one or more u64s stored as little endian bytes.
///
/// ## Panics
/// Panics if `src.len() != 8 * dst.len()`.
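///
/// For example, `read_le_u64_into(&[1, 0, 0, 0, 0, 0, 0, 0], &mut dst)` with
/// `dst: [u64; 1]` sets `dst[0] = 1`.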
fn read_le_u64_into(src: &[u8], dst: &mut [u64]) {
    assert!(
        src.len() == 8 * dst.len(),
        "src.len() = {}, dst.len() = {}",
        src.len(),
        dst.len()
    );
    for (bytes, val) in src.chunks(8).zip(dst.iter_mut()) {
        *val = u64::from_le_bytes(
            bytes
                .try_into()
                .expect("Incorrect src length, should be 8 * dst.len()"),
        );
    }
}

/// _Clamps_ the given little-endian representation of a 32-byte integer. Clamping the value puts
/// it in the range:
///
/// **n ∈ 2^254 + 8\*{0, 1, 2, 3, ..., 2^251 − 1}**
///
/// # Explanation of clamping
///
/// For Curve25519, h = 8, and multiplying by 8 is the same as a binary left-shift by 3 bits.
/// If you take a secret scalar value between 2^251 and 2^252 – 1 and left-shift by 3 bits
/// then you end up with a 255-bit number with the most significant bit set to 1 and
/// the least-significant three bits set to 0.
///
/// The Curve25519 clamping operation takes **an arbitrary 256-bit random value** and
/// clears the most-significant bit (making it a 255-bit number), sets the next bit, and then
/// clears the 3 least-significant bits. In other words, it directly creates a scalar value that is
/// in the right form and pre-multiplied by the cofactor.
///
/// See [here](https://neilmadden.blog/2020/05/28/whats-the-curve25519-clamping-all-about/) for
/// more details.
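///
/// # Example
///
/// A small sketch checking the three affected bit positions:
///
/// ```
/// use curve25519_dalek::scalar::clamp_integer;
///
/// let clamped = clamp_integer([0xff; 32]);
/// assert_eq!(clamped[0] & 0b0000_0111, 0); // low three bits cleared
/// assert_eq!(clamped[31] >> 6, 0b01);      // high bit cleared, next bit set
/// ```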
1385#[must_use]
1386pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] {
1387    bytes[0] &= 0b1111_1000;
1388    bytes[31] &= 0b0111_1111;
1389    bytes[31] |= 0b0100_0000;
1390    bytes
1391}
1392
1393#[cfg(test)]
1394pub(crate) mod test {
1395    use super::*;
1396
1397    #[cfg(feature = "alloc")]
1398    use alloc::vec::Vec;
1399
1400    /// x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
1401    pub static X: Scalar = Scalar {
1402        bytes: [
1403            0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2,
1404            0x7d, 0x52, 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, 0xd4, 0x49, 0xf4, 0xa8,
1405            0x79, 0xd9, 0xf2, 0x04,
1406        ],
1407    };
1408    /// 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
1409    pub static XINV: Scalar = Scalar {
1410        bytes: [
1411            0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb, 0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01,
1412            0x63, 0x47, 0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96, 0xd5, 0x0b, 0xcd, 0x7a,
1413            0x3f, 0x96, 0x2a, 0x0f,
1414        ],
1415    };
1416    /// y = 2592331292931086675770238855846338635550719849568364935475441891787804997264
1417    pub static Y: Scalar = Scalar {
1418        bytes: [
1419            0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83,
1420            0x86, 0xc3, 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, 0xe8, 0xef, 0x7a, 0xc3,
1421            0x1f, 0x35, 0xbb, 0x05,
1422        ],
1423    };
1424
1425    /// The largest scalar that satisfies invariant #1, i.e., the largest scalar with the top bit
1426    /// set to 0. Since this scalar violates invariant #2, i.e., it's greater than the modulus `l`,
1427    /// addition and subtraction are broken. The only thing you can do with this is scalar-point
1428    /// multiplication (and actually also scalar-scalar multiplication, but that's just a quirk of
1429    /// our implementation).
1430    pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar {
1431        bytes: [
1432            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1433            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1434            0xff, 0xff, 0xff, 0x7f,
1435        ],
1436    };

    /// x*y = 5690045403673944803228348699031245560686958845067437804563560795922180092780
    static X_TIMES_Y: Scalar = Scalar {
        bytes: [
            0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21, 0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6,
            0xf9, 0x2c, 0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2, 0x95, 0xfc, 0x08, 0x17,
            0x9a, 0x73, 0x94, 0x0c,
        ],
    };

    /// sage: l = 2^252 + 27742317777372353535851937790883648493
    /// sage: big = 2^256 - 1
    /// sage: repr((big % l).digits(256))
    static CANONICAL_2_256_MINUS_1: Scalar = Scalar {
        bytes: [
            28, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255,
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15,
        ],
    };

    static A_SCALAR: Scalar = Scalar {
        bytes: [
            0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
            0x23, 0x76, 0xef, 0x09,
        ],
    };

    static A_NAF: [i8; 256] = [
        0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0,
        0, 0, 1, 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0,
        11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
        0, -1, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5,
        0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0,
        0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0,
        7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 15,
        0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, -15, 0,
        0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
    ];

    const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar {
        bytes: [
            0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9,
            0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x10,
        ],
    };

    /// The largest clamped integer
    static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes);

    #[test]
    fn fuzzer_testcase_reduction() {
        // LE bytes of 24519928653854221733733552434404946937899825954937634815
        let a_bytes = [
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
            255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        // LE bytes of 4975441334397345751130612518500927154628011511324180036903450236863266160640
        let b_bytes = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210,
            210, 255, 255, 255, 255, 10,
        ];
        // LE bytes of 6432735165214683820902750800207468552549813371247423777071615116673864412038
        let c_bytes = [
            134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215,
            181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14,
        ];

        let a = Scalar::from_bytes_mod_order(a_bytes);
        let b = Scalar::from_bytes_mod_order(b_bytes);
        let c = Scalar::from_bytes_mod_order(c_bytes);

        let mut tmp = [0u8; 64];

        // also_a = (a mod l)
        tmp[0..32].copy_from_slice(&a_bytes[..]);
        let also_a = Scalar::from_bytes_mod_order_wide(&tmp);

        // also_b = (b mod l)
        tmp[0..32].copy_from_slice(&b_bytes[..]);
        let also_b = Scalar::from_bytes_mod_order_wide(&tmp);

        let expected_c = a * b;
        let also_expected_c = also_a * also_b;

        assert_eq!(c, expected_c);
        assert_eq!(c, also_expected_c);
    }

    #[test]
    fn non_adjacent_form_test_vector() {
        let naf = A_SCALAR.non_adjacent_form(5);
        for i in 0..256 {
            assert_eq!(naf[i], A_NAF[i]);
        }
    }

    fn non_adjacent_form_iter(w: usize, x: &Scalar) {
        let naf = x.non_adjacent_form(w);

        // Reconstruct the scalar from the computed NAF
        let mut y = Scalar::ZERO;
        for i in (0..256).rev() {
            y += y;
            let digit = if naf[i] < 0 {
                -Scalar::from((-naf[i]) as u64)
            } else {
                Scalar::from(naf[i] as u64)
            };
            y += digit;
        }

        assert_eq!(*x, y);
    }

    #[test]
    fn non_adjacent_form_random() {
        let mut rng = rand::thread_rng();
        for _ in 0..1_000 {
            let x = Scalar::random(&mut rng);
            for w in &[5, 6, 7, 8] {
                non_adjacent_form_iter(*w, &x);
            }
        }
    }
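
    // A hedged extra property check, not in the original suite: a width-w NAF should have only
    // odd digits with |digit| < 2^(w-1), and any two nonzero digits should be at least w
    // positions apart. We check this against A_SCALAR for the same window sizes used above.
    #[test]
    fn non_adjacent_form_digit_properties() {
        for w in [5usize, 6, 7, 8] {
            let naf = A_SCALAR.non_adjacent_form(w);
            let bound = 1i64 << (w - 1);
            let mut last_nonzero: Option<usize> = None;
            for (i, &d) in naf.iter().enumerate() {
                if d != 0 {
                    assert!(d % 2 != 0, "NAF digits must be odd");
                    assert!((d as i64).abs() < bound, "NAF digits must fit the window");
                    if let Some(j) = last_nonzero {
                        assert!(i - j >= w, "nonzero NAF digits must be >= w apart");
                    }
                    last_nonzero = Some(i);
                }
            }
        }
    }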

    #[test]
    fn from_u64() {
        let val: u64 = 0xdeadbeefdeadbeef;
        let s = Scalar::from(val);
        assert_eq!(s[7], 0xde);
        assert_eq!(s[6], 0xad);
        assert_eq!(s[5], 0xbe);
        assert_eq!(s[4], 0xef);
        assert_eq!(s[3], 0xde);
        assert_eq!(s[2], 0xad);
        assert_eq!(s[1], 0xbe);
        assert_eq!(s[0], 0xef);
    }
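
    // A hedged companion to the test above: since `Scalar::from(u64)` produces a little-endian
    // encoding, every byte past the low eight should be zero.
    #[test]
    fn from_u64_high_bytes_are_zero() {
        let s = Scalar::from(0xdeadbeefdeadbeefu64);
        assert!(s.as_bytes()[8..].iter().all(|&b| b == 0));
    }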

    #[test]
    fn scalar_mul_by_one() {
        let test_scalar = X * Scalar::ONE;
        for i in 0..32 {
            assert!(test_scalar[i] == X[i]);
        }
    }

    #[test]
    fn add_reduces() {
        // Check that addition wraps around the modulus
        assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO);
    }

    #[test]
    fn sub_reduces() {
        // Check that subtraction wraps around the modulus
        assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE);
    }
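
    // A hedged extra check along the same lines: reducing the group order `l` itself should give
    // zero. We rebuild l's little-endian encoding from BASEPOINT_ORDER_MINUS_ONE by bumping its
    // low byte (0xec -> 0xed).
    #[test]
    fn order_reduces_to_zero() {
        let mut l_bytes = BASEPOINT_ORDER_MINUS_ONE.bytes;
        l_bytes[0] += 1; // (l - 1) + 1 = l
        assert_eq!(Scalar::from_bytes_mod_order(l_bytes), Scalar::ZERO);
    }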

    #[test]
    fn impl_add() {
        let two = Scalar::from(2u64);
        let one = Scalar::ONE;
        let should_be_two = one + one;
        assert_eq!(should_be_two, two);
    }

    #[allow(non_snake_case)]
    #[test]
    fn impl_mul() {
        let should_be_X_times_Y = X * Y;
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
    }

    #[allow(non_snake_case)]
    #[test]
    #[cfg(feature = "alloc")]
    fn impl_product() {
        // Test that product works for non-empty iterators
        let X_Y_vector = [X, Y];
        let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);

        // Test that product works for the empty iterator
        let one = Scalar::ONE;
        let empty_vector = [];
        let should_be_one: Scalar = empty_vector.iter().product();
        assert_eq!(should_be_one, one);

        // Test that product works for iterators where Item = Scalar
        let xs = [Scalar::from(2u64); 10];
        let ys = [Scalar::from(3u64); 10];
        // now zs is an iterator with Item = Scalar
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);

        let x_prod: Scalar = xs.iter().product();
        let y_prod: Scalar = ys.iter().product();
        let z_prod: Scalar = zs.product();

        assert_eq!(x_prod, Scalar::from(1024u64));
        assert_eq!(y_prod, Scalar::from(59049u64));
        assert_eq!(z_prod, Scalar::from(60466176u64));
        assert_eq!(x_prod * y_prod, z_prod);
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn impl_sum() {
        // Test that sum works for non-empty iterators
        let two = Scalar::from(2u64);
        let one_vector = [Scalar::ONE, Scalar::ONE];
        let should_be_two: Scalar = one_vector.iter().sum();
        assert_eq!(should_be_two, two);

        // Test that sum works for the empty iterator
        let zero = Scalar::ZERO;
        let empty_vector = [];
        let should_be_zero: Scalar = empty_vector.iter().sum();
        assert_eq!(should_be_zero, zero);

        // Test that sum works for owned types
        let xs = [Scalar::from(1u64); 10];
        let ys = [Scalar::from(2u64); 10];
        // now zs is an iterator with Item = Scalar
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);

        let x_sum: Scalar = xs.iter().sum();
        let y_sum: Scalar = ys.iter().sum();
        let z_sum: Scalar = zs.sum();

        assert_eq!(x_sum, Scalar::from(10u64));
        assert_eq!(y_sum, Scalar::from(20u64));
        assert_eq!(z_sum, Scalar::from(30u64));
        assert_eq!(x_sum + y_sum, z_sum);
    }

    #[test]
    fn square() {
        let expected = X * X;
        let actual = X.unpack().square().pack();
        for i in 0..32 {
            assert!(expected[i] == actual[i]);
        }
    }

    #[test]
    fn reduce() {
        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
        assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
    }
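
    // A hedged companion check: reduction should be idempotent, so reducing an
    // already-canonical scalar leaves it unchanged.
    #[test]
    fn reduce_is_idempotent() {
        let once = Scalar::from_bytes_mod_order([0xff; 32]);
        assert_eq!(once.reduce(), once);
        assert_eq!(CANONICAL_2_256_MINUS_1.reduce(), CANONICAL_2_256_MINUS_1);
    }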

    #[test]
    fn from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];
        // set bignum = x + 2^256x
        for i in 0..32 {
            bignum[i] = X[i];
            bignum[32 + i] = X[i];
        }
        // 3958878930004874126169954872055634648693766179881526445624823978500314864344
        // = x + 2^256x (mod l)
        let reduced = Scalar {
            bytes: [
                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
            ],
        };
        let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
        for i in 0..32 {
            assert!(test_red[i] == reduced[i]);
        }
    }

    #[allow(non_snake_case)]
    #[test]
    fn invert() {
        let inv_X = X.invert();
        assert_eq!(inv_X, XINV);
        let should_be_one = inv_X * X;
        assert_eq!(should_be_one, Scalar::ONE);
    }
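
    // A hedged follow-on to the inversion test: inverting twice should be the identity on
    // canonical scalars, and the inverse of one is one.
    #[test]
    fn invert_twice_is_identity() {
        assert_eq!(X.invert().invert(), X);
        assert_eq!(Scalar::ONE.invert(), Scalar::ONE);
    }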

    // Negating a scalar twice should result in the original scalar.
    #[allow(non_snake_case)]
    #[test]
    fn neg_twice_is_identity() {
        let negative_X = -&X;
        let should_be_X = -&negative_X;

        assert_eq!(should_be_X, X);
    }

    #[test]
    fn to_bytes_from_bytes_roundtrips() {
        let unpacked = X.unpack();
        let bytes = unpacked.as_bytes();
        let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);

        assert_eq!(should_be_unpacked.0, unpacked.0);
    }

    #[test]
    fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];

        // set bignum = x + 2^256x
        for i in 0..32 {
            bignum[i] = X[i];
            bignum[32 + i] = X[i];
        }
        // x + 2^256x (mod l)
        //         = 3958878930004874126169954872055634648693766179881526445624823978500314864344
        let expected = Scalar {
            bytes: [
                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
            ],
        };
        let reduced = Scalar::from_bytes_mod_order_wide(&bignum);

        // The reduced scalar should match the expected value
        assert_eq!(reduced.bytes, expected.bytes);

        //  (x + 2^256x) * R
        let interim =
            UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum), &constants::R);
        // ((x + 2^256x) * R) / R  (mod l)
        let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);

        // The Montgomery-reduced scalar should match the reduced one, as well as the expected value
        assert_eq!(montgomery_reduced.0, reduced.unpack().0);
        assert_eq!(montgomery_reduced.0, expected.unpack().0);
    }

    #[test]
    fn canonical_decoding() {
        // canonical encoding of 1667457891
        let canonical_bytes = [
            99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0,
        ];

        // encoding of
        //   7265385991361016183439748078976496179028704920197054998554201349516117938192
        // = 28380414028753969466561515933501938171588560817147392552250411230663687203 (mod l)
        // non-canonical because it is unreduced mod l
        let non_canonical_bytes_because_unreduced = [16; 32];

        // encoding with the high bit set, to check that the parser isn't pre-masking the high bit
        let non_canonical_bytes_because_highbit = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 128,
        ];

        assert!(bool::from(
            Scalar::from_canonical_bytes(canonical_bytes).is_some()
        ));
        assert!(bool::from(
            Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none()
        ));
        assert!(bool::from(
            Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none()
        ));
    }
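
    // A hedged extra check tying the two constructors together: reducing any encoding with
    // `from_bytes_mod_order` must yield a scalar whose re-encoding is accepted by
    // `from_canonical_bytes`. We reuse the unreduced encoding from the test above.
    #[test]
    fn reduced_encoding_is_canonical() {
        let reduced = Scalar::from_bytes_mod_order([16; 32]);
        assert!(bool::from(
            Scalar::from_canonical_bytes(reduced.to_bytes()).is_some()
        ));
    }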

    #[test]
    #[cfg(feature = "serde")]
    fn serde_bincode_scalar_roundtrip() {
        use bincode;
        let encoded = bincode::serialize(&X).unwrap();
        let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
        assert_eq!(parsed, X);

        // Check that the encoding is 32 bytes exactly
        assert_eq!(encoded.len(), 32);

        // Check that the encoding itself matches the usual one
        assert_eq!(X, bincode::deserialize(X.as_bytes()).unwrap());
    }

    #[cfg(all(debug_assertions, feature = "alloc"))]
    #[test]
    #[should_panic]
    fn batch_invert_with_a_zero_input_panics() {
        let mut xs = vec![Scalar::ONE; 16];
        xs[3] = Scalar::ZERO;
        // This should panic in debug mode.
        Scalar::batch_invert(&mut xs);
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn batch_invert_empty() {
        assert_eq!(Scalar::ONE, Scalar::batch_invert(&mut []));
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn batch_invert_consistency() {
        let mut x = Scalar::from(1u64);
        let mut v1: Vec<_> = (0..16)
            .map(|_| {
                let tmp = x;
                x = x + x;
                tmp
            })
            .collect();
        let v2 = v1.clone();

        let expected: Scalar = v1.iter().product();
        let expected = expected.invert();
        let ret = Scalar::batch_invert(&mut v1);
        assert_eq!(ret, expected);

        for (a, b) in v1.iter().zip(v2.iter()) {
            assert_eq!(a * b, Scalar::ONE);
        }
    }
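
    // A hedged companion to the consistency test: batch inversion should agree with inverting
    // each element individually.
    #[test]
    #[cfg(feature = "alloc")]
    fn batch_invert_matches_pointwise_invert() {
        let mut rng = rand::thread_rng();
        let v: Vec<Scalar> = (0..8).map(|_| Scalar::random(&mut rng)).collect();
        let mut batched = v.clone();
        Scalar::batch_invert(&mut batched);
        for (a, b) in v.iter().zip(batched.iter()) {
            assert_eq!(a.invert(), *b);
        }
    }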

    #[cfg(feature = "precomputed-tables")]
    fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
        let digits_count = Scalar::to_radix_2w_size_hint(w);
        let digits = scalar.as_radix_2w(w);

        let radix = Scalar::from((1 << w) as u64);
        let mut term = Scalar::ONE;
        let mut recovered_scalar = Scalar::ZERO;
        for digit in &digits[0..digits_count] {
            let digit = *digit;
            if digit != 0 {
                let sdigit = if digit < 0 {
                    -Scalar::from((-(digit as i64)) as u64)
                } else {
                    Scalar::from(digit as u64)
                };
                recovered_scalar += term * sdigit;
            }
            term *= radix;
        }
        // When the input is unreduced, we may only recover the scalar mod l.
        assert_eq!(recovered_scalar, scalar.reduce());
    }

    #[test]
    #[cfg(feature = "precomputed-tables")]
    fn test_pippenger_radix() {
        use core::iter;
        // For each radix below, check that a spread of scalars (the inverses of 2..100, plus one
        // unreduced edge case) can be recovered exactly from their radix-2^w representation.
        let cases = (2..100)
            .map(|s| Scalar::from(s as u64).invert())
            // The largest unreduced scalar, s = 2^255 - 1. This is not reduced mod l, but scalar
            // multiplication still works.
            .chain(iter::once(LARGEST_UNREDUCED_SCALAR));

        for scalar in cases {
            test_pippenger_radix_iter(scalar, 6);
            test_pippenger_radix_iter(scalar, 7);
            test_pippenger_radix_iter(scalar, 8);
        }
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn test_read_le_u64_into() {
        let cases: &[(&[u8], &[u64])] = &[
            (
                &[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0],
                &[0xF00F_F11F_0110_EFFE],
            ),
            (
                &[
                    0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A,
                    0xBC, 0xDE, 0xF0,
                ],
                &[0xF00F_F11F_0110_EFFE, 0xF0DE_BC9A_7856_3412],
            ),
        ];

        for (src, expected) in cases {
            let mut dst = vec![0; expected.len()];
            read_le_u64_into(src, &mut dst);

            assert_eq!(&dst, expected, "Expected {:x?} got {:x?}", expected, dst);
        }
    }

    // Tests consistency of From<{integer}> impls for Scalar
    #[test]
    fn test_scalar_from_int() {
        let s1 = Scalar::ONE;

        // For `x` in `u8`, `u16`, `u32`, `u64`, and `u128`, check that
        // `Scalar::from(x + 1) == Scalar::from(x) + Scalar::from(1)`

        let x = 0x23u8;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323u16;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323_2323u32;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323_2323_2323_2323u64;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323_2323_2323_2323_2323_2323_2323_2323u128;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));
    }

    #[cfg(feature = "group")]
    #[test]
    fn ff_constants() {
        assert_eq!(Scalar::from(2u64) * Scalar::TWO_INV, Scalar::ONE);

        assert_eq!(
            Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
            Scalar::ONE,
        );

        // ROOT_OF_UNITY^{2^S} mod l == 1
        assert_eq!(
            Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
            Scalar::ONE,
        );

        // DELTA^{t} mod l == 1, where t = (l - 1) / 2^S is the odd part of l - 1
        assert_eq!(
            Scalar::DELTA.pow(&[
                0x9604_98c6_973d_74fb,
                0x0537_be77_a8bd_e735,
                0x0000_0000_0000_0000,
                0x0400_0000_0000_0000,
            ]),
            Scalar::ONE,
        );
    }

    #[cfg(feature = "group")]
    #[test]
    fn ff_impls() {
        assert!(bool::from(Scalar::ZERO.is_even()));
        assert!(bool::from(Scalar::ONE.is_odd()));
        assert!(bool::from(Scalar::from(2u64).is_even()));
        assert!(bool::from(Scalar::DELTA.is_even()));

        assert!(bool::from(Field::invert(&Scalar::ZERO).is_none()));
        assert_eq!(Field::invert(&X).unwrap(), XINV);

        let x_sq = X.square();
        // We should get back either the positive or negative root.
        assert!([X, -X].contains(&x_sq.sqrt().unwrap()));

        assert_eq!(Scalar::from_repr_vartime(X.to_repr()), Some(X));
        assert_eq!(Scalar::from_repr_vartime([0xff; 32]), None);

        assert_eq!(Scalar::from_repr(X.to_repr()).unwrap(), X);
        assert!(bool::from(Scalar::from_repr([0xff; 32]).is_none()));
    }

    #[test]
    #[should_panic]
    fn test_read_le_u64_into_should_panic_on_bad_input() {
        let mut dst = [0_u64; 1];
        // One byte short
        read_le_u64_into(&[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F], &mut dst);
    }

    #[test]
    fn test_scalar_clamp() {
        let input = A_SCALAR.bytes;
        let expected = [
            0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
            0x23, 0x76, 0xef, 0x49,
        ];
        let actual = clamp_integer(input);
        assert_eq!(actual, expected);

        let expected = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0x40,
        ];
        let actual = clamp_integer([0; 32]);
        assert_eq!(actual, expected);

        let expected = [
            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0x7f,
        ];
        let actual = clamp_integer([0xff; 32]);
        assert_eq!(actual, expected);

        // Clamping is idempotent on an already-clamped input
        assert_eq!(
            LARGEST_CLAMPED_INTEGER,
            clamp_integer(LARGEST_CLAMPED_INTEGER)
        );
    }
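
    // A hedged extra check of the RFC 7748-style bit invariants that clamping guarantees on any
    // input: the low three bits are cleared, the top bit is cleared, and the second-highest bit
    // is set.
    #[test]
    fn clamping_bit_invariants() {
        let mut rng = rand::thread_rng();
        for _ in 0..100 {
            let mut bytes = [0u8; 32];
            rng.fill_bytes(&mut bytes);
            let clamped = clamp_integer(bytes);
            assert_eq!(clamped[0] & 0b0000_0111, 0); // low 3 bits cleared
            assert_eq!(clamped[31] & 0b1000_0000, 0); // top bit cleared
            assert_eq!(clamped[31] & 0b0100_0000, 0b0100_0000); // bit 254 set
        }
    }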

    // Check that a * b == a.reduce() * b.reduce() for ANY scalars a, b, even ones that violate
    // invariant #1, i.e., a, b >= 2^255. Old versions of ed25519-dalek did multiplication where a
    // was reduced and b was clamped and unreduced. This checks that that operation was always
    // well-defined.
    #[test]
    fn test_mul_reduction_invariance() {
        let mut rng = rand::thread_rng();

        for _ in 0..10 {
            // Also define c that's clamped. We'll make sure that clamping doesn't affect
            // computation
            let (a, b, c) = {
                let mut a_bytes = [0u8; 32];
                let mut b_bytes = [0u8; 32];
                let mut c_bytes = [0u8; 32];
                rng.fill_bytes(&mut a_bytes);
                rng.fill_bytes(&mut b_bytes);
                rng.fill_bytes(&mut c_bytes);
                (
                    Scalar { bytes: a_bytes },
                    Scalar { bytes: b_bytes },
                    Scalar {
                        bytes: clamp_integer(c_bytes),
                    },
                )
            };

            // Make sure this is the same product no matter how you cut it
            let reduced_mul_ab = a.reduce() * b.reduce();
            let reduced_mul_ac = a.reduce() * c.reduce();
            assert_eq!(a * b, reduced_mul_ab);
            assert_eq!(a.reduce() * b, reduced_mul_ab);
            assert_eq!(a * b.reduce(), reduced_mul_ab);
            assert_eq!(a * c, reduced_mul_ac);
            assert_eq!(a.reduce() * c, reduced_mul_ac);
            assert_eq!(a * c.reduce(), reduced_mul_ac);
        }
    }
}