// These inner attributes emit a doc-test fence into the module documentation:
// the example there is compiled and run only when the `digest` feature is
// enabled, and marked `ignore` otherwise.
#![cfg_attr(feature = "digest", doc = "```")]
#![cfg_attr(not(feature = "digest"), doc = "```ignore")]
93use core::borrow::Borrow;
115use core::fmt::Debug;
116use core::iter::{Product, Sum};
117use core::ops::Index;
118use core::ops::Neg;
119use core::ops::{Add, AddAssign};
120use core::ops::{Mul, MulAssign};
121use core::ops::{Sub, SubAssign};
122
123use cfg_if::cfg_if;
124
125#[cfg(feature = "group")]
126use group::ff::{Field, FromUniformBytes, PrimeField};
127#[cfg(feature = "group-bits")]
128use group::ff::{FieldBits, PrimeFieldBits};
129
130#[cfg(any(test, feature = "group"))]
131use rand_core::RngCore;
132
133#[cfg(any(test, feature = "rand_core"))]
134use rand_core::CryptoRngCore;
135
136#[cfg(feature = "digest")]
137use digest::generic_array::typenum::U64;
138#[cfg(feature = "digest")]
139use digest::Digest;
140
141use subtle::Choice;
142use subtle::ConditionallySelectable;
143use subtle::ConstantTimeEq;
144use subtle::CtOption;
145
146#[cfg(feature = "zeroize")]
147use zeroize::Zeroize;
148
149use crate::backend;
150use crate::constants;
151
152cfg_if! {
153 if #[cfg(curve25519_dalek_backend = "fiat")] {
154 #[cfg(curve25519_dalek_bits = "32")]
159 #[cfg_attr(
160 docsrs,
161 doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "32")))
162 )]
163 type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;
164
165 #[cfg(curve25519_dalek_bits = "64")]
170 #[cfg_attr(
171 docsrs,
172 doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "64")))
173 )]
174 type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;
175 } else if #[cfg(curve25519_dalek_bits = "64")] {
176 #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
181 type UnpackedScalar = backend::serial::u64::scalar::Scalar52;
182 } else {
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "32")))]
188 type UnpackedScalar = backend::serial::u32::scalar::Scalar29;
189 }
190}
191
192#[allow(clippy::derived_hash_with_manual_eq)]
194#[derive(Copy, Clone, Hash)]
195pub struct Scalar {
196 pub(crate) bytes: [u8; 32],
232}
233
234impl Scalar {
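    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order ℓ.
    ///
    /// Illustrative usage (assuming the crate is consumed as `curve25519_dalek`):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// // 3 is already smaller than ℓ, so reduction leaves it unchanged.
    /// let mut bytes = [0u8; 32];
    /// bytes[0] = 3;
    /// assert_eq!(Scalar::from_bytes_mod_order(bytes), Scalar::from(3u8));
    /// ```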
235 pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
238 let s_unreduced = Scalar { bytes };
240
241 let s = s_unreduced.reduce();
243 debug_assert_eq!(0u8, s[31] >> 7);
244
245 s
246 }
247
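    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order ℓ.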
248 pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
251 UnpackedScalar::from_bytes_wide(input).pack()
252 }
253
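    /// Attempt to construct a `Scalar` from a canonical byte representation,
    /// i.e. the 32 little-endian bytes of an integer strictly less than ℓ.
    ///
    /// A minimal sketch of the accept/reject behaviour (assuming the crate is
    /// consumed as `curve25519_dalek`):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// // 42 < ℓ, so this is accepted.
    /// let mut bytes = [0u8; 32];
    /// bytes[0] = 42;
    /// assert!(bool::from(Scalar::from_canonical_bytes(bytes).is_some()));
    ///
    /// // 2^255 - 1 has its high bit set, so it is rejected.
    /// assert!(bool::from(Scalar::from_canonical_bytes([0xff; 32]).is_none()));
    /// ```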
254 pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption<Scalar> {
262 let high_bit_unset = (bytes[31] >> 7).ct_eq(&0);
263 let candidate = Scalar { bytes };
264 CtOption::new(candidate, high_bit_unset & candidate.is_canonical())
265 }
266
267 #[cfg(feature = "legacy_compatibility")]
274 #[deprecated(
275 since = "4.0.0",
276 note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs."
277 )]
278 pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
279 let mut s = Scalar { bytes };
280 s.bytes[31] &= 0b0111_1111;
282
283 s
284 }
285}
286
287impl Debug for Scalar {
288 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
289 write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
290 }
291}
292
293impl Eq for Scalar {}
294impl PartialEq for Scalar {
295 fn eq(&self, other: &Self) -> bool {
296 self.ct_eq(other).into()
297 }
298}
299
300impl ConstantTimeEq for Scalar {
301 fn ct_eq(&self, other: &Self) -> Choice {
302 self.bytes.ct_eq(&other.bytes)
303 }
304}
305
306impl Index<usize> for Scalar {
307 type Output = u8;
308
309 fn index(&self, _index: usize) -> &u8 {
311 &(self.bytes[_index])
312 }
313}
314
315impl<'b> MulAssign<&'b Scalar> for Scalar {
316 fn mul_assign(&mut self, _rhs: &'b Scalar) {
317 *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
318 }
319}
320
321define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);
322
323impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
324 type Output = Scalar;
325 fn mul(self, _rhs: &'b Scalar) -> Scalar {
326 UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
327 }
328}
329
330define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
331
332impl<'b> AddAssign<&'b Scalar> for Scalar {
333 fn add_assign(&mut self, _rhs: &'b Scalar) {
334 *self = *self + _rhs;
335 }
336}
337
338define_add_assign_variants!(LHS = Scalar, RHS = Scalar);
339
340impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
341 type Output = Scalar;
342 #[allow(non_snake_case)]
343 fn add(self, _rhs: &'b Scalar) -> Scalar {
344 UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
347 }
348}
349
350define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
351
352impl<'b> SubAssign<&'b Scalar> for Scalar {
353 fn sub_assign(&mut self, _rhs: &'b Scalar) {
354 *self = *self - _rhs;
355 }
356}
357
358define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);
359
360impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
361 type Output = Scalar;
362 #[allow(non_snake_case)]
363 fn sub(self, rhs: &'b Scalar) -> Scalar {
364 UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack()
367 }
368}
369
370define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
371
372impl<'a> Neg for &'a Scalar {
373 type Output = Scalar;
374 #[allow(non_snake_case)]
375 fn neg(self) -> Scalar {
376 let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
377 let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
378 UnpackedScalar::sub(&UnpackedScalar::ZERO, &self_mod_l).pack()
379 }
380}
381
382impl Neg for Scalar {
383 type Output = Scalar;
384 fn neg(self) -> Scalar {
385 -&self
386 }
387}
388
389impl ConditionallySelectable for Scalar {
390 fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
391 let mut bytes = [0u8; 32];
392 #[allow(clippy::needless_range_loop)]
393 for i in 0..32 {
394 bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
395 }
396 Scalar { bytes }
397 }
398}
399
400#[cfg(feature = "serde")]
401use serde::de::Visitor;
402#[cfg(feature = "serde")]
403use serde::{Deserialize, Deserializer, Serialize, Serializer};
404
405#[cfg(feature = "serde")]
406#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
407impl Serialize for Scalar {
408 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
409 where
410 S: Serializer,
411 {
412 use serde::ser::SerializeTuple;
413 let mut tup = serializer.serialize_tuple(32)?;
414 for byte in self.as_bytes().iter() {
415 tup.serialize_element(byte)?;
416 }
417 tup.end()
418 }
419}
420
421#[cfg(feature = "serde")]
422#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
423impl<'de> Deserialize<'de> for Scalar {
424 fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
425 where
426 D: Deserializer<'de>,
427 {
428 struct ScalarVisitor;
429
430 impl<'de> Visitor<'de> for ScalarVisitor {
431 type Value = Scalar;
432
433 fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
434 formatter.write_str(
435 "a sequence of 32 bytes whose little-endian interpretation is less than the \
                basepoint order ℓ",
437 )
438 }
439
440 fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
441 where
442 A: serde::de::SeqAccess<'de>,
443 {
444 let mut bytes = [0u8; 32];
445 #[allow(clippy::needless_range_loop)]
446 for i in 0..32 {
447 bytes[i] = seq
448 .next_element()?
449 .ok_or_else(|| serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
450 }
451 Option::from(Scalar::from_canonical_bytes(bytes))
452 .ok_or_else(|| serde::de::Error::custom("scalar was not canonically encoded"))
453 }
454 }
455
456 deserializer.deserialize_tuple(32, ScalarVisitor)
457 }
458}
459
460impl<T> Product<T> for Scalar
461where
462 T: Borrow<Scalar>,
463{
464 fn product<I>(iter: I) -> Self
465 where
466 I: Iterator<Item = T>,
467 {
468 iter.fold(Scalar::ONE, |acc, item| acc * item.borrow())
469 }
470}
471
472impl<T> Sum<T> for Scalar
473where
474 T: Borrow<Scalar>,
475{
476 fn sum<I>(iter: I) -> Self
477 where
478 I: Iterator<Item = T>,
479 {
480 iter.fold(Scalar::ZERO, |acc, item| acc + item.borrow())
481 }
482}
483
484impl Default for Scalar {
485 fn default() -> Scalar {
486 Scalar::ZERO
487 }
488}
489
490impl From<u8> for Scalar {
491 fn from(x: u8) -> Scalar {
492 let mut s_bytes = [0u8; 32];
493 s_bytes[0] = x;
494 Scalar { bytes: s_bytes }
495 }
496}
497
498impl From<u16> for Scalar {
499 fn from(x: u16) -> Scalar {
500 let mut s_bytes = [0u8; 32];
501 let x_bytes = x.to_le_bytes();
502 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
503 Scalar { bytes: s_bytes }
504 }
505}
506
507impl From<u32> for Scalar {
508 fn from(x: u32) -> Scalar {
509 let mut s_bytes = [0u8; 32];
510 let x_bytes = x.to_le_bytes();
511 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
512 Scalar { bytes: s_bytes }
513 }
514}
515
516impl From<u64> for Scalar {
517 fn from(x: u64) -> Scalar {
539 let mut s_bytes = [0u8; 32];
540 let x_bytes = x.to_le_bytes();
541 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
542 Scalar { bytes: s_bytes }
543 }
544}
545
546impl From<u128> for Scalar {
547 fn from(x: u128) -> Scalar {
548 let mut s_bytes = [0u8; 32];
549 let x_bytes = x.to_le_bytes();
550 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
551 Scalar { bytes: s_bytes }
552 }
553}
554
555#[cfg(feature = "zeroize")]
556impl Zeroize for Scalar {
557 fn zeroize(&mut self) {
558 self.bytes.zeroize();
559 }
560}
561
562impl Scalar {
563 pub const ZERO: Self = Self { bytes: [0u8; 32] };
565
566 pub const ONE: Self = Self {
568 bytes: [
569 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
570 0, 0, 0,
571 ],
572 };
573
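    /// Sample a uniformly random `Scalar` from a cryptographically secure RNG,
    /// by reducing 512 bits of RNG output modulo ℓ.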
574 #[cfg(any(test, feature = "rand_core"))]
575 pub fn random<R: CryptoRngCore + ?Sized>(rng: &mut R) -> Self {
598 let mut scalar_bytes = [0u8; 64];
599 rng.fill_bytes(&mut scalar_bytes);
600 Scalar::from_bytes_mod_order_wide(&scalar_bytes)
601 }
602
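    /// Hash the input bytes with the 512-bit hash function `D` and reduce the
    /// digest modulo ℓ to obtain a `Scalar`.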
603 #[cfg(feature = "digest")]
604 #[cfg_attr(feature = "digest", doc = "```")]
614 #[cfg_attr(not(feature = "digest"), doc = "```ignore")]
615 pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
626 where
627 D: Digest<OutputSize = U64> + Default,
628 {
629 let mut hash = D::default();
630 hash.update(input);
631 Scalar::from_hash(hash)
632 }
633
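    /// Construct a `Scalar` from an existing 512-bit `Digest` instance,
    /// reducing the 64-byte digest modulo ℓ.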
634 #[cfg(feature = "digest")]
635 pub fn from_hash<D>(hash: D) -> Scalar
672 where
673 D: Digest<OutputSize = U64>,
674 {
675 let mut output = [0u8; 64];
676 output.copy_from_slice(hash.finalize().as_slice());
677 Scalar::from_bytes_mod_order_wide(&output)
678 }
679
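    /// Convert this `Scalar` to its underlying sequence of 32 little-endian bytes.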
680 pub const fn to_bytes(&self) -> [u8; 32] {
692 self.bytes
693 }
694
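    /// View this `Scalar` as a reference to its underlying 32 little-endian bytes.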
695 pub const fn as_bytes(&self) -> &[u8; 32] {
707 &self.bytes
708 }
709
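    /// Compute the multiplicative inverse of this nonzero scalar modulo ℓ.
    ///
    /// Illustrative check (assuming the crate is consumed as `curve25519_dalek`):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let x = Scalar::from(7u8);
    /// assert_eq!(x * x.invert(), Scalar::ONE);
    /// ```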
710 pub fn invert(&self) -> Scalar {
748 self.unpack().invert().pack()
749 }
750
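    /// Invert a slice of nonzero scalars in a single batch using Montgomery's
    /// trick, returning the inverse of their product.
    ///
    /// A small usage sketch (assuming the crate is consumed as
    /// `curve25519_dalek` with the default `alloc` feature enabled):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let mut scalars = [Scalar::from(2u8), Scalar::from(3u8), Scalar::from(5u8)];
    /// let inv_of_product = Scalar::batch_invert(&mut scalars);
    /// assert_eq!(inv_of_product, Scalar::from(30u8).invert());
    /// assert_eq!(scalars[0], Scalar::from(2u8).invert());
    /// ```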
751 #[cfg(feature = "alloc")]
788 pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
789 let n = inputs.len();
797 let one: UnpackedScalar = Scalar::ONE.unpack().as_montgomery();
798
799 let mut scratch = vec![one; n];
800
801 let mut acc = Scalar::ONE.unpack().as_montgomery();
803
804 for (input, scratch) in inputs.iter_mut().zip(scratch.iter_mut()) {
807 *scratch = acc;
808
809 let tmp = input.unpack().as_montgomery();
812 *input = tmp.pack();
813 acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
814 }
815
816 debug_assert!(acc.pack() != Scalar::ZERO);
818
819 acc = acc.montgomery_invert().from_montgomery();
821
822 let ret = acc.pack();
824
825 for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
828 let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
829 *input = UnpackedScalar::montgomery_mul(&acc, scratch).pack();
830 acc = tmp;
831 }
832
833 #[cfg(feature = "zeroize")]
834 Zeroize::zeroize(&mut scratch);
835
836 ret
837 }
838
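    /// Iterate over the 256 bits of this scalar in little-endian order
    /// (least significant bit first).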
839 pub(crate) fn bits_le(&self) -> impl DoubleEndedIterator<Item = bool> + '_ {
841 (0..256).map(|i| {
842 ((self.bytes[i >> 3] >> (i & 7)) & 1u8) == 1
846 })
847 }
848
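    /// Compute a width-`w` "Non-Adjacent Form" of this (reduced) scalar: a
    /// signed binary representation whose nonzero digits are odd, bounded in
    /// magnitude by `2^(w-1)`, and separated by at least `w - 1` zeros.
    ///
    /// For example, the width-2 NAF of 7 = 8 - 1 is `[-1, 0, 0, 1]`
    /// (least significant digit first).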
849 pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
922 debug_assert!(w >= 2);
924 debug_assert!(w <= 8);
926
927 let mut naf = [0i8; 256];
928
929 let mut x_u64 = [0u64; 5];
930 read_le_u64_into(&self.bytes, &mut x_u64[0..4]);
931
932 let width = 1 << w;
933 let window_mask = width - 1;
934
935 let mut pos = 0;
936 let mut carry = 0;
937 while pos < 256 {
938 let u64_idx = pos / 64;
940 let bit_idx = pos % 64;
941 let bit_buf: u64 = if bit_idx < 64 - w {
942 x_u64[u64_idx] >> bit_idx
944 } else {
945 (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
947 };
948
949 let window = carry + (bit_buf & window_mask);
951
952 if window & 1 == 0 {
953 pos += 1;
958 continue;
959 }
960
961 if window < width / 2 {
962 carry = 0;
963 naf[pos] = window as i8;
964 } else {
965 carry = 1;
966 naf[pos] = (window as i8).wrapping_sub(width as i8);
967 }
968
969 pos += w;
970 }
971
972 naf
973 }
974
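    /// Write this scalar as 64 signed, recentred radix-16 digits, for use with
    /// precomputed lookup tables. The scalar's high bit must be clear.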
975 pub(crate) fn as_radix_16(&self) -> [i8; 64] {
986 debug_assert!(self[31] <= 127);
987 let mut output = [0i8; 64];
988
989 #[allow(clippy::identity_op)]
992 #[inline(always)]
993 fn bot_half(x: u8) -> u8 {
994 (x >> 0) & 15
995 }
996 #[inline(always)]
997 fn top_half(x: u8) -> u8 {
998 (x >> 4) & 15
999 }
1000
1001 for i in 0..32 {
1002 output[2 * i] = bot_half(self[i]) as i8;
1003 output[2 * i + 1] = top_half(self[i]) as i8;
1004 }
1005 for i in 0..63 {
1009 let carry = (output[i] + 8) >> 4;
1010 output[i] -= carry << 4;
1011 output[i + 1] += carry;
1012 }
1013 output
1017 }
1018
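    /// Return the number of digits produced by `as_radix_2w(w)`: the ceiling
    /// of `256 / w`, plus one extra digit when `w == 8` to hold the final carry.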
1019 #[cfg(any(feature = "alloc", all(test, feature = "precomputed-tables")))]
1022 pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
1023 debug_assert!(w >= 4);
1024 debug_assert!(w <= 8);
1025
1026 let digits_count = match w {
1027 4..=7 => (256 + w - 1) / w,
1028 8 => (256 + w - 1) / w + 1_usize,
1030 _ => panic!("invalid radix parameter"),
1031 };
1032
1033 debug_assert!(digits_count <= 64);
1034 digits_count
1035 }
1036
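    /// Write this scalar in signed radix `2^w` (digits recentred to be at most
    /// `2^(w-1)` in magnitude) into a 64-entry buffer, for use by the
    /// multiscalar-multiplication backends. `w` must be in `4..=8`.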
1037 #[cfg(any(feature = "alloc", feature = "precomputed-tables"))]
1059 pub(crate) fn as_radix_2w(&self, w: usize) -> [i8; 64] {
1060 debug_assert!(w >= 4);
1061 debug_assert!(w <= 8);
1062
1063 if w == 4 {
1064 return self.as_radix_16();
1065 }
1066
1067 let mut scalar64x4 = [0u64; 4];
1069 read_le_u64_into(&self.bytes, &mut scalar64x4[0..4]);
1070
1071 let radix: u64 = 1 << w;
1072 let window_mask: u64 = radix - 1;
1073
1074 let mut carry = 0u64;
1075 let mut digits = [0i8; 64];
1076 let digits_count = (256 + w - 1) / w;
1077 #[allow(clippy::needless_range_loop)]
1078 for i in 0..digits_count {
1079 let bit_offset = i * w;
1081 let u64_idx = bit_offset / 64;
1082 let bit_idx = bit_offset % 64;
1083
1084 let bit_buf: u64 = if bit_idx < 64 - w || u64_idx == 3 {
1086 scalar64x4[u64_idx] >> bit_idx
1089 } else {
1090 (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1 + u64_idx] << (64 - bit_idx))
1092 };
1093
            let coef = carry + (bit_buf & window_mask);

            // Recenter the coefficient from [0, 2^w) into [-2^(w-1), 2^(w-1)),
            // propagating a carry into the next digit.
            carry = (coef + (radix / 2)) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
1100 }
1101
1102 match w {
1111 8 => digits[digits_count] += carry as i8,
1112 _ => digits[digits_count - 1] += (carry << w) as i8,
1113 }
1114
1115 digits
1116 }
1117
1118 pub(crate) fn unpack(&self) -> UnpackedScalar {
1120 UnpackedScalar::from_bytes(&self.bytes)
1121 }
1122
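    /// Reduce this scalar modulo ℓ: multiplying by the Montgomery constant `R`
    /// and then Montgomery-reducing (which divides `R` back out) returns the
    /// value fully reduced mod ℓ.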
1123 #[allow(non_snake_case)]
1125 fn reduce(&self) -> Scalar {
1126 let x = self.unpack();
1127 let xR = UnpackedScalar::mul_internal(&x, &constants::R);
1128 let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
1129 x_mod_l.pack()
1130 }
1131
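    /// Check whether this scalar's byte representation is canonical, i.e.
    /// already fully reduced modulo ℓ.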
1132 fn is_canonical(&self) -> Choice {
1135 self.ct_eq(&self.reduce())
1136 }
1137}
1138
1139impl UnpackedScalar {
1140 fn pack(&self) -> Scalar {
1142 Scalar {
1143 bytes: self.as_bytes(),
1144 }
1145 }
1146
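    /// Invert this scalar, given in Montgomery form, by raising it to the power
    /// ℓ - 2 with a fixed addition chain of squarings and multiplications, so
    /// the operation count does not depend on the input value.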
1147 #[rustfmt::skip] #[allow(clippy::just_underscores_and_digits)]
1150 pub fn montgomery_invert(&self) -> UnpackedScalar {
1151 let _1 = *self;
1154 let _10 = _1.montgomery_square();
1155 let _100 = _10.montgomery_square();
1156 let _11 = UnpackedScalar::montgomery_mul(&_10, &_1);
1157 let _101 = UnpackedScalar::montgomery_mul(&_10, &_11);
1158 let _111 = UnpackedScalar::montgomery_mul(&_10, &_101);
1159 let _1001 = UnpackedScalar::montgomery_mul(&_10, &_111);
1160 let _1011 = UnpackedScalar::montgomery_mul(&_10, &_1001);
1161 let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);
1162
1163 let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);
1165
1166 #[inline]
1167 fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
1168 for _ in 0..squarings {
1169 *y = y.montgomery_square();
1170 }
1171 *y = UnpackedScalar::montgomery_mul(y, x);
1172 }
1173
1174 square_multiply(&mut y, 123 + 3, &_101);
1175 square_multiply(&mut y, 2 + 2, &_11);
1176 square_multiply(&mut y, 1 + 4, &_1111);
1177 square_multiply(&mut y, 1 + 4, &_1111);
1178 square_multiply(&mut y, 4, &_1001);
1179 square_multiply(&mut y, 2, &_11);
1180 square_multiply(&mut y, 1 + 4, &_1111);
1181 square_multiply(&mut y, 1 + 3, &_101);
1182 square_multiply(&mut y, 3 + 3, &_101);
1183 square_multiply(&mut y, 3, &_111);
1184 square_multiply(&mut y, 1 + 4, &_1111);
1185 square_multiply(&mut y, 2 + 3, &_111);
1186 square_multiply(&mut y, 2 + 2, &_11);
1187 square_multiply(&mut y, 1 + 4, &_1011);
1188 square_multiply(&mut y, 2 + 4, &_1011);
1189 square_multiply(&mut y, 6 + 4, &_1001);
1190 square_multiply(&mut y, 2 + 2, &_11);
1191 square_multiply(&mut y, 3 + 2, &_11);
1192 square_multiply(&mut y, 3 + 2, &_11);
1193 square_multiply(&mut y, 1 + 4, &_1001);
1194 square_multiply(&mut y, 1 + 3, &_111);
1195 square_multiply(&mut y, 2 + 4, &_1111);
1196 square_multiply(&mut y, 1 + 4, &_1011);
1197 square_multiply(&mut y, 3, &_101);
1198 square_multiply(&mut y, 2 + 4, &_1111);
1199 square_multiply(&mut y, 3, &_101);
1200 square_multiply(&mut y, 1 + 2, &_11);
1201
1202 y
1203 }
1204
1205 pub fn invert(&self) -> UnpackedScalar {
1207 self.as_montgomery().montgomery_invert().from_montgomery()
1208 }
1209}
1210
1211#[cfg(feature = "group")]
1212impl Field for Scalar {
1213 const ZERO: Self = Self::ZERO;
1214 const ONE: Self = Self::ONE;
1215
1216 fn random(mut rng: impl RngCore) -> Self {
1217 let mut scalar_bytes = [0u8; 64];
1219 rng.fill_bytes(&mut scalar_bytes);
1220 Self::from_bytes_mod_order_wide(&scalar_bytes)
1221 }
1222
1223 fn square(&self) -> Self {
1224 self * self
1225 }
1226
1227 fn double(&self) -> Self {
1228 self + self
1229 }
1230
1231 fn invert(&self) -> CtOption<Self> {
1232 CtOption::new(self.invert(), !self.is_zero())
1233 }
1234
1235 fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
1236 #[allow(unused_qualifications)]
1237 group::ff::helpers::sqrt_ratio_generic(num, div)
1238 }
1239
1240 fn sqrt(&self) -> CtOption<Self> {
1241 #[allow(unused_qualifications)]
1242 group::ff::helpers::sqrt_tonelli_shanks(
1243 self,
1244 [
1245 0xcb02_4c63_4b9e_ba7d,
1246 0x029b_df3b_d45e_f39a,
1247 0x0000_0000_0000_0000,
1248 0x0200_0000_0000_0000,
1249 ],
1250 )
1251 }
1252}
1253
1254#[cfg(feature = "group")]
1255impl PrimeField for Scalar {
1256 type Repr = [u8; 32];
1257
1258 fn from_repr(repr: Self::Repr) -> CtOption<Self> {
1259 Self::from_canonical_bytes(repr)
1260 }
1261
1262 fn from_repr_vartime(repr: Self::Repr) -> Option<Self> {
1263 if (repr[31] >> 7) != 0u8 {
1265 return None;
1266 }
1267
1268 let candidate = Scalar { bytes: repr };
1269
1270 if candidate == candidate.reduce() {
1271 Some(candidate)
1272 } else {
1273 None
1274 }
1275 }
1276
1277 fn to_repr(&self) -> Self::Repr {
1278 self.to_bytes()
1279 }
1280
1281 fn is_odd(&self) -> Choice {
1282 Choice::from(self.as_bytes()[0] & 1)
1283 }
1284
1285 const MODULUS: &'static str =
1286 "0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed";
1287 const NUM_BITS: u32 = 253;
1288 const CAPACITY: u32 = 252;
1289
1290 const TWO_INV: Self = Self {
1291 bytes: [
1292 0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c, 0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c,
1293 0x6f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x08,
1295 ],
1296 };
1297 const MULTIPLICATIVE_GENERATOR: Self = Self {
1298 bytes: [
1299 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1300 0, 0, 0,
1301 ],
1302 };
1303 const S: u32 = 2;
1304 const ROOT_OF_UNITY: Self = Self {
1305 bytes: [
1306 0xd4, 0x07, 0xbe, 0xeb, 0xdf, 0x75, 0x87, 0xbe, 0xfe, 0x83, 0xce, 0x42, 0x53, 0x56,
1307 0xf0, 0x0e, 0x7a, 0xc2, 0xc1, 0xab, 0x60, 0x6d, 0x3d, 0x7d, 0xe7, 0x81, 0x79, 0xe0,
1308 0x10, 0x73, 0x4a, 0x09,
1309 ],
1310 };
1311 const ROOT_OF_UNITY_INV: Self = Self {
1312 bytes: [
1313 0x19, 0xcc, 0x37, 0x71, 0x3a, 0xed, 0x8a, 0x99, 0xd7, 0x18, 0x29, 0x60, 0x8b, 0xa3,
1314 0xee, 0x05, 0x86, 0x3d, 0x3e, 0x54, 0x9f, 0x92, 0xc2, 0x82, 0x18, 0x7e, 0x86, 0x1f,
1315 0xef, 0x8c, 0xb5, 0x06,
1316 ],
1317 };
1318 const DELTA: Self = Self {
1319 bytes: [
1320 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1321 0, 0, 0,
1322 ],
1323 };
1324}
1325
1326#[cfg(feature = "group-bits")]
1327impl PrimeFieldBits for Scalar {
1328 type ReprBits = [u8; 32];
1329
1330 fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
1331 self.to_repr().into()
1332 }
1333
1334 fn char_le_bits() -> FieldBits<Self::ReprBits> {
1335 constants::BASEPOINT_ORDER_PRIVATE.to_bytes().into()
1336 }
1337}
1338
1339#[cfg(feature = "group")]
1340impl FromUniformBytes<64> for Scalar {
1341 fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
1342 Scalar::from_bytes_mod_order_wide(bytes)
1343 }
1344}
1345
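/// Read `dst.len()` little-endian `u64` words out of `src`, panicking unless
/// `src.len() == 8 * dst.len()`.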
1346fn read_le_u64_into(src: &[u8], dst: &mut [u64]) {
1351 assert!(
1352 src.len() == 8 * dst.len(),
1353 "src.len() = {}, dst.len() = {}",
1354 src.len(),
1355 dst.len()
1356 );
1357 for (bytes, val) in src.chunks(8).zip(dst.iter_mut()) {
1358 *val = u64::from_le_bytes(
1359 bytes
1360 .try_into()
1361 .expect("Incorrect src length, should be 8 * dst.len()"),
1362 );
1363 }
1364}
1365
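/// Clamp a 256-bit little-endian integer as in X25519 key generation
/// (RFC 7748): clear the three low bits, clear the top bit, and set bit 254.
///
/// Illustrative example (assuming the crate is consumed as `curve25519_dalek`):
///
/// ```
/// use curve25519_dalek::scalar::clamp_integer;
///
/// let clamped = clamp_integer([0xff; 32]);
/// assert_eq!(clamped[0] & 0b0000_0111, 0); // low three bits cleared
/// assert_eq!(clamped[31] & 0b1000_0000, 0); // top bit cleared
/// assert_eq!(clamped[31] & 0b0100_0000, 0b0100_0000); // bit 254 set
/// ```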
1366#[must_use]
1386pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] {
1387 bytes[0] &= 0b1111_1000;
1388 bytes[31] &= 0b0111_1111;
1389 bytes[31] |= 0b0100_0000;
1390 bytes
1391}
1392
1393#[cfg(test)]
1394pub(crate) mod test {
1395 use super::*;
1396
1397 #[cfg(feature = "alloc")]
1398 use alloc::vec::Vec;
1399
1400 pub static X: Scalar = Scalar {
1402 bytes: [
1403 0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2,
1404 0x7d, 0x52, 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, 0xd4, 0x49, 0xf4, 0xa8,
1405 0x79, 0xd9, 0xf2, 0x04,
1406 ],
1407 };
1408 pub static XINV: Scalar = Scalar {
1410 bytes: [
1411 0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb, 0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01,
1412 0x63, 0x47, 0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96, 0xd5, 0x0b, 0xcd, 0x7a,
1413 0x3f, 0x96, 0x2a, 0x0f,
1414 ],
1415 };
1416 pub static Y: Scalar = Scalar {
1418 bytes: [
1419 0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83,
1420 0x86, 0xc3, 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, 0xe8, 0xef, 0x7a, 0xc3,
1421 0x1f, 0x35, 0xbb, 0x05,
1422 ],
1423 };
1424
1425 pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar {
1431 bytes: [
1432 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1433 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1434 0xff, 0xff, 0xff, 0x7f,
1435 ],
1436 };
1437
1438 static X_TIMES_Y: Scalar = Scalar {
1440 bytes: [
1441 0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21, 0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6,
1442 0xf9, 0x2c, 0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2, 0x95, 0xfc, 0x08, 0x17,
1443 0x9a, 0x73, 0x94, 0x0c,
1444 ],
1445 };
1446
1447 static CANONICAL_2_256_MINUS_1: Scalar = Scalar {
1451 bytes: [
1452 28, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255,
1453 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15,
1454 ],
1455 };
1456
1457 static A_SCALAR: Scalar = Scalar {
1458 bytes: [
1459 0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
1460 0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
1461 0x23, 0x76, 0xef, 0x09,
1462 ],
1463 };
1464
1465 static A_NAF: [i8; 256] = [
1466 0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0,
1467 0, 0, 1, 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0,
1468 11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
1469 0, -1, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5,
1470 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0,
1471 0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0,
1472 7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 15,
1473 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, -15, 0,
1474 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
1475 ];
1476
1477 const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar {
1478 bytes: [
1479 0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9,
1480 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x10,
1482 ],
1483 };
1484
1485 static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes);
1487
1488 #[test]
1489 fn fuzzer_testcase_reduction() {
1490 let a_bytes = [
1492 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
1493 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1494 ];
1495 let b_bytes = [
1497 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210,
1498 210, 255, 255, 255, 255, 10,
1499 ];
1500 let c_bytes = [
1502 134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215,
1503 181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14,
1504 ];
1505
1506 let a = Scalar::from_bytes_mod_order(a_bytes);
1507 let b = Scalar::from_bytes_mod_order(b_bytes);
1508 let c = Scalar::from_bytes_mod_order(c_bytes);
1509
1510 let mut tmp = [0u8; 64];
1511
1512 tmp[0..32].copy_from_slice(&a_bytes[..]);
1514 let also_a = Scalar::from_bytes_mod_order_wide(&tmp);
1515
1516 tmp[0..32].copy_from_slice(&b_bytes[..]);
1518 let also_b = Scalar::from_bytes_mod_order_wide(&tmp);
1519
1520 let expected_c = a * b;
1521 let also_expected_c = also_a * also_b;
1522
1523 assert_eq!(c, expected_c);
1524 assert_eq!(c, also_expected_c);
1525 }
1526
1527 #[test]
1528 fn non_adjacent_form_test_vector() {
1529 let naf = A_SCALAR.non_adjacent_form(5);
1530 for i in 0..256 {
1531 assert_eq!(naf[i], A_NAF[i]);
1532 }
1533 }
1534
1535 fn non_adjacent_form_iter(w: usize, x: &Scalar) {
1536 let naf = x.non_adjacent_form(w);
1537
1538 let mut y = Scalar::ZERO;
1540 for i in (0..256).rev() {
1541 y += y;
1542 let digit = if naf[i] < 0 {
1543 -Scalar::from((-naf[i]) as u64)
1544 } else {
1545 Scalar::from(naf[i] as u64)
1546 };
1547 y += digit;
1548 }
1549
1550 assert_eq!(*x, y);
1551 }
1552
1553 #[test]
1554 fn non_adjacent_form_random() {
1555 let mut rng = rand::thread_rng();
1556 for _ in 0..1_000 {
1557 let x = Scalar::random(&mut rng);
1558 for w in &[5, 6, 7, 8] {
1559 non_adjacent_form_iter(*w, &x);
1560 }
1561 }
1562 }
1563
1564 #[test]
1565 fn from_u64() {
1566 let val: u64 = 0xdeadbeefdeadbeef;
1567 let s = Scalar::from(val);
1568 assert_eq!(s[7], 0xde);
1569 assert_eq!(s[6], 0xad);
1570 assert_eq!(s[5], 0xbe);
1571 assert_eq!(s[4], 0xef);
1572 assert_eq!(s[3], 0xde);
1573 assert_eq!(s[2], 0xad);
1574 assert_eq!(s[1], 0xbe);
1575 assert_eq!(s[0], 0xef);
1576 }
1577
1578 #[test]
1579 fn scalar_mul_by_one() {
1580 let test_scalar = X * Scalar::ONE;
1581 for i in 0..32 {
1582 assert!(test_scalar[i] == X[i]);
1583 }
1584 }
1585
1586 #[test]
1587 fn add_reduces() {
1588 assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO);
1590 }
1591
1592 #[test]
1593 fn sub_reduces() {
1594 assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE);
1596 }
1597
1598 #[test]
1599 fn impl_add() {
1600 let two = Scalar::from(2u64);
1601 let one = Scalar::ONE;
1602 let should_be_two = one + one;
1603 assert_eq!(should_be_two, two);
1604 }
1605
1606 #[allow(non_snake_case)]
1607 #[test]
1608 fn impl_mul() {
1609 let should_be_X_times_Y = X * Y;
1610 assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1611 }
1612
1613 #[allow(non_snake_case)]
1614 #[test]
1615 #[cfg(feature = "alloc")]
1616 fn impl_product() {
1617 let X_Y_vector = [X, Y];
1619 let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
1620 assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1621
1622 let one = Scalar::ONE;
1624 let empty_vector = [];
1625 let should_be_one: Scalar = empty_vector.iter().product();
1626 assert_eq!(should_be_one, one);
1627
1628 let xs = [Scalar::from(2u64); 10];
1630 let ys = [Scalar::from(3u64); 10];
1631 let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);
1633
1634 let x_prod: Scalar = xs.iter().product();
1635 let y_prod: Scalar = ys.iter().product();
1636 let z_prod: Scalar = zs.product();
1637
1638 assert_eq!(x_prod, Scalar::from(1024u64));
1639 assert_eq!(y_prod, Scalar::from(59049u64));
1640 assert_eq!(z_prod, Scalar::from(60466176u64));
1641 assert_eq!(x_prod * y_prod, z_prod);
1642 }
1643
1644 #[test]
1645 #[cfg(feature = "alloc")]
1646 fn impl_sum() {
1647 let two = Scalar::from(2u64);
1649 let one_vector = [Scalar::ONE, Scalar::ONE];
1650 let should_be_two: Scalar = one_vector.iter().sum();
1651 assert_eq!(should_be_two, two);
1652
1653 let zero = Scalar::ZERO;
1655 let empty_vector = [];
1656 let should_be_zero: Scalar = empty_vector.iter().sum();
1657 assert_eq!(should_be_zero, zero);
1658
1659 let xs = [Scalar::from(1u64); 10];
1661 let ys = [Scalar::from(2u64); 10];
1662 let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);
1664
1665 let x_sum: Scalar = xs.iter().sum();
1666 let y_sum: Scalar = ys.iter().sum();
1667 let z_sum: Scalar = zs.sum();
1668
1669 assert_eq!(x_sum, Scalar::from(10u64));
1670 assert_eq!(y_sum, Scalar::from(20u64));
1671 assert_eq!(z_sum, Scalar::from(30u64));
1672 assert_eq!(x_sum + y_sum, z_sum);
1673 }
1674
1675 #[test]
1676 fn square() {
1677 let expected = X * X;
1678 let actual = X.unpack().square().pack();
1679 for i in 0..32 {
1680 assert!(expected[i] == actual[i]);
1681 }
1682 }
1683
1684 #[test]
1685 fn reduce() {
1686 let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
1687 assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
1688 }
1689
1690 #[test]
1691 fn from_bytes_mod_order_wide() {
1692 let mut bignum = [0u8; 64];
1693 for i in 0..32 {
1695 bignum[i] = X[i];
1696 bignum[32 + i] = X[i];
1697 }
1698 let reduced = Scalar {
1701 bytes: [
1702 216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1703 91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1704 ],
1705 };
1706 let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
1707 for i in 0..32 {
1708 assert!(test_red[i] == reduced[i]);
1709 }
1710 }
1711
1712 #[allow(non_snake_case)]
1713 #[test]
1714 fn invert() {
1715 let inv_X = X.invert();
1716 assert_eq!(inv_X, XINV);
1717 let should_be_one = inv_X * X;
1718 assert_eq!(should_be_one, Scalar::ONE);
1719 }
1720
1721 #[allow(non_snake_case)]
1723 #[test]
1724 fn neg_twice_is_identity() {
1725 let negative_X = -&X;
1726 let should_be_X = -&negative_X;
1727
1728 assert_eq!(should_be_X, X);
1729 }
1730
1731 #[test]
1732 fn to_bytes_from_bytes_roundtrips() {
1733 let unpacked = X.unpack();
1734 let bytes = unpacked.as_bytes();
1735 let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);
1736
1737 assert_eq!(should_be_unpacked.0, unpacked.0);
1738 }
1739
1740 #[test]
1741 fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
1742 let mut bignum = [0u8; 64];
1743
1744 for i in 0..32 {
1746 bignum[i] = X[i];
1747 bignum[32 + i] = X[i];
1748 }
1749 let expected = Scalar {
1752 bytes: [
1753 216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1754 91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1755 ],
1756 };
1757 let reduced = Scalar::from_bytes_mod_order_wide(&bignum);
1758
1759 assert_eq!(reduced.bytes, expected.bytes);
1761
1762 let interim =
1764 UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum), &constants::R);
1765 let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);
1767
1768 assert_eq!(montgomery_reduced.0, reduced.unpack().0);
1770 assert_eq!(montgomery_reduced.0, expected.unpack().0)
1771 }
1772
1773 #[test]
1774 fn canonical_decoding() {
1775 let canonical_bytes = [
1777 99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778 0, 0, 0, 0,
1779 ];
1780
1781 let non_canonical_bytes_because_unreduced = [16; 32];
1786
1787 let non_canonical_bytes_because_highbit = [
1789 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1790 0, 0, 128,
1791 ];
1792
1793 assert!(bool::from(
1794 Scalar::from_canonical_bytes(canonical_bytes).is_some()
1795 ));
1796 assert!(bool::from(
1797 Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none()
1798 ));
1799 assert!(bool::from(
1800 Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none()
1801 ));
1802 }
1803
1804 #[test]
1805 #[cfg(feature = "serde")]
1806 fn serde_bincode_scalar_roundtrip() {
1807 use bincode;
1808 let encoded = bincode::serialize(&X).unwrap();
1809 let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
1810 assert_eq!(parsed, X);
1811
1812 assert_eq!(encoded.len(), 32);
1814
        // The serde encoding should match the plain byte encoding
        assert_eq!(X, bincode::deserialize(X.as_bytes()).unwrap());
1817 }
1818
1819 #[cfg(all(debug_assertions, feature = "alloc"))]
1820 #[test]
1821 #[should_panic]
1822 fn batch_invert_with_a_zero_input_panics() {
1823 let mut xs = vec![Scalar::ONE; 16];
1824 xs[3] = Scalar::ZERO;
1825 Scalar::batch_invert(&mut xs);
1827 }
1828
1829 #[test]
1830 #[cfg(feature = "alloc")]
1831 fn batch_invert_empty() {
1832 assert_eq!(Scalar::ONE, Scalar::batch_invert(&mut []));
1833 }
1834
1835 #[test]
1836 #[cfg(feature = "alloc")]
1837 fn batch_invert_consistency() {
1838 let mut x = Scalar::from(1u64);
1839 let mut v1: Vec<_> = (0..16)
1840 .map(|_| {
1841 let tmp = x;
1842 x = x + x;
1843 tmp
1844 })
1845 .collect();
1846 let v2 = v1.clone();
1847
1848 let expected: Scalar = v1.iter().product();
1849 let expected = expected.invert();
1850 let ret = Scalar::batch_invert(&mut v1);
1851 assert_eq!(ret, expected);
1852
1853 for (a, b) in v1.iter().zip(v2.iter()) {
1854 assert_eq!(a * b, Scalar::ONE);
1855 }
1856 }
1857
1858 #[cfg(feature = "precomputed-tables")]
1859 fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
1860 let digits_count = Scalar::to_radix_2w_size_hint(w);
1861 let digits = scalar.as_radix_2w(w);
1862
1863 let radix = Scalar::from((1 << w) as u64);
1864 let mut term = Scalar::ONE;
1865 let mut recovered_scalar = Scalar::ZERO;
1866 for digit in &digits[0..digits_count] {
1867 let digit = *digit;
1868 if digit != 0 {
1869 let sdigit = if digit < 0 {
1870 -Scalar::from((-(digit as i64)) as u64)
1871 } else {
1872 Scalar::from(digit as u64)
1873 };
1874 recovered_scalar += term * sdigit;
1875 }
1876 term *= radix;
1877 }
1878 assert_eq!(recovered_scalar, scalar.reduce());
1880 }
1881
1882 #[test]
1883 #[cfg(feature = "precomputed-tables")]
1884 fn test_pippenger_radix() {
1885 use core::iter;
1886 let cases = (2..100)
1889 .map(|s| Scalar::from(s as u64).invert())
1890 .chain(iter::once(LARGEST_UNREDUCED_SCALAR));
1893
1894 for scalar in cases {
1895 test_pippenger_radix_iter(scalar, 6);
1896 test_pippenger_radix_iter(scalar, 7);
1897 test_pippenger_radix_iter(scalar, 8);
1898 }
1899 }
1900
1901 #[test]
1902 #[cfg(feature = "alloc")]
1903 fn test_read_le_u64_into() {
1904 let cases: &[(&[u8], &[u64])] = &[
1905 (
1906 &[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0],
1907 &[0xF00F_F11F_0110_EFFE],
1908 ),
1909 (
1910 &[
1911 0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A,
1912 0xBC, 0xDE, 0xF0,
1913 ],
1914 &[0xF00F_F11F_0110_EFFE, 0xF0DE_BC9A_7856_3412],
1915 ),
1916 ];
1917
1918 for (src, expected) in cases {
1919 let mut dst = vec![0; expected.len()];
1920 read_le_u64_into(src, &mut dst);
1921
1922 assert_eq!(&dst, expected, "Expected {:x?} got {:x?}", expected, dst);
1923 }
1924 }
1925
1926 #[test]
1928 fn test_scalar_from_int() {
1929 let s1 = Scalar::ONE;
1930
1931 let x = 0x23u8;
1935 let sx = Scalar::from(x);
1936 assert_eq!(sx + s1, Scalar::from(x + 1));
1937
1938 let x = 0x2323u16;
1939 let sx = Scalar::from(x);
1940 assert_eq!(sx + s1, Scalar::from(x + 1));
1941
1942 let x = 0x2323_2323u32;
1943 let sx = Scalar::from(x);
1944 assert_eq!(sx + s1, Scalar::from(x + 1));
1945
1946 let x = 0x2323_2323_2323_2323u64;
1947 let sx = Scalar::from(x);
1948 assert_eq!(sx + s1, Scalar::from(x + 1));
1949
1950 let x = 0x2323_2323_2323_2323_2323_2323_2323_2323u128;
1951 let sx = Scalar::from(x);
1952 assert_eq!(sx + s1, Scalar::from(x + 1));
1953 }
1954
1955 #[cfg(feature = "group")]
1956 #[test]
1957 fn ff_constants() {
1958 assert_eq!(Scalar::from(2u64) * Scalar::TWO_INV, Scalar::ONE);
1959
1960 assert_eq!(
1961 Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
1962 Scalar::ONE,
1963 );
1964
1965 assert_eq!(
1967 Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
1968 Scalar::ONE,
1969 );
1970
1971 assert_eq!(
1973 Scalar::DELTA.pow(&[
1974 0x9604_98c6_973d_74fb,
1975 0x0537_be77_a8bd_e735,
1976 0x0000_0000_0000_0000,
1977 0x0400_0000_0000_0000,
1978 ]),
1979 Scalar::ONE,
1980 );
1981 }
1982
1983 #[cfg(feature = "group")]
1984 #[test]
1985 fn ff_impls() {
1986 assert!(bool::from(Scalar::ZERO.is_even()));
1987 assert!(bool::from(Scalar::ONE.is_odd()));
1988 assert!(bool::from(Scalar::from(2u64).is_even()));
1989 assert!(bool::from(Scalar::DELTA.is_even()));
1990
1991 assert!(bool::from(Field::invert(&Scalar::ZERO).is_none()));
1992 assert_eq!(Field::invert(&X).unwrap(), XINV);
1993
1994 let x_sq = X.square();
1995 assert!([X, -X].contains(&x_sq.sqrt().unwrap()));
1997
1998 assert_eq!(Scalar::from_repr_vartime(X.to_repr()), Some(X));
1999 assert_eq!(Scalar::from_repr_vartime([0xff; 32]), None);
2000
2001 assert_eq!(Scalar::from_repr(X.to_repr()).unwrap(), X);
2002 assert!(bool::from(Scalar::from_repr([0xff; 32]).is_none()));
2003 }
2004
2005 #[test]
2006 #[should_panic]
2007 fn test_read_le_u64_into_should_panic_on_bad_input() {
2008 let mut dst = [0_u64; 1];
2009 read_le_u64_into(&[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F], &mut dst);
2011 }
2012
2013 #[test]
2014 fn test_scalar_clamp() {
2015 let input = A_SCALAR.bytes;
2016 let expected = [
2017 0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
2018 0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
2019 0x23, 0x76, 0xef, 0x49,
2020 ];
2021 let actual = clamp_integer(input);
2022 assert_eq!(actual, expected);
2023
2024 let expected = [
2025 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2026 0, 0, 0x40,
2027 ];
2028 let actual = clamp_integer([0; 32]);
2029 assert_eq!(expected, actual);
2030 let expected = [
2031 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
2032 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
2033 0xff, 0xff, 0xff, 0x7f,
2034 ];
2035 let actual = clamp_integer([0xff; 32]);
2036 assert_eq!(actual, expected);
2037
2038 assert_eq!(
2039 LARGEST_CLAMPED_INTEGER,
2040 clamp_integer(LARGEST_CLAMPED_INTEGER)
2041 );
2042 }
2043
2044 #[test]
2048 fn test_mul_reduction_invariance() {
2049 let mut rng = rand::thread_rng();
2050
2051 for _ in 0..10 {
2052 let (a, b, c) = {
2055 let mut a_bytes = [0u8; 32];
2056 let mut b_bytes = [0u8; 32];
2057 let mut c_bytes = [0u8; 32];
2058 rng.fill_bytes(&mut a_bytes);
2059 rng.fill_bytes(&mut b_bytes);
2060 rng.fill_bytes(&mut c_bytes);
2061 (
2062 Scalar { bytes: a_bytes },
2063 Scalar { bytes: b_bytes },
2064 Scalar {
2065 bytes: clamp_integer(c_bytes),
2066 },
2067 )
2068 };
2069
2070 let reduced_mul_ab = a.reduce() * b.reduce();
2072 let reduced_mul_ac = a.reduce() * c.reduce();
2073 assert_eq!(a * b, reduced_mul_ab);
2074 assert_eq!(a.reduce() * b, reduced_mul_ab);
2075 assert_eq!(a * b.reduce(), reduced_mul_ab);
2076 assert_eq!(a * c, reduced_mul_ac);
2077 assert_eq!(a.reduce() * c, reduced_mul_ac);
2078 assert_eq!(a * c.reduce(), reduced_mul_ac);
2079 }
2080 }
2081}