1use core::ops::{Add, AddAssign, Mul, MulAssign, Neg};
2use crunchy::unroll;
3use subtle::Choice;
4
/// The secp256k1 group order `n` as eight 32-bit limbs, least significant
/// limb first:
/// n = FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE BAAEDCE6 AF48A03B BFD25E8C D0364141.
const SECP256K1_N: [u32; 8] = [
    0xD0364141, 0xBFD25E8C, 0xAF48A03B, 0xBAAEDCE6, 0xFFFFFFFE, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
];

// Low limbs of the complement `2^256 - n` (i.e. `!n + 1` over 256 bits).
// Limbs 5..7 are zero because the top three limbs of `n` are all 0xFFFFFFFF,
// so only these five constants are needed by the reduction code.
const SECP256K1_N_C_0: u32 = !SECP256K1_N[0] + 1;
const SECP256K1_N_C_1: u32 = !SECP256K1_N[1];
const SECP256K1_N_C_2: u32 = !SECP256K1_N[2];
const SECP256K1_N_C_3: u32 = !SECP256K1_N[3];
const SECP256K1_N_C_4: u32 = 1;

// Limbs of the half order `(n - 1) / 2`, least significant first; used by
// `Scalar::is_high` to decide whether a scalar exceeds n/2.
const SECP256K1_N_H_0: u32 = 0x681B20A0;
const SECP256K1_N_H_1: u32 = 0xDFE92F46;
const SECP256K1_N_H_2: u32 = 0x57A4501D;
const SECP256K1_N_H_3: u32 = 0x5D576E73;
const SECP256K1_N_H_4: u32 = 0xFFFFFFFF;
const SECP256K1_N_H_5: u32 = 0xFFFFFFFF;
const SECP256K1_N_H_6: u32 = 0xFFFFFFFF;
const SECP256K1_N_H_7: u32 = 0x7FFFFFFF;
23
/// A 256-bit scalar modulo the secp256k1 group order `n`, stored as eight
/// 32-bit limbs in little-endian limb order (`self.0[0]` is least significant).
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Scalar(pub [u32; 8]);
27
28impl Scalar {
    /// Zeroize the scalar.
    ///
    /// Uses a volatile write so the compiler cannot elide the wipe even when
    /// the value is dead afterwards (secret-hygiene for key material).
    pub fn clear(&mut self) {
        unsafe {
            core::ptr::write_volatile(&mut self.0, [0u32; 8]);
        }
    }
35
36 pub fn set_int(&mut self, v: u32) {
38 self.0 = [v, 0, 0, 0, 0, 0, 0, 0];
39 }
40
41 pub fn from_int(v: u32) -> Self {
43 let mut scalar = Self::default();
44 scalar.set_int(v);
45 scalar
46 }
47
48 pub fn bits(&self, offset: usize, count: usize) -> u32 {
51 debug_assert!((offset + count - 1) >> 5 == offset >> 5);
52 (self.0[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1)
53 }
54
    /// Extract `count` bits starting at bit `offset`, allowing the range to
    /// straddle a limb boundary (unlike [`Scalar::bits`]).
    pub fn bits_var(&self, offset: usize, count: usize) -> u32 {
        debug_assert!(count < 32);
        debug_assert!(offset + count <= 256);
        if (offset + count - 1) >> 5 == offset >> 5 {
            // All requested bits live in a single limb.
            self.bits(offset, count)
        } else {
            // Stitch the high bits of one limb to the low bits of the next.
            debug_assert!((offset >> 5) + 1 < 8);
            ((self.0[offset >> 5] >> (offset & 0x1f))
                | (self.0[(offset >> 5) + 1] << (32 - (offset & 0x1f))))
                & ((1 << count) - 1)
        }
    }
68
    /// Constant-time check whether the raw limb value is `>= n` (the group
    /// order), i.e. whether a reduction is required.
    ///
    /// Walks the limbs from most to least significant, latching `no` once a
    /// limb compares strictly below the matching limb of `n` and `yes` once
    /// one compares strictly above, without branching on the data.
    #[must_use]
    fn check_overflow(&self) -> Choice {
        let mut yes: Choice = 0.into();
        let mut no: Choice = 0.into();
        // Limbs 7..5 of n are all 0xFFFFFFFF, so they can only compare below;
        // no `>` check (and no `& !yes` guard) is needed until limb 4.
        no |= Choice::from((self.0[7] < SECP256K1_N[7]) as u8);
        no |= Choice::from((self.0[6] < SECP256K1_N[6]) as u8);
        no |= Choice::from((self.0[5] < SECP256K1_N[5]) as u8);
        no |= Choice::from((self.0[4] < SECP256K1_N[4]) as u8);
        yes |= Choice::from((self.0[4] > SECP256K1_N[4]) as u8) & !no;
        no |= Choice::from((self.0[3] < SECP256K1_N[3]) as u8) & !yes;
        yes |= Choice::from((self.0[3] > SECP256K1_N[3]) as u8) & !no;
        no |= Choice::from((self.0[2] < SECP256K1_N[2]) as u8) & !yes;
        yes |= Choice::from((self.0[2] > SECP256K1_N[2]) as u8) & !no;
        no |= Choice::from((self.0[1] < SECP256K1_N[1]) as u8) & !yes;
        yes |= Choice::from((self.0[1] > SECP256K1_N[1]) as u8) & !no;
        // Ties on every higher limb: `>=` on the lowest limb decides.
        yes |= Choice::from((self.0[0] >= SECP256K1_N[0]) as u8) & !no;

        yes
    }
88
    /// Conditionally subtract the group order: when `overflow` is set, add
    /// `2^256 - n` (the `SECP256K1_N_C_*` limbs) and let the sum wrap modulo
    /// 2^256, which is equivalent to subtracting `n`. Branch-free.
    fn reduce(&mut self, overflow: Choice) {
        // `o` is 0 or 1; multiplying by it selects the addend without a branch.
        let o = overflow.unwrap_u8() as u64;
        let mut t: u64;

        t = (self.0[0] as u64) + o * (SECP256K1_N_C_0 as u64);
        self.0[0] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        t += (self.0[1] as u64) + o * (SECP256K1_N_C_1 as u64);
        self.0[1] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        t += (self.0[2] as u64) + o * (SECP256K1_N_C_2 as u64);
        self.0[2] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        t += (self.0[3] as u64) + o * (SECP256K1_N_C_3 as u64);
        self.0[3] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        t += (self.0[4] as u64) + o * (SECP256K1_N_C_4 as u64);
        self.0[4] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        // Limbs 5..7 of 2^256 - n are zero: just propagate the carry.
        t += self.0[5] as u64;
        self.0[5] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        t += self.0[6] as u64;
        self.0[6] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;

        t += self.0[7] as u64;
        self.0[7] = (t & 0xFFFFFFFF) as u32;
    }
124
    /// Conditionally add `2^bit` to the scalar: a no-op when `flag` is false.
    ///
    /// Implemented branch-free over the limb data: when `flag` is false,
    /// `usize::MAX & 0x100 == 0x100` is added to `bit`, pushing it into
    /// 256..511 so that none of the `(bit >> 5) == i` limb selectors below
    /// match and every addend is zero. The caller must guarantee the result
    /// does not overflow the group order (checked only via `debug_assert`).
    pub fn cadd_bit(&mut self, mut bit: usize, flag: bool) {
        let mut t: u64;
        debug_assert!(bit < 256);
        // `& 0x100` applies to the whole if-else expression: adds 0 or 0x100.
        bit += if flag { 0 } else { usize::max_value() } & 0x100;
        t = (self.0[0] as u64) + ((if (bit >> 5) == 0 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[0] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[1] as u64) + ((if (bit >> 5) == 1 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[1] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[2] as u64) + ((if (bit >> 5) == 2 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[2] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[3] as u64) + ((if (bit >> 5) == 3 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[3] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[4] as u64) + ((if (bit >> 5) == 4 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[4] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[5] as u64) + ((if (bit >> 5) == 5 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[5] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[6] as u64) + ((if (bit >> 5) == 6 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[6] = (t & 0xFFFFFFFF) as u32;
        t >>= 32;
        t += (self.0[7] as u64) + ((if (bit >> 5) == 7 { 1 } else { 0 }) << (bit & 0x1F));
        self.0[7] = (t & 0xFFFFFFFF) as u32;
        // No carry may escape bit 256, and the result must stay below n.
        debug_assert!((t >> 32) == 0);
        debug_assert!(!bool::from(self.check_overflow()));
    }
157
158 #[must_use]
160 pub fn set_b32(&mut self, b32: &[u8; 32]) -> Choice {
161 self.0[0] = (b32[31] as u32)
162 | ((b32[30] as u32) << 8)
163 | ((b32[29] as u32) << 16)
164 | ((b32[28] as u32) << 24);
165 self.0[1] = (b32[27] as u32)
166 | ((b32[26] as u32) << 8)
167 | ((b32[25] as u32) << 16)
168 | ((b32[24] as u32) << 24);
169 self.0[2] = (b32[23] as u32)
170 | ((b32[22] as u32) << 8)
171 | ((b32[21] as u32) << 16)
172 | ((b32[20] as u32) << 24);
173 self.0[3] = (b32[19] as u32)
174 | ((b32[18] as u32) << 8)
175 | ((b32[17] as u32) << 16)
176 | ((b32[16] as u32) << 24);
177 self.0[4] = (b32[15] as u32)
178 | ((b32[14] as u32) << 8)
179 | ((b32[13] as u32) << 16)
180 | ((b32[12] as u32) << 24);
181 self.0[5] = (b32[11] as u32)
182 | ((b32[10] as u32) << 8)
183 | ((b32[9] as u32) << 16)
184 | ((b32[8] as u32) << 24);
185 self.0[6] = (b32[7] as u32)
186 | ((b32[6] as u32) << 8)
187 | ((b32[5] as u32) << 16)
188 | ((b32[4] as u32) << 24);
189 self.0[7] = (b32[3] as u32)
190 | ((b32[2] as u32) << 8)
191 | ((b32[1] as u32) << 16)
192 | ((b32[0] as u32) << 24);
193
194 let overflow = self.check_overflow();
195 self.reduce(overflow);
196
197 overflow
198 }
199
200 pub fn b32(&self) -> [u8; 32] {
202 let mut bin = [0u8; 32];
203 self.fill_b32(&mut bin);
204 bin
205 }
206
207 pub fn fill_b32(&self, bin: &mut [u8; 32]) {
209 bin[0] = (self.0[7] >> 24) as u8;
210 bin[1] = (self.0[7] >> 16) as u8;
211 bin[2] = (self.0[7] >> 8) as u8;
212 bin[3] = (self.0[7]) as u8;
213 bin[4] = (self.0[6] >> 24) as u8;
214 bin[5] = (self.0[6] >> 16) as u8;
215 bin[6] = (self.0[6] >> 8) as u8;
216 bin[7] = (self.0[6]) as u8;
217 bin[8] = (self.0[5] >> 24) as u8;
218 bin[9] = (self.0[5] >> 16) as u8;
219 bin[10] = (self.0[5] >> 8) as u8;
220 bin[11] = (self.0[5]) as u8;
221 bin[12] = (self.0[4] >> 24) as u8;
222 bin[13] = (self.0[4] >> 16) as u8;
223 bin[14] = (self.0[4] >> 8) as u8;
224 bin[15] = (self.0[4]) as u8;
225 bin[16] = (self.0[3] >> 24) as u8;
226 bin[17] = (self.0[3] >> 16) as u8;
227 bin[18] = (self.0[3] >> 8) as u8;
228 bin[19] = (self.0[3]) as u8;
229 bin[20] = (self.0[2] >> 24) as u8;
230 bin[21] = (self.0[2] >> 16) as u8;
231 bin[22] = (self.0[2] >> 8) as u8;
232 bin[23] = (self.0[2]) as u8;
233 bin[24] = (self.0[1] >> 24) as u8;
234 bin[25] = (self.0[1] >> 16) as u8;
235 bin[26] = (self.0[1] >> 8) as u8;
236 bin[27] = (self.0[1]) as u8;
237 bin[28] = (self.0[0] >> 24) as u8;
238 bin[29] = (self.0[0] >> 16) as u8;
239 bin[30] = (self.0[0] >> 8) as u8;
240 bin[31] = (self.0[0]) as u8;
241 }
242
243 pub fn is_zero(&self) -> bool {
245 (self.0[0]
246 | self.0[1]
247 | self.0[2]
248 | self.0[3]
249 | self.0[4]
250 | self.0[5]
251 | self.0[6]
252 | self.0[7])
253 == 0
254 }
255
256 pub fn is_one(&self) -> bool {
258 ((self.0[0] ^ 1)
259 | self.0[1]
260 | self.0[2]
261 | self.0[3]
262 | self.0[4]
263 | self.0[5]
264 | self.0[6]
265 | self.0[7])
266 == 0
267 }
268
269 pub fn is_high(&self) -> bool {
272 let mut yes: Choice = 0.into();
273 let mut no: Choice = 0.into();
274 no |= Choice::from((self.0[7] < SECP256K1_N_H_7) as u8);
275 yes |= Choice::from((self.0[7] > SECP256K1_N_H_7) as u8) & !no;
276 no |= Choice::from((self.0[6] < SECP256K1_N_H_6) as u8) & !yes; no |= Choice::from((self.0[5] < SECP256K1_N_H_5) as u8) & !yes; no |= Choice::from((self.0[4] < SECP256K1_N_H_4) as u8) & !yes; no |= Choice::from((self.0[3] < SECP256K1_N_H_3) as u8) & !yes;
280 yes |= Choice::from((self.0[3] > SECP256K1_N_H_3) as u8) & !no;
281 no |= Choice::from((self.0[2] < SECP256K1_N_H_2) as u8) & !yes;
282 yes |= Choice::from((self.0[2] > SECP256K1_N_H_2) as u8) & !no;
283 no |= Choice::from((self.0[1] < SECP256K1_N_H_1) as u8) & !yes;
284 yes |= Choice::from((self.0[1] > SECP256K1_N_H_1) as u8) & !no;
285 yes |= Choice::from((self.0[0] >= SECP256K1_N_H_0) as u8) & !no;
286 yes.into()
287 }
288
    /// Conditionally replace `self` with its modular negation `n - self`
    /// (zero stays zero), constant-time with respect to `flag`.
    pub fn cond_neg_assign(&mut self, flag: Choice) {
        // All-ones when negating, all-zero otherwise.
        let mask = u32::max_value() * flag.unwrap_u8() as u32;

        // All-ones unless the scalar is zero: masks the result of negating
        // zero back to zero (which would otherwise come out as n).
        let nonzero = 0xFFFFFFFFu64 * !self.is_zero() as u64;
        // Carry-in of 1 completes the two's complement (!x + 1) when negating.
        let mut t = 1u64 * flag.unwrap_u8() as u64;

        unroll! {
            for i in 0..8 {
                // When flag is set: self = n + !self + 1 = n - self (mod 2^256).
                t += (self.0[i] ^ mask) as u64 + (SECP256K1_N[i] & mask) as u64;
                self.0[i] = (t & nonzero) as u32;
                t >>= 32;
            }
        }

        let _ = t;
    }
306}
307
/// Defines local carry-chain helper macros over a 96-bit accumulator held in
/// the three named `u32` variables (low, middle, high word).
///
/// * `muladd!(a, b)` — add the 64-bit product `a * b` into the accumulator,
///   propagating carries into all three words.
/// * `muladd_fast!(a, b)` — same, but the caller guarantees the middle word
///   cannot carry (the high word is never touched).
/// * `muladd2!(a, b)` — add `2 * a * b` (for the symmetric off-diagonal
///   terms when squaring).
/// * `sumadd!(a)` / `sumadd_fast!(a)` — add a single 32-bit term, with /
///   without high-word carry propagation.
/// * `extract!()` — pop the low word and shift the accumulator down by 32
///   bits; `extract_fast!()` additionally asserts the high word is zero.
macro_rules! define_ops {
    ($c0: ident, $c1: ident, $c2: ident) => {
        #[allow(unused_macros)]
        macro_rules! muladd {
            ($a: expr, $b: expr) => {
                let a = $a;
                let b = $b;
                // Full 64-bit product, split into high/low 32-bit halves.
                let t = (a as u64) * (b as u64);
                let mut th = (t >> 32) as u32;
                let tl = t as u32;
                $c0 = $c0.wrapping_add(tl);
                // Carry out of the low word folds into the high half.
                th = th.wrapping_add(if $c0 < tl { 1 } else { 0 });
                $c1 = $c1.wrapping_add(th);
                $c2 = $c2.wrapping_add(if $c1 < th { 1 } else { 0 });
                debug_assert!($c1 >= th || $c2 != 0);
            };
        }

        #[allow(unused_macros)]
        macro_rules! muladd_fast {
            ($a: expr, $b: expr) => {
                let a = $a;
                let b = $b;
                let t = (a as u64) * (b as u64);
                let mut th = (t >> 32) as u32;
                let tl = t as u32;
                $c0 = $c0.wrapping_add(tl);
                th = th.wrapping_add(if $c0 < tl { 1 } else { 0 });
                $c1 = $c1.wrapping_add(th);
                // Caller guarantees the middle word cannot overflow here.
                debug_assert!($c1 >= th);
            };
        }

        #[allow(unused_macros)]
        macro_rules! muladd2 {
            ($a: expr, $b: expr) => {
                let a = $a;
                let b = $b;
                let t = (a as u64) * (b as u64);
                let th = (t >> 32) as u32;
                let tl = t as u32;
                // Double the product, tracking both doubling carries.
                let mut th2 = th.wrapping_add(th);
                $c2 = $c2.wrapping_add(if th2 < th { 1 } else { 0 });
                debug_assert!(th2 >= th || $c2 != 0);
                let tl2 = tl.wrapping_add(tl);
                th2 = th2.wrapping_add(if tl2 < tl { 1 } else { 0 });
                $c0 = $c0.wrapping_add(tl2);
                th2 = th2.wrapping_add(if $c0 < tl2 { 1 } else { 0 });
                // If the low-word carry also wrapped th2, it lands in c2.
                $c2 = $c2.wrapping_add(if $c0 < tl2 && th2 == 0 { 1 } else { 0 });
                debug_assert!($c0 >= tl2 || th2 != 0 || $c2 != 0);
                $c1 = $c1.wrapping_add(th2);
                $c2 = $c2.wrapping_add(if $c1 < th2 { 1 } else { 0 });
                debug_assert!($c1 >= th2 || $c2 != 0);
            };
        }

        #[allow(unused_macros)]
        macro_rules! sumadd {
            ($a: expr) => {
                let a = $a;
                $c0 = $c0.wrapping_add(a);
                let over = if $c0 < a { 1 } else { 0 };
                $c1 = $c1.wrapping_add(over);
                $c2 = $c2.wrapping_add(if $c1 < over { 1 } else { 0 });
            };
        }

        #[allow(unused_macros)]
        macro_rules! sumadd_fast {
            ($a: expr) => {
                let a = $a;
                $c0 = $c0.wrapping_add(a);
                $c1 = $c1.wrapping_add(if $c0 < a { 1 } else { 0 });
                debug_assert!($c1 != 0 || $c0 >= a);
                debug_assert!($c2 == 0);
            };
        }

        #[allow(unused_macros)]
        macro_rules! extract {
            () => {{
                #[allow(unused_assignments)]
                {
                    let n = $c0;
                    $c0 = $c1;
                    $c1 = $c2;
                    $c2 = 0;
                    n
                }
            }};
        }

        #[allow(unused_macros)]
        macro_rules! extract_fast {
            () => {{
                #[allow(unused_assignments)]
                {
                    let n = $c0;
                    $c0 = $c1;
                    $c1 = 0;
                    debug_assert!($c2 == 0);
                    n
                }
            }};
        }
    };
}
415
416impl Scalar {
    /// Reduce a 512-bit value `l` (16 limbs, little-endian) modulo `n`,
    /// storing the result in `self`.
    ///
    /// Uses the identity `2^256 ≡ 2^256 - n (mod n)`: the bits above 2^256
    /// are folded back in via the `SECP256K1_N_C_*` limbs, shrinking the
    /// value 512 → 385 → 258 bits, and a final conditional subtraction of
    /// `n` produces a fully reduced result.
    fn reduce_512(&mut self, l: &[u32; 16]) {
        let (mut c0, mut c1, mut c2): (u32, u32, u32);
        define_ops!(c0, c1, c2);

        let mut c: u64;
        // High half of the 512-bit input (the part to be folded back in).
        let (n0, n1, n2, n3, n4, n5, n6, n7) =
            (l[8], l[9], l[10], l[11], l[12], l[13], l[14], l[15]);
        // 385-bit intermediate (13 limbs) and 258-bit intermediate (9 limbs).
        let (m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12): (
            u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32,
        );
        let (p0, p1, p2, p3, p4, p5, p6, p7, p8): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        // 512 -> 385 bits: m = l[0..8] + n[0..8] * (2^256 - n).
        c0 = l[0];
        c1 = 0;
        c2 = 0;
        muladd_fast!(n0, SECP256K1_N_C_0);
        m0 = extract_fast!();
        sumadd_fast!(l[1]);
        muladd!(n1, SECP256K1_N_C_0);
        muladd!(n0, SECP256K1_N_C_1);
        m1 = extract!();
        sumadd!(l[2]);
        muladd!(n2, SECP256K1_N_C_0);
        muladd!(n1, SECP256K1_N_C_1);
        muladd!(n0, SECP256K1_N_C_2);
        m2 = extract!();
        sumadd!(l[3]);
        muladd!(n3, SECP256K1_N_C_0);
        muladd!(n2, SECP256K1_N_C_1);
        muladd!(n1, SECP256K1_N_C_2);
        muladd!(n0, SECP256K1_N_C_3);
        m3 = extract!();
        sumadd!(l[4]);
        muladd!(n4, SECP256K1_N_C_0);
        muladd!(n3, SECP256K1_N_C_1);
        muladd!(n2, SECP256K1_N_C_2);
        muladd!(n1, SECP256K1_N_C_3);
        // SECP256K1_N_C_4 == 1, so the product degenerates to a plain add.
        sumadd!(n0);
        m4 = extract!();
        sumadd!(l[5]);
        muladd!(n5, SECP256K1_N_C_0);
        muladd!(n4, SECP256K1_N_C_1);
        muladd!(n3, SECP256K1_N_C_2);
        muladd!(n2, SECP256K1_N_C_3);
        sumadd!(n1);
        m5 = extract!();
        sumadd!(l[6]);
        muladd!(n6, SECP256K1_N_C_0);
        muladd!(n5, SECP256K1_N_C_1);
        muladd!(n4, SECP256K1_N_C_2);
        muladd!(n3, SECP256K1_N_C_3);
        sumadd!(n2);
        m6 = extract!();
        sumadd!(l[7]);
        muladd!(n7, SECP256K1_N_C_0);
        muladd!(n6, SECP256K1_N_C_1);
        muladd!(n5, SECP256K1_N_C_2);
        muladd!(n4, SECP256K1_N_C_3);
        sumadd!(n3);
        m7 = extract!();
        muladd!(n7, SECP256K1_N_C_1);
        muladd!(n6, SECP256K1_N_C_2);
        muladd!(n5, SECP256K1_N_C_3);
        sumadd!(n4);
        m8 = extract!();
        muladd!(n7, SECP256K1_N_C_2);
        muladd!(n6, SECP256K1_N_C_3);
        sumadd!(n5);
        m9 = extract!();
        muladd!(n7, SECP256K1_N_C_3);
        sumadd!(n6);
        m10 = extract!();
        sumadd_fast!(n7);
        m11 = extract_fast!();
        debug_assert!(c0 <= 1);
        m12 = c0;

        // 385 -> 258 bits: p = m[0..8] + m[8..13] * (2^256 - n).
        c0 = m0;
        c1 = 0;
        c2 = 0;
        muladd_fast!(m8, SECP256K1_N_C_0);
        p0 = extract_fast!();
        sumadd_fast!(m1);
        muladd!(m9, SECP256K1_N_C_0);
        muladd!(m8, SECP256K1_N_C_1);
        p1 = extract!();
        sumadd!(m2);
        muladd!(m10, SECP256K1_N_C_0);
        muladd!(m9, SECP256K1_N_C_1);
        muladd!(m8, SECP256K1_N_C_2);
        p2 = extract!();
        sumadd!(m3);
        muladd!(m11, SECP256K1_N_C_0);
        muladd!(m10, SECP256K1_N_C_1);
        muladd!(m9, SECP256K1_N_C_2);
        muladd!(m8, SECP256K1_N_C_3);
        p3 = extract!();
        sumadd!(m4);
        muladd!(m12, SECP256K1_N_C_0);
        muladd!(m11, SECP256K1_N_C_1);
        muladd!(m10, SECP256K1_N_C_2);
        muladd!(m9, SECP256K1_N_C_3);
        sumadd!(m8);
        p4 = extract!();
        sumadd!(m5);
        muladd!(m12, SECP256K1_N_C_1);
        muladd!(m11, SECP256K1_N_C_2);
        muladd!(m10, SECP256K1_N_C_3);
        sumadd!(m9);
        p5 = extract!();
        sumadd!(m6);
        muladd!(m12, SECP256K1_N_C_2);
        muladd!(m11, SECP256K1_N_C_3);
        sumadd!(m10);
        p6 = extract!();
        sumadd_fast!(m7);
        muladd_fast!(m12, SECP256K1_N_C_3);
        sumadd_fast!(m11);
        p7 = extract_fast!();
        p8 = c0 + m12;
        debug_assert!(p8 <= 2);

        // 258 -> 256 bits: fold p8 once more with a plain 64-bit carry chain,
        // then a single conditional subtraction of n finishes the reduction.
        c = p0 as u64 + SECP256K1_N_C_0 as u64 * p8 as u64;
        self.0[0] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p1 as u64 + SECP256K1_N_C_1 as u64 * p8 as u64;
        self.0[1] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p2 as u64 + SECP256K1_N_C_2 as u64 * p8 as u64;
        self.0[2] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p3 as u64 + SECP256K1_N_C_3 as u64 * p8 as u64;
        self.0[3] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p4 as u64 + p8 as u64;
        self.0[4] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p5 as u64;
        self.0[5] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p6 as u64;
        self.0[6] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;
        c += p7 as u64;
        self.0[7] = (c & 0xFFFFFFFF) as u32;
        c >>= 32;

        // `c` is the carry out of bit 256 (0 or 1); either the carry or a
        // value still >= n forces one final subtraction.
        let overflow = self.check_overflow();
        self.reduce(Choice::from(c as u8) | overflow);
    }
583
    /// Schoolbook 256x256 -> 512-bit multiply: `l = self * b`, 16 limbs
    /// little-endian, computed column by column. Each column `l[k]` sums all
    /// products `self.0[i] * b.0[j]` with `i + j == k`, accumulated in the
    /// 96-bit (c0, c1, c2) accumulator from `define_ops!`.
    fn mul_512(&self, b: &Scalar, l: &mut [u32; 16]) {
        let (mut c0, mut c1, mut c2): (u32, u32, u32) = (0, 0, 0);
        define_ops!(c0, c1, c2);

        // Column 0 has a single term, so the fast (no-c2) variants suffice.
        muladd_fast!(self.0[0], b.0[0]);
        l[0] = extract_fast!();
        muladd!(self.0[0], b.0[1]);
        muladd!(self.0[1], b.0[0]);
        l[1] = extract!();
        muladd!(self.0[0], b.0[2]);
        muladd!(self.0[1], b.0[1]);
        muladd!(self.0[2], b.0[0]);
        l[2] = extract!();
        muladd!(self.0[0], b.0[3]);
        muladd!(self.0[1], b.0[2]);
        muladd!(self.0[2], b.0[1]);
        muladd!(self.0[3], b.0[0]);
        l[3] = extract!();
        muladd!(self.0[0], b.0[4]);
        muladd!(self.0[1], b.0[3]);
        muladd!(self.0[2], b.0[2]);
        muladd!(self.0[3], b.0[1]);
        muladd!(self.0[4], b.0[0]);
        l[4] = extract!();
        muladd!(self.0[0], b.0[5]);
        muladd!(self.0[1], b.0[4]);
        muladd!(self.0[2], b.0[3]);
        muladd!(self.0[3], b.0[2]);
        muladd!(self.0[4], b.0[1]);
        muladd!(self.0[5], b.0[0]);
        l[5] = extract!();
        muladd!(self.0[0], b.0[6]);
        muladd!(self.0[1], b.0[5]);
        muladd!(self.0[2], b.0[4]);
        muladd!(self.0[3], b.0[3]);
        muladd!(self.0[4], b.0[2]);
        muladd!(self.0[5], b.0[1]);
        muladd!(self.0[6], b.0[0]);
        l[6] = extract!();
        muladd!(self.0[0], b.0[7]);
        muladd!(self.0[1], b.0[6]);
        muladd!(self.0[2], b.0[5]);
        muladd!(self.0[3], b.0[4]);
        muladd!(self.0[4], b.0[3]);
        muladd!(self.0[5], b.0[2]);
        muladd!(self.0[6], b.0[1]);
        muladd!(self.0[7], b.0[0]);
        l[7] = extract!();
        muladd!(self.0[1], b.0[7]);
        muladd!(self.0[2], b.0[6]);
        muladd!(self.0[3], b.0[5]);
        muladd!(self.0[4], b.0[4]);
        muladd!(self.0[5], b.0[3]);
        muladd!(self.0[6], b.0[2]);
        muladd!(self.0[7], b.0[1]);
        l[8] = extract!();
        muladd!(self.0[2], b.0[7]);
        muladd!(self.0[3], b.0[6]);
        muladd!(self.0[4], b.0[5]);
        muladd!(self.0[5], b.0[4]);
        muladd!(self.0[6], b.0[3]);
        muladd!(self.0[7], b.0[2]);
        l[9] = extract!();
        muladd!(self.0[3], b.0[7]);
        muladd!(self.0[4], b.0[6]);
        muladd!(self.0[5], b.0[5]);
        muladd!(self.0[6], b.0[4]);
        muladd!(self.0[7], b.0[3]);
        l[10] = extract!();
        muladd!(self.0[4], b.0[7]);
        muladd!(self.0[5], b.0[6]);
        muladd!(self.0[6], b.0[5]);
        muladd!(self.0[7], b.0[4]);
        l[11] = extract!();
        muladd!(self.0[5], b.0[7]);
        muladd!(self.0[6], b.0[6]);
        muladd!(self.0[7], b.0[5]);
        l[12] = extract!();
        muladd!(self.0[6], b.0[7]);
        muladd!(self.0[7], b.0[6]);
        l[13] = extract!();
        // Last column is again a single term.
        muladd_fast!(self.0[7], b.0[7]);
        l[14] = extract_fast!();
        debug_assert!(c1 == 0);
        l[15] = c0;
    }
671
    /// 256-bit squaring into a 512-bit result: `l = self * self`, 16 limbs
    /// little-endian. Like `mul_512` but exploits symmetry: off-diagonal
    /// products `a[i] * a[j]` (i != j) appear twice and are added via
    /// `muladd2!`, while diagonal terms use a plain `muladd!`.
    fn sqr_512(&self, l: &mut [u32; 16]) {
        let (mut c0, mut c1, mut c2): (u32, u32, u32) = (0, 0, 0);
        define_ops!(c0, c1, c2);

        muladd_fast!(self.0[0], self.0[0]);
        l[0] = extract_fast!();
        muladd2!(self.0[0], self.0[1]);
        l[1] = extract!();
        muladd2!(self.0[0], self.0[2]);
        muladd!(self.0[1], self.0[1]);
        l[2] = extract!();
        muladd2!(self.0[0], self.0[3]);
        muladd2!(self.0[1], self.0[2]);
        l[3] = extract!();
        muladd2!(self.0[0], self.0[4]);
        muladd2!(self.0[1], self.0[3]);
        muladd!(self.0[2], self.0[2]);
        l[4] = extract!();
        muladd2!(self.0[0], self.0[5]);
        muladd2!(self.0[1], self.0[4]);
        muladd2!(self.0[2], self.0[3]);
        l[5] = extract!();
        muladd2!(self.0[0], self.0[6]);
        muladd2!(self.0[1], self.0[5]);
        muladd2!(self.0[2], self.0[4]);
        muladd!(self.0[3], self.0[3]);
        l[6] = extract!();
        muladd2!(self.0[0], self.0[7]);
        muladd2!(self.0[1], self.0[6]);
        muladd2!(self.0[2], self.0[5]);
        muladd2!(self.0[3], self.0[4]);
        l[7] = extract!();
        muladd2!(self.0[1], self.0[7]);
        muladd2!(self.0[2], self.0[6]);
        muladd2!(self.0[3], self.0[5]);
        muladd!(self.0[4], self.0[4]);
        l[8] = extract!();
        muladd2!(self.0[2], self.0[7]);
        muladd2!(self.0[3], self.0[6]);
        muladd2!(self.0[4], self.0[5]);
        l[9] = extract!();
        muladd2!(self.0[3], self.0[7]);
        muladd2!(self.0[4], self.0[6]);
        muladd!(self.0[5], self.0[5]);
        l[10] = extract!();
        muladd2!(self.0[4], self.0[7]);
        muladd2!(self.0[5], self.0[6]);
        l[11] = extract!();
        muladd2!(self.0[5], self.0[7]);
        muladd!(self.0[6], self.0[6]);
        l[12] = extract!();
        muladd2!(self.0[6], self.0[7]);
        l[13] = extract!();
        muladd_fast!(self.0[7], self.0[7]);
        l[14] = extract_fast!();
        debug_assert!(c1 == 0);
        l[15] = c0;
    }
731
732 pub fn mul_in_place(&mut self, a: &Scalar, b: &Scalar) {
733 let mut l = [0u32; 16];
734 a.mul_512(b, &mut l);
735 self.reduce_512(&l);
736 }
737
738 pub fn shr_int(&mut self, n: usize) -> u32 {
741 let ret: u32;
742 debug_assert!(n > 0);
743 debug_assert!(n < 16);
744 ret = self.0[0] & ((1 << n) - 1);
745 self.0[0] = (self.0[0] >> n) + (self.0[1] << (32 - n));
746 self.0[1] = (self.0[1] >> n) + (self.0[2] << (32 - n));
747 self.0[2] = (self.0[2] >> n) + (self.0[3] << (32 - n));
748 self.0[3] = (self.0[3] >> n) + (self.0[4] << (32 - n));
749 self.0[4] = (self.0[4] >> n) + (self.0[5] << (32 - n));
750 self.0[5] = (self.0[5] >> n) + (self.0[6] << (32 - n));
751 self.0[6] = (self.0[6] >> n) + (self.0[7] << (32 - n));
752 self.0[7] >>= n;
753 ret
754 }
755
756 pub fn sqr_in_place(&mut self, a: &Scalar) {
757 let mut l = [0u32; 16];
758 a.sqr_512(&mut l);
759 self.reduce_512(&l);
760 }
761
762 pub fn sqr(&self) -> Scalar {
763 let mut ret = Scalar::default();
764 ret.sqr_in_place(self);
765 ret
766 }
767
768 pub fn inv_in_place(&mut self, x: &Scalar) {
769 let u2 = x.sqr();
770 let x2 = u2 * *x;
771 let u5 = u2 * x2;
772 let x3 = u5 * u2;
773 let u9 = x3 * u2;
774 let u11 = u9 * u2;
775 let u13 = u11 * u2;
776
777 let mut x6 = u13.sqr();
778 x6 = x6.sqr();
779 x6 *= &u11;
780
781 let mut x8 = x6.sqr();
782 x8 = x8.sqr();
783 x8 *= &x2;
784
785 let mut x14 = x8.sqr();
786 for _ in 0..5 {
787 x14 = x14.sqr();
788 }
789 x14 *= &x6;
790
791 let mut x28 = x14.sqr();
792 for _ in 0..13 {
793 x28 = x28.sqr();
794 }
795 x28 *= &x14;
796
797 let mut x56 = x28.sqr();
798 for _ in 0..27 {
799 x56 = x56.sqr();
800 }
801 x56 *= &x28;
802
803 let mut x112 = x56.sqr();
804 for _ in 0..55 {
805 x112 = x112.sqr();
806 }
807 x112 *= &x56;
808
809 let mut x126 = x112.sqr();
810 for _ in 0..13 {
811 x126 = x126.sqr();
812 }
813 x126 *= &x14;
814
815 let mut t = x126;
816 for _ in 0..3 {
817 t = t.sqr();
818 }
819 t *= &u5;
820 for _ in 0..4 {
821 t = t.sqr();
822 }
823 t *= &x3;
824 for _ in 0..4 {
825 t = t.sqr();
826 }
827 t *= &u5;
828 for _ in 0..5 {
829 t = t.sqr();
830 }
831 t *= &u11;
832 for _ in 0..4 {
833 t = t.sqr();
834 }
835 t *= &u11;
836 for _ in 0..4 {
837 t = t.sqr();
838 }
839 t *= &x3;
840 for _ in 0..5 {
841 t = t.sqr();
842 }
843 t *= &x3;
844 for _ in 0..6 {
845 t = t.sqr();
846 }
847 t *= &u13;
848 for _ in 0..4 {
849 t = t.sqr();
850 }
851 t *= &u5;
852 for _ in 0..3 {
853 t = t.sqr();
854 }
855 t *= &x3;
856 for _ in 0..5 {
857 t = t.sqr();
858 }
859 t *= &u9;
860 for _ in 0..6 {
861 t = t.sqr();
862 }
863 t *= &u5;
864 for _ in 0..10 {
865 t = t.sqr();
866 }
867 t *= &x3;
868 for _ in 0..4 {
869 t = t.sqr();
870 }
871 t *= &x3;
872 for _ in 0..9 {
873 t = t.sqr();
874 }
875 t *= &x8;
876 for _ in 0..5 {
877 t = t.sqr();
878 }
879 t *= &u9;
880 for _ in 0..6 {
881 t = t.sqr();
882 }
883 t *= &u11;
884 for _ in 0..4 {
885 t = t.sqr();
886 }
887 t *= &u13;
888 for _ in 0..5 {
889 t = t.sqr();
890 }
891 t *= &x2;
892 for _ in 0..6 {
893 t = t.sqr();
894 }
895 t *= &u13;
896 for _ in 0..10 {
897 t = t.sqr();
898 }
899 t *= &u13;
900 for _ in 0..4 {
901 t = t.sqr();
902 }
903 t *= &u9;
904 for _ in 0..6 {
905 t = t.sqr();
906 }
907 t *= x;
908 for _ in 0..8 {
909 t = t.sqr();
910 }
911 *self = t * x6;
912 }
913
914 pub fn inv(&self) -> Scalar {
915 let mut ret = Scalar::default();
916 ret.inv_in_place(self);
917 ret
918 }
919
    /// Variable-time inverse. Currently just an alias for the constant-time
    /// [`Scalar::inv`]; no variable-time shortcut is implemented here.
    pub fn inv_var(&self) -> Scalar {
        self.inv()
    }
923
924 pub fn is_even(&self) -> bool {
925 self.0[0] & 1 == 0
926 }
927}
928
929impl Default for Scalar {
930 fn default() -> Scalar {
931 Scalar([0u32; 8])
932 }
933}
934
935impl Add<Scalar> for Scalar {
936 type Output = Scalar;
937 fn add(mut self, other: Scalar) -> Scalar {
938 self.add_assign(&other);
939 self
940 }
941}
942
943impl<'a, 'b> Add<&'a Scalar> for &'b Scalar {
944 type Output = Scalar;
945 fn add(self, other: &'a Scalar) -> Scalar {
946 let mut ret = *self;
947 ret.add_assign(other);
948 ret
949 }
950}
951
impl<'a> AddAssign<&'a Scalar> for Scalar {
    /// Modular addition: limbwise add with carry, then subtract `n` once if
    /// the raw sum carried past 2^256 or still exceeds the group order.
    fn add_assign(&mut self, other: &'a Scalar) {
        // 64-bit carry accumulator threaded through all eight limbs.
        let mut t = 0u64;

        unroll! {
            for i in 0..8 {
                t += (self.0[i] as u64) + (other.0[i] as u64);
                self.0[i] = (t & 0xFFFFFFFF) as u32;
                t >>= 32;
            }
        }

        // `t` is the carry out of bit 256 (0 or 1); at most one subtraction
        // of n is ever required since both inputs were below n.
        let overflow = self.check_overflow();
        self.reduce(Choice::from(t as u8) | overflow);
    }
}
968
969impl AddAssign<Scalar> for Scalar {
970 fn add_assign(&mut self, other: Scalar) {
971 self.add_assign(&other)
972 }
973}
974
975impl Mul<Scalar> for Scalar {
976 type Output = Scalar;
977 fn mul(self, other: Scalar) -> Scalar {
978 let mut ret = Scalar::default();
979 ret.mul_in_place(&self, &other);
980 ret
981 }
982}
983
984impl<'a, 'b> Mul<&'a Scalar> for &'b Scalar {
985 type Output = Scalar;
986 fn mul(self, other: &'a Scalar) -> Scalar {
987 let mut ret = Scalar::default();
988 ret.mul_in_place(self, other);
989 ret
990 }
991}
992
993impl<'a> MulAssign<&'a Scalar> for Scalar {
994 fn mul_assign(&mut self, other: &'a Scalar) {
995 let mut ret = Scalar::default();
996 ret.mul_in_place(self, other);
997 *self = ret;
998 }
999}
1000
1001impl MulAssign<Scalar> for Scalar {
1002 fn mul_assign(&mut self, other: Scalar) {
1003 self.mul_assign(&other)
1004 }
1005}
1006
1007impl Neg for Scalar {
1008 type Output = Scalar;
1009 fn neg(mut self) -> Scalar {
1010 self.cond_neg_assign(1.into());
1011 self
1012 }
1013}
1014
1015impl<'a> Neg for &'a Scalar {
1016 type Output = Scalar;
1017 fn neg(self) -> Scalar {
1018 let value = *self;
1019 -value
1020 }
1021}
1022
1023impl core::fmt::LowerHex for Scalar {
1024 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1025 for word in &self.0[..] {
1026 for byte in word.to_be_bytes().iter() {
1027 write!(f, "{:02x}", byte)?;
1028 }
1029 }
1030 Ok(())
1031 }
1032}