use core::{
    cmp::Ordering,
    ops::{Add, AddAssign, Mul, MulAssign},
};

/// Assert, in debug builds only, that `$x` fits in `$n` bits.
macro_rules! debug_assert_bits {
    ($x: expr, $n: expr) => {
        debug_assert!($x >> $n == 0);
    };
}

/// A field element of the secp256k1 base field, using ten 26-bit limbs.
#[derive(Debug, Clone, Copy)]
pub struct Field {
    /// The limbs, least significant first: the represented value is
    /// `sum(n[i] * 2^(26 * i))` modulo `p = 2^256 - 0x1000003D1`.
    n: [u32; 10],
    /// Per-limb growth bound: each limb is at most `magnitude` times the
    /// saturated limb value (see `verify`).
    magnitude: u32,
    /// Whether the value is fully reduced modulo p (implies magnitude <= 1).
    normalized: bool,
}

impl Field {
    /// Construct a field element directly from ten 26-bit limbs
    /// (most-significant limb first). No reduction is performed.
    pub const fn new_raw(
        d9: u32,
        d8: u32,
        d7: u32,
        d6: u32,
        d5: u32,
        d4: u32,
        d3: u32,
        d2: u32,
        d1: u32,
        d0: u32,
    ) -> Self {
        Self {
            n: [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9],
            magnitude: 1,
            normalized: false,
        }
    }

    /// Construct a field element from eight 32-bit words (most-significant
    /// word first), repacking them into 26-bit limbs. The input is assumed
    /// to already be reduced modulo p.
    pub const fn new(
        d7: u32,
        d6: u32,
        d5: u32,
        d4: u32,
        d3: u32,
        d2: u32,
        d1: u32,
        d0: u32,
    ) -> Self {
        Self {
            n: [
                d0 & 0x3ffffff,
                (d0 >> 26) | ((d1 & 0xfffff) << 6),
                (d1 >> 20) | ((d2 & 0x3fff) << 12),
                (d2 >> 14) | ((d3 & 0xff) << 18),
                (d3 >> 8) | ((d4 & 0x3) << 24),
                (d4 >> 2) & 0x3ffffff,
                (d4 >> 28) | ((d5 & 0x3fffff) << 4),
                (d5 >> 22) | ((d6 & 0xffff) << 10),
                (d6 >> 16) | ((d7 & 0x3ff) << 16),
                (d7 >> 10),
            ],
            magnitude: 1,
            normalized: true,
        }
    }

    /// Construct a field element from a small integer.
    pub fn from_int(a: u32) -> Field {
        let mut f = Field::default();
        f.set_int(a);
        f
    }

    /// Check the internal invariants: every limb is within the bound implied
    /// by `magnitude`, and a normalized element is strictly less than p.
    fn verify(&self) -> bool {
        let m = if self.normalized { 1 } else { 2 } * self.magnitude;
        let mut r = true;
        r = r && (self.n[0] <= 0x3ffffff * m);
        r = r && (self.n[1] <= 0x3ffffff * m);
        r = r && (self.n[2] <= 0x3ffffff * m);
        r = r && (self.n[3] <= 0x3ffffff * m);
        r = r && (self.n[4] <= 0x3ffffff * m);
        r = r && (self.n[5] <= 0x3ffffff * m);
        r = r && (self.n[6] <= 0x3ffffff * m);
        r = r && (self.n[7] <= 0x3ffffff * m);
        r = r && (self.n[8] <= 0x3ffffff * m);
        r = r && (self.n[9] <= 0x03fffff * m);
        r = r && (self.magnitude <= 32);
        if self.normalized {
            r = r && self.magnitude <= 1;
            // A normalized value must be below p: if the top limb and all
            // middle limbs are saturated, the two low limbs must not reach
            // the corresponding limbs of p.
            if r && (self.n[9] == 0x03fffff) {
                let mid = self.n[8]
                    & self.n[7]
                    & self.n[6]
                    & self.n[5]
                    & self.n[4]
                    & self.n[3]
                    & self.n[2];
                if mid == 0x3ffffff {
                    r = r && ((self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) <= 0x3ffffff)
                }
            }
        }
        r
    }

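    /// Fully reduce the element modulo p, leaving it with magnitude 1 and
    /// `normalized` set.
    ///
    /// A minimal usage sketch (marked `ignore` since the crate path of this
    /// module is not fixed here):
    ///
    /// ```ignore
    /// let a = Field::from_int(5);
    /// let b = Field::from_int(3);
    /// let mut sum = a + b; // addition leaves the result un-normalized
    /// sum.normalize();     // back to the canonical, fully reduced form
    /// assert_eq!(sum, Field::from_int(8));
    /// ```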
    pub fn normalize(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        // Reduce t9 at the start so there will be at most a single carry
        // from the first pass.
        let mut m: u32;
        let mut x = t9 >> 22;
        t9 &= 0x03fffff;

        // The first pass ensures the magnitude is 1, but the value may
        // still be greater than or equal to p.
        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        m = t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        m &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        m &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        m &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        m &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        m &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        m &= t8;

        debug_assert!(t9 >> 23 == 0);

        // At most a single final reduction is still needed; check whether
        // the value is greater than or equal to p.
        x = (t9 >> 22)
            | (if t9 == 0x03fffff { 1 } else { 0 }
                & if m == 0x3ffffff { 1 } else { 0 }
                & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff {
                    1
                } else {
                    0
                }));

        // Apply the final reduction unconditionally, for uniform timing.
        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;

        // If t9 didn't carry to bit 22 already, it should have after any
        // final reduction.
        debug_assert!(t9 >> 22 == x);

        t9 &= 0x03fffff;

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

    /// Weakly normalize: reduce the magnitude to 1, without necessarily
    /// reducing the value below p.
    pub fn normalize_weak(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        // Reduce t9 at the start so there will be at most a single carry
        // from the first pass.
        let x = t9 >> 22;
        t9 &= 0x03fffff;

        // The first pass ensures the magnitude is 1, but the value may
        // still be greater than or equal to p.
        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;

        debug_assert!(t9 >> 23 == 0);

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        debug_assert!(self.verify());
    }

    /// Variable-time variant of `normalize`: the final reduction is only
    /// applied when the value is actually greater than or equal to p.
    pub fn normalize_var(&mut self) {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        // Reduce t9 at the start so there will be at most a single carry
        // from the first pass.
        let mut m: u32;
        let mut x = t9 >> 22;
        t9 &= 0x03fffff;

        // The first pass ensures the magnitude is 1, but the value may
        // still be greater than or equal to p.
        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        m = t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        m &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        m &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        m &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        m &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        m &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        m &= t8;

        debug_assert!(t9 >> 23 == 0);

        // At most a single final reduction is still needed; check whether
        // the value is greater than or equal to p.
        x = (t9 >> 22)
            | (if t9 == 0x03fffff { 1 } else { 0 }
                & if m == 0x3ffffff { 1 } else { 0 }
                & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff {
                    1
                } else {
                    0
                }));

        if x > 0 {
            // x can only be 0 or 1 at this point, so this adds x * 0x3d1.
            t0 += 0x3d1;
            t1 += x << 6;
            t1 += t0 >> 26;
            t0 &= 0x3ffffff;
            t2 += t1 >> 26;
            t1 &= 0x3ffffff;
            t3 += t2 >> 26;
            t2 &= 0x3ffffff;
            t4 += t3 >> 26;
            t3 &= 0x3ffffff;
            t5 += t4 >> 26;
            t4 &= 0x3ffffff;
            t6 += t5 >> 26;
            t5 &= 0x3ffffff;
            t7 += t6 >> 26;
            t6 &= 0x3ffffff;
            t8 += t7 >> 26;
            t7 &= 0x3ffffff;
            t9 += t8 >> 26;
            t8 &= 0x3ffffff;

            debug_assert!(t9 >> 22 == x);

            t9 &= 0x03fffff;
        }

        self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

    /// Check whether the element would reduce to zero modulo p, without
    /// modifying it.
    pub fn normalizes_to_zero(&self) -> bool {
        let mut t0 = self.n[0];
        let mut t1 = self.n[1];
        let mut t2 = self.n[2];
        let mut t3 = self.n[3];
        let mut t4 = self.n[4];
        let mut t5 = self.n[5];
        let mut t6 = self.n[6];
        let mut t7 = self.n[7];
        let mut t8 = self.n[8];
        let mut t9 = self.n[9];

        // z0 ORs the reduced limbs (all-zero test); z1 ANDs each limb XORed
        // with the corresponding limb of p, so it stays all-ones exactly
        // when the once-reduced value equals p.
        let mut z0: u32;
        let mut z1: u32;

        // Reduce t9 at the start so there will be at most a single carry
        // from the first pass.
        let x = t9 >> 22;
        t9 &= 0x03fffff;

        t0 += x * 0x3d1;
        t1 += x << 6;
        t1 += t0 >> 26;
        t0 &= 0x3ffffff;
        z0 = t0;
        z1 = t0 ^ 0x3d0;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        z0 |= t1;
        z1 &= t1 ^ 0x40;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        z0 |= t2;
        z1 &= t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        z0 |= t3;
        z1 &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        z0 |= t4;
        z1 &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        z0 |= t5;
        z1 &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        z0 |= t6;
        z1 &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        z0 |= t7;
        z1 &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        z0 |= t8;
        z1 &= t8;
        z0 |= t9;
        z1 &= t9 ^ 0x3c00000;

        debug_assert!(t9 >> 23 == 0);

        z0 == 0 || z1 == 0x3ffffff
    }

    /// Variable-time variant of `normalizes_to_zero`, with an early exit
    /// based on the lowest limb alone.
    pub fn normalizes_to_zero_var(&self) -> bool {
        let mut t0: u32;
        let mut t1: u32;
        let mut t2: u32;
        let mut t3: u32;
        let mut t4: u32;
        let mut t5: u32;
        let mut t6: u32;
        let mut t7: u32;
        let mut t8: u32;
        let mut t9: u32;
        let mut z0: u32;
        let mut z1: u32;
        let x: u32;

        t0 = self.n[0];
        t9 = self.n[9];

        // Reduce t9 at the start so there will be at most a single carry
        // from the first pass.
        x = t9 >> 22;
        t0 += x * 0x3d1;

        // z0 tracks a possible raw value of 0, z1 a possible raw value of p.
        z0 = t0 & 0x3ffffff;
        z1 = z0 ^ 0x3d0;

        // If the lowest limb already rules out both 0 and p, exit early.
        if z0 != 0 && z1 != 0x3ffffff {
            return false;
        }

        t1 = self.n[1];
        t2 = self.n[2];
        t3 = self.n[3];
        t4 = self.n[4];
        t5 = self.n[5];
        t6 = self.n[6];
        t7 = self.n[7];
        t8 = self.n[8];

        t9 &= 0x03fffff;
        t1 += x << 6;

        t1 += t0 >> 26;
        t2 += t1 >> 26;
        t1 &= 0x3ffffff;
        z0 |= t1;
        z1 &= t1 ^ 0x40;
        t3 += t2 >> 26;
        t2 &= 0x3ffffff;
        z0 |= t2;
        z1 &= t2;
        t4 += t3 >> 26;
        t3 &= 0x3ffffff;
        z0 |= t3;
        z1 &= t3;
        t5 += t4 >> 26;
        t4 &= 0x3ffffff;
        z0 |= t4;
        z1 &= t4;
        t6 += t5 >> 26;
        t5 &= 0x3ffffff;
        z0 |= t5;
        z1 &= t5;
        t7 += t6 >> 26;
        t6 &= 0x3ffffff;
        z0 |= t6;
        z1 &= t6;
        t8 += t7 >> 26;
        t7 &= 0x3ffffff;
        z0 |= t7;
        z1 &= t7;
        t9 += t8 >> 26;
        t8 &= 0x3ffffff;
        z0 |= t8;
        z1 &= t8;
        z0 |= t9;
        z1 &= t9 ^ 0x3c00000;

        debug_assert!(t9 >> 23 == 0);

        z0 == 0 || z1 == 0x3ffffff
    }

    /// Set the element to the small integer `a`.
    pub fn set_int(&mut self, a: u32) {
        self.n = [a, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());
    }

    /// Check whether the element is zero. Requires a normalized input.
    pub fn is_zero(&self) -> bool {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());
        (self.n[0]
            | self.n[1]
            | self.n[2]
            | self.n[3]
            | self.n[4]
            | self.n[5]
            | self.n[6]
            | self.n[7]
            | self.n[8]
            | self.n[9])
            == 0
    }

    /// Check whether the element is odd. Requires a normalized input.
    pub fn is_odd(&self) -> bool {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());
        self.n[0] & 1 != 0
    }

    /// Reset the element to zero.
    pub fn clear(&mut self) {
        self.magnitude = 0;
        self.normalized = true;
        self.n = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    }

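    /// Load the element from 32 big-endian bytes, returning `false` (without
    /// marking the element normalized) when the bytes encode a value greater
    /// than or equal to p.
    ///
    /// A usage sketch (marked `ignore` since the crate path of this module
    /// is not fixed here):
    ///
    /// ```ignore
    /// let mut f = Field::default();
    /// let mut bytes = [0u8; 32];
    /// bytes[31] = 7; // big-endian encoding of the integer 7
    /// assert!(f.set_b32(&bytes));
    /// assert_eq!(f, Field::from_int(7));
    /// ```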
    #[must_use]
    pub fn set_b32(&mut self, a: &[u8; 32]) -> bool {
        self.n[0] = (a[31] as u32)
            | ((a[30] as u32) << 8)
            | ((a[29] as u32) << 16)
            | (((a[28] & 0x3) as u32) << 24);
        self.n[1] = (((a[28] >> 2) & 0x3f) as u32)
            | ((a[27] as u32) << 6)
            | ((a[26] as u32) << 14)
            | (((a[25] & 0xf) as u32) << 22);
        self.n[2] = (((a[25] >> 4) & 0xf) as u32)
            | ((a[24] as u32) << 4)
            | ((a[23] as u32) << 12)
            | (((a[22] as u32) & 0x3f) << 20);
        self.n[3] = (((a[22] >> 6) & 0x3) as u32)
            | ((a[21] as u32) << 2)
            | ((a[20] as u32) << 10)
            | ((a[19] as u32) << 18);
        self.n[4] = (a[18] as u32)
            | ((a[17] as u32) << 8)
            | ((a[16] as u32) << 16)
            | (((a[15] & 0x3) as u32) << 24);
        self.n[5] = (((a[15] >> 2) & 0x3f) as u32)
            | ((a[14] as u32) << 6)
            | ((a[13] as u32) << 14)
            | (((a[12] as u32) & 0xf) << 22);
        self.n[6] = (((a[12] >> 4) & 0xf) as u32)
            | ((a[11] as u32) << 4)
            | ((a[10] as u32) << 12)
            | (((a[9] & 0x3f) as u32) << 20);
        self.n[7] = (((a[9] >> 6) & 0x3) as u32)
            | ((a[8] as u32) << 2)
            | ((a[7] as u32) << 10)
            | ((a[6] as u32) << 18);
        self.n[8] = (a[5] as u32)
            | ((a[4] as u32) << 8)
            | ((a[3] as u32) << 16)
            | (((a[2] & 0x3) as u32) << 24);
        self.n[9] = (((a[2] >> 2) & 0x3f) as u32) | ((a[1] as u32) << 6) | ((a[0] as u32) << 14);

        // Reject byte strings that encode a value greater than or equal to p.
        if self.n[9] == 0x03fffff
            && (self.n[8] & self.n[7] & self.n[6] & self.n[5] & self.n[4] & self.n[3] & self.n[2])
                == 0x3ffffff
            && (self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) > 0x3ffffff
        {
            return false;
        }

        self.magnitude = 1;
        self.normalized = true;
        debug_assert!(self.verify());

        true
    }

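    /// Serialize the (normalized) element into `r` as 32 big-endian bytes;
    /// the inverse of `set_b32`.
    ///
    /// Round-trip sketch (marked `ignore` since the crate path of this
    /// module is not fixed here):
    ///
    /// ```ignore
    /// let f = Field::from_int(42);
    /// let bytes = f.b32();
    /// let mut g = Field::default();
    /// assert!(g.set_b32(&bytes));
    /// assert_eq!(f, g);
    /// ```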
    pub fn fill_b32(&self, r: &mut [u8; 32]) {
        debug_assert!(self.normalized);
        debug_assert!(self.verify());

        r[0] = ((self.n[9] >> 14) & 0xff) as u8;
        r[1] = ((self.n[9] >> 6) & 0xff) as u8;
        r[2] = (((self.n[9] & 0x3f) << 2) | ((self.n[8] >> 24) & 0x3)) as u8;
        r[3] = ((self.n[8] >> 16) & 0xff) as u8;
        r[4] = ((self.n[8] >> 8) & 0xff) as u8;
        r[5] = (self.n[8] & 0xff) as u8;
        r[6] = ((self.n[7] >> 18) & 0xff) as u8;
        r[7] = ((self.n[7] >> 10) & 0xff) as u8;
        r[8] = ((self.n[7] >> 2) & 0xff) as u8;
        r[9] = (((self.n[7] & 0x3) << 6) | ((self.n[6] >> 20) & 0x3f)) as u8;
        r[10] = ((self.n[6] >> 12) & 0xff) as u8;
        r[11] = ((self.n[6] >> 4) & 0xff) as u8;
        r[12] = (((self.n[6] & 0xf) << 4) | ((self.n[5] >> 22) & 0xf)) as u8;
        r[13] = ((self.n[5] >> 14) & 0xff) as u8;
        r[14] = ((self.n[5] >> 6) & 0xff) as u8;
        r[15] = (((self.n[5] & 0x3f) << 2) | ((self.n[4] >> 24) & 0x3)) as u8;
        r[16] = ((self.n[4] >> 16) & 0xff) as u8;
        r[17] = ((self.n[4] >> 8) & 0xff) as u8;
        r[18] = (self.n[4] & 0xff) as u8;
        r[19] = ((self.n[3] >> 18) & 0xff) as u8;
        r[20] = ((self.n[3] >> 10) & 0xff) as u8;
        r[21] = ((self.n[3] >> 2) & 0xff) as u8;
        r[22] = (((self.n[3] & 0x3) << 6) | ((self.n[2] >> 20) & 0x3f)) as u8;
        r[23] = ((self.n[2] >> 12) & 0xff) as u8;
        r[24] = ((self.n[2] >> 4) & 0xff) as u8;
        r[25] = (((self.n[2] & 0xf) << 4) | ((self.n[1] >> 22) & 0xf)) as u8;
        r[26] = ((self.n[1] >> 14) & 0xff) as u8;
        r[27] = ((self.n[1] >> 6) & 0xff) as u8;
        r[28] = (((self.n[1] & 0x3f) << 2) | ((self.n[0] >> 24) & 0x3)) as u8;
        r[29] = ((self.n[0] >> 16) & 0xff) as u8;
        r[30] = ((self.n[0] >> 8) & 0xff) as u8;
        r[31] = (self.n[0] & 0xff) as u8;
    }

    /// Return the 32-byte big-endian encoding of the (normalized) element.
    pub fn b32(&self) -> [u8; 32] {
        let mut r = [0u8; 32];
        self.fill_b32(&mut r);
        r
    }

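    /// Set `self` to the negation of `other`, whose magnitude must be at
    /// most `m`; the result has magnitude `m + 1`.
    ///
    /// Sketch of the identity `a + (-a)` reducing to zero, via the `neg`
    /// wrapper below (marked `ignore` since the crate path is not fixed
    /// here):
    ///
    /// ```ignore
    /// let a = Field::from_int(5);
    /// let sum = a + a.neg(1); // a plus its negation
    /// assert!(sum.normalizes_to_zero());
    /// ```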
    pub fn neg_in_place(&mut self, other: &Field, m: u32) {
        debug_assert!(other.magnitude <= m);
        debug_assert!(other.verify());

        // Limbwise 2 * (m + 1) * p - other: the constants below are the
        // limbs of p, so the result is a valid representative of -other.
        self.n[0] = 0x3fffc2f * 2 * (m + 1) - other.n[0];
        self.n[1] = 0x3ffffbf * 2 * (m + 1) - other.n[1];
        self.n[2] = 0x3ffffff * 2 * (m + 1) - other.n[2];
        self.n[3] = 0x3ffffff * 2 * (m + 1) - other.n[3];
        self.n[4] = 0x3ffffff * 2 * (m + 1) - other.n[4];
        self.n[5] = 0x3ffffff * 2 * (m + 1) - other.n[5];
        self.n[6] = 0x3ffffff * 2 * (m + 1) - other.n[6];
        self.n[7] = 0x3ffffff * 2 * (m + 1) - other.n[7];
        self.n[8] = 0x3ffffff * 2 * (m + 1) - other.n[8];
        self.n[9] = 0x03fffff * 2 * (m + 1) - other.n[9];

        self.magnitude = m + 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    /// Return the negation of `self`, whose magnitude must be at most `m`.
    pub fn neg(&self, m: u32) -> Field {
        let mut ret = Field::default();
        ret.neg_in_place(self, m);
        ret
    }

    /// Multiply in place by a small integer `a`; the magnitude scales by
    /// `a` and must stay within the limits checked by `verify`.
    pub fn mul_int(&mut self, a: u32) {
        self.n[0] *= a;
        self.n[1] *= a;
        self.n[2] *= a;
        self.n[3] *= a;
        self.n[4] *= a;
        self.n[5] *= a;
        self.n[6] *= a;
        self.n[7] *= a;
        self.n[8] *= a;
        self.n[9] *= a;

        self.magnitude *= a;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    /// Variable-time comparison of two normalized elements, from the most
    /// significant limb down.
    pub fn cmp_var(&self, other: &Field) -> Ordering {
        debug_assert!(self.normalized);
        debug_assert!(other.normalized);
        debug_assert!(self.verify());
        debug_assert!(other.verify());

        for i in (0..10).rev() {
            if self.n[i] > other.n[i] {
                return Ordering::Greater;
            }
            if self.n[i] < other.n[i] {
                return Ordering::Less;
            }
        }
        Ordering::Equal
    }

    /// Variable-time equality check: `other - self` must reduce to zero.
    pub fn eq_var(&self, other: &Field) -> bool {
        let mut na = self.neg(1);
        na += other;
        na.normalizes_to_zero_var()
    }

    fn mul_inner(&mut self, a: &Field, b: &Field) {
        // M is the 26-bit limb mask. R1 * 2^26 + R0 = 0x1000003D10, which
        // equals 2^260 modulo p; it is used to fold limb 10 and above of
        // the double-wide product back into the low limbs.
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t0, t1, t2, t3, t4, t5, t6, t7, t9): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);
        debug_assert_bits!(b.n[0], 30);
        debug_assert_bits!(b.n[1], 30);
        debug_assert_bits!(b.n[2], 30);
        debug_assert_bits!(b.n[3], 30);
        debug_assert_bits!(b.n[4], 30);
        debug_assert_bits!(b.n[5], 30);
        debug_assert_bits!(b.n[6], 30);
        debug_assert_bits!(b.n[7], 30);
        debug_assert_bits!(b.n[8], 30);
        debug_assert_bits!(b.n[9], 26);

        d = ((a.n[0] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[0] as u64));
        t9 = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        c = (a.n[0] as u64) * (b.n[0] as u64);
        debug_assert_bits!(c, 60);
        d = d
            .wrapping_add((a.n[1] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[1] as u64));
        debug_assert_bits!(d, 63);
        v0 = d & M;
        d >>= 26;
        c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        t0 = (c & M) as u32;
        c >>= 26;
        c += v0 * R1;

        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add((a.n[2] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[2] as u64));
        debug_assert_bits!(d, 63);
        v1 = d & M;
        d >>= 26;
        c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t1 = (c & M) as u32;
        c >>= 26;
        c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add((a.n[3] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[3] as u64));
        debug_assert_bits!(d, 63);
        v2 = d & M;
        d >>= 26;
        c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t2 = (c & M) as u32;
        c >>= 26;
        c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[4] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[4] as u64));
        debug_assert_bits!(d, 63);
        v3 = d & M;
        d >>= 26;
        c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        t3 = (c & M) as u32;
        c >>= 26;
        c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[5] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[5] as u64));
        debug_assert_bits!(d, 62);
        v4 = d & M;
        d >>= 26;
        c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        t4 = (c & M) as u32;
        c >>= 26;
        c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[6] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[6] as u64));
        debug_assert_bits!(d, 62);
        v5 = d & M;
        d >>= 26;
        c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        t5 = (c & M) as u32;
        c >>= 26;
        c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[0] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add((a.n[7] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[7] as u64));
        debug_assert_bits!(d, 61);
        v6 = d & M;
        d >>= 26;
        c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        t6 = (c & M) as u32;
        c >>= 26;
        c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[0] as u64));
        debug_assert!(c <= 0x8000007c00000007);
        d = d
            .wrapping_add((a.n[8] as u64) * (b.n[9] as u64))
            .wrapping_add((a.n[9] as u64) * (b.n[8] as u64));
        debug_assert_bits!(d, 58);
        v7 = d & M;
        d >>= 26;
        c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        debug_assert!(c <= 0x800001703fffc2f7);
        t7 = (c & M) as u32;
        c >>= 26;
        c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add((a.n[0] as u64) * (b.n[8] as u64))
            .wrapping_add((a.n[1] as u64) * (b.n[7] as u64))
            .wrapping_add((a.n[2] as u64) * (b.n[6] as u64))
            .wrapping_add((a.n[3] as u64) * (b.n[5] as u64))
            .wrapping_add((a.n[4] as u64) * (b.n[4] as u64))
            .wrapping_add((a.n[5] as u64) * (b.n[3] as u64))
            .wrapping_add((a.n[6] as u64) * (b.n[2] as u64))
            .wrapping_add((a.n[7] as u64) * (b.n[1] as u64))
            .wrapping_add((a.n[8] as u64) * (b.n[0] as u64));
        debug_assert!(c <= 0x9000007b80000008);
        d = d.wrapping_add((a.n[9] as u64) * (b.n[9] as u64));
        debug_assert_bits!(d, 57);
        v8 = d & M;
        d >>= 26;
        c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        debug_assert!(c <= 0x9000016fbfffc2f8);
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        self.n[8] = (c & M) as u32;
        c >>= 26;
        c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        self.n[9] = (c & (M >> 4)) as u32;
        c >>= 22;
        c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        self.n[0] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003ffffbf);
        self.n[1] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
    }

    fn sqr_inner(&mut self, a: &Field) {
        // Same constants as in `mul_inner`: M is the 26-bit limb mask and
        // R1 * 2^26 + R0 = 0x1000003D10, which equals 2^260 modulo p.
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t0, t1, t2, t3, t4, t5, t6, t7, t9): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);

        d = (((a.n[0] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[5] as u64));
        t9 = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        c = (a.n[0] as u64) * (a.n[0] as u64);
        debug_assert_bits!(c, 60);
        d = d
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add((a.n[5] as u64) * (a.n[5] as u64));
        debug_assert_bits!(d, 63);
        v0 = d & M;
        d >>= 26;
        c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        t0 = (c & M) as u32;
        c >>= 26;
        c += v0 * R1;
        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        c = c.wrapping_add(((a.n[0] * 2) as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        v1 = d & M;
        d >>= 26;
        c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t1 = (c & M) as u32;
        c >>= 26;
        c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[2] as u64))
            .wrapping_add((a.n[1] as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        d = d
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add((a.n[6] as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        v2 = d & M;
        d >>= 26;
        c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        t2 = (c & M) as u32;
        c >>= 26;
        c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[3] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[4] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[6] * 2) as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 63);
        v3 = d & M;
        d >>= 26;
        c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        t3 = (c & M) as u32;
        c >>= 26;
        c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[4] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[3] as u64))
            .wrapping_add((a.n[2] as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[5] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[6] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add((a.n[7] as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 62);
        v4 = d & M;
        d >>= 26;
        c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        t4 = (c & M) as u32;
        c >>= 26;
        c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[4] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[6] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add(((a.n[7] * 2) as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 62);
        v5 = d & M;
        d >>= 26;
        c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        t5 = (c & M) as u32;
        c >>= 26;
        c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[4] as u64))
            .wrapping_add((a.n[3] as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        d = d
            .wrapping_add(((a.n[7] * 2) as u64) * (a.n[9] as u64))
            .wrapping_add((a.n[8] as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 61);
        v6 = d & M;
        d >>= 26;
        c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        t6 = (c & M) as u32;
        c >>= 26;
        c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[4] as u64));
        debug_assert!(c <= 0x8000007C00000007);
        d = d.wrapping_add(((a.n[8] * 2) as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 58);
        v7 = d & M;
        d >>= 26;
        c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        debug_assert!(c <= 0x800001703FFFC2F7);
        t7 = (c & M) as u32;
        c >>= 26;
        c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        c = c
            .wrapping_add(((a.n[0] * 2) as u64) * (a.n[8] as u64))
            .wrapping_add(((a.n[1] * 2) as u64) * (a.n[7] as u64))
            .wrapping_add(((a.n[2] * 2) as u64) * (a.n[6] as u64))
            .wrapping_add(((a.n[3] * 2) as u64) * (a.n[5] as u64))
            .wrapping_add((a.n[4] as u64) * (a.n[4] as u64));
        debug_assert!(c <= 0x9000007B80000008);
        d = d.wrapping_add((a.n[9] as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 57);
        v8 = d & M;
        d >>= 26;
        c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        debug_assert!(c <= 0x9000016FBFFFC2F8);
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        self.n[8] = (c & M) as u32;
        c >>= 26;
        c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        self.n[9] = (c & (M >> 4)) as u32;
        c >>= 22;
        c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        self.n[0] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003FFFFBF);
        self.n[1] = (d & M) as u32;
        d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
    }

    /// Set `self` to `a * b`. Both inputs must have magnitude at most 8; the
    /// result has magnitude 1 but is not normalized.
    pub fn mul_in_place(&mut self, a: &Field, b: &Field) {
        debug_assert!(a.magnitude <= 8);
        debug_assert!(b.magnitude <= 8);
        debug_assert!(a.verify());
        debug_assert!(b.verify());
        self.mul_inner(a, b);
        self.magnitude = 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    /// Set `self` to `a * a`. The input must have magnitude at most 8; the
    /// result has magnitude 1 but is not normalized.
    pub fn sqr_in_place(&mut self, a: &Field) {
        debug_assert!(a.magnitude <= 8);
        debug_assert!(a.verify());
        self.sqr_inner(a);
        self.magnitude = 1;
        self.normalized = false;
        debug_assert!(self.verify());
    }

    /// Return the square of `self`.
    pub fn sqr(&self) -> Field {
        let mut ret = Field::default();
        ret.sqr_in_place(self);
        ret
    }

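    /// Compute a candidate square root by raising to the power (p + 1) / 4;
    /// since p % 4 == 3, the candidate squares back to `self` exactly when
    /// `self` is a quadratic residue, which the returned boolean reports.
    ///
    /// Sketch (marked `ignore` since the crate path of this module is not
    /// fixed here):
    ///
    /// ```ignore
    /// let a = Field::from_int(4);
    /// let (r, is_square) = a.sqrt();
    /// assert!(is_square);
    /// let mut rr = r.sqr();
    /// rr.normalize();
    /// assert_eq!(rr, Field::from_int(4)); // r is either 2 or p - 2
    /// ```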
    pub fn sqrt(&self) -> (Field, bool) {
        // The exponent (p + 1) / 4 is assembled from blocks of the form
        // a^(2^k - 1) (x2, x3, ..., x223 below), stitched together by
        // repeated squaring.
        let mut x2 = self.sqr();
        x2 *= self;

        let mut x3 = x2.sqr();
        x3 *= self;

        let mut x6 = x3;
        for _ in 0..3 {
            x6 = x6.sqr();
        }
        x6 *= &x3;

        let mut x9 = x6;
        for _ in 0..3 {
            x9 = x9.sqr();
        }
        x9 *= &x3;

        let mut x11 = x9;
        for _ in 0..2 {
            x11 = x11.sqr();
        }
        x11 *= &x2;

        let mut x22 = x11;
        for _ in 0..11 {
            x22 = x22.sqr();
        }
        x22 *= &x11;

        let mut x44 = x22;
        for _ in 0..22 {
            x44 = x44.sqr();
        }
        x44 *= &x22;

        let mut x88 = x44;
        for _ in 0..44 {
            x88 = x88.sqr();
        }
        x88 *= &x44;

        let mut x176 = x88;
        for _ in 0..88 {
            x176 = x176.sqr();
        }
        x176 *= &x88;

        let mut x220 = x176;
        for _ in 0..44 {
            x220 = x220.sqr();
        }
        x220 *= &x44;

        let mut x223 = x220;
        for _ in 0..3 {
            x223 = x223.sqr();
        }
        x223 *= &x3;

        let mut t1 = x223;
        for _ in 0..23 {
            t1 = t1.sqr();
        }
        t1 *= &x22;
        for _ in 0..6 {
            t1 = t1.sqr();
        }
        t1 *= &x2;
        t1 = t1.sqr();
        let r = t1.sqr();

        // Verify the candidate: it is a genuine square root only if r^2
        // reproduces the input.
        t1 = r.sqr();
        (r, &t1 == self)
    }

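    /// Compute the multiplicative inverse by raising to the power p - 2
    /// (Fermat's little theorem), reusing the addition-chain blocks from
    /// `sqrt`.
    ///
    /// Sketch (marked `ignore` since the crate path of this module is not
    /// fixed here):
    ///
    /// ```ignore
    /// let a = Field::from_int(3);
    /// let mut prod = a * a.inv();
    /// prod.normalize();
    /// assert_eq!(prod, Field::from_int(1));
    /// ```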
    pub fn inv(&self) -> Field {
        let mut x2 = self.sqr();
        x2 *= self;

        let mut x3 = x2.sqr();
        x3 *= self;

        let mut x6 = x3;
        for _ in 0..3 {
            x6 = x6.sqr();
        }
        x6 *= &x3;

        let mut x9 = x6;
        for _ in 0..3 {
            x9 = x9.sqr();
        }
        x9 *= &x3;

        let mut x11 = x9;
        for _ in 0..2 {
            x11 = x11.sqr();
        }
        x11 *= &x2;

        let mut x22 = x11;
        for _ in 0..11 {
            x22 = x22.sqr();
        }
        x22 *= &x11;

        let mut x44 = x22;
        for _ in 0..22 {
            x44 = x44.sqr();
        }
        x44 *= &x22;

        let mut x88 = x44;
        for _ in 0..44 {
            x88 = x88.sqr();
        }
        x88 *= &x44;

        let mut x176 = x88;
        for _ in 0..88 {
            x176 = x176.sqr();
        }
        x176 *= &x88;

        let mut x220 = x176;
        for _ in 0..44 {
            x220 = x220.sqr();
        }
        x220 *= &x44;

        let mut x223 = x220;
        for _ in 0..3 {
            x223 = x223.sqr();
        }
        x223 *= &x3;

        let mut t1 = x223;
        for _ in 0..23 {
            t1 = t1.sqr();
        }
        t1 *= &x22;
        for _ in 0..5 {
            t1 = t1.sqr();
        }
        t1 *= self;
        for _ in 0..3 {
            t1 = t1.sqr();
        }
        t1 *= &x2;
        for _ in 0..2 {
            t1 = t1.sqr();
        }
        self * &t1
    }

    /// Variable-time inverse; currently an alias for the constant-time
    /// `inv`.
    pub fn inv_var(&self) -> Field {
        self.inv()
    }

    /// Check whether `self` is a quadratic residue by attempting a square
    /// root.
    pub fn is_quad_var(&self) -> bool {
        let (_, ret) = self.sqrt();
        ret
    }

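    /// Overwrite `self` with `other` when `flag` is true; leave it unchanged
    /// otherwise.
    ///
    /// ```ignore
    /// let mut a = Field::from_int(1);
    /// let b = Field::from_int(2);
    /// a.cmov(&b, true); // a is now a copy of b
    /// assert_eq!(a, b);
    /// ```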
    pub fn cmov(&mut self, other: &Field, flag: bool) {
        self.n[0] = if flag { other.n[0] } else { self.n[0] };
        self.n[1] = if flag { other.n[1] } else { self.n[1] };
        self.n[2] = if flag { other.n[2] } else { self.n[2] };
        self.n[3] = if flag { other.n[3] } else { self.n[3] };
        self.n[4] = if flag { other.n[4] } else { self.n[4] };
        self.n[5] = if flag { other.n[5] } else { self.n[5] };
        self.n[6] = if flag { other.n[6] } else { self.n[6] };
        self.n[7] = if flag { other.n[7] } else { self.n[7] };
        self.n[8] = if flag { other.n[8] } else { self.n[8] };
        self.n[9] = if flag { other.n[9] } else { self.n[9] };
        self.magnitude = if flag {
            other.magnitude
        } else {
            self.magnitude
        };
        self.normalized = if flag {
            other.normalized
        } else {
            self.normalized
        };
    }
}

impl Default for Field {
    fn default() -> Field {
        Self {
            n: [0u32; 10],
            magnitude: 0,
            normalized: true,
        }
    }
}

impl Add<Field> for Field {
    type Output = Field;
    fn add(self, other: Field) -> Field {
        let mut ret = self;
        ret.add_assign(&other);
        ret
    }
}

impl<'a, 'b> Add<&'a Field> for &'b Field {
    type Output = Field;
    fn add(self, other: &'a Field) -> Field {
        let mut ret = *self;
        ret.add_assign(other);
        ret
    }
}

impl<'a> AddAssign<&'a Field> for Field {
    fn add_assign(&mut self, other: &'a Field) {
        // Field addition is plain limbwise addition; the magnitudes add up
        // and the result is no longer normalized.
        self.n[0] += other.n[0];
        self.n[1] += other.n[1];
        self.n[2] += other.n[2];
        self.n[3] += other.n[3];
        self.n[4] += other.n[4];
        self.n[5] += other.n[5];
        self.n[6] += other.n[6];
        self.n[7] += other.n[7];
        self.n[8] += other.n[8];
        self.n[9] += other.n[9];

        self.magnitude += other.magnitude;
        self.normalized = false;
        debug_assert!(self.verify());
    }
}

impl AddAssign<Field> for Field {
    fn add_assign(&mut self, other: Field) {
        self.add_assign(&other)
    }
}

impl Mul<Field> for Field {
    type Output = Field;
    fn mul(self, other: Field) -> Field {
        let mut ret = Field::default();
        ret.mul_in_place(&self, &other);
        ret
    }
}

impl<'a, 'b> Mul<&'a Field> for &'b Field {
    type Output = Field;
    fn mul(self, other: &'a Field) -> Field {
        let mut ret = Field::default();
        ret.mul_in_place(self, other);
        ret
    }
}

impl<'a> MulAssign<&'a Field> for Field {
    fn mul_assign(&mut self, other: &'a Field) {
        let mut ret = Field::default();
        ret.mul_in_place(self, other);
        *self = ret;
    }
}

impl MulAssign<Field> for Field {
    fn mul_assign(&mut self, other: Field) {
        self.mul_assign(&other)
    }
}

impl PartialEq for Field {
    fn eq(&self, other: &Field) -> bool {
        // a == b exactly when a - b reduces to zero modulo p.
        let mut na = self.neg(self.magnitude);
        na += other;
        na.normalizes_to_zero()
    }
}

impl Eq for Field {}

impl Ord for Field {
    fn cmp(&self, other: &Field) -> Ordering {
        self.cmp_var(other)
    }
}

impl PartialOrd for Field {
    fn partial_cmp(&self, other: &Field) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// Compact storage form of a normalized field element: eight 32-bit words,
/// least-significant word first.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct FieldStorage(pub [u32; 8]);

impl Default for FieldStorage {
    fn default() -> FieldStorage {
        FieldStorage([0; 8])
    }
}

impl FieldStorage {
    /// Construct from eight 32-bit words, most-significant word first.
    pub const fn new(
        d7: u32,
        d6: u32,
        d5: u32,
        d4: u32,
        d3: u32,
        d2: u32,
        d1: u32,
        d0: u32,
    ) -> Self {
        Self([d0, d1, d2, d3, d4, d5, d6, d7])
    }

    /// If `flag` is true, overwrite `self` with `other`.
    pub fn cmov(&mut self, other: &FieldStorage, flag: bool) {
        self.0[0] = if flag { other.0[0] } else { self.0[0] };
        self.0[1] = if flag { other.0[1] } else { self.0[1] };
        self.0[2] = if flag { other.0[2] } else { self.0[2] };
        self.0[3] = if flag { other.0[3] } else { self.0[3] };
        self.0[4] = if flag { other.0[4] } else { self.0[4] };
        self.0[5] = if flag { other.0[5] } else { self.0[5] };
        self.0[6] = if flag { other.0[6] } else { self.0[6] };
        self.0[7] = if flag { other.0[7] } else { self.0[7] };
    }
}

impl From<FieldStorage> for Field {
    fn from(a: FieldStorage) -> Field {
        let mut r = Field::default();

        // Unpack eight 32-bit words into ten 26-bit limbs.
        r.n[0] = a.0[0] & 0x3FFFFFF;
        r.n[1] = a.0[0] >> 26 | ((a.0[1] << 6) & 0x3FFFFFF);
        r.n[2] = a.0[1] >> 20 | ((a.0[2] << 12) & 0x3FFFFFF);
        r.n[3] = a.0[2] >> 14 | ((a.0[3] << 18) & 0x3FFFFFF);
        r.n[4] = a.0[3] >> 8 | ((a.0[4] << 24) & 0x3FFFFFF);
        r.n[5] = (a.0[4] >> 2) & 0x3FFFFFF;
        r.n[6] = a.0[4] >> 28 | ((a.0[5] << 4) & 0x3FFFFFF);
        r.n[7] = a.0[5] >> 22 | ((a.0[6] << 10) & 0x3FFFFFF);
        r.n[8] = a.0[6] >> 16 | ((a.0[7] << 16) & 0x3FFFFFF);
        r.n[9] = a.0[7] >> 10;

        r.magnitude = 1;
        r.normalized = true;

        r
    }
}

impl From<Field> for FieldStorage {
    fn from(v: Field) -> FieldStorage {
        debug_assert!(v.normalized);
        let mut r = FieldStorage::default();

        // Pack ten 26-bit limbs back into eight 32-bit words.
        r.0[0] = v.n[0] | v.n[1] << 26;
        r.0[1] = v.n[1] >> 6 | v.n[2] << 20;
        r.0[2] = v.n[2] >> 12 | v.n[3] << 14;
        r.0[3] = v.n[3] >> 18 | v.n[4] << 8;
        r.0[4] = v.n[4] >> 24 | v.n[5] << 2 | v.n[6] << 28;
        r.0[5] = v.n[6] >> 4 | v.n[7] << 22;
        r.0[6] = v.n[7] >> 10 | v.n[8] << 16;
        r.0[7] = v.n[8] >> 16 | v.n[9] << 10;

        r
    }
}
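
// A small sanity-check sketch, not part of the original file; it relies only
// on items defined above, so it should compile as a child module here.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn add_then_normalize() {
        let a = Field::from_int(10);
        let b = Field::from_int(20);
        let mut c = a + b;
        c.normalize();
        assert_eq!(c, Field::from_int(30));
    }

    #[test]
    fn storage_roundtrip() {
        let f = Field::from_int(12345);
        let s: FieldStorage = f.into();
        let g: Field = s.into();
        assert_eq!(f, g);
    }
}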