use crate::{
AdditiveGroup, BigInt, BigInteger, FftField, Field, LegendreSymbol, One, PrimeField,
SqrtPrecomputation, Zero,
};
use ark_serialize::{
buffer_byte_size, CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize,
CanonicalSerializeWithFlags, Compress, EmptyFlags, Flags, SerializationError, Valid, Validate,
};
use ark_std::{
cmp::*,
fmt::{Display, Formatter, Result as FmtResult},
marker::PhantomData,
ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
str::FromStr,
string::*,
};
use core::iter;
#[macro_use]
mod montgomery_backend;
pub use montgomery_backend::*;
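/// A trait that specifies the constants and arithmetic procedures of the
/// prime field `Fp<Self, N>`, whose modulus occupies at most `N` 64-bit limbs.
///
/// Most users should not implement this trait by hand: the Montgomery backend
/// re-exported above (`MontBackend`/`MontConfig`) provides an implementation.
/// A minimal sketch of defining a field this way, assuming the `ark_ff` facade
/// crate (which re-exports these items) is in scope:
///
/// ```ignore
/// use ark_ff::fields::{Fp64, MontBackend, MontConfig};
///
/// // The integers modulo 17, with 3 as a multiplicative generator.
/// #[derive(MontConfig)]
/// #[modulus = "17"]
/// #[generator = "3"]
/// pub struct F17Config;
///
/// // Arithmetic is carried out in Montgomery form by `MontBackend`.
/// pub type F17 = Fp64<MontBackend<F17Config, 1>>;
/// ```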
pub trait FpConfig<const N: usize>: Send + Sync + 'static + Sized {
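    /// The modulus of the field.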
    const MODULUS: BigInt<N>;
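    /// A multiplicative generator of the field, i.e. an element of
    /// multiplicative order `MODULUS - 1`.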
const GENERATOR: Fp<Self, N>;
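    /// The additive identity of the field.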
const ZERO: Fp<Self, N>;
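    /// The multiplicative identity of the field.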
const ONE: Fp<Self, N>;
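    /// The largest integer `s` such that `2^s` divides `MODULUS - 1`.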
const TWO_ADICITY: u32;
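    /// A primitive `2^TWO_ADICITY`-th root of unity, used for FFTs over
    /// radix-2 evaluation domains.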
const TWO_ADIC_ROOT_OF_UNITY: Fp<Self, N>;
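    /// An integer `b` such that the multiplicative group contains a subgroup
    /// of order `b^k` for some `k`, if such a small base exists.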
const SMALL_SUBGROUP_BASE: Option<u32> = None;
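    /// The integer `k` such that a multiplicative subgroup of order
    /// `SMALL_SUBGROUP_BASE^k` exists.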
const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = None;
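    /// A root of unity of order
    /// `2^TWO_ADICITY * SMALL_SUBGROUP_BASE^SMALL_SUBGROUP_BASE_ADICITY`,
    /// used for mixed-radix FFTs.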
const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Fp<Self, N>> = None;
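    /// Precomputed data used to speed up square-root computation, if any.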
const SQRT_PRECOMP: Option<SqrtPrecomputation<Fp<Self, N>>>;
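    /// Sets `a = a + b`.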
fn add_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);
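    /// Sets `a = a - b`.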
fn sub_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);
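    /// Sets `a = 2 * a`.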
fn double_in_place(a: &mut Fp<Self, N>);
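    /// Sets `a = -a`.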
fn neg_in_place(a: &mut Fp<Self, N>);
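    /// Sets `a = a * b`.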
fn mul_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);
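    /// Computes the inner product `<a, b>`.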
fn sum_of_products<const T: usize>(a: &[Fp<Self, N>; T], b: &[Fp<Self, N>; T]) -> Fp<Self, N>;
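    /// Sets `a = a * a`.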
fn square_in_place(a: &mut Fp<Self, N>);
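    /// Computes `a^{-1}`, returning `None` if `a` is zero.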
fn inverse(a: &Fp<Self, N>) -> Option<Fp<Self, N>>;
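    /// Converts an integer into a field element, returning `None` if it is
    /// not in the canonical range `[0, MODULUS)`.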
fn from_bigint(other: BigInt<N>) -> Option<Fp<Self, N>>;
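    /// Converts a field element back into its canonical integer
    /// representative in `[0, MODULUS)`.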
fn into_bigint(other: Fp<Self, N>) -> BigInt<N>;
}
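/// Represents an element of the prime field `F_p`, where `p == P::MODULUS`.
/// This type can represent elements in any field of size at most `N * 64` bits.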
#[derive(Educe)]
#[educe(Default, Hash, Clone, Copy, PartialEq, Eq)]
pub struct Fp<P: FpConfig<N>, const N: usize>(
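    /// The element's internal representation, as chosen by the backend `P`
    /// (for example, Montgomery form for `MontBackend`).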
#[doc(hidden)]
pub BigInt<N>,
#[doc(hidden)] pub PhantomData<P>,
);
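// Convenience aliases: `Fp64<P>` through `Fp832<P>` are prime fields whose
// moduli fit in at most 64 through 832 bits, i.e. 1 through 13 64-bit limbs.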
pub type Fp64<P> = Fp<P, 1>;
pub type Fp128<P> = Fp<P, 2>;
pub type Fp192<P> = Fp<P, 3>;
pub type Fp256<P> = Fp<P, 4>;
pub type Fp320<P> = Fp<P, 5>;
pub type Fp384<P> = Fp<P, 6>;
pub type Fp448<P> = Fp<P, 7>;
pub type Fp512<P> = Fp<P, 8>;
pub type Fp576<P> = Fp<P, 9>;
pub type Fp640<P> = Fp<P, 10>;
pub type Fp704<P> = Fp<P, 11>;
pub type Fp768<P> = Fp<P, 12>;
pub type Fp832<P> = Fp<P, 13>;
impl<P: FpConfig<N>, const N: usize> Fp<P, N> {
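    /// Returns `true` if the internal representation is greater than or
    /// equal to the modulus.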
#[doc(hidden)]
#[inline]
pub fn is_geq_modulus(&self) -> bool {
self.0 >= P::MODULUS
}
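    /// Subtracts the modulus from the internal representation if it is not
    /// already reduced.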
#[inline]
fn subtract_modulus(&mut self) {
if self.is_geq_modulus() {
self.0.sub_with_borrow(&Self::MODULUS);
}
}
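    /// Like `subtract_modulus`, but also subtracts when `carry` is set,
    /// i.e. when the preceding limb arithmetic overflowed the `N` limbs.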
#[inline]
fn subtract_modulus_with_carry(&mut self, carry: bool) {
if carry || self.is_geq_modulus() {
self.0.sub_with_borrow(&Self::MODULUS);
}
}
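    /// The number of unused bits in the top limb of the representation,
    /// i.e. `64 * N - MODULUS_BIT_SIZE`.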
fn num_bits_to_shave() -> usize {
64 * N - (Self::MODULUS_BIT_SIZE as usize)
}
}
impl<P: FpConfig<N>, const N: usize> ark_std::fmt::Debug for Fp<P, N> {
fn fmt(&self, f: &mut Formatter<'_>) -> ark_std::fmt::Result {
ark_std::fmt::Debug::fmt(&self.into_bigint(), f)
}
}
impl<P: FpConfig<N>, const N: usize> Zero for Fp<P, N> {
#[inline]
fn zero() -> Self {
P::ZERO
}
#[inline]
fn is_zero(&self) -> bool {
*self == P::ZERO
}
}
impl<P: FpConfig<N>, const N: usize> One for Fp<P, N> {
#[inline]
fn one() -> Self {
P::ONE
}
#[inline]
fn is_one(&self) -> bool {
*self == P::ONE
}
}
impl<P: FpConfig<N>, const N: usize> AdditiveGroup for Fp<P, N> {
type Scalar = Self;
const ZERO: Self = P::ZERO;
#[inline]
fn double(&self) -> Self {
let mut temp = *self;
temp.double_in_place();
temp
}
#[inline]
fn double_in_place(&mut self) -> &mut Self {
P::double_in_place(self);
self
}
#[inline]
fn neg_in_place(&mut self) -> &mut Self {
P::neg_in_place(self);
self
}
}
impl<P: FpConfig<N>, const N: usize> Field for Fp<P, N> {
type BasePrimeField = Self;
const SQRT_PRECOMP: Option<SqrtPrecomputation<Self>> = P::SQRT_PRECOMP;
const ONE: Self = P::ONE;
fn extension_degree() -> u64 {
1
}
fn from_base_prime_field(elem: Self::BasePrimeField) -> Self {
elem
}
fn to_base_prime_field_elements(&self) -> impl Iterator<Item = Self::BasePrimeField> {
iter::once(*self)
}
fn from_base_prime_field_elems(
elems: impl IntoIterator<Item = Self::BasePrimeField>,
) -> Option<Self> {
let mut elems = elems.into_iter();
let elem = elems.next()?;
if elems.next().is_some() {
return None;
}
Some(elem)
}
#[inline]
fn characteristic() -> &'static [u64] {
P::MODULUS.as_ref()
}
#[inline]
fn sum_of_products<const T: usize>(a: &[Self; T], b: &[Self; T]) -> Self {
P::sum_of_products(a, b)
}
#[inline]
fn from_random_bytes_with_flags<F: Flags>(bytes: &[u8]) -> Option<(Self, F)> {
if F::BIT_SIZE > 8 {
None
} else {
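            // Copy the input into a limb-aligned buffer, and build a mask that
            // keeps only the bits below `MODULUS_BIT_SIZE` in the top limb.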
let shave_bits = Self::num_bits_to_shave();
let mut result_bytes = crate::const_helpers::SerBuffer::<N>::zeroed();
result_bytes.copy_from_u8_slice(bytes);
let last_limb_mask =
(u64::MAX.checked_shr(shave_bits as u32).unwrap_or(0)).to_le_bytes();
let mut last_bytes_mask = [0u8; 9];
last_bytes_mask[..8].copy_from_slice(&last_limb_mask);
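            // The flag bits, if any, occupy the high bits of the last byte of
            // the serialized element.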
let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);
let flag_location = output_byte_size - 1;
let flag_location_in_last_limb = flag_location.saturating_sub(8 * (N - 1));
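            // Walk over the trailing bytes: extract the flag bits from the
            // flag byte, and mask away everything above the modulus bit size.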
let last_bytes = result_bytes.last_n_plus_1_bytes_mut();
let flags_mask = u8::MAX.checked_shl(8 - (F::BIT_SIZE as u32)).unwrap_or(0);
let mut flags: u8 = 0;
for (i, (b, m)) in last_bytes.zip(&last_bytes_mask).enumerate() {
if i == flag_location_in_last_limb {
flags = *b & flags_mask
}
*b &= m;
}
Self::deserialize_compressed(&result_bytes.as_slice()[..(N * 8)])
.ok()
.and_then(|f| F::from_u8(flags).map(|flag| (f, flag)))
}
}
#[inline]
fn square(&self) -> Self {
let mut temp = *self;
temp.square_in_place();
temp
}
fn square_in_place(&mut self) -> &mut Self {
P::square_in_place(self);
self
}
#[inline]
fn inverse(&self) -> Option<Self> {
P::inverse(self)
}
fn inverse_in_place(&mut self) -> Option<&mut Self> {
if let Some(inverse) = self.inverse() {
*self = inverse;
Some(self)
} else {
None
}
}
#[inline]
fn frobenius_map_in_place(&mut self, _: usize) {}
#[inline]
fn legendre(&self) -> LegendreSymbol {
use crate::fields::LegendreSymbol::*;
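        // Euler's criterion: s = self^((MODULUS - 1) / 2) is 0 for zero,
        // 1 for quadratic residues, and -1 for non-residues.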
let s = self.pow(Self::MODULUS_MINUS_ONE_DIV_TWO);
if s.is_zero() {
Zero
} else if s.is_one() {
QuadraticResidue
} else {
QuadraticNonResidue
}
}
#[inline]
fn mul_by_base_prime_field(&self, elem: &Self::BasePrimeField) -> Self {
*self * elem
}
}
impl<P: FpConfig<N>, const N: usize> PrimeField for Fp<P, N> {
type BigInt = BigInt<N>;
const MODULUS: Self::BigInt = P::MODULUS;
const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt = P::MODULUS.divide_by_2_round_down();
const MODULUS_BIT_SIZE: u32 = P::MODULUS.const_num_bits();
const TRACE: Self::BigInt = P::MODULUS.two_adic_coefficient();
const TRACE_MINUS_ONE_DIV_TWO: Self::BigInt = Self::TRACE.divide_by_2_round_down();
#[inline]
fn from_bigint(r: BigInt<N>) -> Option<Self> {
P::from_bigint(r)
}
fn into_bigint(self) -> BigInt<N> {
P::into_bigint(self)
}
}
impl<P: FpConfig<N>, const N: usize> FftField for Fp<P, N> {
const GENERATOR: Self = P::GENERATOR;
const TWO_ADICITY: u32 = P::TWO_ADICITY;
const TWO_ADIC_ROOT_OF_UNITY: Self = P::TWO_ADIC_ROOT_OF_UNITY;
const SMALL_SUBGROUP_BASE: Option<u32> = P::SMALL_SUBGROUP_BASE;
const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = P::SMALL_SUBGROUP_BASE_ADICITY;
const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self> = P::LARGE_SUBGROUP_ROOT_OF_UNITY;
}
impl<P: FpConfig<N>, const N: usize> Ord for Fp<P, N> {
#[inline(always)]
fn cmp(&self, other: &Self) -> Ordering {
self.into_bigint().cmp(&other.into_bigint())
}
}
impl<P: FpConfig<N>, const N: usize> PartialOrd for Fp<P, N> {
#[inline(always)]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<P: FpConfig<N>, const N: usize> From<u128> for Fp<P, N> {
fn from(mut other: u128) -> Self {
let mut result = BigInt::default();
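        // If the modulus fits in one or two limbs, reduce `other` modulo it
        // before splitting into limbs; otherwise the modulus exceeds 2^128,
        // so a `u128` value is already reduced.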
if N == 1 {
result.0[0] = (other % u128::from(P::MODULUS.0[0])) as u64;
} else if N == 2 || P::MODULUS.0[2..].iter().all(|&x| x == 0) {
let mod_as_u128 = P::MODULUS.0[0] as u128 + ((P::MODULUS.0[1] as u128) << 64);
other %= mod_as_u128;
result.0[0] = ((other << 64) >> 64) as u64;
result.0[1] = (other >> 64) as u64;
} else {
result.0[0] = ((other << 64) >> 64) as u64;
result.0[1] = (other >> 64) as u64;
}
Self::from_bigint(result).unwrap()
}
}
impl<P: FpConfig<N>, const N: usize> From<i128> for Fp<P, N> {
fn from(other: i128) -> Self {
let abs = Self::from(other.unsigned_abs());
if other.is_positive() {
abs
} else {
-abs
}
}
}
impl<P: FpConfig<N>, const N: usize> From<bool> for Fp<P, N> {
fn from(other: bool) -> Self {
if N == 1 {
Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
} else {
Self::from_bigint(BigInt::from(u64::from(other))).unwrap()
}
}
}
impl<P: FpConfig<N>, const N: usize> From<u64> for Fp<P, N> {
fn from(other: u64) -> Self {
if N == 1 {
Self::from_bigint(BigInt::from(other % P::MODULUS.0[0])).unwrap()
} else {
Self::from_bigint(BigInt::from(other)).unwrap()
}
}
}
impl<P: FpConfig<N>, const N: usize> From<i64> for Fp<P, N> {
fn from(other: i64) -> Self {
let abs = Self::from(other.unsigned_abs());
if other.is_positive() {
abs
} else {
-abs
}
}
}
impl<P: FpConfig<N>, const N: usize> From<u32> for Fp<P, N> {
fn from(other: u32) -> Self {
if N == 1 {
Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
} else {
Self::from_bigint(BigInt::from(other)).unwrap()
}
}
}
impl<P: FpConfig<N>, const N: usize> From<i32> for Fp<P, N> {
fn from(other: i32) -> Self {
let abs = Self::from(other.unsigned_abs());
if other.is_positive() {
abs
} else {
-abs
}
}
}
impl<P: FpConfig<N>, const N: usize> From<u16> for Fp<P, N> {
fn from(other: u16) -> Self {
if N == 1 {
Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
} else {
Self::from_bigint(BigInt::from(other)).unwrap()
}
}
}
impl<P: FpConfig<N>, const N: usize> From<i16> for Fp<P, N> {
fn from(other: i16) -> Self {
let abs = Self::from(other.unsigned_abs());
if other.is_positive() {
abs
} else {
-abs
}
}
}
impl<P: FpConfig<N>, const N: usize> From<u8> for Fp<P, N> {
fn from(other: u8) -> Self {
if N == 1 {
Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
} else {
Self::from_bigint(BigInt::from(other)).unwrap()
}
}
}
impl<P: FpConfig<N>, const N: usize> From<i8> for Fp<P, N> {
fn from(other: i8) -> Self {
let abs = Self::from(other.unsigned_abs());
if other.is_positive() {
abs
} else {
-abs
}
}
}
impl<P: FpConfig<N>, const N: usize> ark_std::rand::distributions::Distribution<Fp<P, N>>
for ark_std::rand::distributions::Standard
{
#[inline]
fn sample<R: ark_std::rand::Rng + ?Sized>(&self, rng: &mut R) -> Fp<P, N> {
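        // Rejection sampling: draw uniform random limbs, clear the bits above
        // the modulus bit length (so the acceptance probability is at least
        // 1/2), and retry until the result is below the modulus.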
loop {
let mut tmp = Fp(
rng.sample(ark_std::rand::distributions::Standard),
PhantomData,
);
let shave_bits = Fp::<P, N>::num_bits_to_shave();
assert!(shave_bits <= 64);
let mask = if shave_bits == 64 {
0
} else {
u64::MAX >> shave_bits
};
if let Some(val) = tmp.0 .0.last_mut() {
*val &= mask
}
if !tmp.is_geq_modulus() {
return tmp;
}
}
}
}
impl<P: FpConfig<N>, const N: usize> CanonicalSerializeWithFlags for Fp<P, N> {
fn serialize_with_flags<W: ark_std::io::Write, F: Flags>(
&self,
writer: W,
flags: F,
) -> Result<(), SerializationError> {
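        // Only flags that fit into the high bits of a single byte are
        // supported.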
if F::BIT_SIZE > 8 {
return Err(SerializationError::NotEnoughSpace);
}
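        // Serialize `self` in its canonical `BigInt` form into a temporary
        // buffer, then OR the flag bits into the final byte of the output.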
let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);
let mut bytes = crate::const_helpers::SerBuffer::zeroed();
bytes.copy_from_u64_slice(&self.into_bigint().0);
bytes[output_byte_size - 1] |= flags.u8_bitmask();
bytes.write_up_to(writer, output_byte_size)?;
Ok(())
}
fn serialized_size_with_flags<F: Flags>(&self) -> usize {
buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE)
}
}
impl<P: FpConfig<N>, const N: usize> CanonicalSerialize for Fp<P, N> {
#[inline]
fn serialize_with_mode<W: ark_std::io::Write>(
&self,
writer: W,
_compress: Compress,
) -> Result<(), SerializationError> {
self.serialize_with_flags(writer, EmptyFlags)
}
#[inline]
fn serialized_size(&self, _compress: Compress) -> usize {
self.serialized_size_with_flags::<EmptyFlags>()
}
}
impl<P: FpConfig<N>, const N: usize> CanonicalDeserializeWithFlags for Fp<P, N> {
fn deserialize_with_flags<R: ark_std::io::Read, F: Flags>(
reader: R,
) -> Result<(Self, F), SerializationError> {
if F::BIT_SIZE > 8 {
return Err(SerializationError::NotEnoughSpace);
}
let output_byte_size = Self::zero().serialized_size_with_flags::<F>();
let mut masked_bytes = crate::const_helpers::SerBuffer::zeroed();
masked_bytes.read_exact_up_to(reader, output_byte_size)?;
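        // Strip the flag bits from the final byte before interpreting the
        // buffer as a `BigInt`.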
let flags = F::from_u8_remove_flags(&mut masked_bytes[output_byte_size - 1])
.ok_or(SerializationError::UnexpectedFlags)?;
let self_integer = masked_bytes.to_bigint();
Self::from_bigint(self_integer)
.map(|v| (v, flags))
.ok_or(SerializationError::InvalidData)
}
}
impl<P: FpConfig<N>, const N: usize> Valid for Fp<P, N> {
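    /// Deserialization already rejects non-canonical values, so no further
    /// checks are needed here.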
fn check(&self) -> Result<(), SerializationError> {
Ok(())
}
}
impl<P: FpConfig<N>, const N: usize> CanonicalDeserialize for Fp<P, N> {
fn deserialize_with_mode<R: ark_std::io::Read>(
reader: R,
_compress: Compress,
_validate: Validate,
) -> Result<Self, SerializationError> {
Self::deserialize_with_flags::<R, EmptyFlags>(reader).map(|(r, _)| r)
}
}
impl<P: FpConfig<N>, const N: usize> FromStr for Fp<P, N> {
type Err = ();
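    /// Interprets a decimal string as a (possibly negative) integer and
    /// reduces it modulo `MODULUS`. Returns `Err(())` if the string is not a
    /// valid integer.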
fn from_str(s: &str) -> Result<Self, Self::Err> {
use num_bigint::{BigInt, BigUint};
use num_traits::Signed;
let modulus = BigInt::from(P::MODULUS);
let mut a = BigInt::from_str(s).map_err(|_| ())? % &modulus;
if a.is_negative() {
a += modulus
}
BigUint::try_from(a)
.map_err(|_| ())
.and_then(TryFrom::try_from)
.ok()
.and_then(Self::from_bigint)
.ok_or(())
}
}
impl<P: FpConfig<N>, const N: usize> Display for Fp<P, N> {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
let string = self.into_bigint().to_string();
write!(f, "{}", string)
}
}
impl<P: FpConfig<N>, const N: usize> Neg for Fp<P, N> {
type Output = Self;
#[inline]
#[must_use]
fn neg(mut self) -> Self {
P::neg_in_place(&mut self);
self
}
}
impl<'a, P: FpConfig<N>, const N: usize> Add<&'a Fp<P, N>> for Fp<P, N> {
type Output = Self;
#[inline]
fn add(mut self, other: &Self) -> Self {
self.add_assign(other);
self
}
}
impl<'a, P: FpConfig<N>, const N: usize> Sub<&'a Fp<P, N>> for Fp<P, N> {
type Output = Self;
#[inline]
fn sub(mut self, other: &Self) -> Self {
self.sub_assign(other);
self
}
}
impl<'a, P: FpConfig<N>, const N: usize> Mul<&'a Fp<P, N>> for Fp<P, N> {
type Output = Self;
#[inline]
fn mul(mut self, other: &Self) -> Self {
self.mul_assign(other);
self
}
}
impl<'a, P: FpConfig<N>, const N: usize> Div<&'a Fp<P, N>> for Fp<P, N> {
type Output = Self;
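    /// Divides `self` by `other`; panics if `other` is zero.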
#[inline]
fn div(mut self, other: &Self) -> Self {
self.mul_assign(&other.inverse().unwrap());
self
}
}
impl<'a, 'b, P: FpConfig<N>, const N: usize> Add<&'b Fp<P, N>> for &'a Fp<P, N> {
type Output = Fp<P, N>;
#[inline]
fn add(self, other: &'b Fp<P, N>) -> Fp<P, N> {
let mut result = *self;
result.add_assign(other);
result
}
}
impl<'a, 'b, P: FpConfig<N>, const N: usize> Sub<&'b Fp<P, N>> for &'a Fp<P, N> {
type Output = Fp<P, N>;
#[inline]
fn sub(self, other: &Fp<P, N>) -> Fp<P, N> {
let mut result = *self;
result.sub_assign(other);
result
}
}
impl<'a, 'b, P: FpConfig<N>, const N: usize> Mul<&'b Fp<P, N>> for &'a Fp<P, N> {
type Output = Fp<P, N>;
#[inline]
fn mul(self, other: &Fp<P, N>) -> Fp<P, N> {
let mut result = *self;
result.mul_assign(other);
result
}
}
impl<'a, 'b, P: FpConfig<N>, const N: usize> Div<&'b Fp<P, N>> for &'a Fp<P, N> {
type Output = Fp<P, N>;
#[inline]
fn div(self, other: &Fp<P, N>) -> Fp<P, N> {
let mut result = *self;
result.div_assign(other);
result
}
}
impl<'a, P: FpConfig<N>, const N: usize> AddAssign<&'a Self> for Fp<P, N> {
#[inline]
fn add_assign(&mut self, other: &Self) {
P::add_assign(self, other)
}
}
impl<'a, P: FpConfig<N>, const N: usize> SubAssign<&'a Self> for Fp<P, N> {
#[inline]
fn sub_assign(&mut self, other: &Self) {
P::sub_assign(self, other);
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Add<Self> for Fp<P, N> {
type Output = Self;
#[inline]
fn add(mut self, other: Self) -> Self {
self.add_assign(&other);
self
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Add<&'a mut Self> for Fp<P, N> {
type Output = Self;
#[inline]
fn add(mut self, other: &'a mut Self) -> Self {
self.add_assign(&*other);
self
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Sub<Self> for Fp<P, N> {
type Output = Self;
#[inline]
fn sub(mut self, other: Self) -> Self {
self.sub_assign(&other);
self
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Sub<&'a mut Self> for Fp<P, N> {
type Output = Self;
#[inline]
fn sub(mut self, other: &'a mut Self) -> Self {
self.sub_assign(&*other);
self
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::iter::Sum<Self> for Fp<P, N> {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(Self::zero(), core::ops::Add::add)
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::iter::Sum<&'a Self> for Fp<P, N> {
fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
iter.fold(Self::zero(), core::ops::Add::add)
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::AddAssign<Self> for Fp<P, N> {
#[inline(always)]
fn add_assign(&mut self, other: Self) {
self.add_assign(&other)
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::SubAssign<Self> for Fp<P, N> {
#[inline(always)]
fn sub_assign(&mut self, other: Self) {
self.sub_assign(&other)
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::AddAssign<&'a mut Self> for Fp<P, N> {
#[inline(always)]
fn add_assign(&mut self, other: &'a mut Self) {
self.add_assign(&*other)
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::SubAssign<&'a mut Self> for Fp<P, N> {
#[inline(always)]
fn sub_assign(&mut self, other: &'a mut Self) {
self.sub_assign(&*other)
}
}
impl<'a, P: FpConfig<N>, const N: usize> MulAssign<&'a Self> for Fp<P, N> {
fn mul_assign(&mut self, other: &Self) {
P::mul_assign(self, other)
}
}
impl<'a, P: FpConfig<N>, const N: usize> DivAssign<&'a Self> for Fp<P, N> {
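    /// Sets `self = self / other`; panics if `other` is zero.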
#[inline(always)]
fn div_assign(&mut self, other: &Self) {
self.mul_assign(&other.inverse().unwrap());
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Mul<Self> for Fp<P, N> {
type Output = Self;
#[inline(always)]
fn mul(mut self, other: Self) -> Self {
self.mul_assign(&other);
self
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Div<Self> for Fp<P, N> {
type Output = Self;
#[inline(always)]
fn div(mut self, other: Self) -> Self {
self.div_assign(&other);
self
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Mul<&'a mut Self> for Fp<P, N> {
type Output = Self;
#[inline(always)]
fn mul(mut self, other: &'a mut Self) -> Self {
self.mul_assign(&*other);
self
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Div<&'a mut Self> for Fp<P, N> {
type Output = Self;
#[inline(always)]
fn div(mut self, other: &'a mut Self) -> Self {
self.div_assign(&*other);
self
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::iter::Product<Self> for Fp<P, N> {
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(Self::one(), core::ops::Mul::mul)
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::iter::Product<&'a Self> for Fp<P, N> {
fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
iter.fold(Self::one(), Mul::mul)
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::MulAssign<Self> for Fp<P, N> {
#[inline(always)]
fn mul_assign(&mut self, other: Self) {
self.mul_assign(&other)
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::DivAssign<&'a mut Self> for Fp<P, N> {
#[inline(always)]
fn div_assign(&mut self, other: &'a mut Self) {
self.div_assign(&*other)
}
}
#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::MulAssign<&'a mut Self> for Fp<P, N> {
#[inline(always)]
fn mul_assign(&mut self, other: &'a mut Self) {
self.mul_assign(&*other)
}
}
#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::DivAssign<Self> for Fp<P, N> {
#[inline(always)]
fn div_assign(&mut self, other: Self) {
self.div_assign(&other)
}
}
impl<P: FpConfig<N>, const N: usize> zeroize::Zeroize for Fp<P, N> {
fn zeroize(&mut self) {
self.0.zeroize();
}
}
impl<P: FpConfig<N>, const N: usize> From<num_bigint::BigUint> for Fp<P, N> {
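    /// Reduces the `BigUint` modulo `MODULUS` before converting, so
    /// arbitrarily large values are accepted.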
#[inline]
fn from(val: num_bigint::BigUint) -> Fp<P, N> {
Fp::<P, N>::from_le_bytes_mod_order(&val.to_bytes_le())
}
}
impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for num_bigint::BigUint {
#[inline(always)]
fn from(other: Fp<P, N>) -> Self {
other.into_bigint().into()
}
}
impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for BigInt<N> {
#[inline(always)]
fn from(fp: Fp<P, N>) -> Self {
fp.into_bigint()
}
}
impl<P: FpConfig<N>, const N: usize> From<BigInt<N>> for Fp<P, N> {
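    /// Converts the integer into a field element; panics if the integer is
    /// not in the canonical range `[0, MODULUS)`.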
#[inline(always)]
fn from(int: BigInt<N>) -> Self {
Self::from_bigint(int).unwrap()
}
}