tfhe_fft::fft128::f128_ops::x86

Trait V4F128Ext

Source
pub trait V4F128Ext {
    // Required methods (19)
    fn add_estimate_f128x8(self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8) -> (f64x8, f64x8);
    fn sub_estimate_f128x8(self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8) -> (f64x8, f64x8);
    fn add_f128x8(self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8) -> (f64x8, f64x8);
    fn sub_f128x8(self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8) -> (f64x8, f64x8);
    fn mul_f128x8(self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8) -> (f64x8, f64x8);
    fn add_estimate_f128x16(self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16) -> (f64x16, f64x16);
    fn sub_estimate_f128x16(self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16) -> (f64x16, f64x16);
    fn add_f128x16(self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16) -> (f64x16, f64x16);
    fn sub_f128x16(self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16) -> (f64x16, f64x16);
    fn mul_f128x16(self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16) -> (f64x16, f64x16);
    fn splat_f64x16(self, value: f64) -> f64x16;
    fn add_f64x16(self, a: f64x16, b: f64x16) -> f64x16;
    fn sub_f64x16(self, a: f64x16, b: f64x16) -> f64x16;
    fn mul_f64x16(self, a: f64x16, b: f64x16) -> f64x16;
    fn mul_add_f64x16(self, a: f64x16, b: f64x16, c: f64x16) -> f64x16;
    fn mul_sub_f64x16(self, a: f64x16, b: f64x16, c: f64x16) -> f64x16;
    fn andnot_f64x16(self, a: f64x16, b: f64x16) -> f64x16;
    fn cmp_gt_f64x16(self, a: f64x16, b: f64x16) -> b16;
    fn select_f64x16(self, mask: b16, if_true: f64x16, if_false: f64x16) -> f64x16;
}
Available on crate feature fft128 and (x86 or x86-64) only.

Required Methods

Source

fn add_estimate_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source

fn sub_estimate_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source

fn add_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source

fn sub_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source

fn mul_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source

fn add_estimate_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source

fn sub_estimate_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source

fn add_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source

fn sub_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source

fn mul_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source

fn splat_f64x16(self, value: f64) -> f64x16

Source

fn add_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source

fn sub_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source

fn mul_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source

fn mul_add_f64x16(self, a: f64x16, b: f64x16, c: f64x16) -> f64x16

Source

fn mul_sub_f64x16(self, a: f64x16, b: f64x16, c: f64x16) -> f64x16

Source

fn andnot_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source

fn cmp_gt_f64x16(self, a: f64x16, b: f64x16) -> b16

Source

fn select_f64x16(self, mask: b16, if_true: f64x16, if_false: f64x16) -> f64x16

Implementations on Foreign Types

Source§

impl V4F128Ext for V4

Source§

fn add_estimate_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source§

fn sub_estimate_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source§

fn add_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source§

fn sub_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source§

fn mul_f128x8( self, a0: f64x8, a1: f64x8, b0: f64x8, b1: f64x8, ) -> (f64x8, f64x8)

Source§

fn add_estimate_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source§

fn sub_estimate_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source§

fn add_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source§

fn sub_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source§

fn mul_f128x16( self, a0: f64x16, a1: f64x16, b0: f64x16, b1: f64x16, ) -> (f64x16, f64x16)

Source§

fn add_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source§

fn sub_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source§

fn mul_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source§

fn mul_add_f64x16(self, a: f64x16, b: f64x16, c: f64x16) -> f64x16

Source§

fn mul_sub_f64x16(self, a: f64x16, b: f64x16, c: f64x16) -> f64x16

Source§

fn andnot_f64x16(self, a: f64x16, b: f64x16) -> f64x16

Source§

fn cmp_gt_f64x16(self, a: f64x16, b: f64x16) -> b16

Source§

fn select_f64x16(self, mask: b16, if_true: f64x16, if_false: f64x16) -> f64x16

Source§

fn splat_f64x16(self, value: f64) -> f64x16

Implementors