pub struct SSE41(_);
Trait Implementations
impl InstructionSet for SSE41
impl InstructionSet for SSE41
impl SIMD128 for SSE41
impl SIMD128 for SSE41
type V128 = __m128i
unsafe fn v128_load(self, addr: *const u8) -> Self::V128
unsafe fn v128_load_unaligned(self, addr: *const u8) -> Self::V128
unsafe fn v128_store_unaligned(self, addr: *mut u8, a: Self::V128)
fn v128_or(self, a: Self::V128, b: Self::V128) -> Self::V128
fn v128_and(self, a: Self::V128, b: Self::V128) -> Self::V128
fn v128_andnot(self, a: Self::V128, b: Self::V128) -> Self::V128
fn v128_to_bytes(self, a: Self::V128) -> [u8; 16]
fn v128_create_zero(self) -> Self::V128
fn v128_all_zero(self, a: Self::V128) -> bool
fn u8x16_splat(self, x: u8) -> Self::V128
fn u8x16_swizzle(self, a: Self::V128, b: Self::V128) -> Self::V128
fn u8x16_add(self, a: Self::V128, b: Self::V128) -> Self::V128
fn u8x16_sub(self, a: Self::V128, b: Self::V128) -> Self::V128
fn u8x16_sub_sat(self, a: Self::V128, b: Self::V128) -> Self::V128
fn u8x16_any_zero(self, a: Self::V128) -> bool
fn u8x16_min(self, a: Self::V128, b: Self::V128) -> Self::V128
fn i8x16_splat(self, x: i8) -> Self::V128
fn i8x16_cmp_lt(self, a: Self::V128, b: Self::V128) -> Self::V128
fn i8x16_cmp_eq(self, a: Self::V128, b: Self::V128) -> Self::V128
fn u16x8_shl<const IMM8: i32>(self, a: Self::V128) -> Self::V128
fn u16x8_shr<const IMM8: i32>(self, a: Self::V128) -> Self::V128
fn u16x8_splat(self, x: u16) -> Self::V128
fn u32x4_splat(self, x: u32) -> Self::V128
fn u32x4_shl<const IMM8: i32>(self, a: Self::V128) -> Self::V128
fn u32x4_shr<const IMM8: i32>(self, a: Self::V128) -> Self::V128
impl SIMD256 for SSE41
impl SIMD256 for SSE41
type V256 = (__m128i, __m128i)
fn v256_to_bytes(self, a: Self::V256) -> [u8; 32]
fn u16x16_from_u8x16(self, a: Self::V128) -> Self::V256
fn u64x4_unzip_low(self, a: Self::V256) -> Self::V128
fn v256_from_v128x2(self, a: Self::V128, b: Self::V128) -> Self::V256
fn v256_to_v128x2(self, a: Self::V256) -> (Self::V128, Self::V128)
unsafe fn v256_load(self, addr: *const u8) -> Self::V256
unsafe fn v256_load_unaligned(self, addr: *const u8) -> Self::V256
unsafe fn v256_store_unaligned(self, addr: *mut u8, a: Self::V256)
fn v256_or(self, a: Self::V256, b: Self::V256) -> Self::V256
fn v256_and(self, a: Self::V256, b: Self::V256) -> Self::V256
fn v256_andnot(self, a: Self::V256, b: Self::V256) -> Self::V256
fn v256_create_zero(self) -> Self::V256
fn v256_all_zero(self, a: Self::V256) -> bool
fn v256_get_low(self, a: Self::V256) -> Self::V128
fn v256_get_high(self, a: Self::V256) -> Self::V128
fn u8x32_splat(self, x: u8) -> Self::V256
fn u8x32_add(self, a: Self::V256, b: Self::V256) -> Self::V256
fn u8x32_sub(self, a: Self::V256, b: Self::V256) -> Self::V256
fn u8x32_any_zero(self, a: Self::V256) -> bool
fn u8x16x2_swizzle(self, a: Self::V256, b: Self::V256) -> Self::V256
fn i8x32_splat(self, x: i8) -> Self::V256
fn i8x32_cmp_lt(self, a: Self::V256, b: Self::V256) -> Self::V256
fn i8x32_cmp_eq(self, a: Self::V256, b: Self::V256) -> Self::V256
fn u16x16_shl<const IMM8: i32>(self, a: Self::V256) -> Self::V256
fn u16x16_shr<const IMM8: i32>(self, a: Self::V256) -> Self::V256
fn u16x16_splat(self, x: u16) -> Self::V256
fn u32x8_splat(self, x: u32) -> Self::V256
fn u8x32_sub_sat(self, a: Self::V256, b: Self::V256) -> Self::V256
fn u32x8_shl<const IMM8: i32>(self, a: Self::V256) -> Self::V256
fn u32x8_shr<const IMM8: i32>(self, a: Self::V256) -> Self::V256
impl Copy for SSE41
Auto Trait Implementations
impl RefUnwindSafe for SSE41
impl Send for SSE41
impl Sync for SSE41
impl Unpin for SSE41
impl UnwindSafe for SSE41
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
const: unstable · fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more