pub trait ExtendedOpVisitor: OpVisitor {
Show 282 methods
// Required methods
fn trap(&mut self) -> Self::Return;
fn nop(&mut self) -> Self::Return;
fn call_indirect_host(&mut self, id: u8) -> Self::Return;
fn xmov_fp(&mut self, dst: XReg) -> Self::Return;
fn xmov_lr(&mut self, dst: XReg) -> Self::Return;
fn bswap32(&mut self, dst: XReg, src: XReg) -> Self::Return;
fn bswap64(&mut self, dst: XReg, src: XReg) -> Self::Return;
fn xadd32_uoverflow_trap(
&mut self,
operands: BinaryOperands<XReg>,
) -> Self::Return;
fn xadd64_uoverflow_trap(
&mut self,
operands: BinaryOperands<XReg>,
) -> Self::Return;
fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return;
fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return;
fn xbmask32(&mut self, dst: XReg, src: XReg) -> Self::Return;
fn xbmask64(&mut self, dst: XReg, src: XReg) -> Self::Return;
fn xpush32(&mut self, src: XReg) -> Self::Return;
fn xpush32_many(&mut self, srcs: RegSet<XReg>) -> Self::Return;
fn xpush64(&mut self, src: XReg) -> Self::Return;
fn xpush64_many(&mut self, srcs: RegSet<XReg>) -> Self::Return;
fn xpop32(&mut self, dst: XReg) -> Self::Return;
fn xpop32_many(&mut self, dsts: RegSet<XReg>) -> Self::Return;
fn xpop64(&mut self, dst: XReg) -> Self::Return;
fn xpop64_many(&mut self, dsts: RegSet<XReg>) -> Self::Return;
fn xload16be_u64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn xload16be_s64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn xload32be_u64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn xload32be_s64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn xload64be_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn xstore16be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: XReg,
) -> Self::Return;
fn xstore32be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: XReg,
) -> Self::Return;
fn xstore64be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: XReg,
) -> Self::Return;
fn fload32be_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn fload64be_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn fstore32be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return;
fn fstore64be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return;
fn fload32le_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn fload64le_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn fstore32le_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return;
fn fstore64le_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return;
fn vload128le_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vstore128le_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: VReg,
) -> Self::Return;
fn fmov(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vmov(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn bitcast_int_from_float_32(
&mut self,
dst: XReg,
src: FReg,
) -> Self::Return;
fn bitcast_int_from_float_64(
&mut self,
dst: XReg,
src: FReg,
) -> Self::Return;
fn bitcast_float_from_int_32(
&mut self,
dst: FReg,
src: XReg,
) -> Self::Return;
fn bitcast_float_from_int_64(
&mut self,
dst: FReg,
src: XReg,
) -> Self::Return;
fn fconst32(&mut self, dst: FReg, bits: u32) -> Self::Return;
fn fconst64(&mut self, dst: FReg, bits: u64) -> Self::Return;
fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return;
fn fselect32(
&mut self,
dst: FReg,
cond: XReg,
if_nonzero: FReg,
if_zero: FReg,
) -> Self::Return;
fn fselect64(
&mut self,
dst: FReg,
cond: XReg,
if_nonzero: FReg,
if_zero: FReg,
) -> Self::Return;
fn f32_from_f64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn f64_from_f32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn f32_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f32_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f32_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f32_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f64_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f64_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f64_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn f64_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return;
fn x32_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return;
fn fcopysign32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fcopysign64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fadd32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fsub32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fmul32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fdiv32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn fmaximum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fminimum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn ftrunc32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vtrunc32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vtrunc64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn ffloor32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vfloor32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vfloor64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn fceil32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vceil32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vceil64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn fnearest32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fsqrt32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vsqrt32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vsqrt64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn fneg32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fabs32(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fadd64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fsub64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fmul64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fdiv64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn fmaximum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn fminimum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return;
fn ftrunc64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn ffloor64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fceil64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fnearest64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vnearest32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vnearest64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn fsqrt64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fneg64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn fabs64(&mut self, dst: FReg, src: FReg) -> Self::Return;
fn vconst128(&mut self, dst: VReg, imm: u128) -> Self::Return;
fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vaddpairwisei16x8_s(
&mut self,
operands: BinaryOperands<VReg>,
) -> Self::Return;
fn vaddpairwisei32x4_s(
&mut self,
operands: BinaryOperands<VReg>,
) -> Self::Return;
fn vshli8x16(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshli16x8(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshli32x4(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshli64x2(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri8x16_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri16x8_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri32x4_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri64x2_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri8x16_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri16x8_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri32x4_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vshri64x2_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return;
fn vsplatx8(&mut self, dst: VReg, src: XReg) -> Self::Return;
fn vsplatx16(&mut self, dst: VReg, src: XReg) -> Self::Return;
fn vsplatx32(&mut self, dst: VReg, src: XReg) -> Self::Return;
fn vsplatx64(&mut self, dst: VReg, src: XReg) -> Self::Return;
fn vsplatf32(&mut self, dst: VReg, src: FReg) -> Self::Return;
fn vsplatf64(&mut self, dst: VReg, src: FReg) -> Self::Return;
fn vload8x8_s_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vload8x8_u_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vload16x4le_s_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vload16x4le_u_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vload32x2le_s_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vload32x2le_u_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return;
fn vband128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vbor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vbxor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vbnot128(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vbitselect128(
&mut self,
dst: VReg,
c: VReg,
x: VReg,
y: VReg,
) -> Self::Return;
fn vbitmask8x16(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vbitmask16x8(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vbitmask32x4(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vbitmask64x2(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn valltrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn valltrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn valltrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn valltrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vanytrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vanytrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vanytrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vanytrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return;
fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vfpromotelow(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vfdemote(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vpopcnt8x16(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return;
fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return;
fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return;
fn xextractv64x2(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return;
fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return;
fn fextractv64x2(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return;
fn vinsertx8(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return;
fn vinsertx16(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return;
fn vinsertx32(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return;
fn vinsertx64(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return;
fn vinsertf32(
&mut self,
operands: BinaryOperands<VReg, VReg, FReg>,
lane: u8,
) -> Self::Return;
fn vinsertf64(
&mut self,
operands: BinaryOperands<VReg, VReg, FReg>,
lane: u8,
) -> Self::Return;
fn veq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vneq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslt8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vult8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn veq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vneq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslt16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vult16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn veq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vneq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslt32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vult32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn veq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vneq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslt64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vult64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vneg8x16(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vneg16x8(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vneg32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vneg64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vnegf64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vabs8x16(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vabs16x8(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vabs32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vabs64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vabsf32x4(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vabsf64x2(&mut self, dst: VReg, src: VReg) -> Self::Return;
fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vshuffle(
&mut self,
dst: VReg,
src1: VReg,
src2: VReg,
mask: u128,
) -> Self::Return;
fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vavground8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
fn vavground16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return;
}
Available on crate feature `decode` only.

Like `OpVisitor`, but for extended operations.
Required Methods§
Sourcefn call_indirect_host(&mut self, id: u8) -> Self::Return
fn call_indirect_host(&mut self, id: u8) -> Self::Return
A special opcode to halt interpreter execution and yield control back to the host.
This opcode results in DoneReason::CallIndirectHost
where the
id
here is shepherded along to the embedder. It’s up to the
embedder to determine what to do with the id
and the current
state of registers and the stack.
In Wasmtime this is used to implement interpreter-to-host calls.
This is modeled as a call
instruction where the first
parameter is the native function pointer to invoke and all
remaining parameters for the native function are in following
parameter positions (e.g. x1
, x2
, …). The results of the
host call are then stored in x0
.
Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
Sourcefn xmov_fp(&mut self, dst: XReg) -> Self::Return
fn xmov_fp(&mut self, dst: XReg) -> Self::Return
Gets the special “fp” register and moves it into dst
.
Sourcefn xmov_lr(&mut self, dst: XReg) -> Self::Return
fn xmov_lr(&mut self, dst: XReg) -> Self::Return
Gets the special “lr” register and moves it into dst
.
Sourcefn xadd32_uoverflow_trap(
&mut self,
operands: BinaryOperands<XReg>,
) -> Self::Return
fn xadd32_uoverflow_trap( &mut self, operands: BinaryOperands<XReg>, ) -> Self::Return
32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2)
.
The upper 32-bits of dst
are unmodified. Traps if the addition
overflows.
Sourcefn xadd64_uoverflow_trap(
&mut self,
operands: BinaryOperands<XReg>,
) -> Self::Return
fn xadd64_uoverflow_trap( &mut self, operands: BinaryOperands<XReg>, ) -> Self::Return
64-bit checked unsigned addition: dst = src1 + src2
.
Sourcefn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = high64(src1 * src2)
(signed)
Sourcefn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = high64(src1 * src2)
(unsigned)
Sourcefn xbmask32(&mut self, dst: XReg, src: XReg) -> Self::Return
fn xbmask32(&mut self, dst: XReg, src: XReg) -> Self::Return
low32(dst) = if low32(src) == 0 { 0 } else { -1 }
Sourcefn xbmask64(&mut self, dst: XReg, src: XReg) -> Self::Return
fn xbmask64(&mut self, dst: XReg, src: XReg) -> Self::Return
dst = if src == 0 { 0 } else { -1 }
Sourcefn xpush32_many(&mut self, srcs: RegSet<XReg>) -> Self::Return
fn xpush32_many(&mut self, srcs: RegSet<XReg>) -> Self::Return
for src in srcs { xpush32 src }
Sourcefn xpush64_many(&mut self, srcs: RegSet<XReg>) -> Self::Return
fn xpush64_many(&mut self, srcs: RegSet<XReg>) -> Self::Return
for src in srcs { xpush64 src }
Sourcefn xpop32_many(&mut self, dsts: RegSet<XReg>) -> Self::Return
fn xpop32_many(&mut self, dsts: RegSet<XReg>) -> Self::Return
for dst in dsts.rev() { xpop32 dst }
Sourcefn xpop64_many(&mut self, dsts: RegSet<XReg>) -> Self::Return
fn xpop64_many(&mut self, dsts: RegSet<XReg>) -> Self::Return
for dst in dsts.rev() { xpop64 dst }
Sourcefn xload16be_u64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn xload16be_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = zext(*(ptr + offset))
Sourcefn xload16be_s64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn xload16be_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = sext(*(ptr + offset))
Sourcefn xload32be_u64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn xload32be_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = zext(*(ptr + offset))
Sourcefn xload32be_s64_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn xload32be_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = sext(*(ptr + offset))
Sourcefn xload64be_offset32(
&mut self,
dst: XReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn xload64be_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = *(ptr + offset)
Sourcefn xstore16be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: XReg,
) -> Self::Return
fn xstore16be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return
*(ptr + offset) = low16(src)
Sourcefn xstore32be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: XReg,
) -> Self::Return
fn xstore32be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return
*(ptr + offset) = low32(src)
Sourcefn xstore64be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: XReg,
) -> Self::Return
fn xstore64be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return
*(ptr + offset) = low64(src)
Sourcefn fload32be_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn fload32be_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return
low32(dst) = zext(*(ptr + offset))
Sourcefn fload64be_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn fload64be_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = *(ptr + offset)
Sourcefn fstore32be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return
fn fstore32be_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return
*(ptr + offset) = low32(src)
Sourcefn fstore64be_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return
fn fstore64be_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return
*(ptr + offset) = src
Sourcefn fload32le_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn fload32le_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return
low32(dst) = zext(*(ptr + offset))
Sourcefn fload64le_offset32(
&mut self,
dst: FReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn fload64le_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = *(ptr + offset)
Sourcefn fstore32le_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return
fn fstore32le_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return
*(ptr + offset) = low32(src)
Sourcefn fstore64le_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: FReg,
) -> Self::Return
fn fstore64le_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return
*(ptr + offset) = src
Sourcefn vload128le_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload128le_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
dst = *(ptr + offset)
Sourcefn vstore128le_offset32(
&mut self,
ptr: XReg,
offset: i32,
src: VReg,
) -> Self::Return
fn vstore128le_offset32( &mut self, ptr: XReg, offset: i32, src: VReg, ) -> Self::Return
*(ptr + offset) = src
Sourcefn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg) -> Self::Return
fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = bitcast low32(src) as i32
Sourcefn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg) -> Self::Return
fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = bitcast src as i64
Sourcefn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg) -> Self::Return
fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = bitcast low32(src) as f32
Sourcefn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg) -> Self::Return
fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = bitcast src as f64
Sourcefn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 == src2)
Sourcefn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 != src2)
Sourcefn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 < src2)
Sourcefn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 <= src2)
Sourcefn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 == src2)
Sourcefn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 != src2)
Sourcefn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 < src2)
Sourcefn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 <= src2)
Sourcefn fselect32(
&mut self,
dst: FReg,
cond: XReg,
if_nonzero: FReg,
if_zero: FReg,
) -> Self::Return
fn fselect32( &mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg, ) -> Self::Return
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
Sourcefn fselect64(
&mut self,
dst: FReg,
cond: XReg,
if_nonzero: FReg,
if_zero: FReg,
) -> Self::Return
fn fselect64( &mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg, ) -> Self::Return
dst = low32(cond) ? if_nonzero : if_zero
Sourcefn f32_from_f64(&mut self, dst: FReg, src: FReg) -> Self::Return
fn f32_from_f64(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = demote(src)
Sourcefn f64_from_f32(&mut self, dst: FReg, src: FReg) -> Self::Return
fn f64_from_f32(&mut self, dst: FReg, src: FReg) -> Self::Return
dst = promote(low32(src))
Sourcefn f32_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f32_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_signed(low32(src))
Sourcefn f32_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f32_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_unsigned(low32(src))
Sourcefn f32_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f32_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_signed(src)
Sourcefn f32_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f32_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_unsigned(src)
Sourcefn f64_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f64_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_signed(low32(src))
Sourcefn f64_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f64_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_unsigned(low32(src))
Sourcefn f64_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f64_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_signed(src)
Sourcefn f64_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return
fn f64_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_unsigned(src)
Sourcefn x32_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_signed_from_f32(low32(src))
Sourcefn x32_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_unsigned_from_f32(low32(src))
Sourcefn x32_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_signed_from_f64(src)
Sourcefn x32_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_unsigned_from_f64(src)
Sourcefn x64_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_signed_from_f32(low32(src))
Sourcefn x64_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_unsigned_from_f32(low32(src))
Sourcefn x64_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_signed_from_f64(src)
Sourcefn x64_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_unsigned_from_f64(src)
Sourcefn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_signed_from_f32(low32(src))
Sourcefn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_unsigned_from_f32(low32(src))
Sourcefn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_signed_from_f64(src)
Sourcefn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_unsigned_from_f64(src)
Sourcefn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_signed_from_f32(low32(src))
Sourcefn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_unsigned_from_f32(low32(src))
Sourcefn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_signed_from_f64(src)
Sourcefn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_unsigned_from_f64(src)
Sourcefn fcopysign32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fcopysign32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = copysign(low32(src1), low32(src2))
Sourcefn fcopysign64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fcopysign64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = copysign(src1, src2)
Sourcefn fadd32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fadd32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) + low32(src2)
Sourcefn fsub32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fsub32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) - low32(src2)
Sourcefn fmul32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fmul32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) * low32(src2)
Sourcefn fdiv32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fdiv32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) / low32(src2)
Sourcefn vdivf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
low128(dst) = low128(src1) / low128(src2)
Sourcefn fmaximum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fmaximum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = ieee_maximum(low32(src1), low32(src2))
Sourcefn fminimum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fminimum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = ieee_minimum(low32(src1), low32(src2))
Sourcefn ftrunc32(&mut self, dst: FReg, src: FReg) -> Self::Return
fn ftrunc32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_trunc(low32(src))
Sourcefn vtrunc32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vtrunc32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_trunc(low128(src))
Sourcefn vtrunc64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vtrunc64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_trunc(low128(src))
Sourcefn ffloor32(&mut self, dst: FReg, src: FReg) -> Self::Return
fn ffloor32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_floor(low32(src))
Sourcefn vfloor32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vfloor32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_floor(low128(src))
Sourcefn vfloor64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vfloor64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_floor(low128(src))
Sourcefn vceil32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vceil32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_ceil(low128(src))
Sourcefn vceil64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vceil64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_ceil(low128(src))
Sourcefn fnearest32(&mut self, dst: FReg, src: FReg) -> Self::Return
fn fnearest32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_nearest(low32(src))
Sourcefn vsqrt32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vsqrt32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_sqrt(low128(src))
Sourcefn vsqrt64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vsqrt64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_sqrt(low128(src))
Sourcefn fadd64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fadd64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = src1 + src2
Sourcefn fsub64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fsub64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = src1 - src2
Sourcefn fmul64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fmul64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = src1 * src2
Sourcefn fdiv64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fdiv64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = src1 / src2
Sourcefn vdivf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 / src2
Sourcefn fmaximum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fmaximum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = ieee_maximum(src1, src2)
Sourcefn fminimum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
fn fminimum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = ieee_minimum(src1, src2)
Sourcefn fnearest64(&mut self, dst: FReg, src: FReg) -> Self::Return
fn fnearest64(&mut self, dst: FReg, src: FReg) -> Self::Return
dst = ieee_nearest(src)
Sourcefn vnearest32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vnearest32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_nearest(low128(src))
Sourcefn vnearest64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vnearest64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_nearest(low128(src))
Sourcefn vaddi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 + src2
Sourcefn vaddi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 + src2
Sourcefn vaddi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 + src2
Sourcefn vaddi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 + src2
Sourcefn vaddf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 + src2
Sourcefn vaddf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 + src2
Sourcefn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
Sourcefn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
Sourcefn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
Sourcefn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
Sourcefn vaddpairwisei16x8_s(
&mut self,
operands: BinaryOperands<VReg>,
) -> Self::Return
fn vaddpairwisei16x8_s( &mut self, operands: BinaryOperands<VReg>, ) -> Self::Return
dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
Sourcefn vaddpairwisei32x4_s(
&mut self,
operands: BinaryOperands<VReg>,
) -> Self::Return
fn vaddpairwisei32x4_s( &mut self, operands: BinaryOperands<VReg>, ) -> Self::Return
dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
Sourcefn vshli8x16(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshli8x16( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 << src2
Sourcefn vshli16x8(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshli16x8( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 << src2
Sourcefn vshli32x4(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshli32x4( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 << src2
Sourcefn vshli64x2(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshli64x2( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 << src2
Sourcefn vshri8x16_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri8x16_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(signed)
Sourcefn vshri16x8_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri16x8_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(signed)
Sourcefn vshri32x4_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri32x4_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(signed)
Sourcefn vshri64x2_s(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri64x2_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(signed)
Sourcefn vshri8x16_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri8x16_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(unsigned)
Sourcefn vshri16x8_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri16x8_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(unsigned)
Sourcefn vshri32x4_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri32x4_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(unsigned)
Sourcefn vshri64x2_u(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
) -> Self::Return
fn vshri64x2_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return
dst = src1 >> src2
(unsigned)
Sourcefn vload8x8_s_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload8x8_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
Load the 64-bit source as i8x8 and sign-extend to i16x8.
Sourcefn vload8x8_u_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload8x8_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
Load the 64-bit source as u8x8 and zero-extend to i16x8.
Sourcefn vload16x4le_s_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload16x4le_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
Load the 64-bit source as i16x4 and sign-extend to i32x4.
Sourcefn vload16x4le_u_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload16x4le_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
Load the 64-bit source as u16x4 and zero-extend to i32x4.
Sourcefn vload32x2le_s_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload32x2le_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
Load the 64-bit source as i32x2 and sign-extend to i64x2.
Sourcefn vload32x2le_u_offset32(
&mut self,
dst: VReg,
ptr: XReg,
offset: i32,
) -> Self::Return
fn vload32x2le_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return
Load the 64-bit source as u32x2 and zero-extend to i64x2.
Sourcefn vband128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vband128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 & src2
Sourcefn vbor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vbor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 | src2
Sourcefn vbxor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vbxor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 ^ src2
Sourcefn vbitselect128(
&mut self,
dst: VReg,
c: VReg,
x: VReg,
y: VReg,
) -> Self::Return
fn vbitselect128( &mut self, dst: VReg, c: VReg, x: VReg, y: VReg, ) -> Self::Return
dst = (c & x) | (!c & y)
Sourcefn vbitmask8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vbitmask8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
Sourcefn vbitmask16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vbitmask16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
Sourcefn vbitmask32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vbitmask32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
Sourcefn vbitmask64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vbitmask64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
Sourcefn valltrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
fn valltrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst
.
Sourcefn valltrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
fn valltrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst
.
Sourcefn valltrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
fn valltrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst
.
Sourcefn valltrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
fn valltrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst
.
Sourcefn vanytrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vanytrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst
.
Sourcefn vanytrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vanytrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst
.
Sourcefn vanytrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vanytrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst
.
Sourcefn vanytrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
fn vanytrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst
.
Sourcefn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f32_from_x32_s
)
Sourcefn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f32_from_x32_u
)
Sourcefn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f64_from_x64_s
)
Sourcefn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f64_from_x64_u
)
Sourcefn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as signed, to twice the width.
Sourcefn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as unsigned, to twice the width.
Sourcefn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as signed, to twice the width.
Sourcefn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as unsigned, to twice the width.
Sourcefn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as signed, to twice the width.
Sourcefn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as unsigned, to twice the width.
Sourcefn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as signed, to twice the width.
Sourcefn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as unsigned, to twice the width.
Sourcefn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as signed, to twice the width.
Sourcefn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as unsigned, to twice the width.
Sourcefn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as signed, to twice the width.
Sourcefn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as unsigned, to twice the width.
Sourcefn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Sourcefn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Sourcefn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Sourcefn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Sourcefn vfpromotelow(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vfpromotelow(&mut self, dst: VReg, src: VReg) -> Self::Return
Promotes the low two lanes of the f32x4 input to f64x2.
Sourcefn vfdemote(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vfdemote(&mut self, dst: VReg, src: VReg) -> Self::Return
Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
Sourcefn vsubi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 - src2
Sourcefn vsubi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 - src2
Sourcefn vsubi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 - src2
Sourcefn vsubi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 - src2
Sourcefn vsubf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 - src2
Sourcefn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
Sourcefn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
Sourcefn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
Sourcefn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
Sourcefn vmuli8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 * src2
Sourcefn vmuli16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 * src2
Sourcefn vmuli32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 * src2
Sourcefn vmuli64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 * src2
Sourcefn vmulf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src1 * src2
Sourcefn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
Sourcefn vpopcnt8x16(&mut self, dst: VReg, src: VReg) -> Self::Return
fn vpopcnt8x16(&mut self, dst: VReg, src: VReg) -> Self::Return
dst = count_ones(src)
Sourcefn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = zext(src[lane])
Sourcefn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = zext(src[lane])
Sourcefn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = src[lane]
Sourcefn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return
fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = src[lane]
Sourcefn vinsertx8(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return
fn vinsertx8( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return
dst = src1; dst[lane] = src2
Sourcefn vinsertx16(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return
fn vinsertx16( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return
dst = src1; dst[lane] = src2
Sourcefn vinsertx32(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return
fn vinsertx32( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return
dst = src1; dst[lane] = src2
Sourcefn vinsertx64(
&mut self,
operands: BinaryOperands<VReg, VReg, XReg>,
lane: u8,
) -> Self::Return
fn vinsertx64( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return
dst = src1; dst[lane] = src2
Sourcefn vinsertf32(
&mut self,
operands: BinaryOperands<VReg, VReg, FReg>,
lane: u8,
) -> Self::Return
fn vinsertf32( &mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8, ) -> Self::Return
dst = src1; dst[lane] = src2
Sourcefn vinsertf64(
&mut self,
operands: BinaryOperands<VReg, VReg, FReg>,
lane: u8,
) -> Self::Return
fn vinsertf64( &mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8, ) -> Self::Return
dst = src1; dst[lane] = src2
Sourcefn veq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn veq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src == dst
Sourcefn vneq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vneq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src != dst
Sourcefn vslt8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslt8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(signed)
Sourcefn vslteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(signed)
Sourcefn vult8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vult8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(unsigned)
Sourcefn vulteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(unsigned)
Sourcefn veq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn veq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src == dst
Sourcefn vneq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vneq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src != dst
Sourcefn vslt16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslt16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(signed)
Sourcefn vslteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(signed)
Sourcefn vult16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vult16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(unsigned)
Sourcefn vulteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(unsigned)
Sourcefn veq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn veq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src == dst
Sourcefn vneq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vneq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src != dst
Sourcefn vslt32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslt32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(signed)
Sourcefn vslteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(signed)
Sourcefn vult32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vult32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(unsigned)
Sourcefn vulteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(unsigned)
Sourcefn veq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn veq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src == dst
Sourcefn vneq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vneq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src != dst
Sourcefn vslt64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslt64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(signed)
Sourcefn vslteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(signed)
Sourcefn vult64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vult64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst
(unsigned)
Sourcefn vulteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
(unsigned)
fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = min(src1, src2) (signed)
fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = min(src1, src2) (unsigned)
fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = min(src1, src2) (signed)
fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = min(src1, src2) (unsigned)
fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = max(src1, src2) (signed)
fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = max(src1, src2) (unsigned)
fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = max(src1, src2) (signed)
fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = max(src1, src2) (unsigned)
fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = min(src1, src2) (signed)
fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = min(src1, src2) (unsigned)
fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = max(src1, src2) (signed)
fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = max(src1, src2) (unsigned)
fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = ieee_maximum(src1, src2)
fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = ieee_maximum(src1, src2)
fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = ieee_minimum(src1, src2)
fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = ieee_minimum(src1, src2)
fn vshuffle(
    &mut self,
    dst: VReg,
    src1: VReg,
    src2: VReg,
    mask: u128,
) -> Self::Return
    dst = shuffle(src1, src2, mask)
fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = swizzle(src1, src2)
fn vavground8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = (src1 + src2 + 1) // 2
fn vavground16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
    dst = (src1 + src2 + 1) // 2
Implementors
impl ExtendedOpVisitor for Disassembler<'_>
    Available on crate feature `disas` only.