pulley_interpreter::decode

Trait ExtendedOpVisitor

Source
pub trait ExtendedOpVisitor: OpVisitor {
    // 282 required methods; each is listed under "Required Methods" below.
}
Available on crate feature decode only.

Like OpVisitor but for extended operations.

Required Methods

Source

fn trap(&mut self) -> Self::Return

Raise a trap.

Source

fn nop(&mut self) -> Self::Return

Do nothing.

Source

fn call_indirect_host(&mut self, id: u8) -> Self::Return

A special opcode to halt interpreter execution and yield control back to the host.

This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It’s up to the embedder to determine what to do with the id and the current state of registers and the stack.

In Wasmtime this is used to implement interpreter-to-host calls. This is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function are in following parameter positions (e.g. x1, x2, …). The results of the host call are then stored in x0.

Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
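
As an illustration, a minimal embedder-side dispatch might look like the sketch below. The register-file layout and the helper name are hypothetical; only the id-based dispatch and the x0/x1/x2 convention come from the description above.

// Hypothetical embedder-side handler for DoneReason::CallIndirectHost.
// `xregs` stands in for the interpreter's integer register file.
fn handle_call_indirect_host(id: u8, xregs: &mut [u64; 32]) {
    match id {
        // Arguments arrive in x1, x2, ...; the result goes back in x0.
        0 => {
            let (a, b) = (xregs[1], xregs[2]);
            xregs[0] = a.wrapping_add(b);
        }
        _ => panic!("unrecognized host call id: {id}"),
    }
}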

Source

fn xmov_fp(&mut self, dst: XReg) -> Self::Return

Gets the special “fp” register and moves it into dst.

Source

fn xmov_lr(&mut self, dst: XReg) -> Self::Return

Gets the special “lr” register and moves it into dst.

Source

fn bswap32(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = byteswap(low32(src))

Source

fn bswap64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = byteswap(src)
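
In plain Rust these two operations correspond directly to the standard swap_bytes methods; the sketch assumes the 32-bit form leaves a zero-extended result, which is one reasonable reading of the pseudocode above.

// Sketch of the bswap semantics; helper names are illustrative only.
fn bswap32_semantics(src: u64) -> u64 {
    (src as u32).swap_bytes() as u64 // byteswap(low32(src)), shown zero-extended
}

fn bswap64_semantics(src: u64) -> u64 {
    src.swap_bytes() // byteswap(src)
}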

Source

fn xadd32_uoverflow_trap( &mut self, operands: BinaryOperands<XReg>, ) -> Self::Return

32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified. Traps if the addition overflows.

Source

fn xadd64_uoverflow_trap( &mut self, operands: BinaryOperands<XReg>, ) -> Self::Return

64-bit checked unsigned addition: dst = src1 + src2. Traps if the addition overflows.

Source

fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = high64(src1 * src2) (signed)

Source

fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = high64(src1 * src2) (unsigned)

Source

fn xbmask32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = if low32(src) == 0 { 0 } else { -1 }

Source

fn xbmask64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = if src == 0 { 0 } else { -1 }

Source

fn xpush32(&mut self, src: XReg) -> Self::Return

*sp = low32(src); sp = sp.checked_add(4)

Source

fn xpush32_many(&mut self, srcs: RegSet<XReg>) -> Self::Return

for src in srcs { xpush32 src }

Source

fn xpush64(&mut self, src: XReg) -> Self::Return

*sp = src; sp = sp.checked_add(8)

Source

fn xpush64_many(&mut self, srcs: RegSet<XReg>) -> Self::Return

for src in srcs { xpush64 src }

Source

fn xpop32(&mut self, dst: XReg) -> Self::Return

*dst = *sp; sp -= 4

Source

fn xpop32_many(&mut self, dsts: RegSet<XReg>) -> Self::Return

for dst in dsts.rev() { xpop32 dst }

Source

fn xpop64(&mut self, dst: XReg) -> Self::Return

*dst = *sp; sp -= 8

Source

fn xpop64_many(&mut self, dsts: RegSet<XReg>) -> Self::Return

for dst in dsts.rev() { xpop64 dst }
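
The reversed iteration in the pop variants is what makes a matching push_many/pop_many pair restore registers to their original values. A sketch of that pairing on an ordinary Vec<u64>, rather than the interpreter's real sp-based stack:

// Illustrative model of xpush64_many / xpop64_many.
fn xpush64_many(stack: &mut Vec<u64>, srcs: &[u64]) {
    for &src in srcs {
        stack.push(src); // per-register xpush64
    }
}

fn xpop64_many(stack: &mut Vec<u64>, dsts: &mut [u64]) {
    // Visit the register set in reverse, mirroring `dsts.rev()` above.
    for dst in dsts.iter_mut().rev() {
        *dst = stack.pop().expect("stack underflow"); // per-register xpop64
    }
}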

Source

fn xload16be_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source

fn xload16be_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source

fn xload32be_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source

fn xload32be_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source

fn xload64be_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source

fn xstore16be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low16(src)

Source

fn xstore32be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source

fn xstore64be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low64(src)
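
The be loads read big-endian bytes and then zero- or sign-extend into the 64-bit destination. A sketch of the 16-bit pair on a plain byte slice (`mem` stands in for the interpreter's memory; bounds handling is elided):

// xload16be_u64: dst = zext of a big-endian u16
fn xload16be_u64(mem: &[u8], addr: usize) -> u64 {
    let bytes: [u8; 2] = mem[addr..addr + 2].try_into().unwrap();
    u16::from_be_bytes(bytes) as u64 // zero-extend
}

// xload16be_s64: dst = sext of a big-endian i16
fn xload16be_s64(mem: &[u8], addr: usize) -> u64 {
    let bytes: [u8; 2] = mem[addr..addr + 2].try_into().unwrap();
    i16::from_be_bytes(bytes) as i64 as u64 // sign-extend
}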

Source

fn fload32be_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source

fn fload64be_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source

fn fstore32be_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source

fn fstore64be_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = src

Source

fn fload32le_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source

fn fload64le_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source

fn fstore32le_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source

fn fstore64le_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = src

Source

fn vload128le_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source

fn vstore128le_offset32( &mut self, ptr: XReg, offset: i32, src: VReg, ) -> Self::Return

*(ptr + offset) = src

Source

fn fmov(&mut self, dst: FReg, src: FReg) -> Self::Return

Move between f registers.

Source

fn vmov(&mut self, dst: VReg, src: VReg) -> Self::Return

Move between v registers.

Source

fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = bitcast low32(src) as i32

Source

fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = bitcast src as i64

Source

fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = bitcast low32(src) as f32

Source

fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = bitcast src as f64
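
These bitcasts reinterpret bits without any numeric conversion, which is exactly what the standard to_bits/from_bits methods do:

// Bit-for-bit moves between the integer and float register files.
fn bitcast_int_from_float_32(src: f32) -> u32 {
    src.to_bits()
}

fn bitcast_float_from_int_64(src: u64) -> f64 {
    f64::from_bits(src)
}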

Source

fn fconst32(&mut self, dst: FReg, bits: u32) -> Self::Return

low32(dst) = bits

Source

fn fconst64(&mut self, dst: FReg, bits: u64) -> Self::Return

dst = bits

Source

fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 == src2)

Source

fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 != src2)

Source

fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 < src2)

Source

fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 <= src2)

Source

fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 == src2)

Source

fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 != src2)

Source

fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 < src2)

Source

fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 <= src2)

Source

fn fselect32( &mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg, ) -> Self::Return

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

Source

fn fselect64( &mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg, ) -> Self::Return

dst = low32(cond) ? if_nonzero : if_zero
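
The comparison opcodes produce a zero-extended boolean, and the selects test that integer against zero; in scalar Rust terms:

// feq32: low32(dst) = zext(src1 == src2)
fn feq32_semantics(src1: f32, src2: f32) -> u32 {
    (src1 == src2) as u32
}

// fselect32: pick if_nonzero exactly when cond's low 32 bits are nonzero
fn fselect32_semantics(cond: u32, if_nonzero: f32, if_zero: f32) -> f32 {
    if cond != 0 { if_nonzero } else { if_zero }
}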

Source

fn f32_from_f64(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = demote(src)

Source

fn f64_from_f32(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = promote(low32(src))

Source

fn f32_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_signed(low32(src))

Source

fn f32_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_unsigned(low32(src))

Source

fn f32_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_signed(src)

Source

fn f32_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_unsigned(src)

Source

fn f64_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_signed(low32(src))

Source

fn f64_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_unsigned(low32(src))

Source

fn f64_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_signed(src)

Source

fn f64_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_unsigned(src)

Source

fn x32_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_signed_from_f32(low32(src))

Source

fn x32_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_unsigned_from_f32(low32(src))

Source

fn x32_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_signed_from_f64(src)

Source

fn x32_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_unsigned_from_f64(src)

Source

fn x64_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_signed_from_f32(low32(src))

Source

fn x64_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_unsigned_from_f32(low32(src))

Source

fn x64_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_signed_from_f64(src)

Source

fn x64_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_unsigned_from_f64(src)

Source

fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_signed_from_f32(low32(src))

Source

fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_unsigned_from_f32(low32(src))

Source

fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_signed_from_f64(src)

Source

fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_unsigned_from_f64(src)

Source

fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_signed_from_f32(low32(src))

Source

fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_unsigned_from_f32(low32(src))

Source

fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_signed_from_f64(src)

Source

fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_unsigned_from_f64(src)
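
The difference between the checked and _sat families is what happens out of range: the checked conversions trap, while the saturating ones clamp (with NaN going to zero). A scalar sketch, using Option as a stand-in for the trap:

// Saturating form: Rust's `as` casts already clamp and map NaN to 0.
fn x32_from_f32_u_sat(src: f32) -> u32 {
    src as u32
}

// Checked form: None marks where the real instruction would trap.
fn x32_from_f32_u_checked(src: f32) -> Option<u32> {
    if src.is_nan() || src <= -1.0 || src >= 4_294_967_296.0 {
        None // out of range for u32 (upper bound is 2^32)
    } else {
        Some(src as u32)
    }
}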

Source

fn fcopysign32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = copysign(low32(src1), low32(src2))

Source

fn fcopysign64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = copysign(src1, src2)

Source

fn fadd32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) + low32(src2)

Source

fn fsub32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) - low32(src2)

Source

fn fmul32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) * low32(src2)

Source

fn fdiv32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) / low32(src2)

Source

fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

low128(dst) = low128(src1) / low128(src2)

Source

fn fmaximum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = ieee_maximum(low32(src1), low32(src2))

Source

fn fminimum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = ieee_minimum(low32(src1), low32(src2))
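
The ieee_ prefix here denotes IEEE 754-2019 maximum/minimum semantics: a NaN input produces NaN, and +0.0 orders above -0.0 (unlike f32::max on stable Rust, which ignores a NaN operand). A scalar sketch of the maximum:

fn ieee_maximum(a: f32, b: f32) -> f32 {
    if a.is_nan() || b.is_nan() {
        f32::NAN // NaN propagates
    } else if a == b {
        // +0.0 and -0.0 compare equal; prefer the positive zero.
        if a.is_sign_positive() { a } else { b }
    } else if a > b {
        a
    } else {
        b
    }
}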

Source

fn ftrunc32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_trunc(low32(src))

Source

fn vtrunc32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_trunc(low128(src))

Source

fn vtrunc64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_trunc(low128(src))

Source

fn ffloor32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_floor(low32(src))

Source

fn vfloor32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_floor(low128(src))

Source

fn vfloor64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_floor(low128(src))

Source

fn fceil32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_ceil(low32(src))

Source

fn vceil32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_ceil(low128(src))

Source

fn vceil64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_ceil(low128(src))

Source

fn fnearest32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_nearest(low32(src))

Source

fn fsqrt32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_sqrt(low32(src))

Source

fn vsqrt32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_sqrt(low128(src))

Source

fn vsqrt64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_sqrt(low128(src))

Source

fn fneg32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = -low32(src)

Source

fn fabs32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = |low32(src)|

Source

fn fadd64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 + src2

Source

fn fsub64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 - src2

Source

fn fmul64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 * src2

Source

fn fdiv64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 / src2

Source

fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 / src2

Source

fn fmaximum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = ieee_maximum(src1, src2)

Source

fn fminimum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = ieee_minimum(src1, src2)

Source

fn ftrunc64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_trunc(src)

Source

fn ffloor64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_floor(src)

Source

fn fceil64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_ceil(src)

Source

fn fnearest64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_nearest(src)

Source

fn vnearest32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_nearest(low128(src))

Source

fn vnearest64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_nearest(low128(src))

Source

fn fsqrt64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_sqrt(src)

Source

fn fneg64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = -src

Source

fn fabs64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = |src|

Source

fn vconst128(&mut self, dst: VReg, imm: u128) -> Self::Return

dst = imm

Source

fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source

fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source

fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source

fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source

fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source

fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source

fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source

fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source

fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source

fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source

fn vaddpairwisei16x8_s( &mut self, operands: BinaryOperands<VReg>, ) -> Self::Return

dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]

Source

fn vaddpairwisei32x4_s( &mut self, operands: BinaryOperands<VReg>, ) -> Self::Return

dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
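
Spelled out on lanes, the pairwise add sums adjacent lanes of src1 into the low half of the result and adjacent lanes of src2 into the high half. The sketch below shows the 16x8 form with wrapping lane arithmetic (an assumption, since the pseudocode above doesn't specify overflow behavior):

fn vaddpairwisei16x8_s(src1: [i16; 8], src2: [i16; 8]) -> [i16; 8] {
    let mut dst = [0i16; 8];
    for i in 0..4 {
        dst[i] = src1[2 * i].wrapping_add(src1[2 * i + 1]); // src1 pairs -> low half
        dst[i + 4] = src2[2 * i].wrapping_add(src2[2 * i + 1]); // src2 pairs -> high half
    }
    dst
}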

Source

fn vshli8x16( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source

fn vshli16x8( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source

fn vshli32x4( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source

fn vshli64x2( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source

fn vshri8x16_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source

fn vshri16x8_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source

fn vshri32x4_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source

fn vshri64x2_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source

fn vshri8x16_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source

fn vshri16x8_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source

fn vshri32x4_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source

fn vshri64x2_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source

fn vsplatx8(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(low8(src))

Source

fn vsplatx16(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(low16(src))

Source

fn vsplatx32(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(low32(src))

Source

fn vsplatx64(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(src)

Source

fn vsplatf32(&mut self, dst: VReg, src: FReg) -> Self::Return

dst = splat(low32(src))

Source

fn vsplatf64(&mut self, dst: VReg, src: FReg) -> Self::Return

dst = splat(src)

Source

fn vload8x8_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as i8x8 and sign-extend to i16x8.

Source

fn vload8x8_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as u8x8 and zero-extend to i16x8.

Source

fn vload16x4le_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as i16x4 and sign-extend to i32x4.

Source

fn vload16x4le_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as u16x4 and zero-extend to i32x4.

Source

fn vload32x2le_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as i32x2 and sign-extend to i64x2.

Source

fn vload32x2le_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as u32x2 and zero-extend to i64x2.

Source

fn vband128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 & src2

Source

fn vbor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 | src2

Source

fn vbxor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 ^ src2

Source

fn vbnot128(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = !src

Source

fn vbitselect128( &mut self, dst: VReg, c: VReg, x: VReg, y: VReg, ) -> Self::Return

dst = (c & x) | (!c & y)
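
On a flat 128-bit value the select is pure bit arithmetic: bit i of dst comes from x when bit i of c is set, and from y otherwise.

fn vbitselect128(c: u128, x: u128, y: u128) -> u128 {
    (c & x) | (!c & y)
}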

Source

fn vbitmask8x16(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source

fn vbitmask16x8(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source

fn vbitmask32x4(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source

fn vbitmask64x2(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.
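
A sketch of the 8x16 form, packing the high (sign) bit of each lane into successive low bits of the result; placing lane 0 at bit 0 is an assumption about lane order:

fn vbitmask8x16(src: [u8; 16]) -> u32 {
    let mut mask = 0u32;
    for (i, &lane) in src.iter().enumerate() {
        mask |= ((lane >> 7) as u32) << i; // high bit of lane i -> bit i
    }
    mask
}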

Source

fn valltrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source

fn valltrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source

fn valltrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source

fn valltrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source

fn vanytrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source

fn vanytrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source

fn vanytrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source

fn vanytrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source

fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f32_from_x32_s)

Source

fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f32_from_x32_u)

Source

fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f64_from_x64_s)

Source

fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f64_from_x64_u)

Source

fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as signed, to twice the width.

Source

fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as unsigned, to twice the width.

Source

fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as signed, to twice the width.

Source

fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as unsigned, to twice the width.

Source

fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as signed, to twice the width.

Source

fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as unsigned, to twice the width.

Source

fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as signed, to twice the width.

Source

fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as unsigned, to twice the width.

Source

fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as signed, to twice the width.

Source

fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as unsigned, to twice the width.

Source

fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as signed, to twice the width.

Source

fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as unsigned, to twice the width.

Source

fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

Source

fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

Source

fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

Source

fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
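
Concretely, narrowing clamps each wide lane into the destination lane's range, here sketched with src1 filling the low half of the result and src2 the high half (the half ordering is an assumption). The signed 16x8 case:

fn vnarrow16x8_s(src1: [i16; 8], src2: [i16; 8]) -> [i8; 16] {
    let mut dst = [0i8; 16];
    for i in 0..8 {
        dst[i] = src1[i].clamp(i8::MIN as i16, i8::MAX as i16) as i8;
        dst[i + 8] = src2[i].clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    }
    dst
}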

Source

fn vfpromotelow(&mut self, dst: VReg, src: VReg) -> Self::Return

Promotes the low two lanes of the f32x4 input to f64x2.

Source

fn vfdemote(&mut self, dst: VReg, src: VReg) -> Self::Return

Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.

Source

fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source

fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source

fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source

fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source

fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source

fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source

fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source

fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source

fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source

fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source

fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source

fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source

fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source

fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source

fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = signed_saturate((src1 * src2 + (1 << (Q - 1))) >> Q)
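
With Q = 15 this is the familiar rounding Q-format multiply (the operation wasm exposes as i16x8.q15mulr_sat_s); one lane of it in scalar Rust:

fn q15mulr_sat_s(a: i16, b: i16) -> i16 {
    let product = a as i32 * b as i32; // widen so the multiply can't overflow
    let rounded = (product + (1 << 14)) >> 15; // add 1 << (Q - 1), shift by Q
    rounded.clamp(i16::MIN as i32, i16::MAX as i32) as i16 // signed_saturate
}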

Source

fn vpopcnt8x16(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = count_ones(src)

Source

fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = zext(src[lane])

Source

fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = zext(src[lane])

Source

fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = src[lane]

Source

fn xextractv64x2(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

dst = src[lane]

Source

fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = src[lane]

Source

fn fextractv64x2(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return

dst = src[lane]

Source

fn vinsertx8( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source

fn vinsertx16( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source

fn vinsertx32( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source

fn vinsertx64( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source

fn vinsertf32( &mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source

fn vinsertf64( &mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source

fn veq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 == src2

Source

fn vneq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 != src2

Source

fn vslt8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (signed)

Source

fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (signed)

Source

fn vult8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (unsigned)

Source

fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (unsigned)

Source

fn veq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 == src2

Source

fn vneq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 != src2

Source

fn vslt16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (signed)

Source

fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (signed)

Source

fn vult16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (unsigned)

Source

fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (unsigned)

Source

fn veq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 == src2

Source

fn vneq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 != src2

Source

fn vslt32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (signed)

Source

fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (signed)

Source

fn vult32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (unsigned)

Source

fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (unsigned)

Source

fn veq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 == src2

Source

fn vneq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 != src2

Source

fn vslt64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (signed)

Source

fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (signed)

Source

fn vult64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 < src2 (unsigned)

Source

fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 <= src2 (unsigned)

Source

fn vneg8x16(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source

fn vneg16x8(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source

fn vneg32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source

fn vneg64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source

fn vnegf64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source

fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source

fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source

fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source

fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source

fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source

fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source

fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source

fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source

fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source

fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source

fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source

fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source

fn vabs8x16(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source

fn vabs16x8(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source

fn vabs32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source

fn vabs64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source

fn vabsf32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source

fn vabsf64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source

fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_maximum(src1, src2)

Source

fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_maximum(src1, src2)

Source

fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_minimum(src1, src2)

Source

fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_minimum(src1, src2)

Source

fn vshuffle( &mut self, dst: VReg, src1: VReg, src2: VReg, mask: u128, ) -> Self::Return

dst = shuffle(src1, src2, mask)

Source

fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = swizzle(src1, src2)

Source

fn vavground8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = (src1 + src2 + 1) // 2

Source

fn vavground16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = (src1 + src2 + 1) // 2
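
The // in the formula is integer division: the average rounds up on ties because of the +1. Per lane, for the 8x16 form:

fn vavground8x16_lane(a: u8, b: u8) -> u8 {
    ((a as u16 + b as u16 + 1) / 2) as u8 // widen to avoid overflow, then halve
}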

Implementors

Source

impl ExtendedOpVisitor for Disassembler<'_>

Available on crate feature disas only.
Source

impl<B: BytecodeStream> ExtendedOpVisitor for MaterializeOpsVisitor<B>

Source

impl<F, T, V1, V2> ExtendedOpVisitor for SequencedVisitor<'_, F, V1, V2>
where
    F: FnMut(V1::Return, V2::Return) -> T,
    V1: ExtendedOpVisitor,
    V2: ExtendedOpVisitor<BytecodeStream = V1::BytecodeStream>,