pub struct Disassembler<'a> { /* private fields */ }
Available on crate feature disas only.
Expand description
A Pulley bytecode disassembler.
This is implemented as an OpVisitor
, where you pass a Disassembler
to a
Decoder
in order to disassemble instructions from a bytecode stream.
Alternatively, you can use the Disassembler::disassemble_all
method to
disassemble a complete bytecode stream.
Implementations§
Source§impl<'a> Disassembler<'a>
impl<'a> Disassembler<'a>
Sourcepub fn disassemble_all(bytecode: &'a [u8]) -> Result<String>
pub fn disassemble_all(bytecode: &'a [u8]) -> Result<String>
Disassemble every instruction in the given bytecode stream.
Sourcepub fn new(bytecode: &'a [u8]) -> Self
pub fn new(bytecode: &'a [u8]) -> Self
Create a new Disassembler
that can be used to incrementally
disassemble instructions from the given bytecode stream.
Sourcepub fn offsets(&mut self, offsets: bool) -> &mut Self
pub fn offsets(&mut self, offsets: bool) -> &mut Self
Whether to prefix each instruction’s disassembly with its offset.
True by default.
Sourcepub fn hexdump(&mut self, hexdump: bool) -> &mut Self
pub fn hexdump(&mut self, hexdump: bool) -> &mut Self
Whether to include a hexdump of the bytecode in the disassembly.
True by default.
Sourcepub fn start_offset(&mut self, offset: usize) -> &mut Self
pub fn start_offset(&mut self, offset: usize) -> &mut Self
Configures the offset that this function starts from, if it doesn’t start from 0.
This can possibly be useful when a single function at a time is being disassembled.
Trait Implementations§
Source§impl ExtendedOpVisitor for Disassembler<'_>
impl ExtendedOpVisitor for Disassembler<'_>
Source§fn call_indirect_host(&mut self, id: u8)
fn call_indirect_host(&mut self, id: u8)
decode
only.Source§fn xmov_fp(&mut self, dst: XReg)
fn xmov_fp(&mut self, dst: XReg)
decode
only.dst
.Source§fn xmov_lr(&mut self, dst: XReg)
fn xmov_lr(&mut self, dst: XReg)
decode
only.dst
.Source§fn bswap32(&mut self, dst: XReg, src: XReg)
fn bswap32(&mut self, dst: XReg, src: XReg)
decode
only.dst = byteswap(low32(src))
Source§fn bswap64(&mut self, dst: XReg, src: XReg)
fn bswap64(&mut self, dst: XReg, src: XReg)
decode
only.dst = byteswap(src)
Source§fn xadd32_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>)
fn xadd32_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) + low32(src2)
. Read moreSource§fn xadd64_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>)
fn xadd64_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 + src2
.Source§fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>)
fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = high64(src1 * src2)
(signed)Source§fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>)
fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = high64(src1 * src2)
(unsigned)Source§fn xbmask32(&mut self, dst: XReg, src: XReg)
fn xbmask32(&mut self, dst: XReg, src: XReg)
decode
only.Source§fn xbmask64(&mut self, dst: XReg, src: XReg)
fn xbmask64(&mut self, dst: XReg, src: XReg)
decode
only.Source§fn xpush32(&mut self, src: XReg)
fn xpush32(&mut self, src: XReg)
decode
only.*sp = low32(src); sp = sp.checked_add(4)
Source§fn xpush32_many(&mut self, srcs: RegSet<XReg>)
fn xpush32_many(&mut self, srcs: RegSet<XReg>)
decode
only.for src in srcs { xpush32 src }
Source§fn xpush64(&mut self, src: XReg)
fn xpush64(&mut self, src: XReg)
decode
only.*sp = src; sp = sp.checked_add(8)
Source§fn xpush64_many(&mut self, srcs: RegSet<XReg>)
fn xpush64_many(&mut self, srcs: RegSet<XReg>)
decode
only.for src in srcs { xpush64 src }
Source§fn xpop32_many(&mut self, dsts: RegSet<XReg>)
fn xpop32_many(&mut self, dsts: RegSet<XReg>)
decode
only.for dst in dsts.rev() { xpop32 dst }
Source§fn xpop64_many(&mut self, dsts: RegSet<XReg>)
fn xpop64_many(&mut self, dsts: RegSet<XReg>)
decode
only.for dst in dsts.rev() { xpop64 dst }
Source§fn xload16be_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload16be_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload16be_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload16be_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload32be_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload32be_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload32be_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload32be_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload64be_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload64be_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = *(ptr + offset)
Source§fn xstore16be_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore16be_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low16(src)
Source§fn xstore32be_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore32be_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low32(src)
Source§fn xstore64be_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore64be_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low64(src)
Source§fn fload32be_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
fn fload32be_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = zext(*(ptr + offset))
Source§fn fload64be_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
fn fload64be_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
decode
only.dst = *(ptr + offset)
Source§fn fstore32be_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
fn fstore32be_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
decode
only.*(ptr + offset) = low32(src)
Source§fn fstore64be_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
fn fstore64be_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
decode
only.*(ptr + offset) = src
Source§fn fload32le_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
fn fload32le_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = zext(*(ptr + offset))
Source§fn fload64le_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
fn fload64le_offset32(&mut self, dst: FReg, ptr: XReg, offset: i32)
decode
only.dst = *(ptr + offset)
Source§fn fstore32le_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
fn fstore32le_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
decode
only.*(ptr + offset) = low32(src)
Source§fn fstore64le_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
fn fstore64le_offset32(&mut self, ptr: XReg, offset: i32, src: FReg)
decode
only.*(ptr + offset) = src
Source§fn vload128le_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload128le_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.dst = *(ptr + offset)
Source§fn vstore128le_offset32(&mut self, ptr: XReg, offset: i32, src: VReg)
fn vstore128le_offset32(&mut self, ptr: XReg, offset: i32, src: VReg)
decode
only.*(ptr + offset) = src
Source§fn fmov(&mut self, dst: FReg, src: FReg)
fn fmov(&mut self, dst: FReg, src: FReg)
decode
only.f
registers.Source§fn vmov(&mut self, dst: VReg, src: VReg)
fn vmov(&mut self, dst: VReg, src: VReg)
decode
only.v
registers.Source§fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg)
fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = bitcast low32(src) as i32
Source§fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg)
fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg)
decode
only.dst = bitcast src as i64
Source§fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg)
fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg)
decode
only.low32(dst) = bitcast low32(src) as f32
Source§fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg)
fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg)
decode
only.dst = bitcast src as f64
Source§fn fconst32(&mut self, dst: FReg, bits: u32)
fn fconst32(&mut self, dst: FReg, bits: u32)
decode
only.low32(dst) = bits
Source§fn fconst64(&mut self, dst: FReg, bits: u64)
fn fconst64(&mut self, dst: FReg, bits: u64)
decode
only.dst = bits
Source§fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 == src2)
Source§fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 != src2)
Source§fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 < src2)
Source§fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 <= src2)
Source§fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 == src2)
Source§fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 != src2)
Source§fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 < src2)
Source§fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg)
fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg)
decode
only.low32(dst) = zext(src1 <= src2)
Source§fn fselect32(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg)
fn fselect32(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg)
decode
only.low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
Source§fn fselect64(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg)
fn fselect64(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg)
decode
only.dst = low32(cond) ? if_nonzero : if_zero
Source§fn f32_from_f64(&mut self, dst: FReg, src: FReg)
fn f32_from_f64(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = demote(src)
Source§fn f64_from_f32(&mut self, dst: FReg, src: FReg)
fn f64_from_f32(&mut self, dst: FReg, src: FReg)
decode
only.dst = promote(low32(src))
Source§fn f32_from_x32_s(&mut self, dst: FReg, src: XReg)
fn f32_from_x32_s(&mut self, dst: FReg, src: XReg)
decode
only.low32(dst) = checked_f32_from_signed(low32(src))
Source§fn f32_from_x32_u(&mut self, dst: FReg, src: XReg)
fn f32_from_x32_u(&mut self, dst: FReg, src: XReg)
decode
only.low32(dst) = checked_f32_from_unsigned(low32(src))
Source§fn f32_from_x64_s(&mut self, dst: FReg, src: XReg)
fn f32_from_x64_s(&mut self, dst: FReg, src: XReg)
decode
only.low32(dst) = checked_f32_from_signed(src)
Source§fn f32_from_x64_u(&mut self, dst: FReg, src: XReg)
fn f32_from_x64_u(&mut self, dst: FReg, src: XReg)
decode
only.low32(dst) = checked_f32_from_unsigned(src)
Source§fn f64_from_x32_s(&mut self, dst: FReg, src: XReg)
fn f64_from_x32_s(&mut self, dst: FReg, src: XReg)
decode
only.dst = checked_f64_from_signed(low32(src))
Source§fn f64_from_x32_u(&mut self, dst: FReg, src: XReg)
fn f64_from_x32_u(&mut self, dst: FReg, src: XReg)
decode
only.dst = checked_f64_from_unsigned(low32(src))
Source§fn f64_from_x64_s(&mut self, dst: FReg, src: XReg)
fn f64_from_x64_s(&mut self, dst: FReg, src: XReg)
decode
only.dst = checked_f64_from_signed(src)
Source§fn f64_from_x64_u(&mut self, dst: FReg, src: XReg)
fn f64_from_x64_u(&mut self, dst: FReg, src: XReg)
decode
only.dst = checked_f64_from_unsigned(src)
Source§fn x32_from_f32_s(&mut self, dst: XReg, src: FReg)
fn x32_from_f32_s(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = checked_signed_from_f32(low32(src))
Source§fn x32_from_f32_u(&mut self, dst: XReg, src: FReg)
fn x32_from_f32_u(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = checked_unsigned_from_f32(low32(src))
Source§fn x32_from_f64_s(&mut self, dst: XReg, src: FReg)
fn x32_from_f64_s(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = checked_signed_from_f64(src)
Source§fn x32_from_f64_u(&mut self, dst: XReg, src: FReg)
fn x32_from_f64_u(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = checked_unsigned_from_f64(src)
Source§fn x64_from_f32_s(&mut self, dst: XReg, src: FReg)
fn x64_from_f32_s(&mut self, dst: XReg, src: FReg)
decode
only.dst = checked_signed_from_f32(low32(src))
Source§fn x64_from_f32_u(&mut self, dst: XReg, src: FReg)
fn x64_from_f32_u(&mut self, dst: XReg, src: FReg)
decode
only.dst = checked_unsigned_from_f32(low32(src))
Source§fn x64_from_f64_s(&mut self, dst: XReg, src: FReg)
fn x64_from_f64_s(&mut self, dst: XReg, src: FReg)
decode
only.dst = checked_signed_from_f64(src)
Source§fn x64_from_f64_u(&mut self, dst: XReg, src: FReg)
fn x64_from_f64_u(&mut self, dst: XReg, src: FReg)
decode
only.dst = checked_unsigned_from_f64(src)
Source§fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg)
fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = saturating_signed_from_f32(low32(src))
Source§fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg)
fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = saturating_unsigned_from_f32(low32(src))
Source§fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg)
fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = saturating_signed_from_f64(src)
Source§fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg)
fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg)
decode
only.low32(dst) = saturating_unsigned_from_f64(src)
Source§fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg)
fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg)
decode
only.dst = saturating_signed_from_f32(low32(src))
Source§fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg)
fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg)
decode
only.dst = saturating_unsigned_from_f32(low32(src))
Source§fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg)
fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg)
decode
only.dst = saturating_signed_from_f64(src)
Source§fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg)
fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg)
decode
only.dst = saturating_unsigned_from_f64(src)
Source§fn fcopysign32(&mut self, operands: BinaryOperands<FReg>)
fn fcopysign32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = copysign(low32(src1), low32(src2))
Source§fn fcopysign64(&mut self, operands: BinaryOperands<FReg>)
fn fcopysign64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = copysign(src1, src2)
Source§fn fadd32(&mut self, operands: BinaryOperands<FReg>)
fn fadd32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = low32(src1) + low32(src2)
Source§fn fsub32(&mut self, operands: BinaryOperands<FReg>)
fn fsub32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = low32(src1) - low32(src2)
Source§fn fmul32(&mut self, operands: BinaryOperands<FReg>)
fn fmul32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = low32(src1) * low32(src2)
Source§fn fdiv32(&mut self, operands: BinaryOperands<FReg>)
fn fdiv32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = low32(src1) / low32(src2)
Source§fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>)
fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.low128(dst) = low128(src1) / low128(src2)
Source§fn fmaximum32(&mut self, operands: BinaryOperands<FReg>)
fn fmaximum32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = ieee_maximum(low32(src1), low32(src2))
Source§fn fminimum32(&mut self, operands: BinaryOperands<FReg>)
fn fminimum32(&mut self, operands: BinaryOperands<FReg>)
decode
only.low32(dst) = ieee_minimum(low32(src1), low32(src2))
Source§fn ftrunc32(&mut self, dst: FReg, src: FReg)
fn ftrunc32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = ieee_trunc(low32(src))
Source§fn vtrunc32x4(&mut self, dst: VReg, src: VReg)
fn vtrunc32x4(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_trunc(low128(src))
Source§fn vtrunc64x2(&mut self, dst: VReg, src: VReg)
fn vtrunc64x2(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_trunc(low128(src))
Source§fn ffloor32(&mut self, dst: FReg, src: FReg)
fn ffloor32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = ieee_floor(low32(src))
Source§fn vfloor32x4(&mut self, dst: VReg, src: VReg)
fn vfloor32x4(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_floor(low128(src))
Source§fn vfloor64x2(&mut self, dst: VReg, src: VReg)
fn vfloor64x2(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_floor(low128(src))
Source§fn fceil32(&mut self, dst: FReg, src: FReg)
fn fceil32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = ieee_ceil(low32(src))
Source§fn vceil32x4(&mut self, dst: VReg, src: VReg)
fn vceil32x4(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_ceil(low128(src))
Source§fn vceil64x2(&mut self, dst: VReg, src: VReg)
fn vceil64x2(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_ceil(low128(src))
Source§fn fnearest32(&mut self, dst: FReg, src: FReg)
fn fnearest32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = ieee_nearest(low32(src))
Source§fn fsqrt32(&mut self, dst: FReg, src: FReg)
fn fsqrt32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = ieee_sqrt(low32(src))
Source§fn vsqrt32x4(&mut self, dst: VReg, src: VReg)
fn vsqrt32x4(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_sqrt(low128(src))
Source§fn vsqrt64x2(&mut self, dst: VReg, src: VReg)
fn vsqrt64x2(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_sqrt(low128(src))
Source§fn fneg32(&mut self, dst: FReg, src: FReg)
fn fneg32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = -low32(src)
Source§fn fabs32(&mut self, dst: FReg, src: FReg)
fn fabs32(&mut self, dst: FReg, src: FReg)
decode
only.low32(dst) = |low32(src)|
Source§fn fadd64(&mut self, operands: BinaryOperands<FReg>)
fn fadd64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = src1 + src2
Source§fn fsub64(&mut self, operands: BinaryOperands<FReg>)
fn fsub64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = src1 - src2
Source§fn fmul64(&mut self, operands: BinaryOperands<FReg>)
fn fmul64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = src1 * src2
Source§fn fdiv64(&mut self, operands: BinaryOperands<FReg>)
fn fdiv64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = src1 / src2
Source§fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>)
fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 / src2
Source§fn fmaximum64(&mut self, operands: BinaryOperands<FReg>)
fn fmaximum64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = ieee_maximum(src1, src2)
Source§fn fminimum64(&mut self, operands: BinaryOperands<FReg>)
fn fminimum64(&mut self, operands: BinaryOperands<FReg>)
decode
only.dst = ieee_minimum(src1, src2)
Source§fn ftrunc64(&mut self, dst: FReg, src: FReg)
fn ftrunc64(&mut self, dst: FReg, src: FReg)
decode
only.dst = ieee_trunc(src)
Source§fn ffloor64(&mut self, dst: FReg, src: FReg)
fn ffloor64(&mut self, dst: FReg, src: FReg)
decode
only.dst = ieee_floor(src)
Source§fn fceil64(&mut self, dst: FReg, src: FReg)
fn fceil64(&mut self, dst: FReg, src: FReg)
decode
only.dst = ieee_ceil(src)
Source§fn fnearest64(&mut self, dst: FReg, src: FReg)
fn fnearest64(&mut self, dst: FReg, src: FReg)
decode
only.dst = ieee_nearest(src)
Source§fn vnearest32x4(&mut self, dst: VReg, src: VReg)
fn vnearest32x4(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_nearest(low128(src))
Source§fn vnearest64x2(&mut self, dst: VReg, src: VReg)
fn vnearest64x2(&mut self, dst: VReg, src: VReg)
decode
only.low128(dst) = ieee_nearest(low128(src))
Source§fn fsqrt64(&mut self, dst: FReg, src: FReg)
fn fsqrt64(&mut self, dst: FReg, src: FReg)
decode
only.dst = ieee_sqrt(src)
Source§fn vconst128(&mut self, dst: VReg, imm: u128)
fn vconst128(&mut self, dst: VReg, imm: u128)
decode
only.dst = imm
Source§fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>)
fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 + src2
Source§fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>)
fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 + src2
Source§fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>)
fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 + src2
Source§fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>)
fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 + src2
Source§fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>)
fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 + src2
Source§fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>)
fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 + src2
Source§fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>)
fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_add(src1, src2)
Source§fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>)
fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_add(src1, src2)
Source§fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>)
fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_add(src1, src2)
Source§fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>)
fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_add(src1, src2)
Source§fn vaddpairwisei16x8_s(&mut self, operands: BinaryOperands<VReg>)
fn vaddpairwisei16x8_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
Source§fn vaddpairwisei32x4_s(&mut self, operands: BinaryOperands<VReg>)
fn vaddpairwisei32x4_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
Source§fn vshli8x16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshli8x16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 << src2
Source§fn vshli16x8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshli16x8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 << src2
Source§fn vshli32x4(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshli32x4(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 << src2
Source§fn vshli64x2(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshli64x2(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 << src2
Source§fn vshri8x16_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri8x16_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(signed)Source§fn vshri16x8_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri16x8_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(signed)Source§fn vshri32x4_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri32x4_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(signed)Source§fn vshri64x2_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri64x2_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(signed)Source§fn vshri8x16_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri8x16_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(unsigned)Source§fn vshri16x8_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri16x8_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(unsigned)Source§fn vshri32x4_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri32x4_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(unsigned)Source§fn vshri64x2_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
fn vshri64x2_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)
decode
only.dst = src1 >> src2
(unsigned)Source§fn vsplatx8(&mut self, dst: VReg, src: XReg)
fn vsplatx8(&mut self, dst: VReg, src: XReg)
decode
only.dst = splat(low8(src))
Source§fn vsplatx16(&mut self, dst: VReg, src: XReg)
fn vsplatx16(&mut self, dst: VReg, src: XReg)
decode
only.dst = splat(low16(src))
Source§fn vsplatx32(&mut self, dst: VReg, src: XReg)
fn vsplatx32(&mut self, dst: VReg, src: XReg)
decode
only.dst = splat(low32(src))
Source§fn vsplatx64(&mut self, dst: VReg, src: XReg)
fn vsplatx64(&mut self, dst: VReg, src: XReg)
decode
only.dst = splat(src)
Source§fn vsplatf32(&mut self, dst: VReg, src: FReg)
fn vsplatf32(&mut self, dst: VReg, src: FReg)
decode
only.dst = splat(low32(src))
Source§fn vsplatf64(&mut self, dst: VReg, src: FReg)
fn vsplatf64(&mut self, dst: VReg, src: FReg)
decode
only.dst = splat(src)
Source§fn vload8x8_s_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload8x8_s_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.Source§fn vload8x8_u_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload8x8_u_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.Source§fn vload16x4le_s_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload16x4le_s_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.Source§fn vload16x4le_u_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload16x4le_u_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.Source§fn vload32x2le_s_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload32x2le_s_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.Source§fn vload32x2le_u_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
fn vload32x2le_u_offset32(&mut self, dst: VReg, ptr: XReg, offset: i32)
decode
only.Source§fn vband128(&mut self, operands: BinaryOperands<VReg>)
fn vband128(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 & src2
Source§fn vbor128(&mut self, operands: BinaryOperands<VReg>)
fn vbor128(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 | src2
Source§fn vbxor128(&mut self, operands: BinaryOperands<VReg>)
fn vbxor128(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 ^ src2
Source§fn vbnot128(&mut self, dst: VReg, src: VReg)
fn vbnot128(&mut self, dst: VReg, src: VReg)
decode
only.dst = !src
Source§fn vbitselect128(&mut self, dst: VReg, c: VReg, x: VReg, y: VReg)
fn vbitselect128(&mut self, dst: VReg, c: VReg, x: VReg, y: VReg)
decode
only.dst = (c & x) | (!c & y)
Source§fn vbitmask8x16(&mut self, dst: XReg, src: VReg)
fn vbitmask8x16(&mut self, dst: XReg, src: VReg)
decode
only.Source§fn vbitmask16x8(&mut self, dst: XReg, src: VReg)
fn vbitmask16x8(&mut self, dst: XReg, src: VReg)
decode
only.Source§fn vbitmask32x4(&mut self, dst: XReg, src: VReg)
fn vbitmask32x4(&mut self, dst: XReg, src: VReg)
decode
only.Source§fn vbitmask64x2(&mut self, dst: XReg, src: VReg)
fn vbitmask64x2(&mut self, dst: XReg, src: VReg)
decode
only.Source§fn valltrue8x16(&mut self, dst: XReg, src: VReg)
fn valltrue8x16(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn valltrue16x8(&mut self, dst: XReg, src: VReg)
fn valltrue16x8(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn valltrue32x4(&mut self, dst: XReg, src: VReg)
fn valltrue32x4(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn valltrue64x2(&mut self, dst: XReg, src: VReg)
fn valltrue64x2(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn vanytrue8x16(&mut self, dst: XReg, src: VReg)
fn vanytrue8x16(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn vanytrue16x8(&mut self, dst: XReg, src: VReg)
fn vanytrue16x8(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn vanytrue32x4(&mut self, dst: XReg, src: VReg)
fn vanytrue32x4(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn vanytrue64x2(&mut self, dst: XReg, src: VReg)
fn vanytrue64x2(&mut self, dst: XReg, src: VReg)
decode
only.dst
.Source§fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg)
fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg)
decode
only.f32_from_x32_s
)Source§fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg)
fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg)
decode
only.f32_from_x32_u
)Source§fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg)
fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg)
decode
only.f64_from_x64_s
)Source§fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg)
fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg)
decode
only.f64_from_x64_u
)Source§fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg)
fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg)
fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg)
fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg)
fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg)
fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg)
fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg)
fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg)
fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg)
fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg)
fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg)
fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg)
fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>)
fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.Source§fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>)
fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.Source§fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>)
fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.Source§fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>)
fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.Source§fn vfpromotelow(&mut self, dst: VReg, src: VReg)
fn vfpromotelow(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vfdemote(&mut self, dst: VReg, src: VReg)
fn vfdemote(&mut self, dst: VReg, src: VReg)
decode
only.Source§fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>)
fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 - src2
Source§fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>)
fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 - src2
Source§fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>)
fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 - src2
Source§fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>)
fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 - src2
Source§fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>)
fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 - src2
Source§fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>)
fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_sub(src1, src2)
Source§fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>)
fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_sub(src1, src2)
Source§fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>)
fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_sub(src1, src2)
Source§fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>)
fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = saturating_sub(src1, src2)
Source§fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>)
fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 * src2
Source§fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>)
fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 * src2
Source§fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>)
fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 * src2
Source§fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>)
fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 * src2
Source§fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>)
fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src1 * src2
Source§fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>)
fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = signed_saturate((src1 * src2 + (1 << (Q - 1))) >> Q)
Source§fn vpopcnt8x16(&mut self, dst: VReg, src: VReg)
fn vpopcnt8x16(&mut self, dst: VReg, src: VReg)
decode
only.dst = count_ones(src)
Source§fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8)
fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8)
decode
only.low32(dst) = zext(src[lane])
Source§fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8)
fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8)
decode
only.low32(dst) = zext(src[lane])
Source§fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8)
fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8)
decode
only.low32(dst) = src[lane]
Source§fn xextractv64x2(&mut self, dst: XReg, src: VReg, lane: u8)
fn xextractv64x2(&mut self, dst: XReg, src: VReg, lane: u8)
decode
only.dst = src[lane]
Source§fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8)
fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8)
decode
only.low32(dst) = src[lane]
Source§fn fextractv64x2(&mut self, dst: FReg, src: VReg, lane: u8)
fn fextractv64x2(&mut self, dst: FReg, src: VReg, lane: u8)
decode
only.dst = src[lane]
Source§fn vinsertx8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
fn vinsertx8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
decode
only.dst = src1; dst[lane] = src2
Source§fn vinsertx16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
fn vinsertx16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
decode
only.dst = src1; dst[lane] = src2
Source§fn vinsertx32(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
fn vinsertx32(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
decode
only.dst = src1; dst[lane] = src2
Source§fn vinsertx64(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
fn vinsertx64(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)
decode
only.dst = src1; dst[lane] = src2
Source§fn vinsertf32(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8)
fn vinsertf32(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8)
decode
only.dst = src1; dst[lane] = src2
Source§fn vinsertf64(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8)
fn vinsertf64(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8)
decode
only.dst = src1; dst[lane] = src2
Source§fn veq8x16(&mut self, operands: BinaryOperands<VReg>)
fn veq8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src == dst
Source§fn vneq8x16(&mut self, operands: BinaryOperands<VReg>)
fn vneq8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src != dst
Source§fn vslt8x16(&mut self, operands: BinaryOperands<VReg>)
fn vslt8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(signed)Source§fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>)
fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(signed)Source§fn vult8x16(&mut self, operands: BinaryOperands<VReg>)
fn vult8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(unsigned)Source§fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>)
fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(unsigned)Source§fn veq16x8(&mut self, operands: BinaryOperands<VReg>)
fn veq16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src == dst
Source§fn vneq16x8(&mut self, operands: BinaryOperands<VReg>)
fn vneq16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src != dst
Source§fn vslt16x8(&mut self, operands: BinaryOperands<VReg>)
fn vslt16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(signed)Source§fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>)
fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(signed)Source§fn vult16x8(&mut self, operands: BinaryOperands<VReg>)
fn vult16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(unsigned)Source§fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>)
fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(unsigned)Source§fn veq32x4(&mut self, operands: BinaryOperands<VReg>)
fn veq32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src == dst
Source§fn vneq32x4(&mut self, operands: BinaryOperands<VReg>)
fn vneq32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src != dst
Source§fn vslt32x4(&mut self, operands: BinaryOperands<VReg>)
fn vslt32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(signed)Source§fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>)
fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(signed)Source§fn vult32x4(&mut self, operands: BinaryOperands<VReg>)
fn vult32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(unsigned)Source§fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>)
fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(unsigned)Source§fn veq64x2(&mut self, operands: BinaryOperands<VReg>)
fn veq64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src == dst
Source§fn vneq64x2(&mut self, operands: BinaryOperands<VReg>)
fn vneq64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src != dst
Source§fn vslt64x2(&mut self, operands: BinaryOperands<VReg>)
fn vslt64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(signed)Source§fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>)
fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(signed)Source§fn vult64x2(&mut self, operands: BinaryOperands<VReg>)
fn vult64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src < dst
(unsigned)Source§fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>)
fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = src <= dst
(unsigned)Source§fn vneg8x16(&mut self, dst: VReg, src: VReg)
fn vneg8x16(&mut self, dst: VReg, src: VReg)
decode
only.dst = -src
Source§fn vneg16x8(&mut self, dst: VReg, src: VReg)
fn vneg16x8(&mut self, dst: VReg, src: VReg)
decode
only.dst = -src
Source§fn vneg32x4(&mut self, dst: VReg, src: VReg)
fn vneg32x4(&mut self, dst: VReg, src: VReg)
decode
only.dst = -src
Source§fn vneg64x2(&mut self, dst: VReg, src: VReg)
fn vneg64x2(&mut self, dst: VReg, src: VReg)
decode
only.dst = -src
Source§fn vnegf64x2(&mut self, dst: VReg, src: VReg)
fn vnegf64x2(&mut self, dst: VReg, src: VReg)
decode
only.dst = -src
Source§fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>)
fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = min(src1, src2)
(signed)Source§fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>)
fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = min(src1, src2)
(unsigned)Source§fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>)
fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = min(src1, src2)
(signed)Source§fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>)
fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = min(src1, src2)
(unsigned)Source§fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>)
fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = max(src1, src2)
(signed)Source§fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>)
fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = max(src1, src2)
(unsigned)Source§fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>)
fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = max(src1, src2)
(signed)Source§fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>)
fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = max(src1, src2)
(unsigned)Source§fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>)
fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = min(src1, src2)
(signed)Source§fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>)
fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = min(src1, src2)
(unsigned)Source§fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>)
fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = max(src1, src2)
(signed)Source§fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>)
fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = max(src1, src2)
(unsigned)Source§fn vabs8x16(&mut self, dst: VReg, src: VReg)
fn vabs8x16(&mut self, dst: VReg, src: VReg)
decode
only.dst = |src|
Source§fn vabs16x8(&mut self, dst: VReg, src: VReg)
fn vabs16x8(&mut self, dst: VReg, src: VReg)
decode
only.dst = |src|
Source§fn vabs32x4(&mut self, dst: VReg, src: VReg)
fn vabs32x4(&mut self, dst: VReg, src: VReg)
decode
only.dst = |src|
Source§fn vabs64x2(&mut self, dst: VReg, src: VReg)
fn vabs64x2(&mut self, dst: VReg, src: VReg)
decode
only.dst = |src|
Source§fn vabsf32x4(&mut self, dst: VReg, src: VReg)
fn vabsf32x4(&mut self, dst: VReg, src: VReg)
decode
only.dst = |src|
Source§fn vabsf64x2(&mut self, dst: VReg, src: VReg)
fn vabsf64x2(&mut self, dst: VReg, src: VReg)
decode
only.dst = |src|
Source§fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>)
fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = ieee_maximum(src1, src2)
Source§fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>)
fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = ieee_maximum(src1, src2)
Source§fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>)
fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = ieee_minimum(src1, src2)
Source§fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>)
fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = ieee_minimum(src1, src2)
Source§fn vshuffle(&mut self, dst: VReg, src1: VReg, src2: VReg, mask: u128)
fn vshuffle(&mut self, dst: VReg, src1: VReg, src2: VReg, mask: u128)
decode
only.dst = shuffle(src1, src2, mask)
Source§fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>)
fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = swizzle(src1, src2)
Source§fn vavground8x16(&mut self, operands: BinaryOperands<VReg>)
fn vavground8x16(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = (src1 + src2 + 1) // 2
Source§fn vavground16x8(&mut self, operands: BinaryOperands<VReg>)
fn vavground16x8(&mut self, operands: BinaryOperands<VReg>)
decode
only.dst = (src1 + src2 + 1) // 2
Source§impl<'a> OpVisitor for Disassembler<'a>
impl<'a> OpVisitor for Disassembler<'a>
Source§type BytecodeStream = SafeBytecodeStream<'a>
type BytecodeStream = SafeBytecodeStream<'a>
decode
only.Source§type Return = ()
type Return = ()
decode
only.Source§fn bytecode(&mut self) -> &mut Self::BytecodeStream
fn bytecode(&mut self) -> &mut Self::BytecodeStream
decode
only.Source§fn before_visit(&mut self)
fn before_visit(&mut self)
decode
only.Source§fn after_visit(&mut self)
fn after_visit(&mut self)
decode
only.Source§fn ret(&mut self)
fn ret(&mut self)
decode
only.lr
register.Source§fn call(&mut self, offset: PcRelOffset)
fn call(&mut self, offset: PcRelOffset)
decode
only.lr
register to the PC just after this instruction. Read moreSource§fn call1(&mut self, arg1: XReg, offset: PcRelOffset)
fn call1(&mut self, arg1: XReg, offset: PcRelOffset)
decode
only.call
, but also x0 = arg1
Source§fn call2(&mut self, arg1: XReg, arg2: XReg, offset: PcRelOffset)
fn call2(&mut self, arg1: XReg, arg2: XReg, offset: PcRelOffset)
decode
only.call
, but also x0, x1 = arg1, arg2
Source§fn call3(&mut self, arg1: XReg, arg2: XReg, arg3: XReg, offset: PcRelOffset)
fn call3(&mut self, arg1: XReg, arg2: XReg, arg3: XReg, offset: PcRelOffset)
decode
only.call
, but also x0, x1, x2 = arg1, arg2, arg3
Source§fn call4(
&mut self,
arg1: XReg,
arg2: XReg,
arg3: XReg,
arg4: XReg,
offset: PcRelOffset,
)
fn call4( &mut self, arg1: XReg, arg2: XReg, arg3: XReg, arg4: XReg, offset: PcRelOffset, )
decode
only.call
, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4
Source§fn call_indirect(&mut self, reg: XReg)
fn call_indirect(&mut self, reg: XReg)
decode
only.reg
and set lr
to the PC just
after this instruction.Source§fn jump(&mut self, offset: PcRelOffset)
fn jump(&mut self, offset: PcRelOffset)
decode
only.Source§fn xjump(&mut self, reg: XReg)
fn xjump(&mut self, reg: XReg)
decode
only.Source§fn br_if32(&mut self, cond: XReg, offset: PcRelOffset)
fn br_if32(&mut self, cond: XReg, offset: PcRelOffset)
decode
only.low32(cond)
contains a non-zero value.Source§fn br_if_not32(&mut self, cond: XReg, offset: PcRelOffset)
fn br_if_not32(&mut self, cond: XReg, offset: PcRelOffset)
decode
only.low32(cond)
contains a zero value.Source§fn br_if_xeq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xeq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a == b
.Source§fn br_if_xneq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xneq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a !=
b.Source§fn br_if_xslt32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xslt32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xslteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xslteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xult32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xult32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xulteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xulteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xeq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xeq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a == b
.Source§fn br_if_xneq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xneq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a !=
b.Source§fn br_if_xslt64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xslt64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xslteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xslteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xult64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xult64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xulteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
fn br_if_xulteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xeq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xeq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a == b
.Source§fn br_if_xeq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xeq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a == b
.Source§fn br_if_xneq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xneq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a !=
b.Source§fn br_if_xneq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xneq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a !=
b.Source§fn br_if_xslt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xslt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xslt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xslt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xsgt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xsgt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xsgt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xsgt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xslteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xslteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xslteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xslteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xsgteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xsgteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xsgteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xsgteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xult32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xult32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xult32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xult32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xulteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xulteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xulteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xulteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xugt32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xugt32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xugt32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xugt32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xugteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xugteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xugteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xugteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xeq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xeq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a == b
.Source§fn br_if_xeq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xeq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a == b
.Source§fn br_if_xneq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xneq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a !=
b.Source§fn br_if_xneq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xneq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a !=
b.Source§fn br_if_xslt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xslt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xslt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xslt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xsgt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xsgt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xsgt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xsgt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xslteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xslteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xslteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xslteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xsgteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
fn br_if_xsgteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xsgteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
fn br_if_xsgteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xult64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xult64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xult64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xult64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a < b
.Source§fn br_if_xulteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xulteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xulteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xulteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a <= b
.Source§fn br_if_xugt64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xugt64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xugt64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xugt64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a > b
.Source§fn br_if_xugteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
fn br_if_xugteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_if_xugteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
fn br_if_xugteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)
decode
only.a >= b
.Source§fn br_table32(&mut self, idx: XReg, amt: u32)
fn br_table32(&mut self, idx: XReg, amt: u32)
decode
only.low32(idx)
. Read moreSource§fn xmov(&mut self, dst: XReg, src: XReg)
fn xmov(&mut self, dst: XReg, src: XReg)
decode
only.x
registers.Source§fn xconst8(&mut self, dst: XReg, imm: i8)
fn xconst8(&mut self, dst: XReg, imm: i8)
decode
only.dst = sign_extend(imm8)
.Source§fn xconst16(&mut self, dst: XReg, imm: i16)
fn xconst16(&mut self, dst: XReg, imm: i16)
decode
only.dst = sign_extend(imm16)
.Source§fn xconst32(&mut self, dst: XReg, imm: i32)
fn xconst32(&mut self, dst: XReg, imm: i32)
decode
only.dst = sign_extend(imm32)
.Source§fn xconst64(&mut self, dst: XReg, imm: i64)
fn xconst64(&mut self, dst: XReg, imm: i64)
decode
only.dst = imm64
.Source§fn xadd32(&mut self, operands: BinaryOperands<XReg>)
fn xadd32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) + low32(src2)
. Read moreSource§fn xadd32_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
fn xadd32_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
decode
only.xadd32
but src2
is a zero-extended 8-bit immediate.Source§fn xadd32_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
fn xadd32_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
decode
only.xadd32
but src2
is a 32-bit immediate.Source§fn xadd64(&mut self, operands: BinaryOperands<XReg>)
fn xadd64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 + src2
.Source§fn xadd64_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
fn xadd64_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
decode
only.xadd64
but src2
is a zero-extended 8-bit immediate.Source§fn xadd64_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
fn xadd64_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
decode
only.xadd64
but src2
is a zero-extended 32-bit immediate.Source§fn xsub32(&mut self, operands: BinaryOperands<XReg>)
fn xsub32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) - low32(src2)
. Read moreSource§fn xsub32_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
fn xsub32_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
decode
only.xsub32
but src2
is a zero-extended 8-bit immediate.Source§fn xsub32_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
fn xsub32_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
decode
only.xsub32
but src2
is a 32-bit immediate.Source§fn xsub64(&mut self, operands: BinaryOperands<XReg>)
fn xsub64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 - src2
.Source§fn xsub64_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
fn xsub64_u8(&mut self, dst: XReg, src1: XReg, src2: u8)
decode
only.xsub64
but src2
is a zero-extended 8-bit immediate.Source§fn xsub64_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
fn xsub64_u32(&mut self, dst: XReg, src1: XReg, src2: u32)
decode
only.xsub64
but src2
is a zero-extended 32-bit immediate.Source§fn xmul32(&mut self, operands: BinaryOperands<XReg>)
fn xmul32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) * low32(src2)
Source§fn xmul32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xmul32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xmul32
but src2
is a sign-extended 8-bit immediate.Source§fn xmul32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xmul32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xmul32
but src2
is a sign-extended 32-bit immediate.Source§fn xmul64(&mut self, operands: BinaryOperands<XReg>)
fn xmul64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 * src2
Source§fn xmul64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xmul64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xmul64
but src2
is a sign-extended 8-bit immediate.Source§fn xmul64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xmul64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xmul64
but src2
is a sign-extended 32-bit immediate.Source§fn xctz32(&mut self, dst: XReg, src: XReg)
fn xctz32(&mut self, dst: XReg, src: XReg)
decode
only.low32(dst) = trailing_zeros(low32(src))
Source§fn xctz64(&mut self, dst: XReg, src: XReg)
fn xctz64(&mut self, dst: XReg, src: XReg)
decode
only.dst = trailing_zeros(src)
Source§fn xclz32(&mut self, dst: XReg, src: XReg)
fn xclz32(&mut self, dst: XReg, src: XReg)
decode
only.low32(dst) = leading_zeros(low32(src))
Source§fn xclz64(&mut self, dst: XReg, src: XReg)
fn xclz64(&mut self, dst: XReg, src: XReg)
decode
only.dst = leading_zeros(src)
Source§fn xpopcnt32(&mut self, dst: XReg, src: XReg)
fn xpopcnt32(&mut self, dst: XReg, src: XReg)
decode
only.low32(dst) = count_ones(low32(src))
Source§fn xpopcnt64(&mut self, dst: XReg, src: XReg)
fn xpopcnt64(&mut self, dst: XReg, src: XReg)
decode
only.dst = count_ones(src)
Source§fn xrotl32(&mut self, operands: BinaryOperands<XReg>)
fn xrotl32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = rotate_left(low32(src1), low32(src2))
Source§fn xrotl64(&mut self, operands: BinaryOperands<XReg>)
fn xrotl64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = rotate_left(src1, src2)
Source§fn xrotr32(&mut self, operands: BinaryOperands<XReg>)
fn xrotr32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = rotate_right(low32(src1), low32(src2))
Source§fn xrotr64(&mut self, operands: BinaryOperands<XReg>)
fn xrotr64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = rotate_right(src1, src2)
Source§fn xshl32(&mut self, operands: BinaryOperands<XReg>)
fn xshl32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) << low5(src2)
Source§fn xshr32_s(&mut self, operands: BinaryOperands<XReg>)
fn xshr32_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) >> low5(src2)
Source§fn xshr32_u(&mut self, operands: BinaryOperands<XReg>)
fn xshr32_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) >> low5(src2)
Source§fn xshl64(&mut self, operands: BinaryOperands<XReg>)
fn xshl64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 << low6(src2)
Source§fn xshr64_s(&mut self, operands: BinaryOperands<XReg>)
fn xshr64_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 >> low6(src2)
Source§fn xshr64_u(&mut self, operands: BinaryOperands<XReg>)
fn xshr64_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 >> low6(src2)
Source§fn xshl32_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
fn xshl32_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
decode
only.low32(dst) = low32(src1) << low5(src2)
Source§fn xshr32_s_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
fn xshr32_s_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
decode
only.low32(dst) = low32(src1) >> low5(src2)
Source§fn xshr32_u_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
fn xshr32_u_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
decode
only.low32(dst) = low32(src1) >> low5(src2)
Source§fn xshl64_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
fn xshl64_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
decode
only.dst = src1 << low6(src2)
Source§fn xshr64_s_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
fn xshr64_s_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
decode
only.dst = src1 >> low6(src2)
Source§fn xshr64_u_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
fn xshr64_u_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)
decode
only.dst = src1 >> low6(src2)
Source§fn xneg32(&mut self, dst: XReg, src: XReg)
fn xneg32(&mut self, dst: XReg, src: XReg)
decode
only.low32(dst) = -low32(src)
Source§fn xeq64(&mut self, operands: BinaryOperands<XReg>)
fn xeq64(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = src1 == src2
Source§fn xneq64(&mut self, operands: BinaryOperands<XReg>)
fn xneq64(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = src1 != src2
Source§fn xslt64(&mut self, operands: BinaryOperands<XReg>)
fn xslt64(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = src1 < src2
(signed)Source§fn xslteq64(&mut self, operands: BinaryOperands<XReg>)
fn xslteq64(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = src1 <= src2
(signed)Source§fn xult64(&mut self, operands: BinaryOperands<XReg>)
fn xult64(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = src1 < src2
(unsigned)Source§fn xulteq64(&mut self, operands: BinaryOperands<XReg>)
fn xulteq64(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = src1 <= src2
(unsigned)Source§fn xeq32(&mut self, operands: BinaryOperands<XReg>)
fn xeq32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) == low32(src2)
Source§fn xneq32(&mut self, operands: BinaryOperands<XReg>)
fn xneq32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) != low32(src2)
Source§fn xslt32(&mut self, operands: BinaryOperands<XReg>)
fn xslt32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) < low32(src2)
(signed)Source§fn xslteq32(&mut self, operands: BinaryOperands<XReg>)
fn xslteq32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) <= low32(src2)
(signed)Source§fn xult32(&mut self, operands: BinaryOperands<XReg>)
fn xult32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) < low32(src2)
(unsigned)Source§fn xulteq32(&mut self, operands: BinaryOperands<XReg>)
fn xulteq32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) <= low32(src2)
(unsigned)Source§fn xload8_u32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload8_u32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = zext(*(ptr + offset))
Source§fn xload8_s32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload8_s32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = sext(*(ptr + offset))
Source§fn xload16le_u32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload16le_u32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = zext(*(ptr + offset))
Source§fn xload16le_s32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload16le_s32_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = sext(*(ptr + offset))
Source§fn xload32le_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload32le_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.low32(dst) = *(ptr + offset)
Source§fn xload8_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload8_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload8_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload8_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload16le_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload16le_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload16le_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload16le_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload32le_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload32le_u64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload32le_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload32le_s64_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload64le_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
fn xload64le_offset32(&mut self, dst: XReg, ptr: XReg, offset: i32)
decode
only.dst = *(ptr + offset)
Source§fn xstore8_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore8_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low8(src)
Source§fn xstore16le_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore16le_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low16(src)
Source§fn xstore32le_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore32le_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low32(src)
Source§fn xstore64le_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
fn xstore64le_offset32(&mut self, ptr: XReg, offset: i32, src: XReg)
decode
only.*(ptr + offset) = low64(src)
Source§fn xload8_u32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload8_u32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.low32(dst) = zext(*(ptr + offset))
Source§fn xload8_s32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload8_s32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.low32(dst) = sext(*(ptr + offset))
Source§fn xload16le_u32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload16le_u32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.low32(dst) = zext(*(ptr + offset))
Source§fn xload16le_s32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload16le_s32_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.low32(dst) = sext(*(ptr + offset))
Source§fn xload32le_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload32le_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.low32(dst) = *(ptr + offset)
Source§fn xload8_u64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload8_u64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload8_s64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload8_s64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload16le_u64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload16le_u64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload16le_s64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload16le_s64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload32le_u64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload32le_u64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = zext(*(ptr + offset))
Source§fn xload32le_s64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload32le_s64_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = sext(*(ptr + offset))
Source§fn xload64le_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
fn xload64le_offset8(&mut self, dst: XReg, ptr: XReg, offset: u8)
decode
only.dst = *(ptr + offset)
Source§fn xstore8_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
fn xstore8_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
decode
only.*(ptr + offset) = low8(src)
Source§fn xstore16le_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
fn xstore16le_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
decode
only.*(ptr + offset) = low16(src)
Source§fn xstore32le_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
fn xstore32le_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
decode
only.*(ptr + offset) = low32(src)
Source§fn xstore64le_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
fn xstore64le_offset8(&mut self, ptr: XReg, offset: u8, src: XReg)
decode
only.*(ptr + offset) = low64(src)
Source§fn push_frame_save(&mut self, amt: u32, regs: RegSet<XReg>)
fn push_frame_save(&mut self, amt: u32, regs: RegSet<XReg>)
decode
only.Source§fn pop_frame_restore(&mut self, amt: u32, regs: RegSet<XReg>)
fn pop_frame_restore(&mut self, amt: u32, regs: RegSet<XReg>)
decode
only.Inverse of push_frame_save
. Restores regs
from the top of
the stack, then runs stack_free32 amt
, then runs pop_frame
.Source§fn stack_alloc32(&mut self, amt: u32)
fn stack_alloc32(&mut self, amt: u32)
decode
only.sp = sp.checked_sub(amt)
Source§fn zext8(&mut self, dst: XReg, src: XReg)
fn zext8(&mut self, dst: XReg, src: XReg)
decode
only.dst = zext(low8(src))
Source§fn zext16(&mut self, dst: XReg, src: XReg)
fn zext16(&mut self, dst: XReg, src: XReg)
decode
only.dst = zext(low16(src))
Source§fn zext32(&mut self, dst: XReg, src: XReg)
fn zext32(&mut self, dst: XReg, src: XReg)
decode
only.dst = zext(low32(src))
Source§fn sext8(&mut self, dst: XReg, src: XReg)
fn sext8(&mut self, dst: XReg, src: XReg)
decode
only.dst = sext(low8(src))
Source§fn sext16(&mut self, dst: XReg, src: XReg)
fn sext16(&mut self, dst: XReg, src: XReg)
decode
only.dst = sext(low16(src))
Source§fn sext32(&mut self, dst: XReg, src: XReg)
fn sext32(&mut self, dst: XReg, src: XReg)
decode
only.dst = sext(low32(src))
Source§fn xabs32(&mut self, dst: XReg, src: XReg)
fn xabs32(&mut self, dst: XReg, src: XReg)
decode
only.low32(dst) = |low32(src)|
Source§fn xdiv32_s(&mut self, operands: BinaryOperands<XReg>)
fn xdiv32_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) / low32(src2)
(signed)Source§fn xdiv64_s(&mut self, operands: BinaryOperands<XReg>)
fn xdiv64_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 / src2
(signed)Source§fn xdiv32_u(&mut self, operands: BinaryOperands<XReg>)
fn xdiv32_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) / low32(src2)
(unsigned)Source§fn xdiv64_u(&mut self, operands: BinaryOperands<XReg>)
fn xdiv64_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 / src2
(unsigned)Source§fn xrem32_s(&mut self, operands: BinaryOperands<XReg>)
fn xrem32_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) % low32(src2)
(signed)Source§fn xrem64_s(&mut self, operands: BinaryOperands<XReg>)
fn xrem64_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 % src2
(signed)Source§fn xrem32_u(&mut self, operands: BinaryOperands<XReg>)
fn xrem32_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) % low32(src2)
(unsigned)Source§fn xrem64_u(&mut self, operands: BinaryOperands<XReg>)
fn xrem64_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 % src2
(unsigned)Source§fn xband32(&mut self, operands: BinaryOperands<XReg>)
fn xband32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) & low32(src2)
Source§fn xband32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xband32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xband32
but src2
is a sign-extended 8-bit immediate.Source§fn xband32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xband32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xband32
but src2
is a sign-extended 32-bit immediate.Source§fn xband64(&mut self, operands: BinaryOperands<XReg>)
fn xband64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 & src2
Source§fn xband64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xband64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xband64
but src2
is a sign-extended 8-bit immediate.Source§fn xband64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xband64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xband64
but src2
is a sign-extended 32-bit immediate.Source§fn xbor32(&mut self, operands: BinaryOperands<XReg>)
fn xbor32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) | low32(src2)
Source§fn xbor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xbor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xbor32
but src2
is a sign-extended 8-bit immediate.Source§fn xbor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xbor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xbor32
but src2
is a sign-extended 32-bit immediate.Source§fn xbor64(&mut self, operands: BinaryOperands<XReg>)
fn xbor64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 | src2
Source§fn xbor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xbor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xbor64
but src2
is a sign-extended 8-bit immediate.Source§fn xbor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xbor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xbor64
but src2
is a sign-extended 32-bit immediate.Source§fn xbxor32(&mut self, operands: BinaryOperands<XReg>)
fn xbxor32(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = low32(src1) ^ low32(src2)
Source§fn xbxor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xbxor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xbxor32
but src2
is a sign-extended 8-bit immediate.Source§fn xbxor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xbxor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xbxor32
but src2
is a sign-extended 32-bit immediate.Source§fn xbxor64(&mut self, operands: BinaryOperands<XReg>)
fn xbxor64(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = src1 ^ src2
Source§fn xbxor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
fn xbxor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)
decode
only.xbxor64
but src2
is a sign-extended 8-bit immediate.Source§fn xbxor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
fn xbxor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)
decode
only.xbxor64
but src2
is a sign-extended 32-bit immediate.Source§fn xbnot32(&mut self, dst: XReg, src: XReg)
fn xbnot32(&mut self, dst: XReg, src: XReg)
decode
only.low32(dst) = !low32(src)
Source§fn xbnot64(&mut self, dst: XReg, src: XReg)
fn xbnot64(&mut self, dst: XReg, src: XReg)
decode
only.dst = !src
Source§fn xmin32_u(&mut self, operands: BinaryOperands<XReg>)
fn xmin32_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = min(low32(src1), low32(src2))
(unsigned)Source§fn xmin32_s(&mut self, operands: BinaryOperands<XReg>)
fn xmin32_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = min(low32(src1), low32(src2))
(signed)Source§fn xmax32_u(&mut self, operands: BinaryOperands<XReg>)
fn xmax32_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = max(low32(src1), low32(src2))
(unsigned)Source§fn xmax32_s(&mut self, operands: BinaryOperands<XReg>)
fn xmax32_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.low32(dst) = max(low32(src1), low32(src2))
(signed)Source§fn xmin64_u(&mut self, operands: BinaryOperands<XReg>)
fn xmin64_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = min(src1, src2)
(unsigned)Source§fn xmin64_s(&mut self, operands: BinaryOperands<XReg>)
fn xmin64_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = min(src1, src2)
(signed)Source§fn xmax64_u(&mut self, operands: BinaryOperands<XReg>)
fn xmax64_u(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = max(src1, src2)
(unsigned)Source§fn xmax64_s(&mut self, operands: BinaryOperands<XReg>)
fn xmax64_s(&mut self, operands: BinaryOperands<XReg>)
decode
only.dst = max(src1, src2)
(signed)