pulley_interpreter::decode

Struct SequencedVisitor

Source
pub struct SequencedVisitor<'a, F, V1, V2> { /* private fields */ }
Available on crate feature decode only.
Expand description

An OpVisitor combinator to sequence one visitor and then another.

Implementations§

Source§

impl<'a, F, V1, V2> SequencedVisitor<'a, F, V1, V2>

Source

pub fn new(join: F, v1: &'a mut V1, v2: &'a mut V2) -> Self

Create a new sequenced visitor.

The given join function is used to combine the results of each sub-visitor so that it can be returned from this combined visitor.

Trait Implementations§

Source§

impl<F, T, V1, V2> ExtendedOpVisitor for SequencedVisitor<'_, F, V1, V2>
where F: FnMut(V1::Return, V2::Return) -> T, V1: ExtendedOpVisitor, V2: ExtendedOpVisitor<BytecodeStream = V1::BytecodeStream>,

Source§

fn trap(&mut self) -> Self::Return

Raise a trap.

Source§

fn nop(&mut self) -> Self::Return

Do nothing.

Source§

fn call_indirect_host(&mut self, id: u8) -> Self::Return

A special opcode to halt interpreter execution and yield control back to the host.

This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It’s up to the embedder to determine what to do with the id and the current state of registers and the stack.

In Wasmtime this is used to implement interpreter-to-host calls. This is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function are in following parameter positions (e.g. x1, x2, …). The results of the host call are then store in x0.

Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.

Source§

fn xmov_fp(&mut self, dst: XReg) -> Self::Return

Gets the special “fp” register and moves it into dst.

Source§

fn xmov_lr(&mut self, dst: XReg) -> Self::Return

Gets the special “lr” register and moves it into dst.

Source§

fn bswap32(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = byteswap(low32(src))

Source§

fn bswap64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = byteswap(src)

Source§

fn xadd32_uoverflow_trap( &mut self, operands: BinaryOperands<XReg>, ) -> Self::Return

32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified. Traps if the addition overflows.

Source§

fn xadd64_uoverflow_trap( &mut self, operands: BinaryOperands<XReg>, ) -> Self::Return

64-bit checked unsigned addition: dst = src1 + src2.

Source§

fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = high64(src1 * src2) (signed)

Source§

fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = high64(src1 * src2) (unsigned)

Source§

fn xbmask32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = if low32(src) == 0 { 0 } else { -1 }

Source§

fn xbmask64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = if src == 0 { 0 } else { -1 }

Source§

fn xpush32(&mut self, src: XReg) -> Self::Return

*sp = low32(src); sp = sp.checked_add(4)

Source§

fn xpush32_many(&mut self, srcs: RegSet<XReg>) -> Self::Return

for src in srcs { xpush32 src }

Source§

fn xpush64(&mut self, src: XReg) -> Self::Return

*sp = src; sp = sp.checked_add(8)

Source§

fn xpush64_many(&mut self, srcs: RegSet<XReg>) -> Self::Return

for src in srcs { xpush64 src }

Source§

fn xpop32(&mut self, dst: XReg) -> Self::Return

*dst = *sp; sp -= 4

Source§

fn xpop32_many(&mut self, dsts: RegSet<XReg>) -> Self::Return

for dst in dsts.rev() { xpop32 dst }

Source§

fn xpop64(&mut self, dst: XReg) -> Self::Return

*dst = *sp; sp -= 8

Source§

fn xpop64_many(&mut self, dsts: RegSet<XReg>) -> Self::Return

for dst in dsts.rev() { xpop64 dst }

Source§

fn xload16be_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload16be_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload32be_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload32be_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload64be_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source§

fn xstore16be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low16(src)

Source§

fn xstore32be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source§

fn xstore64be_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low64(src)

Source§

fn fload32be_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source§

fn fload64be_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source§

fn fstore32be_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source§

fn fstore64be_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = src

Source§

fn fload32le_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source§

fn fload64le_offset32( &mut self, dst: FReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source§

fn fstore32le_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source§

fn fstore64le_offset32( &mut self, ptr: XReg, offset: i32, src: FReg, ) -> Self::Return

*(ptr + offset) = src

Source§

fn vload128le_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source§

fn vstore128le_offset32( &mut self, ptr: XReg, offset: i32, src: VReg, ) -> Self::Return

*(ptr + offset) = src

Source§

fn fmov(&mut self, dst: FReg, src: FReg) -> Self::Return

Move between f registers.

Source§

fn vmov(&mut self, dst: VReg, src: VReg) -> Self::Return

Move between v registers.

Source§

fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = bitcast low32(src) as i32

Source§

fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = bitcast src as i64

Source§

fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = bitcast low32(src) as f32

Source§

fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = bitcast src as f64

Source§

fn fconst32(&mut self, dst: FReg, bits: u32) -> Self::Return

low32(dst) = bits

Source§

fn fconst64(&mut self, dst: FReg, bits: u64) -> Self::Return

dst = bits

Source§

fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 == src2)

Source§

fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 != src2)

Source§

fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 < src2)

Source§

fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 <= src2)

Source§

fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 == src2)

Source§

fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 != src2)

Source§

fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 < src2)

Source§

fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return

low32(dst) = zext(src1 <= src2)

Source§

fn fselect32( &mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg, ) -> Self::Return

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

Source§

fn fselect64( &mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg, ) -> Self::Return

dst = low32(cond) ? if_nonzero : if_zero

Source§

fn f32_from_f64(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = demote(src)

Source§

fn f64_from_f32(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = promote(low32(src))

Source§

fn f32_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_signed(low32(src))

Source§

fn f32_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_unsigned(low32(src))

Source§

fn f32_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_signed(src)

Source§

fn f32_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return

low32(dst) = checked_f32_from_unsigned(src)

Source§

fn f64_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_signed(low32(src))

Source§

fn f64_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_unsigned(low32(src))

Source§

fn f64_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_signed(src)

Source§

fn f64_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return

dst = checked_f64_from_unsigned(src)

Source§

fn x32_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_signed_from_f32(low32(src))

Source§

fn x32_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_unsigned_from_f32(low32(src))

Source§

fn x32_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_signed_from_f64(src)

Source§

fn x32_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = checked_unsigned_from_f64(src)

Source§

fn x64_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_signed_from_f32(low32(src))

Source§

fn x64_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_unsigned_from_f32(low32(src))

Source§

fn x64_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_signed_from_f64(src)

Source§

fn x64_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = checked_unsigned_from_f64(src)

Source§

fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_signed_from_f32(low32(src))

Source§

fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_unsigned_from_f32(low32(src))

Source§

fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_signed_from_f64(src)

Source§

fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

low32(dst) = saturating_unsigned_from_f64(src)

Source§

fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_signed_from_f32(low32(src))

Source§

fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_unsigned_from_f32(low32(src))

Source§

fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_signed_from_f64(src)

Source§

fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return

dst = saturating_unsigned_from_f64(src)

Source§

fn fcopysign32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = copysign(low32(src1), low32(src2))

Source§

fn fcopysign64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = copysign(src1, src2)

Source§

fn fadd32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) + low32(src2)

Source§

fn fsub32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) - low32(src2)

Source§

fn fmul32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) * low32(src2)

Source§

fn fdiv32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = low32(src1) / low32(src2)

Source§

fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

low128(dst) = low128(src1) / low128(src2)

Source§

fn fmaximum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = ieee_maximum(low32(src1), low32(src2))

Source§

fn fminimum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

low32(dst) = ieee_minimum(low32(src1), low32(src2))

Source§

fn ftrunc32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_trunc(low32(src))

Source§

fn vtrunc32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_trunc(low128(src))

Source§

fn vtrunc64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_trunc(low128(src))

Source§

fn ffloor32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_floor(low32(src))

Source§

fn vfloor32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_floor(low128(src))

Source§

fn vfloor64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_floor(low128(src))

Source§

fn fceil32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_ceil(low32(src))

Source§

fn vceil32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_ceil(low128(src))

Source§

fn vceil64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_ceil(low128(src))

Source§

fn fnearest32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_nearest(low32(src))

Source§

fn fsqrt32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = ieee_sqrt(low32(src))

Source§

fn vsqrt32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_sqrt(low128(src))

Source§

fn vsqrt64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_sqrt(low128(src))

Source§

fn fneg32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = -low32(src)

Source§

fn fabs32(&mut self, dst: FReg, src: FReg) -> Self::Return

low32(dst) = |low32(src)|

Source§

fn fadd64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 + src2

Source§

fn fsub64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 - src2

Source§

fn fmul64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 * src2

Source§

fn fdiv64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = src1 / src2

Source§

fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 / src2

Source§

fn fmaximum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = ieee_maximum(src1, src2)

Source§

fn fminimum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return

dst = ieee_minimum(src1, src2)

Source§

fn ftrunc64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_trunc(src)

Source§

fn ffloor64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_floor(src)

Source§

fn fceil64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_ceil(src)

Source§

fn fnearest64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_nearest(src)

Source§

fn vnearest32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_nearest(low128(src))

Source§

fn vnearest64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

low128(dst) = ieee_nearest(low128(src))

Source§

fn fsqrt64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = ieee_sqrt(src)

Source§

fn fneg64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = -src

Source§

fn fabs64(&mut self, dst: FReg, src: FReg) -> Self::Return

dst = |src|

Source§

fn vconst128(&mut self, dst: VReg, imm: u128) -> Self::Return

dst = imm

Source§

fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source§

fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source§

fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source§

fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source§

fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source§

fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 + src2

Source§

fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source§

fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source§

fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source§

fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_add(src1, src2)

Source§

fn vaddpairwisei16x8_s( &mut self, operands: BinaryOperands<VReg>, ) -> Self::Return

dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]

Source§

fn vaddpairwisei32x4_s( &mut self, operands: BinaryOperands<VReg>, ) -> Self::Return

dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]

Source§

fn vshli8x16( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source§

fn vshli16x8( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source§

fn vshli32x4( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source§

fn vshli64x2( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 << src2

Source§

fn vshri8x16_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source§

fn vshri16x8_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source§

fn vshri32x4_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source§

fn vshri64x2_s( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (signed)

Source§

fn vshri8x16_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source§

fn vshri16x8_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source§

fn vshri32x4_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source§

fn vshri64x2_u( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, ) -> Self::Return

dst = src1 >> src2 (unsigned)

Source§

fn vsplatx8(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(low8(src))

Source§

fn vsplatx16(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(low16(src))

Source§

fn vsplatx32(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(low32(src))

Source§

fn vsplatx64(&mut self, dst: VReg, src: XReg) -> Self::Return

dst = splat(src)

Source§

fn vsplatf32(&mut self, dst: VReg, src: FReg) -> Self::Return

dst = splat(low32(src))

Source§

fn vsplatf64(&mut self, dst: VReg, src: FReg) -> Self::Return

dst = splat(src)

Source§

fn vload8x8_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as i8x8 and sign-extend to i16x8.

Source§

fn vload8x8_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as u8x8 and zero-extend to i16x8.

Source§

fn vload16x4le_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as i16x4 and sign-extend to i32x4.

Source§

fn vload16x4le_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as u16x4 and zero-extend to i32x4.

Source§

fn vload32x2le_s_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as i32x2 and sign-extend to i64x2.

Source§

fn vload32x2le_u_offset32( &mut self, dst: VReg, ptr: XReg, offset: i32, ) -> Self::Return

Load the 64-bit source as u32x2 and zero-extend to i64x2.

Source§

fn vband128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 & src2

Source§

fn vbor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 | src2

Source§

fn vbxor128(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 ^ src2

Source§

fn vbnot128(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = !src

Source§

fn vbitselect128( &mut self, dst: VReg, c: VReg, x: VReg, y: VReg, ) -> Self::Return

dst = (c & x) | (!c & y)

Source§

fn vbitmask8x16(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source§

fn vbitmask16x8(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source§

fn vbitmask32x4(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source§

fn vbitmask64x2(&mut self, dst: XReg, src: VReg) -> Self::Return

Collect high bits of each lane into the low 32-bits of the destination.

Source§

fn valltrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source§

fn valltrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source§

fn valltrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source§

fn valltrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether all lanes are nonzero in dst.

Source§

fn vanytrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source§

fn vanytrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source§

fn vanytrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source§

fn vanytrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return

Store whether any lanes are nonzero in dst.

Source§

fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f32_from_x32_s)

Source§

fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f32_from_x32_u)

Source§

fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f64_from_x64_s)

Source§

fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Int-to-float conversion (same as f64_from_x64_u)

Source§

fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as signed, to twice the width.

Source§

fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as unsigned, to twice the width.

Source§

fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as signed, to twice the width.

Source§

fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as unsigned, to twice the width.

Source§

fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as signed, to twice the width.

Source§

fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the low lanes of the input vector, as unsigned, to twice the width.

Source§

fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as signed, to twice the width.

Source§

fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as unsigned, to twice the width.

Source§

fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as signed, to twice the width.

Source§

fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as unsigned, to twice the width.

Source§

fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as signed, to twice the width.

Source§

fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return

Widens the high lanes of the input vector, as unsigned, to twice the width.

Source§

fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

Source§

fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

Source§

fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

Source§

fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

Source§

fn vfpromotelow(&mut self, dst: VReg, src: VReg) -> Self::Return

Promotes the low two lanes of the f32x4 input to f64x2.

Source§

fn vfdemote(&mut self, dst: VReg, src: VReg) -> Self::Return

Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.

Source§

fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source§

fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source§

fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source§

fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source§

fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 - src2

Source§

fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source§

fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source§

fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source§

fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = saturating_sub(src1, src2)

Source§

fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source§

fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source§

fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source§

fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source§

fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src1 * src2

Source§

fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)

Source§

fn vpopcnt8x16(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = count_ones(src)

Source§

fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = zext(src[lane])

Source§

fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = zext(src[lane])

Source§

fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = src[lane]

Source§

fn xextractv64x2(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return

dst = src[lane]

Source§

fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return

low32(dst) = src[lane]

Source§

fn fextractv64x2(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return

dst = src[lane]

Source§

fn vinsertx8( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source§

fn vinsertx16( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source§

fn vinsertx32( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source§

fn vinsertx64( &mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source§

fn vinsertf32( &mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source§

fn vinsertf64( &mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8, ) -> Self::Return

dst = src1; dst[lane] = src2

Source§

fn veq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src == dst

Source§

fn vneq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src != dst

Source§

fn vslt8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (signed)

Source§

fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (signed)

Source§

fn vult8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (unsigned)

Source§

fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (unsigned)

Source§

fn veq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src == dst

Source§

fn vneq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src != dst

Source§

fn vslt16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (signed)

Source§

fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (signed)

Source§

fn vult16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (unsigned)

Source§

fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (unsigned)

Source§

fn veq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src == dst

Source§

fn vneq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src != dst

Source§

fn vslt32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (signed)

Source§

fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (signed)

Source§

fn vult32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (unsigned)

Source§

fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (unsigned)

Source§

fn veq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src == dst

Source§

fn vneq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src != dst

Source§

fn vslt64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (signed)

Source§

fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (signed)

Source§

fn vult64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src < dst (unsigned)

Source§

fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = src <= dst (unsigned)

Source§

fn vneg8x16(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source§

fn vneg16x8(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source§

fn vneg32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source§

fn vneg64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source§

fn vnegf64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = -src

Source§

fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source§

fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source§

fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source§

fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source§

fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source§

fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source§

fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source§

fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source§

fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source§

fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source§

fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source§

fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source§

fn vabs8x16(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source§

fn vabs16x8(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source§

fn vabs32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source§

fn vabs64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source§

fn vabsf32x4(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source§

fn vabsf64x2(&mut self, dst: VReg, src: VReg) -> Self::Return

dst = |src|

Source§

fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_maximum(src1, src2)

Source§

fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_maximum(src1, src2)

Source§

fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_minimum(src1, src2)

Source§

fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = ieee_minimum(src1, src2)

Source§

fn vshuffle( &mut self, dst: VReg, src1: VReg, src2: VReg, mask: u128, ) -> Self::Return

dst = shuffle(src1, src2, mask)

Source§

fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = swizzle(src1, src2)

Source§

fn vavground8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = (src1 + src2 + 1) // 2

Source§

fn vavground16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return

dst = (src1 + src2 + 1) // 2

Source§

impl<F, T, V1, V2> OpVisitor for SequencedVisitor<'_, F, V1, V2>
where F: FnMut(V1::Return, V2::Return) -> T, V1: OpVisitor, V2: OpVisitor<BytecodeStream = V1::BytecodeStream>,

Source§

fn ret(&mut self) -> Self::Return

Transfer control to the address in the lr register.

Source§

fn call(&mut self, offset: PcRelOffset) -> Self::Return

Transfer control to the PC at the given offset and set the lr register to the PC just after this instruction.

This instruction generally assumes that the Pulley ABI is being respected where arguments are in argument registers (starting at x0 for integer arguments) and results are in result registers. This instruction itself assume that all arguments are already in their registers. Subsequent instructions below enable moving arguments into the correct registers as part of the same call instruction.

Source§

fn call1(&mut self, arg1: XReg, offset: PcRelOffset) -> Self::Return

Like call, but also x0 = arg1

Source§

fn call2(&mut self, arg1: XReg, arg2: XReg, offset: PcRelOffset) -> Self::Return

Like call, but also x0, x1 = arg1, arg2

Source§

fn call3( &mut self, arg1: XReg, arg2: XReg, arg3: XReg, offset: PcRelOffset, ) -> Self::Return

Like call, but also x0, x1, x2 = arg1, arg2, arg3

Source§

fn call4( &mut self, arg1: XReg, arg2: XReg, arg3: XReg, arg4: XReg, offset: PcRelOffset, ) -> Self::Return

Like call, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4

Source§

fn call_indirect(&mut self, reg: XReg) -> Self::Return

Transfer control to the PC in reg and set lr to the PC just after this instruction.

Source§

fn jump(&mut self, offset: PcRelOffset) -> Self::Return

Unconditionally transfer control to the PC at the given offset.

Source§

fn xjump(&mut self, reg: XReg) -> Self::Return

Unconditionally transfer control to the PC at specified register.

Source§

fn br_if32(&mut self, cond: XReg, offset: PcRelOffset) -> Self::Return

Conditionally transfer control to the given PC offset if low32(cond) contains a non-zero value.

Source§

fn br_if_not32(&mut self, cond: XReg, offset: PcRelOffset) -> Self::Return

Conditionally transfer control to the given PC offset if low32(cond) contains a zero value.

Source§

fn br_if_xeq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return

Branch if a == b.

Source§

fn br_if_xneq32( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if a != b.

Source§

fn br_if_xslt32( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if signed a < b.

Source§

fn br_if_xslteq32( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if signed a <= b.

Source§

fn br_if_xult32( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a < b.

Source§

fn br_if_xulteq32( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a <= b.

Source§

fn br_if_xeq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return

Branch if a == b.

Source§

fn br_if_xneq64( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if a != b.

Source§

fn br_if_xslt64( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if signed a < b.

Source§

fn br_if_xslteq64( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if signed a <= b.

Source§

fn br_if_xult64( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a < b.

Source§

fn br_if_xulteq64( &mut self, a: XReg, b: XReg, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a <= b.

Source§

fn br_if_xeq32_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if a == b.

Source§

fn br_if_xeq32_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if a == b.

Source§

fn br_if_xneq32_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if a != b.

Source§

fn br_if_xneq32_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if a != b.

Source§

fn br_if_xslt32_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a < b.

Source§

fn br_if_xslt32_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a < b.

Source§

fn br_if_xsgt32_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a > b.

Source§

fn br_if_xsgt32_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a > b.

Source§

fn br_if_xslteq32_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a <= b.

Source§

fn br_if_xslteq32_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a <= b.

Source§

fn br_if_xsgteq32_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a >= b.

Source§

fn br_if_xsgteq32_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a >= b.

Source§

fn br_if_xult32_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a < b.

Source§

fn br_if_xult32_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a < b.

Source§

fn br_if_xulteq32_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a <= b.

Source§

fn br_if_xulteq32_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a <= b.

Source§

fn br_if_xugt32_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a > b.

Source§

fn br_if_xugt32_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a > b.

Source§

fn br_if_xugteq32_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a >= b.

Source§

fn br_if_xugteq32_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a >= b.

Source§

fn br_if_xeq64_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if a == b.

Source§

fn br_if_xeq64_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if a == b.

Source§

fn br_if_xneq64_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if a != b.

Source§

fn br_if_xneq64_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if a != b.

Source§

fn br_if_xslt64_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a < b.

Source§

fn br_if_xslt64_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a < b.

Source§

fn br_if_xsgt64_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a > b.

Source§

fn br_if_xsgt64_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a > b.

Source§

fn br_if_xslteq64_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a <= b.

Source§

fn br_if_xslteq64_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a <= b.

Source§

fn br_if_xsgteq64_i8( &mut self, a: XReg, b: i8, offset: PcRelOffset, ) -> Self::Return

Branch if signed a >= b.

Source§

fn br_if_xsgteq64_i32( &mut self, a: XReg, b: i32, offset: PcRelOffset, ) -> Self::Return

Branch if signed a >= b.

Source§

fn br_if_xult64_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a < b.

Source§

fn br_if_xult64_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a < b.

Source§

fn br_if_xulteq64_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a <= b.

Source§

fn br_if_xulteq64_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a <= b.

Source§

fn br_if_xugt64_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a > b.

Source§

fn br_if_xugt64_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a > b.

Source§

fn br_if_xugteq64_u8( &mut self, a: XReg, b: u8, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a >= b.

Source§

fn br_if_xugteq64_u32( &mut self, a: XReg, b: u32, offset: PcRelOffset, ) -> Self::Return

Branch if unsigned a >= b.

Source§

fn br_table32(&mut self, idx: XReg, amt: u32) -> Self::Return

Branch to the label indicated by low32(idx).

After this instruction are amt instances of PcRelOffset and the idx selects which one will be branched to. The value of idx is clamped to amt - 1 (i.e. the last offset is the "default" one).

Source§

fn xmov(&mut self, dst: XReg, src: XReg) -> Self::Return

Move between x registers.

Source§

fn xconst8(&mut self, dst: XReg, imm: i8) -> Self::Return

Set dst = sign_extend(imm8).

Source§

fn xconst16(&mut self, dst: XReg, imm: i16) -> Self::Return

Set dst = sign_extend(imm16).

Source§

fn xconst32(&mut self, dst: XReg, imm: i32) -> Self::Return

Set dst = sign_extend(imm32).

Source§

fn xconst64(&mut self, dst: XReg, imm: i64) -> Self::Return

Set dst = imm64.

Source§

fn xadd32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

32-bit wrapping addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified.

Source§

fn xadd32_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return

Same as xadd32 but src2 is a zero-extended 8-bit immediate.

Source§

fn xadd32_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return

Same as xadd32 but src2 is a 32-bit immediate.

Source§

fn xadd64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

64-bit wrapping addition: dst = src1 + src2.

Source§

fn xadd64_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return

Same as xadd64 but src2 is a zero-extended 8-bit immediate.

Source§

fn xadd64_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return

Same as xadd64 but src2 is a zero-extended 32-bit immediate.

Source§

fn xsub32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

32-bit wrapping subtraction: low32(dst) = low32(src1) - low32(src2).

The upper 32-bits of dst are unmodified.

Source§

fn xsub32_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return

Same as xsub32 but src2 is a zero-extended 8-bit immediate.

Source§

fn xsub32_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return

Same as xsub32 but src2 is a 32-bit immediate.

Source§

fn xsub64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

64-bit wrapping subtraction: dst = src1 - src2.

Source§

fn xsub64_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return

Same as xsub64 but src2 is a zero-extended 8-bit immediate.

Source§

fn xsub64_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return

Same as xsub64 but src2 is a zero-extended 32-bit immediate.

Source§

fn xmul32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) * low32(src2)

Source§

fn xmul32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xmul32 but src2 is a sign-extended 8-bit immediate.

Source§

fn xmul32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xmul32 but src2 is a sign-extended 32-bit immediate.

Source§

fn xmul64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 * src2

Source§

fn xmul64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xmul64 but src2 is a sign-extended 8-bit immediate.

Source§

fn xmul64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xmul64 but src2 is a sign-extended 32-bit immediate.

Source§

fn xctz32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = trailing_zeros(low32(src))

Source§

fn xctz64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = trailing_zeros(src)

Source§

fn xclz32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = leading_zeros(low32(src))

Source§

fn xclz64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = leading_zeros(src)

Source§

fn xpopcnt32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = count_ones(low32(src))

Source§

fn xpopcnt64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = count_ones(src)

Source§

fn xrotl32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = rotate_left(low32(src1), low32(src2))

Source§

fn xrotl64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = rotate_left(src1, src2)

Source§

fn xrotr32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = rotate_right(low32(src1), low32(src2))

Source§

fn xrotr64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = rotate_right(src1, src2)

Source§

fn xshl32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) << low5(src2)

Source§

fn xshr32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) >> low5(src2)

Source§

fn xshr32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) >> low5(src2)

Source§

fn xshl64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 << low6(src2)

Source§

fn xshr64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 >> low6(src2)

Source§

fn xshr64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 >> low6(src2)

Source§

fn xshl32_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return

low32(dst) = low32(src1) << low5(src2)

Source§

fn xshr32_s_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return

low32(dst) = low32(src1) >> low5(src2)

Source§

fn xshr32_u_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return

low32(dst) = low32(src1) >> low5(src2)

Source§

fn xshl64_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return

dst = src1 << low6(src2)

Source§

fn xshr64_s_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return

dst = src1 >> low6(src2)

Source§

fn xshr64_u_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return

dst = src1 >> low6(src2)

Source§

fn xneg32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = -low32(src)

Source§

fn xneg64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = -src

Source§

fn xeq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = src1 == src2

Source§

fn xneq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = src1 != src2

Source§

fn xslt64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = src1 < src2 (signed)

Source§

fn xslteq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = src1 <= src2 (signed)

Source§

fn xult64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = src1 < src2 (unsigned)

Source§

fn xulteq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = src1 <= src2 (unsigned)

Source§

fn xeq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) == low32(src2)

Source§

fn xneq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) != low32(src2)

Source§

fn xslt32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) < low32(src2) (signed)

Source§

fn xslteq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) <= low32(src2) (signed)

Source§

fn xult32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) < low32(src2) (unsigned)

Source§

fn xulteq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) <= low32(src2) (unsigned)

Source§

fn xload8_u32_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source§

fn xload8_s32_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = sext(*(ptr + offset))

Source§

fn xload16le_u32_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source§

fn xload16le_s32_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = sext(*(ptr + offset))

Source§

fn xload32le_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

low32(dst) = *(ptr + offset)

Source§

fn xload8_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload8_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload16le_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload16le_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload32le_u64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload32le_s64_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload64le_offset32( &mut self, dst: XReg, ptr: XReg, offset: i32, ) -> Self::Return

dst = *(ptr + offset)

Source§

fn xstore8_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low8(src)

Source§

fn xstore16le_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low16(src)

Source§

fn xstore32le_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source§

fn xstore64le_offset32( &mut self, ptr: XReg, offset: i32, src: XReg, ) -> Self::Return

*(ptr + offset) = low64(src)

Source§

fn xload8_u32_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source§

fn xload8_s32_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

low32(dst) = sext(*(ptr + offset))

Source§

fn xload16le_u32_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

low32(dst) = zext(*(ptr + offset))

Source§

fn xload16le_s32_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

low32(dst) = sext(*(ptr + offset))

Source§

fn xload32le_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

low32(dst) = *(ptr + offset)

Source§

fn xload8_u64_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload8_s64_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload16le_u64_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload16le_s64_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload32le_u64_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = zext(*(ptr + offset))

Source§

fn xload32le_s64_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = sext(*(ptr + offset))

Source§

fn xload64le_offset8( &mut self, dst: XReg, ptr: XReg, offset: u8, ) -> Self::Return

dst = *(ptr + offset)

Source§

fn xstore8_offset8(&mut self, ptr: XReg, offset: u8, src: XReg) -> Self::Return

*(ptr + offset) = low8(src)

Source§

fn xstore16le_offset8( &mut self, ptr: XReg, offset: u8, src: XReg, ) -> Self::Return

*(ptr + offset) = low16(src)

Source§

fn xstore32le_offset8( &mut self, ptr: XReg, offset: u8, src: XReg, ) -> Self::Return

*(ptr + offset) = low32(src)

Source§

fn xstore64le_offset8( &mut self, ptr: XReg, offset: u8, src: XReg, ) -> Self::Return

*(ptr + offset) = low64(src)

Source§

fn push_frame(&mut self) -> Self::Return

push lr; push fp; fp = sp

Source§

fn pop_frame(&mut self) -> Self::Return

sp = fp; pop fp; pop lr

Source§

fn push_frame_save(&mut self, amt: u32, regs: RegSet<XReg>) -> Self::Return

Macro-instruction to enter a function, allocate some stack, and then save some registers.

This is equivalent to push_frame, stack_alloc32 amt, then saving all of regs to the top of the stack just allocated.

Source§

fn pop_frame_restore(&mut self, amt: u32, regs: RegSet<XReg>) -> Self::Return

Inverse of push_frame_save. Restores regs from the top of the stack, then runs stack_free32 amt, then runs pop_frame.

Source§

fn stack_alloc32(&mut self, amt: u32) -> Self::Return

sp = sp.checked_sub(amt)

Source§

fn stack_free32(&mut self, amt: u32) -> Self::Return

sp = sp + amt

Source§

fn zext8(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = zext(low8(src))

Source§

fn zext16(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = zext(low16(src))

Source§

fn zext32(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = zext(low32(src))

Source§

fn sext8(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = sext(low8(src))

Source§

fn sext16(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = sext(low16(src))

Source§

fn sext32(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = sext(low32(src))

Source§

fn xabs32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = |low32(src)|

Source§

fn xabs64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = |src|

Source§

fn xdiv32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) / low32(src2) (signed)

Source§

fn xdiv64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 / src2 (signed)

Source§

fn xdiv32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) / low32(src2) (unsigned)

Source§

fn xdiv64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 / src2 (unsigned)

Source§

fn xrem32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) % low32(src2) (signed)

Source§

fn xrem64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 % src2 (signed)

Source§

fn xrem32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) % low32(src2) (unsigned)

Source§

fn xrem64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 % src2 (unsigned)

Source§

fn xband32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) & low32(src2)

Source§

fn xband32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xband32 but src2 is a sign-extended 8-bit immediate.

Source§

fn xband32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xband32 but src2 is a sign-extended 32-bit immediate.

Source§

fn xband64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 & src2

Source§

fn xband64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xband64 but src2 is a sign-extended 8-bit immediate.

Source§

fn xband64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xband64 but src2 is a sign-extended 32-bit immediate.

Source§

fn xbor32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) | low32(src2)

Source§

fn xbor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xbor32 but src2 is a sign-extended 8-bit immediate.

Source§

fn xbor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xbor32 but src2 is a sign-extended 32-bit immediate.

Source§

fn xbor64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 | src2

Source§

fn xbor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xbor64 but src2 is a sign-extended 8-bit immediate.

Source§

fn xbor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xbor64 but src2 is a sign-extended 32-bit immediate.

Source§

fn xbxor32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = low32(src1) ^ low32(src2)

Source§

fn xbxor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xbxor32 but src2 is a sign-extended 8-bit immediate.

Source§

fn xbxor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xbxor32 but src2 is a sign-extended 32-bit immediate.

Source§

fn xbxor64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = src1 ^ src2

Source§

fn xbxor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return

Same as xbxor64 but src2 is a sign-extended 8-bit immediate.

Source§

fn xbxor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return

Same as xbxor64 but src2 is a sign-extended 32-bit immediate.

Source§

fn xbnot32(&mut self, dst: XReg, src: XReg) -> Self::Return

low32(dst) = !low32(src)

Source§

fn xbnot64(&mut self, dst: XReg, src: XReg) -> Self::Return

dst = !src

Source§

fn xmin32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = min(low32(src1), low32(src2)) (unsigned)

Source§

fn xmin32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = min(low32(src1), low32(src2)) (signed)

Source§

fn xmax32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = max(low32(src1), low32(src2)) (unsigned)

Source§

fn xmax32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

low32(dst) = max(low32(src1), low32(src2)) (signed)

Source§

fn xmin64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = min(src1, src2) (unsigned)

Source§

fn xmin64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = min(src1, src2) (signed)

Source§

fn xmax64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = max(src1, src2) (unsigned)

Source§

fn xmax64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return

dst = max(src1, src2) (signed)

Source§

fn xselect32( &mut self, dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg, ) -> Self::Return

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

Source§

fn xselect64( &mut self, dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg, ) -> Self::Return

dst = low32(cond) ? if_nonzero : if_zero

Source§

type BytecodeStream = <V1 as OpVisitor>::BytecodeStream

The type of this visitor’s bytecode stream.
Source§

type Return = T

The type of values returned by each visitor method.
Source§

fn bytecode(&mut self) -> &mut Self::BytecodeStream

Get this visitor’s underlying bytecode stream.
Source§

fn before_visit(&mut self)

A callback invoked before starting to decode an instruction. Read more
Source§

fn after_visit(&mut self)

A callback invoked after an instruction has been completely decoded. Read more

Auto Trait Implementations§

§

impl<'a, F, V1, V2> Freeze for SequencedVisitor<'a, F, V1, V2>
where F: Freeze,

§

impl<'a, F, V1, V2> RefUnwindSafe for SequencedVisitor<'a, F, V1, V2>

§

impl<'a, F, V1, V2> Send for SequencedVisitor<'a, F, V1, V2>
where F: Send, V1: Send, V2: Send,

§

impl<'a, F, V1, V2> Sync for SequencedVisitor<'a, F, V1, V2>
where F: Sync, V1: Sync, V2: Sync,

§

impl<'a, F, V1, V2> Unpin for SequencedVisitor<'a, F, V1, V2>
where F: Unpin,

§

impl<'a, F, V1, V2> !UnwindSafe for SequencedVisitor<'a, F, V1, V2>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.