Module pulley_interpreter::encode

Available on crate feature encode only.

Encoding support for pulley bytecode.
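The functions in this module are free-standing encoders, one per opcode, each appending an instruction's bytes to a sink. As a minimal sketch of how they might be driven, the example below writes a short sequence into an in-memory buffer; it assumes each encoder takes a mutable byte sink (anything implementing `Extend<u8>`, such as `Vec<u8>`) followed by its operands. The module path for registers, the register constructor, and the operand order shown are assumptions for illustration, not the crate's exact generated signatures.

```rust
use pulley_interpreter::encode;
use pulley_interpreter::regs::XReg;

// Hypothetical sketch: encode a tiny instruction sequence into a byte buffer.
// `Vec<u8>` serves as the sink because it implements `Extend<u8>`.
fn encode_example() -> Vec<u8> {
    let mut sink = Vec::new();

    // Assumed constructor: build register operands by index.
    let x0 = XReg::new(0).unwrap();
    let x1 = XReg::new(1).unwrap();

    // x0 = sign_extend(42)
    encode::xconst8(&mut sink, x0, 42);
    // low32(x0) = low32(x0) + low32(x1)
    encode::xadd32(&mut sink, x0, x0, x1);
    // Transfer control to the address in the lr register.
    encode::ret(&mut sink);

    sink
}
```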

Traits§

Encode
Helper trait to encode instructions into a “sink”.
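A hedged sketch of using this trait generically, assuming it exposes an encode method that appends an instruction's bytes to any `Extend<u8>` sink (the method name and bounds here are assumptions for illustration):

```rust
use pulley_interpreter::encode::Encode;

// Hypothetical helper: append any encodable instruction to a byte buffer,
// assuming a method shaped like `fn encode<E: Extend<u8>>(&self, sink: &mut E)`.
fn append_instruction<I: Encode>(buf: &mut Vec<u8>, inst: &I) {
    inst.encode(buf);
}
```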

Functions§

bitcast_float_from_int_32
low32(dst) = bitcast low32(src) as f32
bitcast_float_from_int_64
dst = bitcast src as f64
bitcast_int_from_float_32
low32(dst) = bitcast low32(src) as i32
bitcast_int_from_float_64
dst = bitcast src as i64
br_if32
Conditionally transfer control to the given PC offset if low32(cond) contains a non-zero value.
br_if_not32
Conditionally transfer control to the given PC offset if low32(cond) contains a zero value.
br_if_xeq32
Branch if a == b.
br_if_xeq64
Branch if a == b.
br_if_xeq32_i8
Branch if a == b.
br_if_xeq32_i32
Branch if a == b.
br_if_xeq64_i8
Branch if a == b.
br_if_xeq64_i32
Branch if a == b.
br_if_xneq32
Branch if a != b.
br_if_xneq64
Branch if a != b.
br_if_xneq32_i8
Branch if a != b.
br_if_xneq32_i32
Branch if a != b.
br_if_xneq64_i8
Branch if a != b.
br_if_xneq64_i32
Branch if a != b.
br_if_xsgt32_i8
Branch if signed a > b.
br_if_xsgt32_i32
Branch if signed a > b.
br_if_xsgt64_i8
Branch if signed a > b.
br_if_xsgt64_i32
Branch if signed a > b.
br_if_xsgteq32_i8
Branch if signed a >= b.
br_if_xsgteq32_i32
Branch if signed a >= b.
br_if_xsgteq64_i8
Branch if signed a >= b.
br_if_xsgteq64_i32
Branch if signed a >= b.
br_if_xslt32
Branch if signed a < b.
br_if_xslt64
Branch if signed a < b.
br_if_xslt32_i8
Branch if signed a < b.
br_if_xslt32_i32
Branch if signed a < b.
br_if_xslt64_i8
Branch if signed a < b.
br_if_xslt64_i32
Branch if signed a < b.
br_if_xslteq32
Branch if signed a <= b.
br_if_xslteq64
Branch if signed a <= b.
br_if_xslteq32_i8
Branch if signed a <= b.
br_if_xslteq32_i32
Branch if signed a <= b.
br_if_xslteq64_i8
Branch if signed a <= b.
br_if_xslteq64_i32
Branch if signed a <= b.
br_if_xugt32_u8
Branch if unsigned a > b.
br_if_xugt32_u32
Branch if unsigned a > b.
br_if_xugt64_u8
Branch if unsigned a > b.
br_if_xugt64_u32
Branch if unsigned a > b.
br_if_xugteq32_u8
Branch if unsigned a >= b.
br_if_xugteq32_u32
Branch if unsigned a >= b.
br_if_xugteq64_u8
Branch if unsigned a >= b.
br_if_xugteq64_u32
Branch if unsigned a >= b.
br_if_xult32
Branch if unsigned a < b.
br_if_xult64
Branch if unsigned a < b.
br_if_xult32_u8
Branch if unsigned a < b.
br_if_xult32_u32
Branch if unsigned a < b.
br_if_xult64_u8
Branch if unsigned a < b.
br_if_xult64_u32
Branch if unsigned a < b.
br_if_xulteq32
Branch if unsigned a <= b.
br_if_xulteq64
Branch if unsigned a <= b.
br_if_xulteq32_u8
Branch if unsigned a <= b.
br_if_xulteq32_u32
Branch if unsigned a <= b.
br_if_xulteq64_u8
Branch if unsigned a <= b.
br_if_xulteq64_u32
Branch if unsigned a <= b.
br_table32
Branch to the label indicated by low32(idx).
bswap32
dst = byteswap(low32(src))
bswap64
dst = byteswap(src)
call
Transfer control to the PC at the given offset and set the lr register to the PC just after this instruction.
call1
Like call, but also x0 = arg1
call2
Like call, but also x0, x1 = arg1, arg2
call3
Like call, but also x0, x1, x2 = arg1, arg2, arg3
call4
Like call, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4
call_indirect
Transfer control to the PC in reg and set lr to the PC just after this instruction.
call_indirect_host
A special opcode to halt interpreter execution and yield control back to the host.
f32_from_f64
low32(dst) = demote(src)
f32_from_x32_s
low32(dst) = checked_f32_from_signed(low32(src))
f32_from_x32_u
low32(dst) = checked_f32_from_unsigned(low32(src))
f32_from_x64_s
low32(dst) = checked_f32_from_signed(src)
f32_from_x64_u
low32(dst) = checked_f32_from_unsigned(src)
f64_from_f32
dst = promote(low32(src))
f64_from_x32_s
dst = checked_f64_from_signed(low32(src))
f64_from_x32_u
dst = checked_f64_from_unsigned(low32(src))
f64_from_x64_s
dst = checked_f64_from_signed(src)
f64_from_x64_u
dst = checked_f64_from_unsigned(src)
fabs32
low32(dst) = |low32(src)|
fabs64
dst = |src|
fadd32
low32(dst) = low32(src1) + low32(src2)
fadd64
dst = src1 + src2
fceil32
low32(dst) = ieee_ceil(low32(src))
fceil64
dst = ieee_ceil(src)
fconst32
low32(dst) = bits
fconst64
dst = bits
fcopysign32
low32(dst) = copysign(low32(src1), low32(src2))
fcopysign64
dst = copysign(src1, src2)
fdiv32
low32(dst) = low32(src1) / low32(src2)
fdiv64
dst = src1 / src2
feq32
low32(dst) = zext(src1 == src2)
feq64
low32(dst) = zext(src1 == src2)
fextractv32x4
low32(dst) = src[lane]
fextractv64x2
dst = src[lane]
ffloor32
low32(dst) = ieee_floor(low32(src))
ffloor64
dst = ieee_floor(src)
fload32be_offset32
low32(dst) = zext(*(ptr + offset))
fload32le_offset32
low32(dst) = zext(*(ptr + offset))
fload64be_offset32
dst = *(ptr + offset)
fload64le_offset32
dst = *(ptr + offset)
flt32
low32(dst) = zext(src1 < src2)
flt64
low32(dst) = zext(src1 < src2)
flteq32
low32(dst) = zext(src1 <= src2)
flteq64
low32(dst) = zext(src1 <= src2)
fmaximum32
low32(dst) = ieee_maximum(low32(src1), low32(src2))
fmaximum64
dst = ieee_maximum(src1, src2)
fminimum32
low32(dst) = ieee_minimum(low32(src1), low32(src2))
fminimum64
dst = ieee_minimum(src1, src2)
fmov
Move between f registers.
fmul32
low32(dst) = low32(src1) * low32(src2)
fmul64
dst = src1 * src2
fnearest32
low32(dst) = ieee_nearest(low32(src))
fnearest64
dst = ieee_nearest(src)
fneg32
low32(dst) = -low32(src)
fneg64
dst = -src
fneq32
low32(dst) = zext(src1 != src2)
fneq64
low32(dst) = zext(src1 != src2)
fselect32
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
fselect64
dst = low32(cond) ? if_nonzero : if_zero
fsqrt32
low32(dst) = ieee_sqrt(low32(src))
fsqrt64
dst = ieee_sqrt(src)
fstore32be_offset32
*(ptr + offset) = low32(src)
fstore32le_offset32
*(ptr + offset) = low32(src)
fstore64be_offset32
*(ptr + offset) = src
fstore64le_offset32
*(ptr + offset) = src
fsub32
low32(dst) = low32(src1) - low32(src2)
fsub64
dst = src1 - src2
ftrunc32
low32(dst) = ieee_trunc(low32(src))
ftrunc64
dst = ieee_trunc(src)
jump
Unconditionally transfer control to the PC at the given offset.
nop
Do nothing.
pop_frame
sp = fp; pop fp; pop lr
pop_frame_restore
Inverse of push_frame_save. Restores regs from the top of the stack, then runs stack_free32 amt, then runs pop_frame.
push_frame
push lr; push fp; fp = sp
push_frame_save
Macro-instruction to enter a function, allocate some stack, and then save some registers; see the sketch after this list.
ret
Transfer control to the address in the lr register.
sext8
dst = sext(low8(src))
sext16
dst = sext(low16(src))
sext32
dst = sext(low32(src))
stack_alloc32
sp = sp.checked_sub(amt)
stack_free32
sp = sp + amt
trap
Raise a trap.
vabs8x16
dst = |src|
vabs16x8
dst = |src|
vabs32x4
dst = |src|
vabs64x2
dst = |src|
vabsf32x4
dst = |src|
vabsf64x2
dst = |src|
vaddf32x4
dst = src1 + src2
vaddf64x2
dst = src1 + src2
vaddi8x16
dst = src1 + src2
vaddi8x16_sat
dst = saturating_add(src1, src2)
vaddi16x8
dst = src1 + src2
vaddi16x8_sat
dst = saturating_add(src1, src2)
vaddi32x4
dst = src1 + src2
vaddi64x2
dst = src1 + src2
vaddpairwisei16x8_s
dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
vaddpairwisei32x4_s
dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
vaddu8x16_sat
dst = saturating_add(src1, src2)
vaddu16x8_sat
dst = saturating_add(src1, src2)
valltrue8x16
Store whether all lanes are nonzero in dst.
valltrue16x8
Store whether all lanes are nonzero in dst.
valltrue32x4
Store whether all lanes are nonzero in dst.
valltrue64x2
Store whether all lanes are nonzero in dst.
vanytrue8x16
Store whether any lanes are nonzero in dst.
vanytrue16x8
Store whether any lanes are nonzero in dst.
vanytrue32x4
Store whether any lanes are nonzero in dst.
vanytrue64x2
Store whether any lanes are nonzero in dst.
vavground8x16
dst = (src1 + src2 + 1) // 2
vavground16x8
dst = (src1 + src2 + 1) // 2
vband128
dst = src1 & src2
vbitmask8x16
Collect high bits of each lane into the low 32-bits of the destination.
vbitmask16x8
Collect high bits of each lane into the low 32-bits of the destination.
vbitmask32x4
Collect high bits of each lane into the low 32-bits of the destination.
vbitmask64x2
Collect high bits of each lane into the low 32-bits of the destination.
vbitselect128
dst = (c & x) | (!c & y)
vbnot128
dst = !src1
vbor128
dst = src1 | src2
vbxor128
dst = src1 ^ src2
vceil32x4
low128(dst) = ieee_ceil(low128(src))
vceil64x2
low128(dst) = ieee_ceil(low128(src))
vconst128
dst = imm
vdivf32x4
low128(dst) = low128(src1) / low128(src2)
vdivf64x2
dst = src1 / src2
veq8x16
dst = src == dst
veq16x8
dst = src == dst
veq32x4
dst = src == dst
veq64x2
dst = src == dst
vf32x4_from_i32x4_s
Int-to-float conversion (same as f32_from_x32_s)
vf32x4_from_i32x4_u
Int-to-float conversion (same as f32_from_x32_u)
vf64x2_from_i64x2_s
Int-to-float conversion (same as f64_from_x64_s)
vf64x2_from_i64x2_u
Int-to-float conversion (same as f64_from_x64_u)
vfdemote
Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
vfloor32x4
low128(dst) = ieee_floor(low128(src))
vfloor64x2
low128(dst) = ieee_floor(low128(src))
vfpromotelow
Promotes the low two lanes of the f32x4 input to f64x2.
vinsertf32
dst = src1; dst[lane] = src2
vinsertf64
dst = src1; dst[lane] = src2
vinsertx8
dst = src1; dst[lane] = src2
vinsertx16
dst = src1; dst[lane] = src2
vinsertx32
dst = src1; dst[lane] = src2
vinsertx64
dst = src1; dst[lane] = src2
vload8x8_s_offset32
Load the 64-bit source as i8x8 and sign-extend to i16x8.
vload8x8_u_offset32
Load the 64-bit source as u8x8 and zero-extend to i16x8.
vload16x4le_s_offset32
Load the 64-bit source as i16x4 and sign-extend to i32x4.
vload16x4le_u_offset32
Load the 64-bit source as u16x4 and zero-extend to i32x4.
vload32x2le_s_offset32
Load the 64-bit source as i32x2 and sign-extend to i64x2.
vload32x2le_u_offset32
Load the 64-bit source as u32x2 and zero-extend to i64x2.
vload128le_offset32
dst = *(ptr + offset)
vmax8x16_s
dst = max(src1, src2) (signed)
vmax8x16_u
dst = max(src1, src2) (unsigned)
vmax16x8_s
dst = max(src1, src2) (signed)
vmax16x8_u
dst = max(src1, src2) (unsigned)
vmax32x4_s
dst = max(src1, src2) (signed)
vmax32x4_u
dst = max(src1, src2) (unsigned)
vmaximumf32x4
dst = ieee_maximum(src1, src2)
vmaximumf64x2
dst = ieee_maximum(src1, src2)
vmin8x16_s
dst = min(src1, src2) (signed)
vmin8x16_u
dst = min(src1, src2) (unsigned)
vmin16x8_s
dst = min(src1, src2) (signed)
vmin16x8_u
dst = min(src1, src2) (unsigned)
vmin32x4_s
dst = min(src1, src2) (signed)
vmin32x4_u
dst = min(src1, src2) (unsigned)
vminimumf32x4
dst = ieee_minimum(src1, src2)
vminimumf64x2
dst = ieee_minimum(src1, src2)
vmov
Move between v registers.
vmulf64x2
dst = src1 * src2
vmuli8x16
dst = src1 * src2
vmuli16x8
dst = src1 * src2
vmuli32x4
dst = src1 * src2
vmuli64x2
dst = src1 * src2
vnarrow16x8_s
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
vnarrow16x8_u
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
vnarrow32x4_s
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
vnarrow32x4_u
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
vnearest32x4
low128(dst) = ieee_nearest(low128(src))
vnearest64x2
low128(dst) = ieee_nearest(low128(src))
vneg8x16
dst = -src
vneg16x8
dst = -src
vneg32x4
dst = -src
vneg64x2
dst = -src
vnegf64x2
dst = -src
vneq8x16
dst = src != dst
vneq16x8
dst = src != dst
vneq32x4
dst = src != dst
vneq64x2
dst = src != dst
vpopcnt8x16
dst = count_ones(src)
vqmulrsi16x8
dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
vshli8x16
dst = src1 << src2
vshli16x8
dst = src1 << src2
vshli32x4
dst = src1 << src2
vshli64x2
dst = src1 << src2
vshri8x16_s
dst = src1 >> src2 (signed)
vshri8x16_u
dst = src1 >> src2 (unsigned)
vshri16x8_s
dst = src1 >> src2 (signed)
vshri16x8_u
dst = src1 >> src2 (unsigned)
vshri32x4_s
dst = src1 >> src2 (signed)
vshri32x4_u
dst = src1 >> src2 (unsigned)
vshri64x2_s
dst = src1 >> src2 (signed)
vshri64x2_u
dst = src1 >> src2 (unsigned)
vshuffle
dst = shuffle(src1, src2, mask)
vslt8x16
dst = src < dst (signed)
vslt16x8
dst = src < dst (signed)
vslt32x4
dst = src < dst (signed)
vslt64x2
dst = src < dst (signed)
vslteq8x16
dst = src <= dst (signed)
vslteq16x8
dst = src <= dst (signed)
vslteq32x4
dst = src <= dst (signed)
vslteq64x2
dst = src <= dst (signed)
vsplatf32
dst = splat(low32(src))
vsplatf64
dst = splat(src)
vsplatx8
dst = splat(low8(src))
vsplatx16
dst = splat(low16(src))
vsplatx32
dst = splat(low32(src))
vsplatx64
dst = splat(src)
vsqrt32x4
low128(dst) = ieee_sqrt(low128(src))
vsqrt64x2
low128(dst) = ieee_sqrt(low128(src))
vstore128le_offset32
*(ptr + offset) = src
vsubf64x2
dst = src1 - src2
vsubi8x16
dst = src1 - src2
vsubi8x16_sat
dst = saturating_sub(src1, src2)
vsubi16x8
dst = src1 - src2
vsubi16x8_sat
dst = saturating_sub(src1, src2)
vsubi32x4
dst = src1 - src2
vsubi64x2
dst = src1 - src2
vsubu8x16_sat
dst = saturating_sub(src1, src2)
vsubu16x8_sat
dst = saturating_sub(src1, src2)
vswizzlei8x16
dst = swizzle(src1, src2)
vtrunc32x4
low128(dst) = ieee_trunc(low128(src))
vtrunc64x2
low128(dst) = ieee_trunc(low128(src))
vult8x16
dst = src < dst (unsigned)
vult16x8
dst = src < dst (unsigned)
vult32x4
dst = src < dst (unsigned)
vult64x2
dst = src < dst (unsigned)
vulteq8x16
dst = src <= dst (unsigned)
vulteq16x8
dst = src <= dst (unsigned)
vulteq32x4
dst = src <= dst (unsigned)
vulteq64x2
dst = src <= dst (unsigned)
vwidenhigh8x16_s
Widens the high lanes of the input vector, as signed, to twice the width.
vwidenhigh8x16_u
Widens the high lanes of the input vector, as unsigned, to twice the width.
vwidenhigh16x8_s
Widens the high lanes of the input vector, as signed, to twice the width.
vwidenhigh16x8_u
Widens the high lanes of the input vector, as unsigned, to twice the width.
vwidenhigh32x4_s
Widens the high lanes of the input vector, as signed, to twice the width.
vwidenhigh32x4_u
Widens the high lanes of the input vector, as unsigned, to twice the width.
vwidenlow8x16_s
Widens the low lanes of the input vector, as signed, to twice the width.
vwidenlow8x16_u
Widens the low lanes of the input vector, as unsigned, to twice the width.
vwidenlow16x8_s
Widens the low lanes of the input vector, as signed, to twice the width.
vwidenlow16x8_u
Widens the low lanes of the input vector, as unsigned, to twice the width.
vwidenlow32x4_s
Widens the low lanes of the input vector, as signed, to twice the width.
vwidenlow32x4_u
Widens the low lanes of the input vector, as unsigned, to twice the width.
x32_from_f32_s
low32(dst) = checked_signed_from_f32(low32(src))
x32_from_f32_s_sat
low32(dst) = saturating_signed_from_f32(low32(src))
x32_from_f32_u
low32(dst) = checked_unsigned_from_f32(low32(src))
x32_from_f32_u_sat
low32(dst) = saturating_unsigned_from_f32(low32(src))
x32_from_f64_s
low32(dst) = checked_signed_from_f64(src)
x32_from_f64_s_sat
low32(dst) = saturating_signed_from_f64(src)
x32_from_f64_u
low32(dst) = checked_unsigned_from_f64(src)
x32_from_f64_u_sat
low32(dst) = saturating_unsigned_from_f64(src)
x64_from_f32_s
dst = checked_signed_from_f32(low32(src))
x64_from_f32_s_sat
dst = saturating_signed_from_f32(low32(src))
x64_from_f32_u
dst = checked_unsigned_from_f32(low32(src))
x64_from_f32_u_sat
dst = saturating_unsigned_from_f32(low32(src))
x64_from_f64_s
dst = checked_signed_from_f64(src)
x64_from_f64_s_sat
dst = saturating_signed_from_f64(src)
x64_from_f64_u
dst = checked_unsigned_from_f64(src)
x64_from_f64_u_sat
dst = saturating_unsigned_from_f64(src)
xabs32
low32(dst) = |low32(src)|
xabs64
dst = |src|
xadd32
32-bit wrapping addition: low32(dst) = low32(src1) + low32(src2).
xadd64
64-bit wrapping addition: dst = src1 + src2.
xadd32_u8
Same as xadd32 but src2 is a zero-extended 8-bit immediate.
xadd32_u32
Same as xadd32 but src2 is a 32-bit immediate.
xadd32_uoverflow_trap
32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).
xadd64_u8
Same as xadd64 but src2 is a zero-extended 8-bit immediate.
xadd64_u32
Same as xadd64 but src2 is a zero-extended 32-bit immediate.
xadd64_uoverflow_trap
64-bit checked unsigned addition: dst = src1 + src2.
xband32
low32(dst) = low32(src1) & low32(src2)
xband64
dst = src1 & src2
xband32_s8
Same as xband32 but src2 is a sign-extended 8-bit immediate.
xband32_s32
Same as xband32 but src2 is a sign-extended 32-bit immediate.
xband64_s8
Same as xband64 but src2 is a sign-extended 8-bit immediate.
xband64_s32
Same as xband64 but src2 is a sign-extended 32-bit immediate.
xbmask32
low32(dst) = if low32(src) == 0 { 0 } else { -1 }
xbmask64
dst = if src == 0 { 0 } else { -1 }
xbnot32
low32(dst) = !low32(src1)
xbnot64
dst = !src1
xbor32
low32(dst) = low32(src1) | low32(src2)
xbor64
dst = src1 | src2
xbor32_s8
Same as xbor32 but src2 is a sign-extended 8-bit immediate.
xbor32_s32
Same as xbor32 but src2 is a sign-extended 32-bit immediate.
xbor64_s8
Same as xbor64 but src2 is a sign-extended 8-bit immediate.
xbor64_s32
Same as xbor64 but src2 is a sign-extended 32-bit immediate.
xbxor32
low32(dst) = low32(src1) ^ low32(src2)
xbxor64
dst = src1 ^ src2
xbxor32_s8
Same as xbxor32 but src2 is a sign-extended 8-bit immediate.
xbxor32_s32
Same as xbxor32 but src2 is a sign-extended 32-bit immediate.
xbxor64_s8
Same as xbxor64 but src2 is a sign-extended 8-bit immediate.
xbxor64_s32
Same as xbxor64 but src2 is a sign-extended 32-bit immediate.
xclz32
low32(dst) = leading_zeros(low32(src))
xclz64
dst = leading_zeros(src)
xconst8
Set dst = sign_extend(imm8).
xconst16
Set dst = sign_extend(imm16).
xconst32
Set dst = sign_extend(imm32).
xconst64
Set dst = imm64.
xctz32
low32(dst) = trailing_zeros(low32(src))
xctz64
dst = trailing_zeros(src)
xdiv32_s
low32(dst) = low32(src1) / low32(src2) (signed)
xdiv32_u
low32(dst) = low32(src1) / low32(src2) (unsigned)
xdiv64_s
dst = src1 / src2 (signed)
xdiv64_u
dst = src1 / src2 (unsigned)
xeq32
low32(dst) = low32(src1) == low32(src2)
xeq64
low32(dst) = src1 == src2
xextractv8x16
low32(dst) = zext(src[lane])
xextractv16x8
low32(dst) = zext(src[lane])
xextractv32x4
low32(dst) = src[lane]
xextractv64x2
dst = src[lane]
xjump
Unconditionally transfer control to the PC in the specified register.
xload8_s32_offset8
low32(dst) = sext(*(ptr + offset))
xload8_s32_offset32
low32(dst) = sext(*(ptr + offset))
xload8_s64_offset8
dst = sext(*(ptr + offset))
xload8_s64_offset32
dst = sext(*(ptr + offset))
xload8_u32_offset8
low32(dst) = zext(*(ptr + offset))
xload8_u32_offset32
low32(dst) = zext(*(ptr + offset))
xload8_u64_offset8
dst = zext(*(ptr + offset))
xload8_u64_offset32
dst = zext(*(ptr + offset))
xload16be_s64_offset32
dst = sext(*(ptr + offset))
xload16be_u64_offset32
dst = zext(*(ptr + offset))
xload16le_s32_offset8
low32(dst) = sext(*(ptr + offset))
xload16le_s32_offset32
low32(dst) = sext(*(ptr + offset))
xload16le_s64_offset8
dst = sext(*(ptr + offset))
xload16le_s64_offset32
dst = sext(*(ptr + offset))
xload16le_u32_offset8
low32(dst) = zext(*(ptr + offset))
xload16le_u32_offset32
low32(dst) = zext(*(ptr + offset))
xload16le_u64_offset8
dst = zext(*(ptr + offset))
xload16le_u64_offset32
dst = zext(*(ptr + offset))
xload32be_s64_offset32
dst = sext(*(ptr + offset))
xload32be_u64_offset32
dst = zext(*(ptr + offset))
xload32le_offset8
low32(dst) = *(ptr + offset)
xload32le_offset32
low32(dst) = *(ptr + offset)
xload32le_s64_offset8
dst = sext(*(ptr + offset))
xload32le_s64_offset32
dst = sext(*(ptr + offset))
xload32le_u64_offset8
dst = zext(*(ptr + offset))
xload32le_u64_offset32
dst = zext(*(ptr + offset))
xload64be_offset32
dst = *(ptr + offset)
xload64le_offset8
dst = *(ptr + offset)
xload64le_offset32
dst = *(ptr + offset)
xmax32_s
low32(dst) = max(low32(src1), low32(src2)) (signed)
xmax32_u
low32(dst) = max(low32(src1), low32(src2)) (unsigned)
xmax64_s
dst = max(src1, src2) (signed)
xmax64_u
dst = max(src1, src2) (unsigned)
xmin32_s
low32(dst) = min(low32(src1), low32(src2)) (signed)
xmin32_u
low32(dst) = min(low32(src1), low32(src2)) (unsigned)
xmin64_s
dst = min(src1, src2) (signed)
xmin64_u
dst = min(src1, src2) (unsigned)
xmov
Move between x registers.
xmov_fp
Gets the special “fp” register and moves it into dst.
xmov_lr
Gets the special “lr” register and moves it into dst.
xmul32
low32(dst) = low32(src1) * low32(src2)
xmul64
dst = src1 * src2
xmul32_s8
Same as xmul32 but src2 is a sign-extended 8-bit immediate.
xmul32_s32
Same as xmul32 but src2 is a sign-extended 32-bit immediate.
xmul64_s8
Same as xmul64 but src2 is a sign-extended 8-bit immediate.
xmul64_s32
Same as xmul64 but src2 is a sign-extended 32-bit immediate.
xmulhi64_s
dst = high64(src1 * src2) (signed)
xmulhi64_u
dst = high64(src1 * src2) (unsigned)
xneg32
low32(dst) = -low32(src)
xneg64
dst = -src
xneq32
low32(dst) = low32(src1) != low32(src2)
xneq64
low32(dst) = src1 != src2
xpop32
*dst = *sp; sp -= 4
xpop64
*dst = *sp; sp -= 8
xpop32_many
for dst in dsts.rev() { xpop32 dst }
xpop64_many
for dst in dsts.rev() { xpop64 dst }
xpopcnt32
low32(dst) = count_ones(low32(src))
xpopcnt64
dst = count_ones(src)
xpush32
*sp = low32(src); sp = sp.checked_add(4)
xpush64
*sp = src; sp = sp.checked_add(8)
xpush32_many
for src in srcs { xpush32 src }
xpush64_many
for src in srcs { xpush64 src }
xrem32_s
low32(dst) = low32(src1) % low32(src2) (signed)
xrem32_u
low32(dst) = low32(src1) % low32(src2) (unsigned)
xrem64_s
dst = src1 % src2 (signed)
xrem64_u
dst = src1 % src2 (unsigned)
xrotl32
low32(dst) = rotate_left(low32(src1), low32(src2))
xrotl64
dst = rotate_left(src1, src2)
xrotr32
low32(dst) = rotate_right(low32(src1), low32(src2))
xrotr64
dst = rotate_right(src1, src2)
xselect32
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
xselect64
dst = low32(cond) ? if_nonzero : if_zero
xshl32
low32(dst) = low32(src1) << low5(src2)
xshl64
dst = src1 << low6(src2)
xshl32_u6
low32(dst) = low32(src1) << low5(src2)
xshl64_u6
dst = src1 << low6(src2)
xshr32_s
low32(dst) = low32(src1) >> low5(src2)
xshr32_s_u6
low32(dst) = low32(src1) >> low5(src2)
xshr32_u
low32(dst) = low32(src1) >> low5(src2)
xshr32_u_u6
low32(dst) = low32(src1) >> low5(src2)
xshr64_s
dst = src1 >> low6(src2)
xshr64_s_u6
dst = src1 >> low6(src2)
xshr64_u
dst = src1 >> low6(src2)
xshr64_u_u6
dst = src1 >> low6(src2)
xslt32
low32(dst) = low32(src1) < low32(src2) (signed)
xslt64
low32(dst) = src1 < src2 (signed)
xslteq32
low32(dst) = low32(src1) <= low32(src2) (signed)
xslteq64
low32(dst) = src1 <= src2 (signed)
xstore8_offset8
*(ptr + offset) = low8(src)
xstore8_offset32
*(ptr + offset) = low8(src)
xstore16be_offset32
*(ptr + offset) = low16(src)
xstore16le_offset8
*(ptr + offset) = low16(src)
xstore16le_offset32
*(ptr + offset) = low16(src)
xstore32be_offset32
*(ptr + offset) = low32(src)
xstore32le_offset8
*(ptr + offset) = low32(src)
xstore32le_offset32
*(ptr + offset) = low32(src)
xstore64be_offset32
*(ptr + offset) = low64(src)
xstore64le_offset8
*(ptr + offset) = low64(src)
xstore64le_offset32
*(ptr + offset) = low64(src)
xsub32
32-bit wrapping subtraction: low32(dst) = low32(src1) - low32(src2).
xsub64
64-bit wrapping subtraction: dst = src1 - src2.
xsub32_u8
Same as xsub32 but src2 is a zero-extended 8-bit immediate.
xsub32_u32
Same as xsub32 but src2 is a 32-bit immediate.
xsub64_u8
Same as xsub64 but src2 is a zero-extended 8-bit immediate.
xsub64_u32
Same as xsub64 but src2 is a zero-extended 32-bit immediate.
xult32
low32(dst) = low32(src1) < low32(src2) (unsigned)
xult64
low32(dst) = src1 < src2 (unsigned)
xulteq32
low32(dst) = low32(src1) <= low32(src2) (unsigned)
xulteq64
low32(dst) = src1 <= src2 (unsigned)
zext8
dst = zext(low8(src))
zext16
dst = zext(low16(src))
zext32
dst = zext(low32(src))
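As referenced at push_frame_save above, here is a rough sketch of what that macro-instruction and its inverse pop_frame_restore are conceptually equivalent to, expressed with the simpler encoders in this list. The encoder signatures are assumptions for illustration, and the register-save stores are only described in comments because their exact layout is an implementation detail.

```rust
use pulley_interpreter::encode;

// Hypothetical expansion of `push_frame_save { amt, regs }`:
//   push lr; push fp; fp = sp        (push_frame)
//   sp = sp.checked_sub(amt)         (stack_alloc32 amt)
//   store each reg in `regs` into the newly allocated space
//
// `pop_frame_restore { amt, regs }` undoes it in reverse:
//   reload each reg in `regs` from the top of the stack
//   sp = sp + amt                    (stack_free32 amt)
//   sp = fp; pop fp; pop lr          (pop_frame)
fn push_frame_save_equivalent(sink: &mut Vec<u8>, amt: u32) {
    encode::push_frame(sink);
    encode::stack_alloc32(sink, amt);
    // ...followed by xstore64le_offset32 instructions that save the chosen
    // registers relative to sp (omitted; the layout is an assumption).
}
```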