pub enum ExtendedOp {
Trap(Trap),
Nop(Nop),
CallIndirectHost(CallIndirectHost),
XmovFp(XmovFp),
XmovLr(XmovLr),
Bswap32(Bswap32),
Bswap64(Bswap64),
Xadd32UoverflowTrap(Xadd32UoverflowTrap),
Xadd64UoverflowTrap(Xadd64UoverflowTrap),
XMulHi64S(XMulHi64S),
XMulHi64U(XMulHi64U),
Xbmask32(Xbmask32),
Xbmask64(Xbmask64),
XPush32(XPush32),
XPush32Many(XPush32Many),
XPush64(XPush64),
XPush64Many(XPush64Many),
XPop32(XPop32),
XPop32Many(XPop32Many),
XPop64(XPop64),
XPop64Many(XPop64Many),
XLoad16BeU64Offset32(XLoad16BeU64Offset32),
XLoad16BeS64Offset32(XLoad16BeS64Offset32),
XLoad32BeU64Offset32(XLoad32BeU64Offset32),
XLoad32BeS64Offset32(XLoad32BeS64Offset32),
XLoad64BeOffset32(XLoad64BeOffset32),
XStore16BeOffset32(XStore16BeOffset32),
XStore32BeOffset32(XStore32BeOffset32),
XStore64BeOffset32(XStore64BeOffset32),
Fload32BeOffset32(Fload32BeOffset32),
Fload64BeOffset32(Fload64BeOffset32),
Fstore32BeOffset32(Fstore32BeOffset32),
Fstore64BeOffset32(Fstore64BeOffset32),
Fload32LeOffset32(Fload32LeOffset32),
Fload64LeOffset32(Fload64LeOffset32),
Fstore32LeOffset32(Fstore32LeOffset32),
Fstore64LeOffset32(Fstore64LeOffset32),
VLoad128Offset32(VLoad128Offset32),
Vstore128LeOffset32(Vstore128LeOffset32),
Fmov(Fmov),
Vmov(Vmov),
BitcastIntFromFloat32(BitcastIntFromFloat32),
BitcastIntFromFloat64(BitcastIntFromFloat64),
BitcastFloatFromInt32(BitcastFloatFromInt32),
BitcastFloatFromInt64(BitcastFloatFromInt64),
FConst32(FConst32),
FConst64(FConst64),
Feq32(Feq32),
Fneq32(Fneq32),
Flt32(Flt32),
Flteq32(Flteq32),
Feq64(Feq64),
Fneq64(Fneq64),
Flt64(Flt64),
Flteq64(Flteq64),
FSelect32(FSelect32),
FSelect64(FSelect64),
F32FromF64(F32FromF64),
F64FromF32(F64FromF32),
F32FromX32S(F32FromX32S),
F32FromX32U(F32FromX32U),
F32FromX64S(F32FromX64S),
F32FromX64U(F32FromX64U),
F64FromX32S(F64FromX32S),
F64FromX32U(F64FromX32U),
F64FromX64S(F64FromX64S),
F64FromX64U(F64FromX64U),
X32FromF32S(X32FromF32S),
X32FromF32U(X32FromF32U),
X32FromF64S(X32FromF64S),
X32FromF64U(X32FromF64U),
X64FromF32S(X64FromF32S),
X64FromF32U(X64FromF32U),
X64FromF64S(X64FromF64S),
X64FromF64U(X64FromF64U),
X32FromF32SSat(X32FromF32SSat),
X32FromF32USat(X32FromF32USat),
X32FromF64SSat(X32FromF64SSat),
X32FromF64USat(X32FromF64USat),
X64FromF32SSat(X64FromF32SSat),
X64FromF32USat(X64FromF32USat),
X64FromF64SSat(X64FromF64SSat),
X64FromF64USat(X64FromF64USat),
FCopySign32(FCopySign32),
FCopySign64(FCopySign64),
Fadd32(Fadd32),
Fsub32(Fsub32),
Fmul32(Fmul32),
Fdiv32(Fdiv32),
Vdivf32x4(Vdivf32x4),
Fmaximum32(Fmaximum32),
Fminimum32(Fminimum32),
Ftrunc32(Ftrunc32),
Vtrunc32x4(Vtrunc32x4),
Vtrunc64x2(Vtrunc64x2),
Ffloor32(Ffloor32),
Vfloor32x4(Vfloor32x4),
Vfloor64x2(Vfloor64x2),
Fceil32(Fceil32),
Vceil32x4(Vceil32x4),
Vceil64x2(Vceil64x2),
Fnearest32(Fnearest32),
Fsqrt32(Fsqrt32),
Vsqrt32x4(Vsqrt32x4),
Vsqrt64x2(Vsqrt64x2),
Fneg32(Fneg32),
Fabs32(Fabs32),
Fadd64(Fadd64),
Fsub64(Fsub64),
Fmul64(Fmul64),
Fdiv64(Fdiv64),
VDivF64x2(VDivF64x2),
Fmaximum64(Fmaximum64),
Fminimum64(Fminimum64),
Ftrunc64(Ftrunc64),
Ffloor64(Ffloor64),
Fceil64(Fceil64),
Fnearest64(Fnearest64),
Vnearest32x4(Vnearest32x4),
Vnearest64x2(Vnearest64x2),
Fsqrt64(Fsqrt64),
Fneg64(Fneg64),
Fabs64(Fabs64),
Vconst128(Vconst128),
VAddI8x16(VAddI8x16),
VAddI16x8(VAddI16x8),
VAddI32x4(VAddI32x4),
VAddI64x2(VAddI64x2),
VAddF32x4(VAddF32x4),
VAddF64x2(VAddF64x2),
VAddI8x16Sat(VAddI8x16Sat),
VAddU8x16Sat(VAddU8x16Sat),
VAddI16x8Sat(VAddI16x8Sat),
VAddU16x8Sat(VAddU16x8Sat),
VAddpairwiseI16x8S(VAddpairwiseI16x8S),
VAddpairwiseI32x4S(VAddpairwiseI32x4S),
VShlI8x16(VShlI8x16),
VShlI16x8(VShlI16x8),
VShlI32x4(VShlI32x4),
VShlI64x2(VShlI64x2),
VShrI8x16S(VShrI8x16S),
VShrI16x8S(VShrI16x8S),
VShrI32x4S(VShrI32x4S),
VShrI64x2S(VShrI64x2S),
VShrI8x16U(VShrI8x16U),
VShrI16x8U(VShrI16x8U),
VShrI32x4U(VShrI32x4U),
VShrI64x2U(VShrI64x2U),
VSplatX8(VSplatX8),
VSplatX16(VSplatX16),
VSplatX32(VSplatX32),
VSplatX64(VSplatX64),
VSplatF32(VSplatF32),
VSplatF64(VSplatF64),
VLoad8x8SOffset32(VLoad8x8SOffset32),
VLoad8x8UOffset32(VLoad8x8UOffset32),
VLoad16x4LeSOffset32(VLoad16x4LeSOffset32),
VLoad16x4LeUOffset32(VLoad16x4LeUOffset32),
VLoad32x2LeSOffset32(VLoad32x2LeSOffset32),
VLoad32x2LeUOffset32(VLoad32x2LeUOffset32),
VBand128(VBand128),
VBor128(VBor128),
VBxor128(VBxor128),
VBnot128(VBnot128),
VBitselect128(VBitselect128),
Vbitmask8x16(Vbitmask8x16),
Vbitmask16x8(Vbitmask16x8),
Vbitmask32x4(Vbitmask32x4),
Vbitmask64x2(Vbitmask64x2),
Valltrue8x16(Valltrue8x16),
Valltrue16x8(Valltrue16x8),
Valltrue32x4(Valltrue32x4),
Valltrue64x2(Valltrue64x2),
Vanytrue8x16(Vanytrue8x16),
Vanytrue16x8(Vanytrue16x8),
Vanytrue32x4(Vanytrue32x4),
Vanytrue64x2(Vanytrue64x2),
VF32x4FromI32x4S(VF32x4FromI32x4S),
VF32x4FromI32x4U(VF32x4FromI32x4U),
VF64x2FromI64x2S(VF64x2FromI64x2S),
VF64x2FromI64x2U(VF64x2FromI64x2U),
VWidenLow8x16S(VWidenLow8x16S),
VWidenLow8x16U(VWidenLow8x16U),
VWidenLow16x8S(VWidenLow16x8S),
VWidenLow16x8U(VWidenLow16x8U),
VWidenLow32x4S(VWidenLow32x4S),
VWidenLow32x4U(VWidenLow32x4U),
VWidenHigh8x16S(VWidenHigh8x16S),
VWidenHigh8x16U(VWidenHigh8x16U),
VWidenHigh16x8S(VWidenHigh16x8S),
VWidenHigh16x8U(VWidenHigh16x8U),
VWidenHigh32x4S(VWidenHigh32x4S),
VWidenHigh32x4U(VWidenHigh32x4U),
Vnarrow16x8S(Vnarrow16x8S),
Vnarrow16x8U(Vnarrow16x8U),
Vnarrow32x4S(Vnarrow32x4S),
Vnarrow32x4U(Vnarrow32x4U),
VFpromoteLow(VFpromoteLow),
VFdemote(VFdemote),
VSubI8x16(VSubI8x16),
VSubI16x8(VSubI16x8),
VSubI32x4(VSubI32x4),
VSubI64x2(VSubI64x2),
VSubF64x2(VSubF64x2),
VSubI8x16Sat(VSubI8x16Sat),
VSubU8x16Sat(VSubU8x16Sat),
VSubI16x8Sat(VSubI16x8Sat),
VSubU16x8Sat(VSubU16x8Sat),
VMulI8x16(VMulI8x16),
VMulI16x8(VMulI16x8),
VMulI32x4(VMulI32x4),
VMulI64x2(VMulI64x2),
VMulF64x2(VMulF64x2),
VQmulrsI16x8(VQmulrsI16x8),
VPopcnt8x16(VPopcnt8x16),
XExtractV8x16(XExtractV8x16),
XExtractV16x8(XExtractV16x8),
XExtractV32x4(XExtractV32x4),
XExtractV64x2(XExtractV64x2),
FExtractV32x4(FExtractV32x4),
FExtractV64x2(FExtractV64x2),
VInsertX8(VInsertX8),
VInsertX16(VInsertX16),
VInsertX32(VInsertX32),
VInsertX64(VInsertX64),
VInsertF32(VInsertF32),
VInsertF64(VInsertF64),
Veq8x16(Veq8x16),
Vneq8x16(Vneq8x16),
Vslt8x16(Vslt8x16),
Vslteq8x16(Vslteq8x16),
Vult8x16(Vult8x16),
Vulteq8x16(Vulteq8x16),
Veq16x8(Veq16x8),
Vneq16x8(Vneq16x8),
Vslt16x8(Vslt16x8),
Vslteq16x8(Vslteq16x8),
Vult16x8(Vult16x8),
Vulteq16x8(Vulteq16x8),
Veq32x4(Veq32x4),
Vneq32x4(Vneq32x4),
Vslt32x4(Vslt32x4),
Vslteq32x4(Vslteq32x4),
Vult32x4(Vult32x4),
Vulteq32x4(Vulteq32x4),
Veq64x2(Veq64x2),
Vneq64x2(Vneq64x2),
Vslt64x2(Vslt64x2),
Vslteq64x2(Vslteq64x2),
Vult64x2(Vult64x2),
Vulteq64x2(Vulteq64x2),
Vneg8x16(Vneg8x16),
Vneg16x8(Vneg16x8),
Vneg32x4(Vneg32x4),
Vneg64x2(Vneg64x2),
VnegF64x2(VnegF64x2),
Vmin8x16S(Vmin8x16S),
Vmin8x16U(Vmin8x16U),
Vmin16x8S(Vmin16x8S),
Vmin16x8U(Vmin16x8U),
Vmax8x16S(Vmax8x16S),
Vmax8x16U(Vmax8x16U),
Vmax16x8S(Vmax16x8S),
Vmax16x8U(Vmax16x8U),
Vmin32x4S(Vmin32x4S),
Vmin32x4U(Vmin32x4U),
Vmax32x4S(Vmax32x4S),
Vmax32x4U(Vmax32x4U),
Vabs8x16(Vabs8x16),
Vabs16x8(Vabs16x8),
Vabs32x4(Vabs32x4),
Vabs64x2(Vabs64x2),
Vabsf32x4(Vabsf32x4),
Vabsf64x2(Vabsf64x2),
Vmaximumf32x4(Vmaximumf32x4),
Vmaximumf64x2(Vmaximumf64x2),
Vminimumf32x4(Vminimumf32x4),
Vminimumf64x2(Vminimumf64x2),
VShuffle(VShuffle),
Vswizzlei8x16(Vswizzlei8x16),
Vavground8x16(Vavground8x16),
Vavground16x8(Vavground16x8),
}
An extended operation/instruction.

These tend to be colder than Ops.
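As a rough orientation sketch (hand-written, not from this page; the import path and payload fields are assumptions), dispatching on a decoded ExtendedOp is an ordinary Rust match:

// import path assumed: use pulley_interpreter::ExtendedOp;
fn step_extended(ext_op: &ExtendedOp) {
    match ext_op {
        ExtendedOp::Trap(_) => { /* raise a trap */ }
        ExtendedOp::Nop(_) => { /* do nothing */ }
        ExtendedOp::CallIndirectHost(_) => { /* yield control back to the host */ }
        _ => { /* the remaining load/store, float, and vector ops */ }
    }
}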
Variants
Trap(Trap)
Raise a trap.
Nop(Nop)
Do nothing.
CallIndirectHost(CallIndirectHost)
A special opcode to halt interpreter execution and yield control back to the host.
This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It’s up to the embedder to determine what to do with the id and the current state of registers and the stack.

In Wasmtime this is used to implement interpreter-to-host calls.

This is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function are in following parameter positions (e.g. x1, x2, …). The results of the host call are then stored in x0.
Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
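A minimal sketch of that calling convention, using hypothetical register accessors (x/set_x) and a hypothetical invoke_host helper; none of these names come from this crate:

fn handle_call_indirect_host(regs: &mut HypotheticalRegs) {
    // First parameter position holds the native function pointer to invoke.
    let func_ptr = regs.x(0);
    // Remaining host-call arguments follow in x1, x2, ...
    let arg1 = regs.x(1);
    let arg2 = regs.x(2);
    // Hypothetical host dispatch; results of the host call are stored in x0.
    let result = invoke_host(func_ptr, &[arg1, arg2]);
    regs.set_x(0, result);
}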
XmovFp(XmovFp)
Gets the special “fp” register and moves it into dst.
XmovLr(XmovLr)
Gets the special “lr” register and moves it into dst.
Bswap32(Bswap32)
dst = byteswap(low32(src))
Bswap64(Bswap64)
dst = byteswap(src)
Xadd32UoverflowTrap(Xadd32UoverflowTrap)
32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified. Traps if the addition overflows.
Xadd64UoverflowTrap(Xadd64UoverflowTrap)
64-bit checked unsigned addition: dst = src1 + src2.

Traps if the addition overflows.
XMulHi64S(XMulHi64S)
dst = high64(src1 * src2)
(signed)
XMulHi64U(XMulHi64U)
dst = high64(src1 * src2)
(unsigned)
Xbmask32(Xbmask32)
low32(dst) = if low32(src) == 0 { 0 } else { -1 }
Xbmask64(Xbmask64)
dst = if src == 0 { 0 } else { -1 }
XPush32(XPush32)
*sp = low32(src); sp = sp.checked_add(4)
XPush32Many(XPush32Many)
for src in srcs { xpush32 src }
XPush64(XPush64)
*sp = src; sp = sp.checked_add(8)
XPush64Many(XPush64Many)
for src in srcs { xpush64 src }
XPop32(XPop32)
*dst = *sp; sp -= 4
XPop32Many(XPop32Many)
for dst in dsts.rev() { xpop32 dst }
XPop64(XPop64)
*dst = *sp; sp -= 8
XPop64Many(XPop64Many)
for dst in dsts.rev() { xpop64 dst }
XLoad16BeU64Offset32(XLoad16BeU64Offset32)
dst = zext(*(ptr + offset))
XLoad16BeS64Offset32(XLoad16BeS64Offset32)
dst = sext(*(ptr + offset))
XLoad32BeU64Offset32(XLoad32BeU64Offset32)
dst = zext(*(ptr + offset))
XLoad32BeS64Offset32(XLoad32BeS64Offset32)
dst = sext(*(ptr + offset))
XLoad64BeOffset32(XLoad64BeOffset32)
dst = *(ptr + offset)
XStore16BeOffset32(XStore16BeOffset32)
*(ptr + offset) = low16(src)
XStore32BeOffset32(XStore32BeOffset32)
*(ptr + offset) = low32(src)
XStore64BeOffset32(XStore64BeOffset32)
*(ptr + offset) = low64(src)
Fload32BeOffset32(Fload32BeOffset32)
low32(dst) = zext(*(ptr + offset))
Fload64BeOffset32(Fload64BeOffset32)
dst = *(ptr + offset)
Fstore32BeOffset32(Fstore32BeOffset32)
*(ptr + offset) = low32(src)
Fstore64BeOffset32(Fstore64BeOffset32)
*(ptr + offset) = src
Fload32LeOffset32(Fload32LeOffset32)
low32(dst) = zext(*(ptr + offset))
Fload64LeOffset32(Fload64LeOffset32)
dst = *(ptr + offset)
Fstore32LeOffset32(Fstore32LeOffset32)
*(ptr + offset) = low32(src)
Fstore64LeOffset32(Fstore64LeOffset32)
*(ptr + offset) = src
VLoad128Offset32(VLoad128Offset32)
dst = *(ptr + offset)
Vstore128LeOffset32(Vstore128LeOffset32)
*(ptr + offset) = src
Fmov(Fmov)
Move between f registers.
Vmov(Vmov)
Move between v registers.
BitcastIntFromFloat32(BitcastIntFromFloat32)
low32(dst) = bitcast low32(src) as i32
BitcastIntFromFloat64(BitcastIntFromFloat64)
dst = bitcast src as i64
BitcastFloatFromInt32(BitcastFloatFromInt32)
low32(dst) = bitcast low32(src) as f32
BitcastFloatFromInt64(BitcastFloatFromInt64)
dst = bitcast src as f64
FConst32(FConst32)
low32(dst) = bits
FConst64(FConst64)
dst = bits
Feq32(Feq32)
low32(dst) = zext(src1 == src2)
Fneq32(Fneq32)
low32(dst) = zext(src1 != src2)
Flt32(Flt32)
low32(dst) = zext(src1 < src2)
Flteq32(Flteq32)
low32(dst) = zext(src1 <= src2)
Feq64(Feq64)
low32(dst) = zext(src1 == src2)
Fneq64(Fneq64)
low32(dst) = zext(src1 != src2)
Flt64(Flt64)
low32(dst) = zext(src1 < src2)
Flteq64(Flteq64)
low32(dst) = zext(src1 <= src2)
FSelect32(FSelect32)
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
FSelect64(FSelect64)
dst = low32(cond) ? if_nonzero : if_zero
F32FromF64(F32FromF64)
low32(dst) = demote(src)
F64FromF32(F64FromF32)
dst = promote(low32(src))
F32FromX32S(F32FromX32S)
low32(dst) = checked_f32_from_signed(low32(src))
F32FromX32U(F32FromX32U)
low32(dst) = checked_f32_from_unsigned(low32(src))
F32FromX64S(F32FromX64S)
low32(dst) = checked_f32_from_signed(src)
F32FromX64U(F32FromX64U)
low32(dst) = checked_f32_from_unsigned(src)
F64FromX32S(F64FromX32S)
dst = checked_f64_from_signed(low32(src))
F64FromX32U(F64FromX32U)
dst = checked_f64_from_unsigned(low32(src))
F64FromX64S(F64FromX64S)
dst = checked_f64_from_signed(src)
F64FromX64U(F64FromX64U)
dst = checked_f64_from_unsigned(src)
X32FromF32S(X32FromF32S)
low32(dst) = checked_signed_from_f32(low32(src))
X32FromF32U(X32FromF32U)
low32(dst) = checked_unsigned_from_f32(low32(src))
X32FromF64S(X32FromF64S)
low32(dst) = checked_signed_from_f64(src)
X32FromF64U(X32FromF64U)
low32(dst) = checked_unsigned_from_f64(src)
X64FromF32S(X64FromF32S)
dst = checked_signed_from_f32(low32(src))
X64FromF32U(X64FromF32U)
dst = checked_unsigned_from_f32(low32(src))
X64FromF64S(X64FromF64S)
dst = checked_signed_from_f64(src)
X64FromF64U(X64FromF64U)
dst = checked_unsigned_from_f64(src)
X32FromF32SSat(X32FromF32SSat)
low32(dst) = saturating_signed_from_f32(low32(src))
X32FromF32USat(X32FromF32USat)
low32(dst) = saturating_unsigned_from_f32(low32(src))
X32FromF64SSat(X32FromF64SSat)
low32(dst) = saturating_signed_from_f64(src)
X32FromF64USat(X32FromF64USat)
low32(dst) = saturating_unsigned_from_f64(src)
X64FromF32SSat(X64FromF32SSat)
dst = saturating_signed_from_f32(low32(src))
X64FromF32USat(X64FromF32USat)
dst = saturating_unsigned_from_f32(low32(src))
X64FromF64SSat(X64FromF64SSat)
dst = saturating_signed_from_f64(src)
X64FromF64USat(X64FromF64USat)
dst = saturating_unsigned_from_f64(src)
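To make the checked vs. saturating distinction concrete, here is a scalar sketch of the X32FromF32S and X32FromF32SSat semantics (an illustration only, not the interpreter's implementation):

// Checked: the interpreter traps on NaN or when the truncated value does not
// fit in i32; modeled here as returning None.
fn x32_from_f32_s_checked(src: f32) -> Option<i32> {
    if src.is_nan() {
        return None;
    }
    let t = src.trunc();
    if t >= -2147483648.0 && t < 2147483648.0 {
        Some(t as i32)
    } else {
        None
    }
}

// Saturating: out-of-range values clamp to i32::MIN / i32::MAX and NaN maps
// to 0; Rust's `as` cast already has exactly these semantics.
fn x32_from_f32_s_sat(src: f32) -> i32 {
    src as i32
}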
FCopySign32(FCopySign32)
low32(dst) = copysign(low32(src1), low32(src2))
FCopySign64(FCopySign64)
dst = copysign(src1, src2)
Fadd32(Fadd32)
low32(dst) = low32(src1) + low32(src2)
Fsub32(Fsub32)
low32(dst) = low32(src1) - low32(src2)
Fmul32(Fmul32)
low32(dst) = low32(src1) * low32(src2)
Fdiv32(Fdiv32)
low32(dst) = low32(src1) / low32(src2)
Vdivf32x4(Vdivf32x4)
low128(dst) = low128(src1) / low128(src2)
Fmaximum32(Fmaximum32)
low32(dst) = ieee_maximum(low32(src1), low32(src2))
Fminimum32(Fminimum32)
low32(dst) = ieee_minimum(low32(src1), low32(src2))
Ftrunc32(Ftrunc32)
low32(dst) = ieee_trunc(low32(src))
Vtrunc32x4(Vtrunc32x4)
low128(dst) = ieee_trunc(low128(src))
Vtrunc64x2(Vtrunc64x2)
low128(dst) = ieee_trunc(low128(src))
Ffloor32(Ffloor32)
low32(dst) = ieee_floor(low32(src))
Vfloor32x4(Vfloor32x4)
low128(dst) = ieee_floor(low128(src))
Vfloor64x2(Vfloor64x2)
low128(dst) = ieee_floor(low128(src))
Fceil32(Fceil32)
low32(dst) = ieee_ceil(low32(src))
Vceil32x4(Vceil32x4)
low128(dst) = ieee_ceil(low128(src))
Vceil64x2(Vceil64x2)
low128(dst) = ieee_ceil(low128(src))
Fnearest32(Fnearest32)
low32(dst) = ieee_nearest(low32(src))
Fsqrt32(Fsqrt32)
low32(dst) = ieee_sqrt(low32(src))
Vsqrt32x4(Vsqrt32x4)
low128(dst) = ieee_sqrt(low128(src))
Vsqrt64x2(Vsqrt64x2)
low128(dst) = ieee_sqrt(low128(src))
Fneg32(Fneg32)
low32(dst) = -low32(src)
Fabs32(Fabs32)
low32(dst) = |low32(src)|
Fadd64(Fadd64)
dst = src1 + src2
Fsub64(Fsub64)
dst = src1 - src2
Fmul64(Fmul64)
dst = src1 * src2
Fdiv64(Fdiv64)
dst = src1 / src2
VDivF64x2(VDivF64x2)
dst = src1 / src2
Fmaximum64(Fmaximum64)
dst = ieee_maximum(src1, src2)
Fminimum64(Fminimum64)
dst = ieee_minimum(src1, src2)
Ftrunc64(Ftrunc64)
dst = ieee_trunc(src)
Ffloor64(Ffloor64)
dst = ieee_floor(src)
Fceil64(Fceil64)
dst = ieee_ceil(src)
Fnearest64(Fnearest64)
dst = ieee_nearest(src)
Vnearest32x4(Vnearest32x4)
low128(dst) = ieee_nearest(low128(src))
Vnearest64x2(Vnearest64x2)
low128(dst) = ieee_nearest(low128(src))
Fsqrt64(Fsqrt64)
dst = ieee_sqrt(src)
Fneg64(Fneg64)
dst = -src
Fabs64(Fabs64)
dst = |src|
Vconst128(Vconst128)
dst = imm
VAddI8x16(VAddI8x16)
dst = src1 + src2
VAddI16x8(VAddI16x8)
dst = src1 + src2
VAddI32x4(VAddI32x4)
dst = src1 + src2
VAddI64x2(VAddI64x2)
dst = src1 + src2
VAddF32x4(VAddF32x4)
dst = src1 + src2
VAddF64x2(VAddF64x2)
dst = src1 + src2
VAddI8x16Sat(VAddI8x16Sat)
dst = saturating_add(src1, src2)
VAddU8x16Sat(VAddU8x16Sat)
dst = saturating_add(src1, src2)
VAddI16x8Sat(VAddI16x8Sat)
dst = saturating_add(src1, src2)
VAddU16x8Sat(VAddU16x8Sat)
dst = saturating_add(src1, src2)
VAddpairwiseI16x8S(VAddpairwiseI16x8S)
dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
VAddpairwiseI32x4S(VAddpairwiseI32x4S)
dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
VShlI8x16(VShlI8x16)
dst = src1 << src2
VShlI16x8(VShlI16x8)
dst = src1 << src2
VShlI32x4(VShlI32x4)
dst = src1 << src2
VShlI64x2(VShlI64x2)
dst = src1 << src2
VShrI8x16S(VShrI8x16S)
dst = src1 >> src2
(signed)
VShrI16x8S(VShrI16x8S)
dst = src1 >> src2
(signed)
VShrI32x4S(VShrI32x4S)
dst = src1 >> src2
(signed)
VShrI64x2S(VShrI64x2S)
dst = src1 >> src2
(signed)
VShrI8x16U(VShrI8x16U)
dst = src1 >> src2
(unsigned)
VShrI16x8U(VShrI16x8U)
dst = src1 >> src2
(unsigned)
VShrI32x4U(VShrI32x4U)
dst = src1 >> src2
(unsigned)
VShrI64x2U(VShrI64x2U)
dst = src1 >> src2
(unsigned)
VSplatX8(VSplatX8)
dst = splat(low8(src))
VSplatX16(VSplatX16)
dst = splat(low16(src))
VSplatX32(VSplatX32)
dst = splat(low32(src))
VSplatX64(VSplatX64)
dst = splat(src)
VSplatF32(VSplatF32)
dst = splat(low32(src))
VSplatF64(VSplatF64)
dst = splat(src)
VLoad8x8SOffset32(VLoad8x8SOffset32)
Load the 64-bit source as i8x8 and sign-extend to i16x8.
VLoad8x8UOffset32(VLoad8x8UOffset32)
Load the 64-bit source as u8x8 and zero-extend to i16x8.
VLoad16x4LeSOffset32(VLoad16x4LeSOffset32)
Load the 64-bit source as i16x4 and sign-extend to i32x4.
VLoad16x4LeUOffset32(VLoad16x4LeUOffset32)
Load the 64-bit source as u16x4 and zero-extend to i32x4.
VLoad32x2LeSOffset32(VLoad32x2LeSOffset32)
Load the 64-bit source as i32x2 and sign-extend to i64x2.
VLoad32x2LeUOffset32(VLoad32x2LeUOffset32)
Load the 64-bit source as u32x2 and zero-extend to i64x2.
VBand128(VBand128)
dst = src1 & src2
VBor128(VBor128)
dst = src1 | src2
VBxor128(VBxor128)
dst = src1 ^ src2
VBnot128(VBnot128)
dst = !src1
VBitselect128(VBitselect128)
dst = (c & x) | (!c & y)
Vbitmask8x16(Vbitmask8x16)
Collect high bits of each lane into the low 32-bits of the destination.
Vbitmask16x8(Vbitmask16x8)
Collect high bits of each lane into the low 32-bits of the destination.
Vbitmask32x4(Vbitmask32x4)
Collect high bits of each lane into the low 32-bits of the destination.
Vbitmask64x2(Vbitmask64x2)
Collect high bits of each lane into the low 32-bits of the destination.
Valltrue8x16(Valltrue8x16)
Store whether all lanes are nonzero in dst.
Valltrue16x8(Valltrue16x8)
Store whether all lanes are nonzero in dst.
Valltrue32x4(Valltrue32x4)
Store whether all lanes are nonzero in dst.
Valltrue64x2(Valltrue64x2)
Store whether all lanes are nonzero in dst.
Vanytrue8x16(Vanytrue8x16)
Store whether any lanes are nonzero in dst.
Vanytrue16x8(Vanytrue16x8)
Store whether any lanes are nonzero in dst.
Vanytrue32x4(Vanytrue32x4)
Store whether any lanes are nonzero in dst.
Vanytrue64x2(Vanytrue64x2)
Store whether any lanes are nonzero in dst.
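A scalar sketch of the bitmask/alltrue/anytrue semantics for the 8x16 lane shape (illustration only; the other lane shapes follow the same pattern):

fn vbitmask8x16(src: [i8; 16]) -> u32 {
    let mut mask = 0u32;
    for (i, lane) in src.iter().enumerate() {
        // Collect the high (sign) bit of each lane into bit i of the result.
        if *lane < 0 {
            mask |= 1 << i;
        }
    }
    mask
}

fn valltrue8x16(src: [i8; 16]) -> u32 {
    src.iter().all(|lane| *lane != 0) as u32
}

fn vanytrue8x16(src: [i8; 16]) -> u32 {
    src.iter().any(|lane| *lane != 0) as u32
}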
VF32x4FromI32x4S(VF32x4FromI32x4S)
Int-to-float conversion (same as f32_from_x32_s)
VF32x4FromI32x4U(VF32x4FromI32x4U)
Int-to-float conversion (same as f32_from_x32_u)
VF64x2FromI64x2S(VF64x2FromI64x2S)
Int-to-float conversion (same as f64_from_x64_s)
VF64x2FromI64x2U(VF64x2FromI64x2U)
Int-to-float conversion (same as f64_from_x64_u)
VWidenLow8x16S(VWidenLow8x16S)
Widens the low lanes of the input vector, as signed, to twice the width.
VWidenLow8x16U(VWidenLow8x16U)
Widens the low lanes of the input vector, as unsigned, to twice the width.
VWidenLow16x8S(VWidenLow16x8S)
Widens the low lanes of the input vector, as signed, to twice the width.
VWidenLow16x8U(VWidenLow16x8U)
Widens the low lanes of the input vector, as unsigned, to twice the width.
VWidenLow32x4S(VWidenLow32x4S)
Widens the low lanes of the input vector, as signed, to twice the width.
VWidenLow32x4U(VWidenLow32x4U)
Widens the low lanes of the input vector, as unsigned, to twice the width.
VWidenHigh8x16S(VWidenHigh8x16S)
Widens the high lanes of the input vector, as signed, to twice the width.
VWidenHigh8x16U(VWidenHigh8x16U)
Widens the high lanes of the input vector, as unsigned, to twice the width.
VWidenHigh16x8S(VWidenHigh16x8S)
Widens the high lanes of the input vector, as signed, to twice the width.
VWidenHigh16x8U(VWidenHigh16x8U)
Widens the high lanes of the input vector, as unsigned, to twice the width.
VWidenHigh32x4S(VWidenHigh32x4S)
Widens the high lanes of the input vector, as signed, to twice the width.
VWidenHigh32x4U(VWidenHigh32x4U)
Widens the high lanes of the input vector, as unsigned, to twice the width.
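For the widening ops, a scalar sketch of VWidenLow8x16S (illustration only; the unsigned and high-lane variants differ only in the cast used and the half of the input selected):

fn vwiden_low8x16_s(src: [i8; 16]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for i in 0..8 {
        // Low eight lanes, sign-extended to 16 bits.
        out[i] = src[i] as i16;
    }
    out
}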
Vnarrow16x8S(Vnarrow16x8S)
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Vnarrow16x8U(Vnarrow16x8U)
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Vnarrow32x4S(Vnarrow32x4S)
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Vnarrow32x4U(Vnarrow32x4U)
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
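A matching sketch of Vnarrow16x8S, clamping each signed 16-bit lane into the signed 8-bit range (that src1 fills the low half of the result and src2 the high half is an assumption about lane order):

fn vnarrow16x8_s(src1: [i16; 8], src2: [i16; 8]) -> [i8; 16] {
    let mut out = [0i8; 16];
    for i in 0..8 {
        // Signed, saturating narrowing of each lane.
        out[i] = src1[i].clamp(i8::MIN as i16, i8::MAX as i16) as i8;
        out[i + 8] = src2[i].clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    }
    out
}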
VFpromoteLow(VFpromoteLow)
Promotes the low two lanes of the f32x4 input to f64x2.
VFdemote(VFdemote)
Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
VSubI8x16(VSubI8x16)
dst = src1 - src2
VSubI16x8(VSubI16x8)
dst = src1 - src2
VSubI32x4(VSubI32x4)
dst = src1 - src2
VSubI64x2(VSubI64x2)
dst = src1 - src2
VSubF64x2(VSubF64x2)
dst = src1 - src2
VSubI8x16Sat(VSubI8x16Sat)
dst = saturating_sub(src1, src2)
VSubU8x16Sat(VSubU8x16Sat)
dst = saturating_sub(src1, src2)
VSubI16x8Sat(VSubI16x8Sat)
dst = saturating_sub(src1, src2)
VSubU16x8Sat(VSubU16x8Sat)
dst = saturating_sub(src1, src2)
VMulI8x16(VMulI8x16)
dst = src1 * src2
VMulI16x8(VMulI16x8)
dst = src1 * src2
VMulI32x4(VMulI32x4)
dst = src1 * src2
VMulI64x2(VMulI64x2)
dst = src1 * src2
VMulF64x2(VMulF64x2)
dst = src1 * src2
VQmulrsI16x8(VQmulrsI16x8)
dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
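Taking Q to be 15 for 16-bit lanes, a single lane of VQmulrsI16x8 can be sketched as the usual Q15 rounding multiply with signed saturation (illustration only):

fn vqmulrs_i16_lane(a: i16, b: i16) -> i16 {
    let product = a as i32 * b as i32;
    // Add 1 << (Q - 1) for rounding, then shift right by Q.
    let rounded = (product + (1 << 14)) >> 15;
    // Signed saturation back into the i16 lane.
    rounded.clamp(i16::MIN as i32, i16::MAX as i32) as i16
}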
VPopcnt8x16(VPopcnt8x16)
dst = count_ones(src)
XExtractV8x16(XExtractV8x16)
low32(dst) = zext(src[lane])
XExtractV16x8(XExtractV16x8)
low32(dst) = zext(src[lane])
XExtractV32x4(XExtractV32x4)
low32(dst) = src[lane]
XExtractV64x2(XExtractV64x2)
dst = src[lane]
FExtractV32x4(FExtractV32x4)
low32(dst) = src[lane]
FExtractV64x2(FExtractV64x2)
dst = src[lane]
VInsertX8(VInsertX8)
dst = src1; dst[lane] = src2
VInsertX16(VInsertX16)
dst = src1; dst[lane] = src2
VInsertX32(VInsertX32)
dst = src1; dst[lane] = src2
VInsertX64(VInsertX64)
dst = src1; dst[lane] = src2
VInsertF32(VInsertF32)
dst = src1; dst[lane] = src2
VInsertF64(VInsertF64)
dst = src1; dst[lane] = src2
Veq8x16(Veq8x16)
dst = src == dst
Vneq8x16(Vneq8x16)
dst = src != dst
Vslt8x16(Vslt8x16)
dst = src < dst
(signed)
Vslteq8x16(Vslteq8x16)
dst = src <= dst
(signed)
Vult8x16(Vult8x16)
dst = src < dst
(unsigned)
Vulteq8x16(Vulteq8x16)
dst = src <= dst
(unsigned)
Veq16x8(Veq16x8)
dst = src == dst
Vneq16x8(Vneq16x8)
dst = src != dst
Vslt16x8(Vslt16x8)
dst = src < dst
(signed)
Vslteq16x8(Vslteq16x8)
dst = src <= dst
(signed)
Vult16x8(Vult16x8)
dst = src < dst
(unsigned)
Vulteq16x8(Vulteq16x8)
dst = src <= dst
(unsigned)
Veq32x4(Veq32x4)
dst = src == dst
Vneq32x4(Vneq32x4)
dst = src != dst
Vslt32x4(Vslt32x4)
dst = src < dst
(signed)
Vslteq32x4(Vslteq32x4)
dst = src <= dst
(signed)
Vult32x4(Vult32x4)
dst = src < dst
(unsigned)
Vulteq32x4(Vulteq32x4)
dst = src <= dst
(unsigned)
Veq64x2(Veq64x2)
dst = src == dst
Vneq64x2(Vneq64x2)
dst = src != dst
Vslt64x2(Vslt64x2)
dst = src < dst
(signed)
Vslteq64x2(Vslteq64x2)
dst = src <= dst
(signed)
Vult64x2(Vult64x2)
dst = src < dst
(unsigned)
Vulteq64x2(Vulteq64x2)
dst = src <= dst
(unsigned)
Vneg8x16(Vneg8x16)
dst = -src
Vneg16x8(Vneg16x8)
dst = -src
Vneg32x4(Vneg32x4)
dst = -src
Vneg64x2(Vneg64x2)
dst = -src
VnegF64x2(VnegF64x2)
dst = -src
Vmin8x16S(Vmin8x16S)
dst = min(src1, src2)
(signed)
Vmin8x16U(Vmin8x16U)
dst = min(src1, src2)
(unsigned)
Vmin16x8S(Vmin16x8S)
dst = min(src1, src2)
(signed)
Vmin16x8U(Vmin16x8U)
dst = min(src1, src2)
(unsigned)
Vmax8x16S(Vmax8x16S)
dst = max(src1, src2)
(signed)
Vmax8x16U(Vmax8x16U)
dst = max(src1, src2)
(unsigned)
Vmax16x8S(Vmax16x8S)
dst = max(src1, src2)
(signed)
Vmax16x8U(Vmax16x8U)
dst = max(src1, src2)
(unsigned)
Vmin32x4S(Vmin32x4S)
dst = min(src1, src2)
(signed)
Vmin32x4U(Vmin32x4U)
dst = min(src1, src2)
(unsigned)
Vmax32x4S(Vmax32x4S)
dst = max(src1, src2)
(signed)
Vmax32x4U(Vmax32x4U)
dst = max(src1, src2)
(unsigned)
Vabs8x16(Vabs8x16)
dst = |src|
Vabs16x8(Vabs16x8)
dst = |src|
Vabs32x4(Vabs32x4)
dst = |src|
Vabs64x2(Vabs64x2)
dst = |src|
Vabsf32x4(Vabsf32x4)
dst = |src|
Vabsf64x2(Vabsf64x2)
dst = |src|
Vmaximumf32x4(Vmaximumf32x4)
dst = ieee_maximum(src1, src2)
Vmaximumf64x2(Vmaximumf64x2)
dst = ieee_maximum(src1, src2)
Vminimumf32x4(Vminimumf32x4)
dst = ieee_minimum(src1, src2)
Vminimumf64x2(Vminimumf64x2)
dst = ieee_minimum(src1, src2)
VShuffle(VShuffle)
dst = shuffle(src1, src2, mask)
Vswizzlei8x16(Vswizzlei8x16)
dst = swizzle(src1, src2)
Vavground8x16(Vavground8x16)
dst = (src1 + src2 + 1) // 2
Vavground16x8(Vavground16x8)
dst = (src1 + src2 + 1) // 2
Implementations

Trait Implementations

impl<'arbitrary> Arbitrary<'arbitrary> for ExtendedOp

fn arbitrary(u: &mut Unstructured<'arbitrary>) -> Result<Self>
Generate an arbitrary value of Self from the given unstructured data.

fn arbitrary_take_rest(u: Unstructured<'arbitrary>) -> Result<Self>
Generate an arbitrary value of Self from the entirety of the given unstructured data.

fn size_hint(depth: usize) -> (usize, Option<usize>)
Get a size hint for how many bytes out of an Unstructured this type needs to construct itself.

fn try_size_hint(depth: usize) -> Result<(usize, Option<usize>), MaxRecursionReached>
Get a size hint for how many bytes out of an Unstructured this type needs to construct itself.
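As a rough usage sketch (hand-written, not from this page; the import path and input bytes are assumptions), the Arbitrary impl can be driven from raw bytes, for example inside a fuzz target:

use arbitrary::{Arbitrary, Unstructured};
use pulley_interpreter::ExtendedOp; // import path assumed

fn gen_extended_op(raw: &[u8]) -> arbitrary::Result<ExtendedOp> {
    let mut u = Unstructured::new(raw);
    ExtendedOp::arbitrary(&mut u)
}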
impl Clone for ExtendedOp

fn clone(&self) -> ExtendedOp

fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.