Enum ExtendedOpcode

#[repr(u16)]
pub enum ExtendedOpcode {
    Trap = 0,
    Nop = 1,
    CallIndirectHost = 2,
    // … 296 more variants, each listed with its discriminant in the
    // “Variants” section below.
}

An extended opcode.

Variants§

§

Trap = 0

Raise a trap.

§

Nop = 1

Do nothing.

§

CallIndirectHost = 2

A special opcode to halt interpreter execution and yield control back to the host.

This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It’s up to the embedder to determine what to do with the id and the current state of registers and the stack.

In Wasmtime this is used to implement interpreter-to-host calls. It is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function follow in subsequent parameter positions (e.g. x1, x2, …). The results of the host call are then stored in x0.

Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
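
The embedder-side flow can be sketched as follows. The Regs and host_fns types below are hypothetical stand-ins invented for illustration; only the DoneReason::CallIndirectHost shape and the x0/x1/x2 register convention come from the description above.

// Hypothetical stand-ins; the real `Vm`/`DoneReason` types in this crate differ.
enum DoneReason {
    CallIndirectHost { id: u8 },
    Returned,
}

struct Regs {
    x: [u64; 16], // x1, x2, ... carry arguments; x0 receives the result
}

fn handle_done(done: DoneReason, regs: &mut Regs, host_fns: &[fn(&[u64]) -> u64]) {
    match done {
        // The embedder decides what `id` means; here it indexes a table of
        // native functions whose arguments were placed in x1, x2, ...
        DoneReason::CallIndirectHost { id } => {
            regs.x[0] = host_fns[id as usize](&regs.x[1..]); // result lands in x0
        }
        DoneReason::Returned => {}
    }
}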

§

XmovFp = 3

Gets the special “fp” register and moves it into dst.

§

XmovLr = 4

Gets the special “lr” register and moves it into dst.

§

Bswap32 = 5

dst = byteswap(low32(src))

§

Bswap64 = 6

dst = byteswap(src)
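
As a scalar reference point, the two byte swaps correspond to Rust's swap_bytes. This is a sketch of the semantics, not the interpreter's implementation; zero-extending the 32-bit result follows the dst = byteswap(low32(src)) reading above.

fn bswap32(src: u64) -> u64 {
    (src as u32).swap_bytes() as u64 // swap the low 32 bits, zero-extend
}

fn bswap64(src: u64) -> u64 {
    src.swap_bytes() // swap all 8 bytes
}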

§

Xadd32UoverflowTrap = 7

32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified. Traps if the addition overflows.

§

Xadd64UoverflowTrap = 8

64-bit checked unsigned addition: dst = src1 + src2.

Traps if the addition overflows.

§

XMulHi64S = 9

dst = high64(src1 * src2) (signed)

§

XMulHi64U = 10

dst = high64(src1 * src2) (unsigned)

§

Xbmask32 = 11

low32(dst) = if low32(src) == 0 { 0 } else { -1 }

§

Xbmask64 = 12

dst = if src == 0 { 0 } else { -1 }
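
A scalar sketch of the two bmask ops; -1 is the all-ones pattern, so the unsigned equivalents below use MAX.

fn xbmask32(src: u64) -> u32 {
    if src as u32 == 0 { 0 } else { u32::MAX } // tests low32(src)
}

fn xbmask64(src: u64) -> u64 {
    if src == 0 { 0 } else { u64::MAX }
}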

§

XLoad16BeU64Offset32 = 13

dst = zext(*(ptr + offset))

§

XLoad16BeS64Offset32 = 14

dst = sext(*(ptr + offset))

§

XLoad32BeU64Offset32 = 15

dst = zext(*(ptr + offset))

§

XLoad32BeS64Offset32 = 16

dst = sext(*(ptr + offset))

§

XLoad64BeOffset32 = 17

dst = *(ptr + offset)

§

XStore16BeOffset32 = 18

*(ptr + offset) = low16(src)

§

XStore32BeOffset32 = 19

*(ptr + offset) = low32(src)

§

XStore64BeOffset32 = 20

*(ptr + offset) = low64(src)
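
The Be loads and stores above read and write big-endian memory. A sketch of the 32-bit case over a byte slice rather than a raw ptr + offset address:

fn xload32_be_u64(mem: &[u8], addr: usize) -> u64 {
    let bytes: [u8; 4] = mem[addr..addr + 4].try_into().unwrap();
    u32::from_be_bytes(bytes) as u64 // zext, as in XLoad32BeU64Offset32
}

fn xstore32_be(mem: &mut [u8], addr: usize, src: u64) {
    // Store the low 32 bits of `src`, most-significant byte first.
    mem[addr..addr + 4].copy_from_slice(&(src as u32).to_be_bytes());
}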

§

Fload32BeOffset32 = 21

low32(dst) = zext(*(ptr + offset))

§

Fload64BeOffset32 = 22

dst = *(ptr + offset)

§

Fstore32BeOffset32 = 23

*(ptr + offset) = low32(src)

§

Fstore64BeOffset32 = 24

*(ptr + offset) = src

§

Fload32LeOffset32 = 25

low32(dst) = zext(*(ptr + offset))

§

Fload64LeOffset32 = 26

dst = *(ptr + offset)

§

Fstore32LeOffset32 = 27

*(ptr + offset) = low32(src)

§

Fstore64LeOffset32 = 28

*(ptr + offset) = src

§

VLoad128Offset32 = 29

dst = *(ptr + offset)

§

Vstore128LeOffset32 = 30

*(ptr + offset) = src

§

Fmov = 31

Move between f registers.

§

Vmov = 32

Move between v registers.

§

BitcastIntFromFloat32 = 33

low32(dst) = bitcast low32(src) as i32

§

BitcastIntFromFloat64 = 34

dst = bitcast src as i64

§

BitcastFloatFromInt32 = 35

low32(dst) = bitcast low32(src) as f32

§

BitcastFloatFromInt64 = 36

dst = bitcast src as f64

§

FConst32 = 37

low32(dst) = bits

§

FConst64 = 38

dst = bits

§

Feq32 = 39

low32(dst) = zext(src1 == src2)

§

Fneq32 = 40

low32(dst) = zext(src1 != src2)

§

Flt32 = 41

low32(dst) = zext(src1 < src2)

§

Flteq32 = 42

low32(dst) = zext(src1 <= src2)

§

Feq64 = 43

low32(dst) = zext(src1 == src2)

§

Fneq64 = 44

low32(dst) = zext(src1 != src2)

§

Flt64 = 45

low32(dst) = zext(src1 < src2)

§

Flteq64 = 46

low32(dst) = zext(src1 <= src2)

§

FSelect32 = 47

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

§

FSelect64 = 48

dst = low32(cond) ? if_nonzero : if_zero

§

F32FromF64 = 49

low32(dst) = demote(src)

§

F64FromF32 = 50

dst = promote(low32(src))

§

F32FromX32S = 51

low32(dst) = checked_f32_from_signed(low32(src))

§

F32FromX32U = 52

low32(dst) = checked_f32_from_unsigned(low32(src))

§

F32FromX64S = 53

low32(dst) = checked_f32_from_signed(src)

§

F32FromX64U = 54

low32(dst) = checked_f32_from_unsigned(src)

§

F64FromX32S = 55

dst = checked_f64_from_signed(low32(src))

§

F64FromX32U = 56

dst = checked_f64_from_unsigned(low32(src))

§

F64FromX64S = 57

dst = checked_f64_from_signed(src)

§

F64FromX64U = 58

dst = checked_f64_from_unsigned(src)

§

X32FromF32S = 59

low32(dst) = checked_signed_from_f32(low32(src))

§

X32FromF32U = 60

low32(dst) = checked_unsigned_from_f32(low32(src))

§

X32FromF64S = 61

low32(dst) = checked_signed_from_f64(src)

§

X32FromF64U = 62

low32(dst) = checked_unsigned_from_f64(src)

§

X64FromF32S = 63

dst = checked_signed_from_f32(low32(src))

§

X64FromF32U = 64

dst = checked_unsigned_from_f32(low32(src))

§

X64FromF64S = 65

dst = checked_signed_from_f64(src)

§

X64FromF64U = 66

dst = checked_unsigned_from_f64(src)

§

X32FromF32SSat = 67

low32(dst) = saturating_signed_from_f32(low32(src))

§

X32FromF32USat = 68

low32(dst) = saturating_unsigned_from_f32(low32(src))

§

X32FromF64SSat = 69

low32(dst) = saturating_signed_from_f64(src)

§

X32FromF64USat = 70

low32(dst) = saturating_unsigned_from_f64(src)

§

X64FromF32SSat = 71

dst = saturating_signed_from_f32(low32(src))

§

X64FromF32USat = 72

dst = saturating_unsigned_from_f32(low32(src))

§

X64FromF64SSat = 73

dst = saturating_signed_from_f64(src)

§

X64FromF64USat = 74

dst = saturating_unsigned_from_f64(src)
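
The difference between the checked and saturating families, sketched for the f32-to-i32 case. The checked form is modeled here as an Option; in the interpreter the checked opcode presumably traps instead, matching Wasm's trapping conversions.

fn x32_from_f32_s_checked(src: f32) -> Option<i32> {
    // NaN and out-of-range values have no valid signed 32-bit result.
    if src.is_nan() || src < -2147483648.0 || src >= 2147483648.0 {
        None // the checked opcode raises a trap here
    } else {
        Some(src as i32)
    }
}

fn x32_from_f32_s_sat(src: f32) -> i32 {
    // Rust's `as` cast already saturates (NaN becomes 0), matching the *Sat ops.
    src as i32
}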

§

FCopySign32 = 75

low32(dst) = copysign(low32(src1), low32(src2))

§

FCopySign64 = 76

dst = copysign(src1, src2)

§

Fadd32 = 77

low32(dst) = low32(src1) + low32(src2)

§

Fsub32 = 78

low32(dst) = low32(src1) - low32(src2)

§

Vsubf32x4 = 79

low128(dst) = low128(src1) - low128(src2)

§

Fmul32 = 80

low32(dst) = low32(src1) * low32(src2)

§

Vmulf32x4 = 81

low128(dst) = low128(src1) * low128(src2)

§

Fdiv32 = 82

low32(dst) = low32(src1) / low32(src2)

§

Vdivf32x4 = 83

low128(dst) = low128(src1) / low128(src2)

§

Fmaximum32 = 84

low32(dst) = ieee_maximum(low32(src1), low32(src2))

§

Fminimum32 = 85

low32(dst) = ieee_minimum(low32(src1), low32(src2))

§

Ftrunc32 = 86

low32(dst) = ieee_trunc(low32(src))

§

Vtrunc32x4 = 87

low128(dst) = ieee_trunc(low128(src))

§

Vtrunc64x2 = 88

low128(dst) = ieee_trunc(low128(src))

§

Ffloor32 = 89

low32(dst) = ieee_floor(low32(src))

§

Vfloor32x4 = 90

low128(dst) = ieee_floor(low128(src))

§

Vfloor64x2 = 91

low128(dst) = ieee_floor(low128(src))

§

Fceil32 = 92

low32(dst) = ieee_ceil(low32(src))

§

Vceil32x4 = 93

low128(dst) = ieee_ceil(low128(src))

§

Vceil64x2 = 94

low128(dst) = ieee_ceil(low128(src))

§

Fnearest32 = 95

low32(dst) = ieee_nearest(low32(src))

§

Fsqrt32 = 96

low32(dst) = ieee_sqrt(low32(src))

§

Vsqrt32x4 = 97

low128(dst) = ieee_sqrt(low128(src))

§

Vsqrt64x2 = 98

low128(dst) = ieee_sqrt(low128(src))

§

Fneg32 = 99

low32(dst) = -low32(src)

§

Vnegf32x4 = 100

low128(dst) = -low128(src)

§

Fabs32 = 101

low32(dst) = |low32(src)|

§

Fadd64 = 102

dst = src1 + src2

§

Fsub64 = 103

dst = src1 - src2

§

Fmul64 = 104

dst = src1 * src2

§

Fdiv64 = 105

dst = src1 / src2

§

VDivF64x2 = 106

dst = src1 / src2

§

Fmaximum64 = 107

dst = ieee_maximum(src1, src2)

§

Fminimum64 = 108

dst = ieee_minimum(src1, src2)

§

Ftrunc64 = 109

dst = ieee_trunc(src)

§

Ffloor64 = 110

dst = ieee_floor(src)

§

Fceil64 = 111

dst = ieee_ceil(src)

§

Fnearest64 = 112

dst = ieee_nearest(src)

§

Vnearest32x4 = 113

low128(dst) = ieee_nearest(low128(src))

§

Vnearest64x2 = 114

low128(dst) = ieee_nearest(low128(src))

§

Fsqrt64 = 115

dst = ieee_sqrt(src)

§

Fneg64 = 116

dst = -src

§

Fabs64 = 117

dst = |src|

§

Vconst128 = 118

dst = imm

§

VAddI8x16 = 119

dst = src1 + src2

§

VAddI16x8 = 120

dst = src1 + src2

§

VAddI32x4 = 121

dst = src1 + src2

§

VAddI64x2 = 122

dst = src1 + src2

§

VAddF32x4 = 123

dst = src1 + src2

§

VAddF64x2 = 124

dst = src1 + src2

§

VAddI8x16Sat = 125

dst = saturating_add(src1, src2)

§

VAddU8x16Sat = 126

dst = saturating_add(src1, src2)

§

VAddI16x8Sat = 127

dst = saturating_add(src1, src2)

§

VAddU16x8Sat = 128

dst = saturating_add(src1, src2)

§

VAddpairwiseI16x8S = 129

dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]

§

VAddpairwiseI32x4S = 130

dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]

§

VShlI8x16 = 131

dst = src1 << src2

§

VShlI16x8 = 132

dst = src1 << src2

§

VShlI32x4 = 133

dst = src1 << src2

§

VShlI64x2 = 134

dst = src1 << src2

§

VShrI8x16S = 135

dst = src1 >> src2 (signed)

§

VShrI16x8S = 136

dst = src1 >> src2 (signed)

§

VShrI32x4S = 137

dst = src1 >> src2 (signed)

§

VShrI64x2S = 138

dst = src1 >> src2 (signed)

§

VShrI8x16U = 139

dst = src1 >> src2 (unsigned)

§

VShrI16x8U = 140

dst = src1 >> src2 (unsigned)

§

VShrI32x4U = 141

dst = src1 >> src2 (unsigned)

§

VShrI64x2U = 142

dst = src1 >> src2 (unsigned)

§

VSplatX8 = 143

dst = splat(low8(src))

§

VSplatX16 = 144

dst = splat(low16(src))

§

VSplatX32 = 145

dst = splat(low32(src))

§

VSplatX64 = 146

dst = splat(src)

§

VSplatF32 = 147

dst = splat(low32(src))

§

VSplatF64 = 148

dst = splat(src)

§

VLoad8x8SOffset32 = 149

Load the 64-bit source as i8x8 and sign-extend to i16x8.

§

VLoad8x8UOffset32 = 150

Load the 64-bit source as u8x8 and zero-extend to i16x8.

§

VLoad16x4LeSOffset32 = 151

Load the 64-bit source as i16x4 and sign-extend to i32x4.

§

VLoad16x4LeUOffset32 = 152

Load the 64-bit source as u16x4 and zero-extend to i32x4.

§

VLoad32x2LeSOffset32 = 153

Load the 64-bit source as i32x2 and sign-extend to i64x2.

§

VLoad32x2LeUOffset32 = 154

Load the 64-bit source as u32x2 and zero-extend to i64x2.

§

VBand128 = 155

dst = src1 & src2

§

VBor128 = 156

dst = src1 | src2

§

VBxor128 = 157

dst = src1 ^ src2

§

VBnot128 = 158

dst = !src1

§

VBitselect128 = 159

dst = (c & x) | (!c & y)
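
The same identity on plain integers: each result bit comes from x where the mask bit in c is 1 and from y where it is 0. The opcode applies this across all 128 bits.

fn bitselect128(c: u128, x: u128, y: u128) -> u128 {
    (c & x) | (!c & y)
}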

§

Vbitmask8x16 = 160

Collect high bits of each lane into the low 32-bits of the destination.

§

Vbitmask16x8 = 161

Collect high bits of each lane into the low 32-bits of the destination.

§

Vbitmask32x4 = 162

Collect high bits of each lane into the low 32-bits of the destination.

§

Vbitmask64x2 = 163

Collect high bits of each lane into the low 32-bits of the destination.
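
A sketch of the 8x16 case: the high (sign) bit of each of the 16 lanes becomes one bit of the result, which is zero-extended into the low 32 bits of dst.

fn vbitmask8x16(lanes: [u8; 16]) -> u32 {
    let mut mask = 0u32;
    for (i, lane) in lanes.iter().enumerate() {
        mask |= u32::from(lane >> 7) << i; // lane's top bit -> bit i
    }
    mask
}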

§

Valltrue8x16 = 164

Store whether all lanes are nonzero in dst.

§

Valltrue16x8 = 165

Store whether all lanes are nonzero in dst.

§

Valltrue32x4 = 166

Store whether all lanes are nonzero in dst.

§

Valltrue64x2 = 167

Store whether all lanes are nonzero in dst.

§

Vanytrue8x16 = 168

Store whether any lanes are nonzero in dst.

§

Vanytrue16x8 = 169

Store whether any lanes are nonzero in dst.

§

Vanytrue32x4 = 170

Store whether any lanes are nonzero in dst.

§

Vanytrue64x2 = 171

Store whether any lanes are nonzero in dst.

§

VF32x4FromI32x4S = 172

Int-to-float conversion (same as f32_from_x32_s)

§

VF32x4FromI32x4U = 173

Int-to-float conversion (same as f32_from_x32_u)

§

VF64x2FromI64x2S = 174

Int-to-float conversion (same as f64_from_x64_s)

§

VF64x2FromI64x2U = 175

Int-to-float conversion (same as f64_from_x64_u)

§

VI32x4FromF32x4S = 176

Float-to-int conversion (same as x32_from_f32_s)

§

VI32x4FromF32x4U = 177

Float-to-int conversion (same as x32_from_f32_u)

§

VI64x2FromF64x2S = 178

Float-to-int conversion (same as x64_from_f64_s)

§

VI64x2FromF64x2U = 179

Float-to-int conversion (same as x64_from_f64_u)

§

VWidenLow8x16S = 180

Widens the low lanes of the input vector, as signed, to twice the width.

§

VWidenLow8x16U = 181

Widens the low lanes of the input vector, as unsigned, to twice the width.

§

VWidenLow16x8S = 182

Widens the low lanes of the input vector, as signed, to twice the width.

§

VWidenLow16x8U = 183

Widens the low lanes of the input vector, as unsigned, to twice the width.

§

VWidenLow32x4S = 184

Widens the low lanes of the input vector, as signed, to twice the width.

§

VWidenLow32x4U = 185

Widens the low lanes of the input vector, as unsigned, to twice the width.

§

VWidenHigh8x16S = 186

Widens the high lanes of the input vector, as signed, to twice the width.

§

VWidenHigh8x16U = 187

Widens the high lanes of the input vector, as unsigned, to twice the width.

§

VWidenHigh16x8S = 188

Widens the high lanes of the input vector, as signed, to twice the width.

§

VWidenHigh16x8U = 189

Widens the high lanes of the input vector, as unsigned, to twice the width.

§

VWidenHigh32x4S = 190

Widens the high lanes of the input vector, as signed, to twice the width.

§

VWidenHigh32x4U = 191

Widens the high lanes of the input vector, as unsigned, to twice the width.

§

Vnarrow16x8S = 192

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

§

Vnarrow16x8U = 193

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

§

Vnarrow32x4S = 194

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

§

Vnarrow32x4U = 195

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

§

Vnarrow64x2S = 196

Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

§

Vnarrow64x2U = 197

Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

§

Vunarrow64x2U = 198

Narrows the two 64x2 vectors, assuming all input lanes are unsigned, to half the width. Narrowing is unsigned and saturating.
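
A per-lane sketch of one narrowing family (16-bit lanes to 8-bit lanes); the real ops apply this across both input vectors to produce the full-width result.

fn narrow_16_to_8_s(lane: i16) -> i8 {
    // Signed saturating narrow (Vnarrow16x8S).
    lane.clamp(i8::MIN as i16, i8::MAX as i16) as i8
}

fn narrow_16_to_8_u(lane: i16) -> u8 {
    // Unsigned saturating narrow of signed input (Vnarrow16x8U):
    // negative lanes clamp to 0.
    lane.clamp(0, u8::MAX as i16) as u8
}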

§

VFpromoteLow = 199

Promotes the low two lanes of the f32x4 input to f64x2.

§

VFdemote = 200

Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.

§

VSubI8x16 = 201

dst = src1 - src2

§

VSubI16x8 = 202

dst = src1 - src2

§

VSubI32x4 = 203

dst = src1 - src2

§

VSubI64x2 = 204

dst = src1 - src2

§

VSubF64x2 = 205

dst = src1 - src2

§

VSubI8x16Sat = 206

dst = saturating_sub(src1, src2)

§

VSubU8x16Sat = 207

dst = saturating_sub(src1, src2)

§

VSubI16x8Sat = 208

dst = saturating_sub(src1, src2)

§

VSubU16x8Sat = 209

dst = saturating_sub(src1, src2)

§

VMulI8x16 = 210

dst = src1 * src2

§

VMulI16x8 = 211

dst = src1 * src2

§

VMulI32x4 = 212

dst = src1 * src2

§

VMulI64x2 = 213

dst = src1 * src2

§

VMulF64x2 = 214

dst = src1 * src2

§

VQmulrsI16x8 = 215

dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
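
A per-lane sketch with Q = 15 for i16 lanes, the classic fixed-point rounding multiply; i16::MIN * i16::MIN is the one case that must saturate.

fn qmulrs_i16(a: i16, b: i16) -> i16 {
    let product = i32::from(a) * i32::from(b);
    let rounded = (product + (1 << 14)) >> 15; // add 1 << (Q - 1), shift by Q
    rounded.clamp(i32::from(i16::MIN), i32::from(i16::MAX)) as i16
}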

§

VPopcnt8x16 = 216

dst = count_ones(src)

§

XExtractV8x16 = 217

low32(dst) = zext(src[lane])

§

XExtractV16x8 = 218

low32(dst) = zext(src[lane])

§

XExtractV32x4 = 219

low32(dst) = src[lane]

§

XExtractV64x2 = 220

dst = src[lane]

§

FExtractV32x4 = 221

low32(dst) = src[lane]

§

FExtractV64x2 = 222

dst = src[lane]

§

VInsertX8 = 223

dst = src1; dst[lane] = src2

§

VInsertX16 = 224

dst = src1; dst[lane] = src2

§

VInsertX32 = 225

dst = src1; dst[lane] = src2

§

VInsertX64 = 226

dst = src1; dst[lane] = src2

§

VInsertF32 = 227

dst = src1; dst[lane] = src2

§

VInsertF64 = 228

dst = src1; dst[lane] = src2

§

Veq8x16 = 229

dst = src == dst

§

Vneq8x16 = 230

dst = src != dst

§

Vslt8x16 = 231

dst = src < dst (signed)

§

Vslteq8x16 = 232

dst = src <= dst (signed)

§

Vult8x16 = 233

dst = src < dst (unsigned)

§

Vulteq8x16 = 234

dst = src <= dst (unsigned)

§

Veq16x8 = 235

dst = src == dst

§

Vneq16x8 = 236

dst = src != dst

§

Vslt16x8 = 237

dst = src < dst (signed)

§

Vslteq16x8 = 238

dst = src <= dst (signed)

§

Vult16x8 = 239

dst = src < dst (unsigned)

§

Vulteq16x8 = 240

dst = src <= dst (unsigned)

§

Veq32x4 = 241

dst = src == dst

§

Vneq32x4 = 242

dst = src != dst

§

Vslt32x4 = 243

dst = src < dst (signed)

§

Vslteq32x4 = 244

dst = src <= dst (signed)

§

Vult32x4 = 245

dst = src < dst (unsigned)

§

Vulteq32x4 = 246

dst = src <= dst (unsigned)

§

Veq64x2 = 247

dst = src == dst

§

Vneq64x2 = 248

dst = src != dst

§

Vslt64x2 = 249

dst = src < dst (signed)

§

Vslteq64x2 = 250

dst = src <= dst (signed)

§

Vult64x2 = 251

dst = src < dst (unsigned)

§

Vulteq64x2 = 252

dst = src <= dst (unsigned)

§

Vneg8x16 = 253

dst = -src

§

Vneg16x8 = 254

dst = -src

§

Vneg32x4 = 255

dst = -src

§

Vneg64x2 = 256

dst = -src

§

VnegF64x2 = 257

dst = -src

§

Vmin8x16S = 258

dst = min(src1, src2) (signed)

§

Vmin8x16U = 259

dst = min(src1, src2) (unsigned)

§

Vmin16x8S = 260

dst = min(src1, src2) (signed)

§

Vmin16x8U = 261

dst = min(src1, src2) (unsigned)

§

Vmax8x16S = 262

dst = max(src1, src2) (signed)

§

Vmax8x16U = 263

dst = max(src1, src2) (unsigned)

§

Vmax16x8S = 264

dst = max(src1, src2) (signed)

§

Vmax16x8U = 265

dst = max(src1, src2) (unsigned)

§

Vmin32x4S = 266

dst = min(src1, src2) (signed)

§

Vmin32x4U = 267

dst = min(src1, src2) (unsigned)

§

Vmax32x4S = 268

dst = max(src1, src2) (signed)

§

Vmax32x4U = 269

dst = max(src1, src2) (unsigned)

§

Vabs8x16 = 270

dst = |src|

§

Vabs16x8 = 271

dst = |src|

§

Vabs32x4 = 272

dst = |src|

§

Vabs64x2 = 273

dst = |src|

§

Vabsf32x4 = 274

dst = |src|

§

Vabsf64x2 = 275

dst = |src|

§

Vmaximumf32x4 = 276

dst = ieee_maximum(src1, src2)

§

Vmaximumf64x2 = 277

dst = ieee_maximum(src1, src2)

§

Vminimumf32x4 = 278

dst = ieee_minimum(src1, src2)

§

Vminimumf64x2 = 279

dst = ieee_minimum(src1, src2)

§

VShuffle = 280

dst = shuffle(src1, src2, mask)

§

Vswizzlei8x16 = 281

dst = swizzle(src1, src2)

§

Vavground8x16 = 282

dst = (src1 + src2 + 1) // 2

§

Vavground16x8 = 283

dst = (src1 + src2 + 1) // 2
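
A per-lane sketch, widening first so src1 + src2 + 1 cannot overflow. Lanes are treated as unsigned here on the assumption that these opcodes back Wasm's i8x16.avgr_u/i16x8.avgr_u.

fn avgr_u8(a: u8, b: u8) -> u8 {
    ((u16::from(a) + u16::from(b) + 1) >> 1) as u8 // (a + b + 1) / 2, rounded up
}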

§

VeqF32x4 = 284

dst = src == dst

§

VneqF32x4 = 285

dst = src != dst

§

VltF32x4 = 286

dst = src < dst

§

VlteqF32x4 = 287

dst = src <= dst

§

VeqF64x2 = 288

dst = src == dst

§

VneqF64x2 = 289

dst = src != dst

§

VltF64x2 = 290

dst = src < dst

§

VlteqF64x2 = 291

dst = src <= dst

§

Vfma32x4 = 292

dst = ieee_fma(a, b, c)

§

Vfma64x2 = 293

dst = ieee_fma(a, b, c)

§

Vselect = 294

dst = low32(cond) ? if_nonzero : if_zero

§

Xadd128 = 295

dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo

§

Xsub128 = 296

dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo

§

Xwidemul64S = 297

dst_hi:dst_lo = sext(lhs) * sext(rhs)

§

Xwidemul64U = 298

dst_hi:dst_lo = zext(lhs) * zext(rhs)
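
Sketches of the three 128-bit ops using u128/i128; the opcodes themselves keep each 128-bit value split across a _hi:_lo pair of 64-bit registers, and wrapping semantics for Xadd128 is an assumption here.

fn xadd128(lhs: u128, rhs: u128) -> u128 {
    lhs.wrapping_add(rhs)
}

fn xwidemul64_s(lhs: i64, rhs: i64) -> i128 {
    // sext both operands to 128 bits, then multiply: cannot overflow.
    i128::from(lhs) * i128::from(rhs)
}

fn xwidemul64_u(lhs: u64, rhs: u64) -> u128 {
    // zext both operands to 128 bits, then multiply.
    u128::from(lhs) * u128::from(rhs)
}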

Implementations§

impl ExtendedOpcode

pub const MAX: u16 = 299u16

The value of the maximum defined extended opcode.

impl ExtendedOpcode

pub fn new(bytes: u16) -> Option<Self>

Create a new ExtendedOpcode from the given bytes.

Returns None if bytes is not a valid extended opcode.
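
For example, assuming the crate-root import path; the values follow this page, where Trap = 0:

use pulley_interpreter::ExtendedOpcode; // assumed import path

assert_eq!(ExtendedOpcode::new(0), Some(ExtendedOpcode::Trap));
assert!(ExtendedOpcode::new(u16::MAX).is_none()); // out of range -> None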

pub unsafe fn unchecked_new(byte: u16) -> Self

Like new but does not check whether bytes is a valid opcode.

§Safety

It is unsafe to pass bytes that is not a valid opcode.

Trait Implementations§

impl Clone for ExtendedOpcode

fn clone(&self) -> ExtendedOpcode

Returns a copy of the value.

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.

impl Debug for ExtendedOpcode

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

impl Decode for ExtendedOpcode

Available on crate feature decode only.

fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
where T: BytecodeStream,

Decode this type from the given bytecode stream.

impl Hash for ExtendedOpcode

fn hash<__H: Hasher>(&self, state: &mut __H)

Feeds this value into the given Hasher.

fn hash_slice<H>(data: &[Self], state: &mut H)
where H: Hasher, Self: Sized,

Feeds a slice of this type into the given Hasher.

impl Ord for ExtendedOpcode

fn cmp(&self, other: &ExtendedOpcode) -> Ordering

This method returns an Ordering between self and other.

fn max(self, other: Self) -> Self
where Self: Sized,

Compares and returns the maximum of two values.

fn min(self, other: Self) -> Self
where Self: Sized,

Compares and returns the minimum of two values.

fn clamp(self, min: Self, max: Self) -> Self
where Self: Sized,

Restricts a value to a certain interval.

impl PartialEq for ExtendedOpcode

fn eq(&self, other: &ExtendedOpcode) -> bool

Tests for self and other values to be equal, and is used by ==.

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.

impl PartialOrd for ExtendedOpcode

fn partial_cmp(&self, other: &ExtendedOpcode) -> Option<Ordering>

This method returns an ordering between self and other values if one exists.

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator.

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the <= operator.

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > operator.

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by the >= operator.

impl Copy for ExtendedOpcode

impl Eq for ExtendedOpcode

impl StructuralPartialEq for ExtendedOpcode

Auto Trait Implementations§

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dst: *mut u8)

🔬 This is a nightly-only experimental API (clone_to_uninit). Performs copy-assignment from self to dst.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.