pulley_interpreter::opcode

Enum ExtendedOpcode

#[repr(u16)]
pub enum ExtendedOpcode {
    Trap = 0,
    Nop = 1,
    CallIndirectHost = 2,
    /* ... 282 variants in total; see the full list below ... */
    Vavground16x8 = 281,
}

An extended opcode.

Variants

Trap = 0

Raise a trap.

Nop = 1

Do nothing.

CallIndirectHost = 2

A special opcode to halt interpreter execution and yield control back to the host.

This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It’s up to the embedder to determine what to do with the id and the current state of registers and the stack.

In Wasmtime this is used to implement interpreter-to-host calls. This is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function are in following parameter positions (e.g. x1, x2, …). The results of the host call are then stored in x0.

Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
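To make the handshake concrete, here is a minimal, self-contained sketch of the embedder side. The register array and the host-function table keyed by id are illustrative assumptions, not the pulley_interpreter or Wasmtime API (which passes a raw native function pointer rather than a table index):

// Toy model of the CallIndirectHost convention described above.
fn main() {
    // Hypothetical integer register file: x0, x1, x2, ...
    let mut x = [0u64; 4];

    // Hypothetical host-function table the embedder keys by `id`.
    let host_fns: &[fn(u64, u64) -> u64] = &[
        |a, b| a + b, // id 0
        |a, b| a * b, // id 1
    ];

    // Suppose execution halted with id = 1 and arguments already in x1, x2.
    let id = 1;
    x[1] = 6;
    x[2] = 7;

    // Embedder dispatch: call the host function, store its result in x0,
    // then resume the interpreter (not modeled here).
    x[0] = host_fns[id](x[1], x[2]);
    assert_eq!(x[0], 42);
}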

XmovFp = 3

Gets the special “fp” register and moves it into dst.

XmovLr = 4

Gets the special “lr” register and moves it into dst.

Bswap32 = 5

dst = byteswap(low32(src))

Bswap64 = 6

dst = byteswap(src)

Xadd32UoverflowTrap = 7

32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified. Traps if the addition overflows.

Xadd64UoverflowTrap = 8

64-bit checked unsigned addition: dst = src1 + src2. Traps if the addition overflows.

XMulHi64S = 9

dst = high64(src1 * src2) (signed)

XMulHi64U = 10

dst = high64(src1 * src2) (unsigned)

Xbmask32 = 11

low32(dst) = if low32(src) == 0 { 0 } else { -1 }

Xbmask64 = 12

dst = if src == 0 { 0 } else { -1 }

XPush32 = 13

*sp = low32(src); sp = sp.checked_add(4)

XPush32Many = 14

for src in srcs { xpush32 src }

XPush64 = 15

*sp = src; sp = sp.checked_add(8)

XPush64Many = 16

for src in srcs { xpush64 src }

XPop32 = 17

*dst = *sp; sp -= 4

XPop32Many = 18

for dst in dsts.rev() { xpop32 dst }

XPop64 = 19

*dst = *sp; sp -= 8

XPop64Many = 20

for dst in dsts.rev() { xpop64 dst }
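The .rev() in the pop pseudocode is what makes a *Many push/pop pair symmetric. A small stand-alone model (a Vec in place of the real stack pointer, overflow checks omitted) shows the round trip:

// Toy model (not the pulley_interpreter API) of the *Many push/pop pairing:
// pops iterate the destination list in reverse, so a push_many/pop_many pair
// with the same register list round-trips the values.
fn main() {
    let mut stack: Vec<u32> = Vec::new();
    let srcs = [10u32, 20, 30];

    // xpush32_many: for src in srcs { push }
    for s in srcs {
        stack.push(s);
    }

    // xpop32_many: for dst in dsts.rev() { pop }
    let mut dsts = [0u32; 3];
    for d in dsts.iter_mut().rev() {
        *d = stack.pop().unwrap();
    }
    assert_eq!(dsts, srcs);
}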

XLoad16BeU64Offset32 = 21

dst = zext(*(ptr + offset))

XLoad16BeS64Offset32 = 22

dst = sext(*(ptr + offset))

XLoad32BeU64Offset32 = 23

dst = zext(*(ptr + offset))

XLoad32BeS64Offset32 = 24

dst = sext(*(ptr + offset))

XLoad64BeOffset32 = 25

dst = *(ptr + offset)

XStore16BeOffset32 = 26

*(ptr + offset) = low16(src)

XStore32BeOffset32 = 27

*(ptr + offset) = low32(src)

XStore64BeOffset32 = 28

*(ptr + offset) = low64(src)
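The names encode byte order (Be/Le) and extension width (U64/S64). In plain Rust the same load-and-extend pattern looks like this sketch (the real opcodes' pointer arithmetic and bounds behavior are not modeled):

// Big-endian 16-bit load, zero- vs. sign-extended to 64 bits.
fn main() {
    let mem = [0xFFu8, 0x80, 0x12, 0x34];

    // xload16_be_u64: read 2 bytes big-endian, zero-extend.
    let zext = u16::from_be_bytes([mem[0], mem[1]]) as u64;
    assert_eq!(zext, 0xFF80);

    // xload16_be_s64: same bytes, sign-extend instead.
    let sext = i16::from_be_bytes([mem[0], mem[1]]) as i64;
    assert_eq!(sext, -128);
}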

Fload32BeOffset32 = 29

low32(dst) = zext(*(ptr + offset))

Fload64BeOffset32 = 30

dst = *(ptr + offset)

Fstore32BeOffset32 = 31

*(ptr + offset) = low32(src)

Fstore64BeOffset32 = 32

*(ptr + offset) = src

Fload32LeOffset32 = 33

low32(dst) = zext(*(ptr + offset))

Fload64LeOffset32 = 34

dst = *(ptr + offset)

Fstore32LeOffset32 = 35

*(ptr + offset) = low32(src)

Fstore64LeOffset32 = 36

*(ptr + offset) = src

VLoad128Offset32 = 37

dst = *(ptr + offset)

Vstore128LeOffset32 = 38

*(ptr + offset) = src

Fmov = 39

Move between f registers.

Vmov = 40

Move between v registers.

BitcastIntFromFloat32 = 41

low32(dst) = bitcast low32(src) as i32

BitcastIntFromFloat64 = 42

dst = bitcast src as i64

BitcastFloatFromInt32 = 43

low32(dst) = bitcast low32(src) as f32

BitcastFloatFromInt64 = 44

dst = bitcast src as f64

FConst32 = 45

low32(dst) = bits

FConst64 = 46

dst = bits

Feq32 = 47

low32(dst) = zext(src1 == src2)

Fneq32 = 48

low32(dst) = zext(src1 != src2)

Flt32 = 49

low32(dst) = zext(src1 < src2)

Flteq32 = 50

low32(dst) = zext(src1 <= src2)

Feq64 = 51

low32(dst) = zext(src1 == src2)

Fneq64 = 52

low32(dst) = zext(src1 != src2)

Flt64 = 53

low32(dst) = zext(src1 < src2)

Flteq64 = 54

low32(dst) = zext(src1 <= src2)

FSelect32 = 55

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

FSelect64 = 56

dst = low32(cond) ? if_nonzero : if_zero

F32FromF64 = 57

low32(dst) = demote(src)

F64FromF32 = 58

dst = promote(low32(src))

F32FromX32S = 59

low32(dst) = checked_f32_from_signed(low32(src))

F32FromX32U = 60

low32(dst) = checked_f32_from_unsigned(low32(src))

F32FromX64S = 61

low32(dst) = checked_f32_from_signed(src)

F32FromX64U = 62

low32(dst) = checked_f32_from_unsigned(src)

F64FromX32S = 63

dst = checked_f64_from_signed(low32(src))

F64FromX32U = 64

dst = checked_f64_from_unsigned(low32(src))

F64FromX64S = 65

dst = checked_f64_from_signed(src)

F64FromX64U = 66

dst = checked_f64_from_unsigned(src)

X32FromF32S = 67

low32(dst) = checked_signed_from_f32(low32(src))

X32FromF32U = 68

low32(dst) = checked_unsigned_from_f32(low32(src))

X32FromF64S = 69

low32(dst) = checked_signed_from_f64(src)

X32FromF64U = 70

low32(dst) = checked_unsigned_from_f64(src)

X64FromF32S = 71

dst = checked_signed_from_f32(low32(src))

X64FromF32U = 72

dst = checked_unsigned_from_f32(low32(src))

X64FromF64S = 73

dst = checked_signed_from_f64(src)

X64FromF64U = 74

dst = checked_unsigned_from_f64(src)

X32FromF32SSat = 75

low32(dst) = saturating_signed_from_f32(low32(src))

X32FromF32USat = 76

low32(dst) = saturating_unsigned_from_f32(low32(src))

X32FromF64SSat = 77

low32(dst) = saturating_signed_from_f64(src)

X32FromF64USat = 78

low32(dst) = saturating_unsigned_from_f64(src)

X64FromF32SSat = 79

dst = saturating_signed_from_f32(low32(src))

X64FromF32USat = 80

dst = saturating_unsigned_from_f32(low32(src))

X64FromF64SSat = 81

dst = saturating_signed_from_f64(src)

X64FromF64USat = 82

dst = saturating_unsigned_from_f64(src)
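The checked_* conversions above trap when the float is out of range for the integer type, while the *Sat forms clamp. A scalar sketch of the distinction (assuming trap ≈ returning None here):

fn main() {
    let big = 1e30_f32;

    // Saturating, like X32FromF32SSat: clamp to the integer range.
    // (Rust's float-to-int `as` cast saturates, so it models the *Sat forms.)
    assert_eq!(big as i32, i32::MAX);

    // Checked, like X32FromF32S: refuse out-of-range input. The interpreter
    // traps where this sketch returns None.
    let checked = if big.is_finite() && big >= i32::MIN as f32 && big <= i32::MAX as f32 {
        Some(big as i32)
    } else {
        None
    };
    assert_eq!(checked, None);
}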

FCopySign32 = 83

low32(dst) = copysign(low32(src1), low32(src2))

FCopySign64 = 84

dst = copysign(src1, src2)

Fadd32 = 85

low32(dst) = low32(src1) + low32(src2)

Fsub32 = 86

low32(dst) = low32(src1) - low32(src2)

Fmul32 = 87

low32(dst) = low32(src1) * low32(src2)

Fdiv32 = 88

low32(dst) = low32(src1) / low32(src2)

Vdivf32x4 = 89

low128(dst) = low128(src1) / low128(src2)

Fmaximum32 = 90

low32(dst) = ieee_maximum(low32(src1), low32(src2))

Fminimum32 = 91

low32(dst) = ieee_minimum(low32(src1), low32(src2))

Ftrunc32 = 92

low32(dst) = ieee_trunc(low32(src))

Vtrunc32x4 = 93

low128(dst) = ieee_trunc(low128(src))

Vtrunc64x2 = 94

low128(dst) = ieee_trunc(low128(src))

Ffloor32 = 95

low32(dst) = ieee_floor(low32(src))

Vfloor32x4 = 96

low128(dst) = ieee_floor(low128(src))

Vfloor64x2 = 97

low128(dst) = ieee_floor(low128(src))

Fceil32 = 98

low32(dst) = ieee_ceil(low32(src))

Vceil32x4 = 99

low128(dst) = ieee_ceil(low128(src))

Vceil64x2 = 100

low128(dst) = ieee_ceil(low128(src))

Fnearest32 = 101

low32(dst) = ieee_nearest(low32(src))

Fsqrt32 = 102

low32(dst) = ieee_sqrt(low32(src))

Vsqrt32x4 = 103

low128(dst) = ieee_sqrt(low128(src))

Vsqrt64x2 = 104

low128(dst) = ieee_sqrt(low128(src))

Fneg32 = 105

low32(dst) = -low32(src)

Fabs32 = 106

low32(dst) = |low32(src)|

Fadd64 = 107

dst = src1 + src2

Fsub64 = 108

dst = src1 - src2

Fmul64 = 109

dst = src1 * src2

Fdiv64 = 110

dst = src1 / src2

VDivF64x2 = 111

dst = src1 / src2

Fmaximum64 = 112

dst = ieee_maximum(src1, src2)

Fminimum64 = 113

dst = ieee_minimum(src1, src2)

Ftrunc64 = 114

dst = ieee_trunc(src)

Ffloor64 = 115

dst = ieee_floor(src)

Fceil64 = 116

dst = ieee_ceil(src)

Fnearest64 = 117

dst = ieee_nearest(src)

Vnearest32x4 = 118

low128(dst) = ieee_nearest(low128(src))

Vnearest64x2 = 119

low128(dst) = ieee_nearest(low128(src))

Fsqrt64 = 120

dst = ieee_sqrt(src)

Fneg64 = 121

dst = -src

Fabs64 = 122

dst = |src|

Vconst128 = 123

dst = imm

VAddI8x16 = 124

dst = src1 + src2

VAddI16x8 = 125

dst = src1 + src2

VAddI32x4 = 126

dst = src1 + src2

VAddI64x2 = 127

dst = src1 + src2

VAddF32x4 = 128

dst = src1 + src2

VAddF64x2 = 129

dst = src1 + src2

VAddI8x16Sat = 130

dst = saturating_add(src1, src2)

VAddU8x16Sat = 131

dst = saturating_add(src1, src2)

VAddI16x8Sat = 132

dst = saturating_add(src1, src2)

VAddU16x8Sat = 133

dst = saturating_add(src1, src2)

VAddpairwiseI16x8S = 134

dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]

VAddpairwiseI32x4S = 135

dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
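A scalar sketch of the pairwise-add pseudocode above: adjacent lanes of each source are summed, with src1's pairs filling the low half of dst and src2's pairs the high half.

fn main() {
    let src1: [i16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let src2: [i16; 8] = [10, 20, 30, 40, 50, 60, 70, 80];
    let mut dst = [0i16; 8];
    for i in 0..4 {
        // dst[i] = src1[2i] + src1[2i+1]; dst[i+4] = src2[2i] + src2[2i+1]
        dst[i] = src1[2 * i].wrapping_add(src1[2 * i + 1]);
        dst[i + 4] = src2[2 * i].wrapping_add(src2[2 * i + 1]);
    }
    assert_eq!(dst, [3, 7, 11, 15, 30, 70, 110, 150]);
}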

VShlI8x16 = 136

dst = src1 << src2

VShlI16x8 = 137

dst = src1 << src2

VShlI32x4 = 138

dst = src1 << src2

VShlI64x2 = 139

dst = src1 << src2

VShrI8x16S = 140

dst = src1 >> src2 (signed)

VShrI16x8S = 141

dst = src1 >> src2 (signed)

VShrI32x4S = 142

dst = src1 >> src2 (signed)

VShrI64x2S = 143

dst = src1 >> src2 (signed)

VShrI8x16U = 144

dst = src1 >> src2 (unsigned)

VShrI16x8U = 145

dst = src1 >> src2 (unsigned)

VShrI32x4U = 146

dst = src1 >> src2 (unsigned)

VShrI64x2U = 147

dst = src1 >> src2 (unsigned)

VSplatX8 = 148

dst = splat(low8(src))

VSplatX16 = 149

dst = splat(low16(src))

VSplatX32 = 150

dst = splat(low32(src))

VSplatX64 = 151

dst = splat(src)

VSplatF32 = 152

dst = splat(low32(src))

VSplatF64 = 153

dst = splat(src)

VLoad8x8SOffset32 = 154

Load the 64-bit source as i8x8 and sign-extend to i16x8.

VLoad8x8UOffset32 = 155

Load the 64-bit source as u8x8 and zero-extend to i16x8.

VLoad16x4LeSOffset32 = 156

Load the 64-bit source as i16x4 and sign-extend to i32x4.

VLoad16x4LeUOffset32 = 157

Load the 64-bit source as u16x4 and zero-extend to i32x4.

VLoad32x2LeSOffset32 = 158

Load the 64-bit source as i32x2 and sign-extend to i64x2.

VLoad32x2LeUOffset32 = 159

Load the 64-bit source as u32x2 and zero-extend to i64x2.

VBand128 = 160

dst = src1 & src2

VBor128 = 161

dst = src1 | src2

VBxor128 = 162

dst = src1 ^ src2

VBnot128 = 163

dst = !src1

VBitselect128 = 164

dst = (c & x) | (!c & y)
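Bitselect picks each result bit from x where the mask c has a 1 and from y where it has a 0. A scalar sketch on 16 bits:

fn main() {
    let (c, x, y) = (0xFF00u16, 0xABCDu16, 0x1234u16);
    // High byte comes from x, low byte from y.
    let dst = (c & x) | (!c & y);
    assert_eq!(dst, 0xAB34);
}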

Vbitmask8x16 = 165

Collect high bits of each lane into the low 32-bits of the destination.

Vbitmask16x8 = 166

Collect high bits of each lane into the low 32-bits of the destination.

Vbitmask32x4 = 167

Collect high bits of each lane into the low 32-bits of the destination.

Vbitmask64x2 = 168

Collect high bits of each lane into the low 32-bits of the destination.
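A scalar sketch (an assumption matching the description above, not the interpreter's code) of vbitmask8x16: take the high bit of each 8-bit lane and pack them into the low bits of a 32-bit integer.

fn main() {
    let lanes: [i8; 16] = [
        -1, 0, -128, 1, 0, 0, -5, 0,
        0, 0, 0, 0, 0, 0, 0, -1,
    ];
    let mut mask = 0u32;
    for (i, lane) in lanes.iter().enumerate() {
        // Lane i's high (sign) bit becomes bit i of the mask.
        mask |= (((*lane as u8) >> 7) as u32) << i;
    }
    assert_eq!(mask, 0b1000_0000_0100_0101);
}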

Valltrue8x16 = 169

Store whether all lanes are nonzero in dst.

Valltrue16x8 = 170

Store whether all lanes are nonzero in dst.

Valltrue32x4 = 171

Store whether all lanes are nonzero in dst.

Valltrue64x2 = 172

Store whether all lanes are nonzero in dst.

Vanytrue8x16 = 173

Store whether any lanes are nonzero in dst.

Vanytrue16x8 = 174

Store whether any lanes are nonzero in dst.

Vanytrue32x4 = 175

Store whether any lanes are nonzero in dst.

Vanytrue64x2 = 176

Store whether any lanes are nonzero in dst.

VF32x4FromI32x4S = 177

Int-to-float conversion (same as f32_from_x32_s)

VF32x4FromI32x4U = 178

Int-to-float conversion (same as f32_from_x32_u)

VF64x2FromI64x2S = 179

Int-to-float conversion (same as f64_from_x64_s)

VF64x2FromI64x2U = 180

Int-to-float conversion (same as f64_from_x64_u)

VWidenLow8x16S = 181

Widens the low lanes of the input vector, as signed, to twice the width.

VWidenLow8x16U = 182

Widens the low lanes of the input vector, as unsigned, to twice the width.

VWidenLow16x8S = 183

Widens the low lanes of the input vector, as signed, to twice the width.

VWidenLow16x8U = 184

Widens the low lanes of the input vector, as unsigned, to twice the width.

VWidenLow32x4S = 185

Widens the low lanes of the input vector, as signed, to twice the width.

VWidenLow32x4U = 186

Widens the low lanes of the input vector, as unsigned, to twice the width.

VWidenHigh8x16S = 187

Widens the high lanes of the input vector, as signed, to twice the width.

VWidenHigh8x16U = 188

Widens the high lanes of the input vector, as unsigned, to twice the width.

VWidenHigh16x8S = 189

Widens the high lanes of the input vector, as signed, to twice the width.

VWidenHigh16x8U = 190

Widens the high lanes of the input vector, as unsigned, to twice the width.

VWidenHigh32x4S = 191

Widens the high lanes of the input vector, as signed, to twice the width.

VWidenHigh32x4U = 192

Widens the high lanes of the input vector, as unsigned, to twice the width.

Vnarrow16x8S = 193

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

Vnarrow16x8U = 194

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

Vnarrow32x4S = 195

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

Vnarrow32x4U = 196

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
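For the signed narrow, each wide lane is clamped into the narrow signed range before truncation; the U forms clamp to the unsigned range instead. A scalar sketch of the signed case:

fn main() {
    // vnarrow16x8_s, one lane at a time: clamp each i16 into i8 range.
    let lanes = [300i16, -300, 5, i16::MIN];
    let narrowed: Vec<i8> = lanes
        .iter()
        .map(|&l| l.clamp(i8::MIN as i16, i8::MAX as i16) as i8)
        .collect();
    assert_eq!(narrowed, [127, -128, 5, -128]);
}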

VFpromoteLow = 197

Promotes the low two lanes of the f32x4 input to f64x2.

VFdemote = 198

Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.

VSubI8x16 = 199

dst = src1 - src2

VSubI16x8 = 200

dst = src1 - src2

VSubI32x4 = 201

dst = src1 - src2

VSubI64x2 = 202

dst = src1 - src2

VSubF64x2 = 203

dst = src1 - src2

VSubI8x16Sat = 204

dst = saturating_sub(src1, src2)

VSubU8x16Sat = 205

dst = saturating_sub(src1, src2)

VSubI16x8Sat = 206

dst = saturating_sub(src1, src2)

VSubU16x8Sat = 207

dst = saturating_sub(src1, src2)

VMulI8x16 = 208

dst = src1 * src2

VMulI16x8 = 209

dst = src1 * src2

VMulI32x4 = 210

dst = src1 * src2

VMulI64x2 = 211

dst = src1 * src2

VMulF64x2 = 212

dst = src1 * src2

VQmulrsI16x8 = 213

dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
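A scalar sketch of the Q15 rounding, saturating multiply (Q = 15 here is an assumption matching the i16 lane type; the parenthesization below makes the rounding explicit):

fn q15_mul_round_sat(a: i16, b: i16) -> i16 {
    const Q: u32 = 15;
    // Multiply in a wider type, add the rounding constant, shift down.
    let product = (a as i32) * (b as i32);
    let rounded = (product + (1 << (Q - 1))) >> Q;
    // Saturate back into the i16 lane.
    rounded.clamp(i16::MIN as i32, i16::MAX as i32) as i16
}

fn main() {
    // i16::MIN * i16::MIN overflows the lane; the result saturates.
    assert_eq!(q15_mul_round_sat(i16::MIN, i16::MIN), i16::MAX);
    // 0.5 * 0.5 = 0.25 in Q15 fixed point.
    assert_eq!(q15_mul_round_sat(16384, 16384), 8192);
}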

VPopcnt8x16 = 214

dst = count_ones(src)

XExtractV8x16 = 215

low32(dst) = zext(src[lane])

XExtractV16x8 = 216

low32(dst) = zext(src[lane])

XExtractV32x4 = 217

low32(dst) = src[lane]

XExtractV64x2 = 218

dst = src[lane]

FExtractV32x4 = 219

low32(dst) = src[lane]

FExtractV64x2 = 220

dst = src[lane]

VInsertX8 = 221

dst = src1; dst[lane] = src2

VInsertX16 = 222

dst = src1; dst[lane] = src2

VInsertX32 = 223

dst = src1; dst[lane] = src2

VInsertX64 = 224

dst = src1; dst[lane] = src2

VInsertF32 = 225

dst = src1; dst[lane] = src2

VInsertF64 = 226

dst = src1; dst[lane] = src2

Veq8x16 = 227

dst = src1 == src2

Vneq8x16 = 228

dst = src1 != src2

Vslt8x16 = 229

dst = src1 < src2 (signed)

Vslteq8x16 = 230

dst = src1 <= src2 (signed)

Vult8x16 = 231

dst = src1 < src2 (unsigned)

Vulteq8x16 = 232

dst = src1 <= src2 (unsigned)

Veq16x8 = 233

dst = src1 == src2

Vneq16x8 = 234

dst = src1 != src2

Vslt16x8 = 235

dst = src1 < src2 (signed)

Vslteq16x8 = 236

dst = src1 <= src2 (signed)

Vult16x8 = 237

dst = src1 < src2 (unsigned)

Vulteq16x8 = 238

dst = src1 <= src2 (unsigned)

Veq32x4 = 239

dst = src1 == src2

Vneq32x4 = 240

dst = src1 != src2

Vslt32x4 = 241

dst = src1 < src2 (signed)

Vslteq32x4 = 242

dst = src1 <= src2 (signed)

Vult32x4 = 243

dst = src1 < src2 (unsigned)

Vulteq32x4 = 244

dst = src1 <= src2 (unsigned)

Veq64x2 = 245

dst = src1 == src2

Vneq64x2 = 246

dst = src1 != src2

Vslt64x2 = 247

dst = src1 < src2 (signed)

Vslteq64x2 = 248

dst = src1 <= src2 (signed)

Vult64x2 = 249

dst = src1 < src2 (unsigned)

Vulteq64x2 = 250

dst = src1 <= src2 (unsigned)

Vneg8x16 = 251

dst = -src

Vneg16x8 = 252

dst = -src

Vneg32x4 = 253

dst = -src

Vneg64x2 = 254

dst = -src

VnegF64x2 = 255

dst = -src

Vmin8x16S = 256

dst = min(src1, src2) (signed)

Vmin8x16U = 257

dst = min(src1, src2) (unsigned)

Vmin16x8S = 258

dst = min(src1, src2) (signed)

Vmin16x8U = 259

dst = min(src1, src2) (unsigned)

Vmax8x16S = 260

dst = max(src1, src2) (signed)

Vmax8x16U = 261

dst = max(src1, src2) (unsigned)

Vmax16x8S = 262

dst = max(src1, src2) (signed)

Vmax16x8U = 263

dst = max(src1, src2) (unsigned)

Vmin32x4S = 264

dst = min(src1, src2) (signed)

Vmin32x4U = 265

dst = min(src1, src2) (unsigned)

Vmax32x4S = 266

dst = max(src1, src2) (signed)

Vmax32x4U = 267

dst = max(src1, src2) (unsigned)

Vabs8x16 = 268

dst = |src|

Vabs16x8 = 269

dst = |src|

Vabs32x4 = 270

dst = |src|

Vabs64x2 = 271

dst = |src|

Vabsf32x4 = 272

dst = |src|

Vabsf64x2 = 273

dst = |src|

Vmaximumf32x4 = 274

dst = ieee_maximum(src1, src2)

Vmaximumf64x2 = 275

dst = ieee_maximum(src1, src2)

Vminimumf32x4 = 276

dst = ieee_minimum(src1, src2)

Vminimumf64x2 = 277

dst = ieee_minimum(src1, src2)

VShuffle = 278

dst = shuffle(src1, src2, mask)

Vswizzlei8x16 = 279

dst = swizzle(src1, src2)

Vavground8x16 = 280

dst = (src1 + src2 + 1) // 2

Vavground16x8 = 281

dst = (src1 + src2 + 1) // 2
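The // in the pseudocode above is integer (floor) division, so adding 1 makes the average round up on halves. A per-lane scalar sketch (treating lanes as unsigned is an assumption, matching Wasm's avgr_u semantics):

fn avg_round(a: u8, b: u8) -> u8 {
    // Compute in a wider type so the sum cannot overflow the lane.
    ((a as u16 + b as u16 + 1) / 2) as u8
}

fn main() {
    assert_eq!(avg_round(1, 2), 2); // 1.5 rounds up
    assert_eq!(avg_round(255, 255), 255);
}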

Implementations

impl ExtendedOpcode

pub const MAX: u16 = 282u16

The value of the maximum defined extended opcode.

pub fn new(bytes: u16) -> Option<Self>

Create a new ExtendedOpcode from the given bytes.

Returns None if bytes is not a valid extended opcode.

pub unsafe fn unchecked_new(byte: u16) -> Self

Like new but does not check whether byte is a valid opcode.

Safety

It is unsafe to pass a byte value that is not a valid opcode.
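A short usage sketch (assuming the pulley-interpreter crate as a dependency; discriminants 0 and 1 are Trap and Nop per the variant list above):

use pulley_interpreter::opcode::ExtendedOpcode;

fn main() {
    // `new` validates the value and rejects anything out of range.
    assert_eq!(ExtendedOpcode::new(0), Some(ExtendedOpcode::Trap));
    assert_eq!(ExtendedOpcode::new(u16::MAX), None);

    // `unchecked_new` skips validation; it is only sound for values
    // that `new` would accept.
    let op = unsafe { ExtendedOpcode::unchecked_new(1) };
    assert_eq!(op, ExtendedOpcode::Nop);
}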

Trait Implementations

impl Clone for ExtendedOpcode

fn clone(&self) -> ExtendedOpcode

Returns a copy of the value.

impl Debug for ExtendedOpcode

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

impl Decode for ExtendedOpcode

Available on crate feature decode only.

fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
where T: BytecodeStream,

Decode this type from the given bytecode stream.

impl Hash for ExtendedOpcode

fn hash<__H: Hasher>(&self, state: &mut __H)

Feeds this value into the given Hasher.

impl Ord for ExtendedOpcode

fn cmp(&self, other: &ExtendedOpcode) -> Ordering

This method returns an Ordering between self and other.

impl PartialEq for ExtendedOpcode

fn eq(&self, other: &ExtendedOpcode) -> bool

Tests for self and other values to be equal, and is used by ==.

impl PartialOrd for ExtendedOpcode

fn partial_cmp(&self, other: &ExtendedOpcode) -> Option<Ordering>

This method returns an ordering between self and other values if one exists.

impl Copy for ExtendedOpcode

impl Eq for ExtendedOpcode

impl StructuralPartialEq for ExtendedOpcode
Auto Trait Implementations

Blanket Implementations

impl<T> Any for T where T: 'static + ?Sized

impl<T> Borrow<T> for T where T: ?Sized

impl<T> BorrowMut<T> for T where T: ?Sized

impl<T> CloneToUninit for T where T: Clone

impl<T> From<T> for T

impl<T, U> Into<U> for T where U: From<T>

impl<T> ToOwned for T where T: Clone

impl<T, U> TryFrom<U> for T where U: Into<T>

impl<T, U> TryInto<U> for T where U: TryFrom<T>