pulley_interpreter::opcode

Enum Opcode

Source
#[repr(u8)]
pub enum Opcode {
Show 208 variants Ret = 0, Call = 1, Call1 = 2, Call2 = 3, Call3 = 4, Call4 = 5, CallIndirect = 6, Jump = 7, XJump = 8, BrIf = 9, BrIfNot = 10, BrIfXeq32 = 11, BrIfXneq32 = 12, BrIfXslt32 = 13, BrIfXslteq32 = 14, BrIfXult32 = 15, BrIfXulteq32 = 16, BrIfXeq64 = 17, BrIfXneq64 = 18, BrIfXslt64 = 19, BrIfXslteq64 = 20, BrIfXult64 = 21, BrIfXulteq64 = 22, BrIfXeq32I8 = 23, BrIfXeq32I32 = 24, BrIfXneq32I8 = 25, BrIfXneq32I32 = 26, BrIfXslt32I8 = 27, BrIfXslt32I32 = 28, BrIfXsgt32I8 = 29, BrIfXsgt32I32 = 30, BrIfXslteq32I8 = 31, BrIfXslteq32I32 = 32, BrIfXsgteq32I8 = 33, BrIfXsgteq32I32 = 34, BrIfXult32U8 = 35, BrIfXult32U32 = 36, BrIfXulteq32U8 = 37, BrIfXulteq32U32 = 38, BrIfXugt32U8 = 39, BrIfXugt32U32 = 40, BrIfXugteq32U8 = 41, BrIfXugteq32U32 = 42, BrIfXeq64I8 = 43, BrIfXeq64I32 = 44, BrIfXneq64I8 = 45, BrIfXneq64I32 = 46, BrIfXslt64I8 = 47, BrIfXslt64I32 = 48, BrIfXsgt64I8 = 49, BrIfXsgt64I32 = 50, BrIfXslteq64I8 = 51, BrIfXslteq64I32 = 52, BrIfXsgteq64I8 = 53, BrIfXsgteq64I32 = 54, BrIfXult64U8 = 55, BrIfXult64U32 = 56, BrIfXulteq64U8 = 57, BrIfXulteq64U32 = 58, BrIfXugt64U8 = 59, BrIfXugt64U32 = 60, BrIfXugteq64U8 = 61, BrIfXugteq64U32 = 62, BrTable32 = 63, Xmov = 64, Xconst8 = 65, Xconst16 = 66, Xconst32 = 67, Xconst64 = 68, Xadd32 = 69, Xadd32U8 = 70, Xadd32U32 = 71, Xadd64 = 72, Xadd64U8 = 73, Xadd64U32 = 74, Xsub32 = 75, Xsub32U8 = 76, Xsub32U32 = 77, Xsub64 = 78, Xsub64U8 = 79, Xsub64U32 = 80, XMul32 = 81, Xmul32S8 = 82, Xmul32S32 = 83, XMul64 = 84, Xmul64S8 = 85, Xmul64S32 = 86, Xctz32 = 87, Xctz64 = 88, Xclz32 = 89, Xclz64 = 90, Xpopcnt32 = 91, Xpopcnt64 = 92, Xrotl32 = 93, Xrotl64 = 94, Xrotr32 = 95, Xrotr64 = 96, Xshl32 = 97, Xshr32S = 98, Xshr32U = 99, Xshl64 = 100, Xshr64S = 101, Xshr64U = 102, Xshl32U6 = 103, Xshr32SU6 = 104, Xshr32UU6 = 105, Xshl64U6 = 106, Xshr64SU6 = 107, Xshr64UU6 = 108, Xneg32 = 109, Xneg64 = 110, Xeq64 = 111, Xneq64 = 112, Xslt64 = 113, Xslteq64 = 114, Xult64 = 115, Xulteq64 = 116, Xeq32 = 117, Xneq32 = 118, Xslt32 = 119, Xslteq32 = 120, Xult32 = 121, Xulteq32 = 122, XLoad8U32Offset32 = 123, XLoad8S32Offset32 = 124, XLoad16LeU32Offset32 = 125, XLoad16LeS32Offset32 = 126, XLoad32LeOffset32 = 127, XLoad8U64Offset32 = 128, XLoad8S64Offset32 = 129, XLoad16LeU64Offset32 = 130, XLoad16LeS64Offset32 = 131, XLoad32LeU64Offset32 = 132, XLoad32LeS64Offset32 = 133, XLoad64LeOffset32 = 134, XStore8Offset32 = 135, XStore16LeOffset32 = 136, XStore32LeOffset32 = 137, XStore64LeOffset32 = 138, XLoad8U32Offset8 = 139, XLoad8S32Offset8 = 140, XLoad16LeU32Offset8 = 141, XLoad16LeS32Offset8 = 142, XLoad32LeOffset8 = 143, XLoad8U64Offset8 = 144, XLoad8S64Offset8 = 145, XLoad16LeU64Offset8 = 146, XLoad16LeS64Offset8 = 147, XLoad32LeU64Offset8 = 148, XLoad32LeS64Offset8 = 149, XLoad64LeOffset8 = 150, XStore8Offset8 = 151, XStore16LeOffset8 = 152, XStore32LeOffset8 = 153, XStore64LeOffset8 = 154, PushFrame = 155, PopFrame = 156, PushFrameSave = 157, PopFrameRestore = 158, StackAlloc32 = 159, StackFree32 = 160, Zext8 = 161, Zext16 = 162, Zext32 = 163, Sext8 = 164, Sext16 = 165, Sext32 = 166, XAbs32 = 167, XAbs64 = 168, XDiv32S = 169, XDiv64S = 170, XDiv32U = 171, XDiv64U = 172, XRem32S = 173, XRem64S = 174, XRem32U = 175, XRem64U = 176, XBand32 = 177, Xband32S8 = 178, Xband32S32 = 179, XBand64 = 180, Xband64S8 = 181, Xband64S32 = 182, XBor32 = 183, Xbor32S8 = 184, Xbor32S32 = 185, XBor64 = 186, Xbor64S8 = 187, Xbor64S32 = 188, XBxor32 = 189, Xbxor32S8 = 190, Xbxor32S32 = 191, XBxor64 = 192, Xbxor64S8 = 193, Xbxor64S32 = 194, XBnot32 = 195, XBnot64 = 196, Xmin32U = 197, Xmin32S 
= 198, Xmax32U = 199, Xmax32S = 200, Xmin64U = 201, Xmin64S = 202, Xmax64U = 203, Xmax64S = 204, XSelect32 = 205, XSelect64 = 206, ExtendedOp = 207,
}

An opcode without its immediates and operands.
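
Because Opcode is #[repr(u8)], each variant's discriminant is exactly the byte that encodes it in Pulley bytecode. A minimal sketch of that correspondence, using discriminants taken from the variant listing above:

use pulley_interpreter::opcode::Opcode;

// Each fieldless variant casts to its encoding byte; the expected values come
// straight from the listing above.
fn discriminants_match_encoding() {
    assert_eq!(Opcode::Ret as u8, 0);
    assert_eq!(Opcode::Call as u8, 1);
    assert_eq!(Opcode::BrTable32 as u8, 63);
    assert_eq!(Opcode::ExtendedOp as u8, 207);
}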

Variants§

§

Ret = 0

Transfer control to the address in the lr register.

§

Call = 1

Transfer control to the PC at the given offset and set the lr register to the PC just after this instruction.

This instruction generally assumes that the Pulley ABI is being respected, where arguments are placed in argument registers (starting at x0 for integer arguments) and results are placed in result registers. This instruction itself assumes that all arguments are already in their registers. The subsequent call1 through call4 instructions below move arguments into the correct registers as part of the same call instruction.

§

Call1 = 2

Like call, but also x0 = arg1

§

Call2 = 3

Like call, but also x0, x1 = arg1, arg2

§

Call3 = 4

Like call, but also x0, x1, x2 = arg1, arg2, arg3

§

Call4 = 5

Like call, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4

§

CallIndirect = 6

Transfer control to the PC in reg and set lr to the PC just after this instruction.

§

Jump = 7

Unconditionally transfer control to the PC at the given offset.

§

XJump = 8

Unconditionally transfer control to the PC in the specified register.

§

BrIf = 9

Conditionally transfer control to the given PC offset if low32(cond) contains a non-zero value.

§

BrIfNot = 10

Conditionally transfer control to the given PC offset if low32(cond) contains a zero value.

§

BrIfXeq32 = 11

Branch if a == b.

§

BrIfXneq32 = 12

Branch if a != b.

§

BrIfXslt32 = 13

Branch if signed a < b.

§

BrIfXslteq32 = 14

Branch if signed a <= b.

§

BrIfXult32 = 15

Branch if unsigned a < b.

§

BrIfXulteq32 = 16

Branch if unsigned a <= b.

§

BrIfXeq64 = 17

Branch if a == b.

§

BrIfXneq64 = 18

Branch if a != b.

§

BrIfXslt64 = 19

Branch if signed a < b.

§

BrIfXslteq64 = 20

Branch if signed a <= b.

§

BrIfXult64 = 21

Branch if unsigned a < b.

§

BrIfXulteq64 = 22

Branch if unsigned a <= b.

§

BrIfXeq32I8 = 23

Branch if a == b.

§

BrIfXeq32I32 = 24

Branch if a == b.

§

BrIfXneq32I8 = 25

Branch if a != b.

§

BrIfXneq32I32 = 26

Branch if a != b.

§

BrIfXslt32I8 = 27

Branch if signed a < b.

§

BrIfXslt32I32 = 28

Branch if signed a < b.

§

BrIfXsgt32I8 = 29

Branch if signed a > b.

§

BrIfXsgt32I32 = 30

Branch if signed a > b.

§

BrIfXslteq32I8 = 31

Branch if signed a <= b.

§

BrIfXslteq32I32 = 32

Branch if signed a <= b.

§

BrIfXsgteq32I8 = 33

Branch if signed a >= b.

§

BrIfXsgteq32I32 = 34

Branch if signed a >= b.

§

BrIfXult32U8 = 35

Branch if unsigned a < b.

§

BrIfXult32U32 = 36

Branch if unsigned a < b.

§

BrIfXulteq32U8 = 37

Branch if unsigned a <= b.

§

BrIfXulteq32U32 = 38

Branch if unsigned a <= b.

§

BrIfXugt32U8 = 39

Branch if unsigned a > b.

§

BrIfXugt32U32 = 40

Branch if unsigned a > b.

§

BrIfXugteq32U8 = 41

Branch if unsigned a >= b.

§

BrIfXugteq32U32 = 42

Branch if unsigned a >= b.

§

BrIfXeq64I8 = 43

Branch if a == b.

§

BrIfXeq64I32 = 44

Branch if a == b.

§

BrIfXneq64I8 = 45

Branch if a != b.

§

BrIfXneq64I32 = 46

Branch if a != b.

§

BrIfXslt64I8 = 47

Branch if signed a < b.

§

BrIfXslt64I32 = 48

Branch if signed a < b.

§

BrIfXsgt64I8 = 49

Branch if signed a > b.

§

BrIfXsgt64I32 = 50

Branch if signed a > b.

§

BrIfXslteq64I8 = 51

Branch if signed a <= b.

§

BrIfXslteq64I32 = 52

Branch if signed a <= b.

§

BrIfXsgteq64I8 = 53

Branch if signed a >= b.

§

BrIfXsgteq64I32 = 54

Branch if signed a >= b.

§

BrIfXult64U8 = 55

Branch if unsigned a < b.

§

BrIfXult64U32 = 56

Branch if unsigned a < b.

§

BrIfXulteq64U8 = 57

Branch if unsigned a <= b.

§

BrIfXulteq64U32 = 58

Branch if unsigned a <= b.

§

BrIfXugt64U8 = 59

Branch if unsigned a > b.

§

BrIfXugt64U32 = 60

Branch if unsigned a > b.

§

BrIfXugteq64U8 = 61

Branch if unsigned a >= b.

§

BrIfXugteq64U32 = 62

Branch if unsigned a >= b.

§

BrTable32 = 63

Branch to the label indicated by low32(idx).

After this instruction are amt instances of PcRelOffset, and idx selects which one will be branched to. The value of idx is clamped to amt - 1 (i.e. the last offset is the “default” one).
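
As an illustration only (not the interpreter's actual code), the selection logic can be sketched in Rust, with a plain i32 slice standing in for the amt PcRelOffset entries that follow the instruction:

// Hypothetical helper: clamp the index so the last offset acts as the
// "default" target. Assumes at least one offset is present.
fn br_table_target(idx: u32, offsets: &[i32]) -> i32 {
    let i = (idx as usize).min(offsets.len() - 1);
    offsets[i]
}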

§

Xmov = 64

Move between x registers.

§

Xconst8 = 65

Set dst = sign_extend(imm8).

§

Xconst16 = 66

Set dst = sign_extend(imm16).

§

Xconst32 = 67

Set dst = sign_extend(imm32).

§

Xconst64 = 68

Set dst = imm64.

§

Xadd32 = 69

32-bit wrapping addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified.

§

Xadd32U8 = 70

Same as xadd32 but src2 is a zero-extended 8-bit immediate.

§

Xadd32U32 = 71

Same as xadd32 but src2 is a 32-bit immediate.

§

Xadd64 = 72

64-bit wrapping addition: dst = src1 + src2.

§

Xadd64U8 = 73

Same as xadd64 but src2 is a zero-extended 8-bit immediate.

§

Xadd64U32 = 74

Same as xadd64 but src2 is a zero-extended 32-bit immediate.

§

Xsub32 = 75

32-bit wrapping subtraction: low32(dst) = low32(src1) - low32(src2).

The upper 32-bits of dst are unmodified.

§

Xsub32U8 = 76

Same as xsub32 but src2 is a zero-extended 8-bit immediate.

§

Xsub32U32 = 77

Same as xsub32 but src2 is a 32-bit immediate.

§

Xsub64 = 78

64-bit wrapping subtraction: dst = src1 - src2.

§

Xsub64U8 = 79

Same as xsub64 but src2 is a zero-extended 8-bit immediate.

§

Xsub64U32 = 80

Same as xsub64 but src2 is a zero-extended 32-bit immediate.

§

XMul32 = 81

low32(dst) = low32(src1) * low32(src2)

§

Xmul32S8 = 82

Same as xmul32 but src2 is a sign-extended 8-bit immediate.

§

Xmul32S32 = 83

Same as xmul32 but src2 is a sign-extended 32-bit immediate.

§

XMul64 = 84

dst = src1 * src2

§

Xmul64S8 = 85

Same as xmul64 but src2 is a sign-extended 8-bit immediate.

§

Xmul64S32 = 86

Same as xmul64 but src2 is a sign-extended 32-bit immediate.

§

Xctz32 = 87

low32(dst) = trailing_zeros(low32(src))

§

Xctz64 = 88

dst = trailing_zeros(src)

§

Xclz32 = 89

low32(dst) = leading_zeros(low32(src))

§

Xclz64 = 90

dst = leading_zeros(src)

§

Xpopcnt32 = 91

low32(dst) = count_ones(low32(src))

§

Xpopcnt64 = 92

dst = count_ones(src)

§

Xrotl32 = 93

low32(dst) = rotate_left(low32(src1), low32(src2))

§

Xrotl64 = 94

dst = rotate_left(src1, src2)

§

Xrotr32 = 95

low32(dst) = rotate_right(low32(src1), low32(src2))

§

Xrotr64 = 96

dst = rotate_right(src1, src2)

§

Xshl32 = 97

low32(dst) = low32(src1) << low5(src2)

§

Xshr32S = 98

low32(dst) = low32(src1) >> low5(src2)

§

Xshr32U = 99

low32(dst) = low32(src1) >> low5(src2)

§

Xshl64 = 100

dst = src1 << low6(src2)

§

Xshr64S = 101

dst = src1 >> low6(src2)

§

Xshr64U = 102

dst = src1 >> low6(src2)

§

Xshl32U6 = 103

low32(dst) = low32(src1) << low5(src2)

§

Xshr32SU6 = 104

low32(dst) = low32(src1) >> low5(src2)

§

Xshr32UU6 = 105

low32(dst) = low32(src1) >> low5(src2)

§

Xshl64U6 = 106

dst = src1 << low6(src2)

§

Xshr64SU6 = 107

dst = src1 >> low6(src2)

§

Xshr64UU6 = 108

dst = src1 >> low6(src2)

§

Xneg32 = 109

low32(dst) = -low32(src)

§

Xneg64 = 110

dst = -src

§

Xeq64 = 111

low32(dst) = src1 == src2

§

Xneq64 = 112

low32(dst) = src1 != src2

§

Xslt64 = 113

low32(dst) = src1 < src2 (signed)

§

Xslteq64 = 114

low32(dst) = src1 <= src2 (signed)

§

Xult64 = 115

low32(dst) = src1 < src2 (unsigned)

§

Xulteq64 = 116

low32(dst) = src1 <= src2 (unsigned)

§

Xeq32 = 117

low32(dst) = low32(src1) == low32(src2)

§

Xneq32 = 118

low32(dst) = low32(src1) != low32(src2)

§

Xslt32 = 119

low32(dst) = low32(src1) < low32(src2) (signed)

§

Xslteq32 = 120

low32(dst) = low32(src1) <= low32(src2) (signed)

§

Xult32 = 121

low32(dst) = low32(src1) < low32(src2) (unsigned)

§

Xulteq32 = 122

low32(dst) = low32(src1) <= low32(src2) (unsigned)

§

XLoad8U32Offset32 = 123

low32(dst) = zext(*(ptr + offset))

§

XLoad8S32Offset32 = 124

low32(dst) = sext(*(ptr + offset))

§

XLoad16LeU32Offset32 = 125

low32(dst) = zext(*(ptr + offset))

§

XLoad16LeS32Offset32 = 126

low32(dst) = sext(*(ptr + offset))

§

XLoad32LeOffset32 = 127

low32(dst) = *(ptr + offset)

§

XLoad8U64Offset32 = 128

dst = zext(*(ptr + offset))

§

XLoad8S64Offset32 = 129

dst = sext(*(ptr + offset))

§

XLoad16LeU64Offset32 = 130

dst = zext(*(ptr + offset))

§

XLoad16LeS64Offset32 = 131

dst = sext(*(ptr + offset))

§

XLoad32LeU64Offset32 = 132

dst = zext(*(ptr + offset))

§

XLoad32LeS64Offset32 = 133

dst = sext(*(ptr + offset))

§

XLoad64LeOffset32 = 134

dst = *(ptr + offset)

§

XStore8Offset32 = 135

*(ptr + offset) = low8(src)

§

XStore16LeOffset32 = 136

*(ptr + offset) = low16(src)

§

XStore32LeOffset32 = 137

*(ptr + offset) = low32(src)

§

XStore64LeOffset32 = 138

*(ptr + offset) = low64(src)

§

XLoad8U32Offset8 = 139

low32(dst) = zext(*(ptr + offset))

§

XLoad8S32Offset8 = 140

low32(dst) = sext(*(ptr + offset))

§

XLoad16LeU32Offset8 = 141

low32(dst) = zext(*(ptr + offset))

§

XLoad16LeS32Offset8 = 142

low32(dst) = sext(*(ptr + offset))

§

XLoad32LeOffset8 = 143

low32(dst) = *(ptr + offset)

§

XLoad8U64Offset8 = 144

dst = zext(*(ptr + offset))

§

XLoad8S64Offset8 = 145

dst = sext(*(ptr + offset))

§

XLoad16LeU64Offset8 = 146

dst = zext(*(ptr + offset))

§

XLoad16LeS64Offset8 = 147

dst = sext(*(ptr + offset))

§

XLoad32LeU64Offset8 = 148

dst = zext(*(ptr + offset))

§

XLoad32LeS64Offset8 = 149

dst = sext(*(ptr + offset))

§

XLoad64LeOffset8 = 150

dst = *(ptr + offset)

§

XStore8Offset8 = 151

*(ptr + offset) = low8(src)

§

XStore16LeOffset8 = 152

*(ptr + offset) = low16(src)

§

XStore32LeOffset8 = 153

*(ptr + offset) = low32(src)

§

XStore64LeOffset8 = 154

*(ptr + offset) = low64(src)

§

PushFrame = 155

push lr; push fp; fp = sp

§

PopFrame = 156

sp = fp; pop fp; pop lr

§

PushFrameSave = 157

Macro-instruction to enter a function, allocate some stack, and then save some registers.

This is equivalent to push_frame, stack_alloc32 amt, then saving all of regs to the top of the stack just allocated.

§

PopFrameRestore = 158

Inverse of push_frame_save. Restores regs from the top of the stack, then runs stack_free32 amt, then runs pop_frame.
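
Purely as an illustration of the expansion described for push_frame_save and pop_frame_restore, here is a toy, word-addressed stack machine (not the interpreter's state or code; the field and helper names are invented for this sketch):

struct Toy {
    sp: usize,    // stack pointer (index into `mem`; the stack grows downward)
    fp: usize,    // frame pointer
    lr: u64,      // link register
    mem: Vec<u64>,
}

impl Toy {
    fn push(&mut self, v: u64) { self.sp -= 1; self.mem[self.sp] = v; }
    fn pop(&mut self) -> u64 { let v = self.mem[self.sp]; self.sp += 1; v }

    // push_frame: push lr; push fp; fp = sp
    fn push_frame(&mut self) {
        let (lr, fp) = (self.lr, self.fp as u64);
        self.push(lr);
        self.push(fp);
        self.fp = self.sp;
    }

    // pop_frame: sp = fp; pop fp; pop lr
    fn pop_frame(&mut self) {
        self.sp = self.fp;
        self.fp = self.pop() as usize;
        self.lr = self.pop();
    }

    // push_frame_save: push_frame, then stack_alloc32 amt, then save `regs`
    // to the top of the just-allocated stack.
    fn push_frame_save(&mut self, amt: usize, regs: &[u64]) {
        self.push_frame();
        self.sp -= amt;
        self.mem[self.sp..self.sp + regs.len()].copy_from_slice(regs);
    }

    // pop_frame_restore: restore `regs` from the top of the stack, then
    // stack_free32 amt, then pop_frame.
    fn pop_frame_restore(&mut self, amt: usize, regs: &mut [u64]) {
        regs.copy_from_slice(&self.mem[self.sp..self.sp + regs.len()]);
        self.sp += amt;
        self.pop_frame();
    }
}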

§

StackAlloc32 = 159

sp = sp.checked_sub(amt)

§

StackFree32 = 160

sp = sp + amt

§

Zext8 = 161

dst = zext(low8(src))

§

Zext16 = 162

dst = zext(low16(src))

§

Zext32 = 163

dst = zext(low32(src))

§

Sext8 = 164

dst = sext(low8(src))

§

Sext16 = 165

dst = sext(low16(src))

§

Sext32 = 166

dst = sext(low32(src))

§

XAbs32 = 167

low32(dst) = |low32(src)|

§

XAbs64 = 168

dst = |src|

§

XDiv32S = 169

low32(dst) = low32(src1) / low32(src2) (signed)

§

XDiv64S = 170

dst = src1 / src2 (signed)

§

XDiv32U = 171

low32(dst) = low32(src1) / low32(src2) (unsigned)

§

XDiv64U = 172

dst = src1 / src2 (unsigned)

§

XRem32S = 173

low32(dst) = low32(src1) % low32(src2) (signed)

§

XRem64S = 174

dst = src1 % src2 (signed)

§

XRem32U = 175

low32(dst) = low32(src1) % low32(src2) (unsigned)

§

XRem64U = 176

dst = src1 % src2 (unsigned)

§

XBand32 = 177

low32(dst) = low32(src1) & low32(src2)

§

Xband32S8 = 178

Same as xband32 but src2 is a sign-extended 8-bit immediate.

§

Xband32S32 = 179

Same as xband32 but src2 is a sign-extended 32-bit immediate.

§

XBand64 = 180

dst = src1 & src2

§

Xband64S8 = 181

Same as xband64 but src2 is a sign-extended 8-bit immediate.

§

Xband64S32 = 182

Same as xband64 but src2 is a sign-extended 32-bit immediate.

§

XBor32 = 183

low32(dst) = low32(src1) | low32(src2)

§

Xbor32S8 = 184

Same as xbor32 but src2 is a sign-extended 8-bit immediate.

§

Xbor32S32 = 185

Same as xbor32 but src2 is a sign-extended 32-bit immediate.

§

XBor64 = 186

dst = src1 | src2

§

Xbor64S8 = 187

Same as xbor64 but src2 is a sign-extended 8-bit immediate.

§

Xbor64S32 = 188

Same as xbor64 but src2 is a sign-extended 32-bit immediate.

§

XBxor32 = 189

low32(dst) = low32(src1) ^ low32(src2)

§

Xbxor32S8 = 190

Same as xbxor32 but src2 is a sign-extended 8-bit immediate.

§

Xbxor32S32 = 191

Same as xbxor32 but src2 is a sign-extended 32-bit immediate.

§

XBxor64 = 192

dst = src1 ^ src2

§

Xbxor64S8 = 193

Same as xbxor64 but src2 is a sign-extended 8-bit immediate.

§

Xbxor64S32 = 194

Same as xbxor64 but src2 is a sign-extended 32-bit immediate.

§

XBnot32 = 195

low32(dst) = !low32(src1)

§

XBnot64 = 196

dst = !src1

§

Xmin32U = 197

low32(dst) = min(low32(src1), low32(src2)) (unsigned)

§

Xmin32S = 198

low32(dst) = min(low32(src1), low32(src2)) (signed)

§

Xmax32U = 199

low32(dst) = max(low32(src1), low32(src2)) (unsigned)

§

Xmax32S = 200

low32(dst) = max(low32(src1), low32(src2)) (signed)

§

Xmin64U = 201

dst = min(src1, src2) (unsigned)

§

Xmin64S = 202

dst = min(src1, src2) (signed)

§

Xmax64U = 203

dst = max(src1, src2) (unsigned)

§

Xmax64S = 204

dst = max(src1, src2) (signed)

§

XSelect32 = 205

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

§

XSelect64 = 206

dst = low32(cond) ? if_nonzero : if_zero

§

ExtendedOp = 207

The extended-op opcode. An ExtendedOpcode follows this opcode.

Implementations§

Source§

impl Opcode

Source

pub const MAX: u8 = 207u8

The value of the maximum defined opcode.
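
As a small illustration of what the constant means for decoding (using Opcode::new, documented below), bytes beyond MAX are rejected:

use pulley_interpreter::opcode::Opcode;

fn max_is_the_last_valid_byte() {
    assert!(Opcode::new(Opcode::MAX).is_some());
    assert!(Opcode::new(Opcode::MAX + 1).is_none());
}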

Source§

impl Opcode

Source

pub fn new(byte: u8) -> Option<Self>

Create a new Opcode from the given byte.

Returns None if byte is not a valid opcode.
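
For example, a checked lookup might look like this (a minimal sketch; the match arms are purely illustrative):

use pulley_interpreter::opcode::Opcode;

// Checked decoding of a single opcode byte.
fn describe(byte: u8) -> &'static str {
    match Opcode::new(byte) {
        Some(Opcode::Ret) => "return",
        Some(Opcode::ExtendedOp) => "extended opcode prefix",
        Some(_) => "some other defined opcode",
        None => "not a defined opcode",
    }
}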

Source

pub unsafe fn unchecked_new(byte: u8) -> Self

Like new but does not check whether byte is a valid opcode.

§Safety

It is unsafe to pass a byte that is not a valid opcode.
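
One way to uphold that contract is to validate against Opcode::MAX first; since the variants are contiguous from 0 through MAX in the listing above, every byte in that range is a defined opcode (a sketch, not the crate's own helper):

use pulley_interpreter::opcode::Opcode;

fn decode_validated(byte: u8) -> Opcode {
    assert!(byte <= Opcode::MAX, "{byte} is not a defined opcode");
    // SAFETY: `byte` was just checked to be within the defined opcode range.
    unsafe { Opcode::unchecked_new(byte) }
}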

Trait Implementations§

Source§

impl Clone for Opcode

Source§

fn clone(&self) -> Opcode

Returns a copy of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for Opcode

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Decode for Opcode

Available on crate feature decode only.
Source§

fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
where T: BytecodeStream,

Decode this type from the given bytecode stream.
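
A minimal sketch of using this impl generically; the import paths are an assumption (the Decode and BytecodeStream traits are taken to live in the crate's decode module), and the decode crate feature must be enabled:

use pulley_interpreter::decode::{BytecodeStream, Decode}; // assumed paths
use pulley_interpreter::opcode::Opcode;

// Pull one opcode off the front of any bytecode stream.
fn next_opcode<T: BytecodeStream>(stream: &mut T) -> Result<Opcode, T::Error> {
    Opcode::decode(stream)
}
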
Source§

impl Hash for Opcode

Source§

fn hash<__H: Hasher>(&self, state: &mut __H)

Feeds this value into the given Hasher. Read more
1.3.0 · Source§

fn hash_slice<H>(data: &[Self], state: &mut H)
where H: Hasher, Self: Sized,

Feeds a slice of this type into the given Hasher. Read more
Source§

impl Ord for Opcode

Source§

fn cmp(&self, other: &Opcode) -> Ordering

This method returns an Ordering between self and other. Read more
1.21.0 · Source§

fn max(self, other: Self) -> Self
where Self: Sized,

Compares and returns the maximum of two values. Read more
1.21.0 · Source§

fn min(self, other: Self) -> Self
where Self: Sized,

Compares and returns the minimum of two values. Read more
1.50.0 · Source§

fn clamp(self, min: Self, max: Self) -> Self
where Self: Sized,

Restrict a value to a certain interval. Read more
Source§

impl PartialEq for Opcode

Source§

fn eq(&self, other: &Opcode) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · Source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
Source§

impl PartialOrd for Opcode

Source§

fn partial_cmp(&self, other: &Opcode) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · Source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · Source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the <= operator. Read more
1.0.0 · Source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > operator. Read more
1.0.0 · Source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by the >= operator. Read more
Source§

impl Copy for Opcode

Source§

impl Eq for Opcode

Source§

impl StructuralPartialEq for Opcode

Auto Trait Implementations§

§

impl Freeze for Opcode

§

impl RefUnwindSafe for Opcode

§

impl Send for Opcode

§

impl Sync for Opcode

§

impl Unpin for Opcode

§

impl UnwindSafe for Opcode

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dst: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.