#[repr(u8)]
pub enum Opcode {
Ret = 0,
Call = 1,
Call1 = 2,
Call2 = 3,
Call3 = 4,
Call4 = 5,
CallIndirect = 6,
Jump = 7,
XJump = 8,
BrIf = 9,
BrIfNot = 10,
BrIfXeq32 = 11,
BrIfXneq32 = 12,
BrIfXslt32 = 13,
BrIfXslteq32 = 14,
BrIfXult32 = 15,
BrIfXulteq32 = 16,
BrIfXeq64 = 17,
BrIfXneq64 = 18,
BrIfXslt64 = 19,
BrIfXslteq64 = 20,
BrIfXult64 = 21,
BrIfXulteq64 = 22,
BrIfXeq32I8 = 23,
BrIfXeq32I32 = 24,
BrIfXneq32I8 = 25,
BrIfXneq32I32 = 26,
BrIfXslt32I8 = 27,
BrIfXslt32I32 = 28,
BrIfXsgt32I8 = 29,
BrIfXsgt32I32 = 30,
BrIfXslteq32I8 = 31,
BrIfXslteq32I32 = 32,
BrIfXsgteq32I8 = 33,
BrIfXsgteq32I32 = 34,
BrIfXult32U8 = 35,
BrIfXult32U32 = 36,
BrIfXulteq32U8 = 37,
BrIfXulteq32U32 = 38,
BrIfXugt32U8 = 39,
BrIfXugt32U32 = 40,
BrIfXugteq32U8 = 41,
BrIfXugteq32U32 = 42,
BrIfXeq64I8 = 43,
BrIfXeq64I32 = 44,
BrIfXneq64I8 = 45,
BrIfXneq64I32 = 46,
BrIfXslt64I8 = 47,
BrIfXslt64I32 = 48,
BrIfXsgt64I8 = 49,
BrIfXsgt64I32 = 50,
BrIfXslteq64I8 = 51,
BrIfXslteq64I32 = 52,
BrIfXsgteq64I8 = 53,
BrIfXsgteq64I32 = 54,
BrIfXult64U8 = 55,
BrIfXult64U32 = 56,
BrIfXulteq64U8 = 57,
BrIfXulteq64U32 = 58,
BrIfXugt64U8 = 59,
BrIfXugt64U32 = 60,
BrIfXugteq64U8 = 61,
BrIfXugteq64U32 = 62,
BrTable32 = 63,
Xmov = 64,
Xzero = 65,
Xone = 66,
Xconst8 = 67,
Xconst16 = 68,
Xconst32 = 69,
Xconst64 = 70,
Xadd32 = 71,
Xadd32U8 = 72,
Xadd32U32 = 73,
Xadd64 = 74,
Xadd64U8 = 75,
Xadd64U32 = 76,
Xmadd32 = 77,
Xmadd64 = 78,
Xsub32 = 79,
Xsub32U8 = 80,
Xsub32U32 = 81,
Xsub64 = 82,
Xsub64U8 = 83,
Xsub64U32 = 84,
XMul32 = 85,
Xmul32S8 = 86,
Xmul32S32 = 87,
XMul64 = 88,
Xmul64S8 = 89,
Xmul64S32 = 90,
Xctz32 = 91,
Xctz64 = 92,
Xclz32 = 93,
Xclz64 = 94,
Xpopcnt32 = 95,
Xpopcnt64 = 96,
Xrotl32 = 97,
Xrotl64 = 98,
Xrotr32 = 99,
Xrotr64 = 100,
Xshl32 = 101,
Xshr32S = 102,
Xshr32U = 103,
Xshl64 = 104,
Xshr64S = 105,
Xshr64U = 106,
Xshl32U6 = 107,
Xshr32SU6 = 108,
Xshr32UU6 = 109,
Xshl64U6 = 110,
Xshr64SU6 = 111,
Xshr64UU6 = 112,
Xneg32 = 113,
Xneg64 = 114,
Xeq64 = 115,
Xneq64 = 116,
Xslt64 = 117,
Xslteq64 = 118,
Xult64 = 119,
Xulteq64 = 120,
Xeq32 = 121,
Xneq32 = 122,
Xslt32 = 123,
Xslteq32 = 124,
Xult32 = 125,
Xulteq32 = 126,
XLoad8U32Offset32 = 127,
XLoad8S32Offset32 = 128,
XLoad16LeU32Offset32 = 129,
XLoad16LeS32Offset32 = 130,
XLoad32LeOffset32 = 131,
XLoad8U64Offset32 = 132,
XLoad8S64Offset32 = 133,
XLoad16LeU64Offset32 = 134,
XLoad16LeS64Offset32 = 135,
XLoad32LeU64Offset32 = 136,
XLoad32LeS64Offset32 = 137,
XLoad64LeOffset32 = 138,
XStore8Offset32 = 139,
XStore16LeOffset32 = 140,
XStore32LeOffset32 = 141,
XStore64LeOffset32 = 142,
XLoad8U32Offset8 = 143,
XLoad8S32Offset8 = 144,
XLoad16LeU32Offset8 = 145,
XLoad16LeS32Offset8 = 146,
XLoad32LeOffset8 = 147,
XLoad8U64Offset8 = 148,
XLoad8S64Offset8 = 149,
XLoad16LeU64Offset8 = 150,
XLoad16LeS64Offset8 = 151,
XLoad32LeU64Offset8 = 152,
XLoad32LeS64Offset8 = 153,
XLoad64LeOffset8 = 154,
XStore8Offset8 = 155,
XStore16LeOffset8 = 156,
XStore32LeOffset8 = 157,
XStore64LeOffset8 = 158,
XLoad8U32G32 = 159,
XLoad8S32G32 = 160,
XLoad16LeU32G32 = 161,
XLoad16LeS32G32 = 162,
XLoad32LeG32 = 163,
XLoad64LeG32 = 164,
XStore8G32 = 165,
XStore16LeG32 = 166,
XStore32LeG32 = 167,
XStore64LeG32 = 168,
PushFrame = 169,
PopFrame = 170,
PushFrameSave = 171,
PopFrameRestore = 172,
StackAlloc32 = 173,
StackFree32 = 174,
Zext8 = 175,
Zext16 = 176,
Zext32 = 177,
Sext8 = 178,
Sext16 = 179,
Sext32 = 180,
XAbs32 = 181,
XAbs64 = 182,
XDiv32S = 183,
XDiv64S = 184,
XDiv32U = 185,
XDiv64U = 186,
XRem32S = 187,
XRem64S = 188,
XRem32U = 189,
XRem64U = 190,
XBand32 = 191,
Xband32S8 = 192,
Xband32S32 = 193,
XBand64 = 194,
Xband64S8 = 195,
Xband64S32 = 196,
XBor32 = 197,
Xbor32S8 = 198,
Xbor32S32 = 199,
XBor64 = 200,
Xbor64S8 = 201,
Xbor64S32 = 202,
XBxor32 = 203,
Xbxor32S8 = 204,
Xbxor32S32 = 205,
XBxor64 = 206,
Xbxor64S8 = 207,
Xbxor64S32 = 208,
XBnot32 = 209,
XBnot64 = 210,
Xmin32U = 211,
Xmin32S = 212,
Xmax32U = 213,
Xmax32S = 214,
Xmin64U = 215,
Xmin64S = 216,
Xmax64U = 217,
Xmax64S = 218,
XSelect32 = 219,
XSelect64 = 220,
XBc32BoundTrap = 221,
XBc32BoundNeTrap = 222,
XBc32StrictBoundTrap = 223,
XBc32StrictBoundNeTrap = 224,
ExtendedOp = 225,
}
An opcode without its immediates and operands.
Variants
Ret = 0
Transfer control to the address in the lr register.
Call = 1
Transfer control to the PC at the given offset and set the lr register to the PC just after this instruction.
This instruction generally assumes that the Pulley ABI is being respected, where arguments are in argument registers (starting at x0 for integer arguments) and results are in result registers. This instruction itself assumes that all arguments are already in their registers. The instructions below additionally fold moving arguments into the correct registers into the call instruction itself.
Call1 = 2
Like call, but also x0 = arg1
Call2 = 3
Like call, but also x0, x1 = arg1, arg2
Call3 = 4
Like call, but also x0, x1, x2 = arg1, arg2, arg3
Call4 = 5
Like call, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4
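As a rough illustration of what the fused-argument calls do, here is a minimal sketch in Rust; MachineState, its fields, and the explicit next_pc parameter are illustrative stand-ins, not the crate's real interpreter API:

struct MachineState {
    x: [u64; 32], // integer registers x0, x1, ...
    pc: u64,      // current program counter
    lr: u64,      // link register
}

// call1: fold the move of one argument into x0 into the call itself,
// then save the return address and transfer control.
fn call1(state: &mut MachineState, rel: i64, next_pc: u64, arg1: u64) {
    state.x[0] = arg1;                            // x0 = arg1
    state.lr = next_pc;                           // PC just after this instruction
    state.pc = state.pc.wrapping_add(rel as u64); // jump to the given offset
}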
CallIndirect = 6
Transfer control to the PC in reg and set lr to the PC just after this instruction.
Jump = 7
Unconditionally transfer control to the PC at the given offset.
XJump = 8
Unconditionally transfer control to the PC in the specified register.
BrIf = 9
Conditionally transfer control to the given PC offset if low32(cond) contains a non-zero value.
BrIfNot = 10
Conditionally transfer control to the given PC offset if low32(cond) contains a zero value.
BrIfXeq32 = 11
Branch if a == b.
BrIfXneq32 = 12
Branch if a != b.
BrIfXslt32 = 13
Branch if signed a < b.
BrIfXslteq32 = 14
Branch if signed a <= b.
BrIfXult32 = 15
Branch if unsigned a < b.
BrIfXulteq32 = 16
Branch if unsigned a <= b.
BrIfXeq64 = 17
Branch if a == b.
BrIfXneq64 = 18
Branch if a != b.
BrIfXslt64 = 19
Branch if signed a < b.
BrIfXslteq64 = 20
Branch if signed a <= b.
BrIfXult64 = 21
Branch if unsigned a < b.
BrIfXulteq64 = 22
Branch if unsigned a <= b.
BrIfXeq32I8 = 23
Branch if a == b.
BrIfXeq32I32 = 24
Branch if a == b.
BrIfXneq32I8 = 25
Branch if a != b.
BrIfXneq32I32 = 26
Branch if a != b.
BrIfXslt32I8 = 27
Branch if signed a < b.
BrIfXslt32I32 = 28
Branch if signed a < b.
BrIfXsgt32I8 = 29
Branch if signed a > b.
BrIfXsgt32I32 = 30
Branch if signed a > b.
BrIfXslteq32I8 = 31
Branch if signed a <= b.
BrIfXslteq32I32 = 32
Branch if signed a <= b.
BrIfXsgteq32I8 = 33
Branch if signed a >= b.
BrIfXsgteq32I32 = 34
Branch if signed a >= b.
BrIfXult32U8 = 35
Branch if unsigned a < b.
BrIfXult32U32 = 36
Branch if unsigned a < b.
BrIfXulteq32U8 = 37
Branch if unsigned a <= b.
BrIfXulteq32U32 = 38
Branch if unsigned a <= b.
BrIfXugt32U8 = 39
Branch if unsigned a > b.
BrIfXugt32U32 = 40
Branch if unsigned a > b.
BrIfXugteq32U8 = 41
Branch if unsigned a >= b.
BrIfXugteq32U32 = 42
Branch if unsigned a >= b.
BrIfXeq64I8 = 43
Branch if a == b.
BrIfXeq64I32 = 44
Branch if a == b.
BrIfXneq64I8 = 45
Branch if a != b.
BrIfXneq64I32 = 46
Branch if a != b.
BrIfXslt64I8 = 47
Branch if signed a < b.
BrIfXslt64I32 = 48
Branch if signed a < b.
BrIfXsgt64I8 = 49
Branch if signed a > b.
BrIfXsgt64I32 = 50
Branch if signed a > b.
BrIfXslteq64I8 = 51
Branch if signed a <= b.
BrIfXslteq64I32 = 52
Branch if signed a <= b.
BrIfXsgteq64I8 = 53
Branch if signed a >= b.
BrIfXsgteq64I32 = 54
Branch if signed a >= b.
BrIfXult64U8 = 55
Branch if unsigned a < b.
BrIfXult64U32 = 56
Branch if unsigned a < b.
BrIfXulteq64U8 = 57
Branch if unsigned a <= b.
BrIfXulteq64U32 = 58
Branch if unsigned a <= b.
BrIfXugt64U8 = 59
Branch if unsigned a > b.
BrIfXugt64U32 = 60
Branch if unsigned a > b.
BrIfXugteq64U8 = 61
Branch if unsigned a >= b.
BrIfXugteq64U32 = 62
Branch if unsigned a >= b.
BrTable32 = 63
Branch to the label indicated by low32(idx).
After this instruction are amt instances of PcRelOffset and the idx selects which one will be branched to. The value of idx is clamped to amt - 1 (e.g. the last offset is the “default” one).
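A minimal sketch of the clamping behavior described above (the slice of decoded offsets is illustrative; the real interpreter reads the amt PcRelOffset values straight from the bytecode stream):

// The value of idx is clamped to amt - 1, so the final entry acts as
// the "default" branch target.
fn br_table_target(idx: u32, offsets: &[i32]) -> i32 {
    let last = offsets.len() - 1;     // amt - 1
    offsets[(idx as usize).min(last)] // clamp, then select the offset
}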
Xmov = 64
Move between x registers.
Xzero = 65
Set dst = 0
Xone = 66
Set dst = 1
Xconst8 = 67
Set dst = sign_extend(imm8).
Xconst16 = 68
Set dst = sign_extend(imm16).
Xconst32 = 69
Set dst = sign_extend(imm32).
Xconst64 = 70
Set dst = imm64.
Xadd32 = 71
32-bit wrapping addition: low32(dst) = low32(src1) + low32(src2).
The upper 32-bits of dst are unmodified.
Xadd32U8 = 72
Same as xadd32 but src2 is a zero-extended 8-bit immediate.
Xadd32U32 = 73
Same as xadd32 but src2 is a 32-bit immediate.
Xadd64 = 74
64-bit wrapping addition: dst = src1 + src2.
Xadd64U8 = 75
Same as xadd64 but src2 is a zero-extended 8-bit immediate.
Xadd64U32 = 76
Same as xadd64 but src2 is a zero-extended 32-bit immediate.
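A sketch of how the immediate forms behave under the stated extension rules (function names are illustrative, modeling only the value computation):

// xadd32_u8: src2 is a zero-extended 8-bit immediate; only the low 32
// bits participate, and the upper 32 bits of dst are left unmodified.
fn xadd32_u8(src1_low32: u32, imm: u8) -> u32 {
    src1_low32.wrapping_add(imm as u32)
}

// xadd64_u32: src2 is a zero-extended 32-bit immediate.
fn xadd64_u32(src1: u64, imm: u32) -> u64 {
    src1.wrapping_add(imm as u64)
}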
Xmadd32 = 77
low32(dst) = low32(src1) * low32(src2) + low32(src3)
Xmadd64 = 78
dst = src1 * src2 + src3
Xsub32 = 79
32-bit wrapping subtraction: low32(dst) = low32(src1) - low32(src2).
The upper 32-bits of dst are unmodified.
Xsub32U8 = 80
Same as xsub32 but src2 is a zero-extended 8-bit immediate.
Xsub32U32 = 81
Same as xsub32 but src2 is a 32-bit immediate.
Xsub64 = 82
64-bit wrapping subtraction: dst = src1 - src2.
Xsub64U8 = 83
Same as xsub64 but src2 is a zero-extended 8-bit immediate.
Xsub64U32 = 84
Same as xsub64 but src2 is a zero-extended 32-bit immediate.
XMul32 = 85
low32(dst) = low32(src1) * low32(src2)
Xmul32S8 = 86
Same as xmul32 but src2 is a sign-extended 8-bit immediate.
Xmul32S32 = 87
Same as xmul32 but src2 is a sign-extended 32-bit immediate.
XMul64 = 88
dst = src1 * src2
Xmul64S8 = 89
Same as xmul64 but src2 is a sign-extended 8-bit immediate.
Xmul64S32 = 90
Same as xmul64 but src2 is a sign-extended 32-bit immediate.
Xctz32 = 91
low32(dst) = trailing_zeros(low32(src))
Xctz64 = 92
dst = trailing_zeros(src)
Xclz32 = 93
low32(dst) = leading_zeros(low32(src))
Xclz64 = 94
dst = leading_zeros(src)
Xpopcnt32 = 95
low32(dst) = count_ones(low32(src))
Xpopcnt64 = 96
dst = count_ones(src)
Xrotl32 = 97
low32(dst) = rotate_left(low32(src1), low32(src2))
Xrotl64 = 98
dst = rotate_left(src1, src2)
Xrotr32 = 99
low32(dst) = rotate_right(low32(src1), low32(src2))
Xrotr64 = 100
dst = rotate_right(src1, src2)
Xshl32 = 101
low32(dst) = low32(src1) << low5(src2)
Xshr32S = 102
low32(dst) = low32(src1) >> low5(src2)
Xshr32U = 103
low32(dst) = low32(src1) >> low5(src2)
Xshl64 = 104
dst = src1 << low6(src2)
Xshr64S = 105
dst = src1 >> low6(src2)
Xshr64U = 106
dst = src1 >> low6(src2)
Xshl32U6 = 107
low32(dst) = low32(src1) << low5(src2)
Xshr32SU6 = 108
low32(dst) = low32(src1) >> low5(src2)
Xshr32UU6 = 109
low32(dst) = low32(src1) >> low5(src2)
Xshl64U6 = 110
dst = src1 << low6(src2)
Xshr64SU6 = 111
dst = src1 >> low6(src2)
Xshr64UU6 = 112
dst = src1 >> low6(src2)
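The low5/low6 notation above means the shift amount is masked to 5 bits for 32-bit operations and 6 bits for 64-bit operations; the S and U variants are arithmetic and logical right shifts respectively. A sketch, not the crate's implementation:

fn xshl32(a: u32, b: u32) -> u32 { a << (b & 0x1f) }                     // low5(src2)
fn xshr32_s(a: u32, b: u32) -> u32 { ((a as i32) >> (b & 0x1f)) as u32 } // arithmetic
fn xshr32_u(a: u32, b: u32) -> u32 { a >> (b & 0x1f) }                   // logical
fn xshl64(a: u64, b: u64) -> u64 { a << (b & 0x3f) }                     // low6(src2)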
Xneg32 = 113
low32(dst) = -low32(src)
Xneg64 = 114
dst = -src
Xeq64 = 115
low32(dst) = src1 == src2
Xneq64 = 116
low32(dst) = src1 != src2
Xslt64 = 117
low32(dst) = src1 < src2 (signed)
Xslteq64 = 118
low32(dst) = src1 <= src2 (signed)
Xult64 = 119
low32(dst) = src1 < src2 (unsigned)
Xulteq64 = 120
low32(dst) = src1 <= src2 (unsigned)
Xeq32 = 121
low32(dst) = low32(src1) == low32(src2)
Xneq32 = 122
low32(dst) = low32(src1) != low32(src2)
Xslt32 = 123
low32(dst) = low32(src1) < low32(src2) (signed)
Xslteq32 = 124
low32(dst) = low32(src1) <= low32(src2) (signed)
Xult32 = 125
low32(dst) = low32(src1) < low32(src2) (unsigned)
Xulteq32 = 126
low32(dst) = low32(src1) <= low32(src2) (unsigned)
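The comparison ops produce a boolean 0 or 1 in the low 32 bits of dst; the signed and unsigned variants differ only in how the operands are interpreted. A sketch:

fn xslt64(a: u64, b: u64) -> u32 { ((a as i64) < (b as i64)) as u32 } // signed
fn xult64(a: u64, b: u64) -> u32 { (a < b) as u32 }                   // unsigned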
XLoad8U32Offset32 = 127
low32(dst) = zext(*(ptr + offset))
XLoad8S32Offset32 = 128
low32(dst) = sext(*(ptr + offset))
XLoad16LeU32Offset32 = 129
low32(dst) = zext(*(ptr + offset))
XLoad16LeS32Offset32 = 130
low32(dst) = sext(*(ptr + offset))
XLoad32LeOffset32 = 131
low32(dst) = *(ptr + offset)
XLoad8U64Offset32 = 132
dst = zext(*(ptr + offset))
XLoad8S64Offset32 = 133
dst = sext(*(ptr + offset))
XLoad16LeU64Offset32 = 134
dst = zext(*(ptr + offset))
XLoad16LeS64Offset32 = 135
dst = sext(*(ptr + offset))
XLoad32LeU64Offset32 = 136
dst = zext(*(ptr + offset))
XLoad32LeS64Offset32 = 137
dst = sext(*(ptr + offset))
XLoad64LeOffset32 = 138
dst = *(ptr + offset)
XStore8Offset32 = 139
*(ptr + offset) = low8(src)
XStore16LeOffset32 = 140
*(ptr + offset) = low16(src)
XStore32LeOffset32 = 141
*(ptr + offset) = low32(src)
XStore64LeOffset32 = 142
*(ptr + offset) = low64(src)
XLoad8U32Offset8 = 143
low32(dst) = zext(*(ptr + offset))
XLoad8S32Offset8 = 144
low32(dst) = sext(*(ptr + offset))
XLoad16LeU32Offset8 = 145
low32(dst) = zext(*(ptr + offset))
XLoad16LeS32Offset8 = 146
low32(dst) = sext(*(ptr + offset))
XLoad32LeOffset8 = 147
low32(dst) = *(ptr + offset)
XLoad8U64Offset8 = 148
dst = zext(*(ptr + offset))
XLoad8S64Offset8 = 149
dst = sext(*(ptr + offset))
XLoad16LeU64Offset8 = 150
dst = zext(*(ptr + offset))
XLoad16LeS64Offset8 = 151
dst = sext(*(ptr + offset))
XLoad32LeU64Offset8 = 152
dst = zext(*(ptr + offset))
XLoad32LeS64Offset8 = 153
dst = sext(*(ptr + offset))
XLoad64LeOffset8 = 154
dst = *(ptr + offset)
XStore8Offset8 = 155
*(ptr + offset) = low8(src)
XStore16LeOffset8 = 156
*(ptr + offset) = low16(src)
XStore32LeOffset8 = 157
*(ptr + offset) = low32(src)
XStore64LeOffset8 = 158
*(ptr + offset) = low64(src)
XLoad8U32G32 = 159
low32(dst) = zext_8_32(*(base + zext(addr) + offset))
XLoad8S32G32 = 160
low32(dst) = sext_8_32(*(base + zext(addr) + offset))
XLoad16LeU32G32 = 161
low32(dst) = zext_16_32(*(base + zext(addr) + offset))
XLoad16LeS32G32 = 162
low32(dst) = sext_16_32(*(base + zext(addr) + offset))
XLoad32LeG32 = 163
low32(dst) = *(base + zext(addr) + offset)
XLoad64LeG32 = 164
dst = *(base + zext(addr) + offset)
XStore8G32 = 165
*(base + zext(addr) + offset) = low8(src)
XStore16LeG32 = 166
*(base + zext(addr) + offset) = low16(src)
XStore32LeG32 = 167
*(base + zext(addr) + offset) = low32(src)
XStore64LeG32 = 168
*(base + zext(addr) + offset) = src
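The g32 addressing mode used by the loads and stores above computes base + zext(addr) + offset: a 32-bit guest address is zero-extended and added to a host base plus a static offset. A sketch of the effective-address computation (the offset width here is an assumption for illustration):

fn g32_effective_addr(base: u64, addr: u32, offset: u16) -> u64 {
    base.wrapping_add(addr as u64).wrapping_add(offset as u64)
}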
PushFrame = 169
push lr; push fp; fp = sp
PopFrame = 170
sp = fp; pop fp; pop lr
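A sketch of the two frame ops; for simplicity the stack is modeled as a Vec growing upward, so fp is an index rather than a real pointer (illustrative, not the interpreter's representation):

struct Frame {
    stack: Vec<u64>, // value stack
    fp: u64,         // frame pointer
    lr: u64,         // link register
}

fn push_frame(st: &mut Frame) {
    st.stack.push(st.lr);          // push lr
    st.stack.push(st.fp);          // push fp
    st.fp = st.stack.len() as u64; // fp = sp
}

fn pop_frame(st: &mut Frame) {
    st.stack.truncate(st.fp as usize); // sp = fp
    st.fp = st.stack.pop().unwrap();   // pop fp
    st.lr = st.stack.pop().unwrap();   // pop lr
}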
PushFrameSave = 171
Macro-instruction to enter a function, allocate some stack, and then save some registers.
This is equivalent to push_frame, stack_alloc32 amt, then saving all of regs to the top of the stack just allocated.
PopFrameRestore = 172
Inverse of push_frame_save. Restores regs from the top of the stack, then runs stack_free32 amt, then runs pop_frame.
StackAlloc32 = 173
sp = sp.checked_sub(amt)
StackFree32 = 174
sp = sp + amt
Zext8 = 175
dst = zext(low8(src))
Zext16 = 176
dst = zext(low16(src))
Zext32 = 177
dst = zext(low32(src))
Sext8 = 178
dst = sext(low8(src))
Sext16 = 179
dst = sext(low16(src))
Sext32 = 180
dst = sext(low32(src))
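The extension ops are expressible directly as Rust casts; a sketch:

fn zext8(src: u64) -> u64 { src as u8 as u64 }          // dst = zext(low8(src))
fn zext16(src: u64) -> u64 { src as u16 as u64 }        // dst = zext(low16(src))
fn sext8(src: u64) -> u64 { src as u8 as i8 as u64 }    // dst = sext(low8(src))
fn sext32(src: u64) -> u64 { src as u32 as i32 as u64 } // dst = sext(low32(src))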
XAbs32 = 181
low32(dst) = |low32(src)|
XAbs64 = 182
dst = |src|
XDiv32S = 183
low32(dst) = low32(src1) / low32(src2) (signed)
XDiv64S = 184
dst = src1 / src2 (signed)
XDiv32U = 185
low32(dst) = low32(src1) / low32(src2) (unsigned)
XDiv64U = 186
dst = src1 / src2 (unsigned)
XRem32S = 187
low32(dst) = low32(src1) % low32(src2) (signed)
XRem64S = 188
dst = src1 % src2 (signed)
XRem32U = 189
low32(dst) = low32(src1) % low32(src2) (unsigned)
XRem64U = 190
dst = src1 % src2 (unsigned)
XBand32 = 191
low32(dst) = low32(src1) & low32(src2)
Xband32S8 = 192
Same as xband32 but src2 is a sign-extended 8-bit immediate.
Xband32S32 = 193
Same as xband32 but src2 is a sign-extended 32-bit immediate.
XBand64 = 194
dst = src1 & src2
Xband64S8 = 195
Same as xband64 but src2 is a sign-extended 8-bit immediate.
Xband64S32 = 196
Same as xband64 but src2 is a sign-extended 32-bit immediate.
XBor32 = 197
low32(dst) = low32(src1) | low32(src2)
Xbor32S8 = 198
Same as xbor32 but src2 is a sign-extended 8-bit immediate.
Xbor32S32 = 199
Same as xbor32 but src2 is a sign-extended 32-bit immediate.
XBor64 = 200
dst = src1 | src2
Xbor64S8 = 201
Same as xbor64 but src2 is a sign-extended 8-bit immediate.
Xbor64S32 = 202
Same as xbor64 but src2 is a sign-extended 32-bit immediate.
XBxor32 = 203
low32(dst) = low32(src1) ^ low32(src2)
Xbxor32S8 = 204
Same as xbxor32 but src2 is a sign-extended 8-bit immediate.
Xbxor32S32 = 205
Same as xbxor32 but src2 is a sign-extended 32-bit immediate.
XBxor64 = 206
dst = src1 ^ src2
Xbxor64S8 = 207
Same as xbxor64 but src2 is a sign-extended 8-bit immediate.
Xbxor64S32 = 208
Same as xbxor64 but src2 is a sign-extended 32-bit immediate.
XBnot32 = 209
low32(dst) = !low32(src1)
XBnot64 = 210
dst = !src1
Xmin32U = 211
low32(dst) = min(low32(src1), low32(src2)) (unsigned)
Xmin32S = 212
low32(dst) = min(low32(src1), low32(src2)) (signed)
Xmax32U = 213
low32(dst) = max(low32(src1), low32(src2)) (unsigned)
Xmax32S = 214
low32(dst) = max(low32(src1), low32(src2)) (signed)
Xmin64U = 215
dst = min(src1, src2) (unsigned)
Xmin64S = 216
dst = min(src1, src2) (signed)
Xmax64U = 217
dst = max(src1, src2) (unsigned)
Xmax64S = 218
dst = max(src1, src2) (signed)
XSelect32 = 219
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
XSelect64 = 220
dst = low32(cond) ? if_nonzero : if_zero
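A sketch of the select ops; in both widths the condition is taken from the low 32 bits:

fn xselect32(cond: u32, if_nonzero: u32, if_zero: u32) -> u32 {
    if cond != 0 { if_nonzero } else { if_zero }
}

fn xselect64(cond: u32, if_nonzero: u64, if_zero: u64) -> u64 {
    if cond != 0 { if_nonzero } else { if_zero }
}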
XBc32BoundTrap = 221
trapif(addr > bound_ptr - size) (unsigned)
XBc32BoundNeTrap = 222
trapif(addr > *(bound_ptr + bound_off) - size) (unsigned)
Note that the bound_ptr + bound_off load loads a host-native-endian pointer-sized value.
XBc32StrictBoundTrap = 223
trapif(addr >= bound_ptr) (unsigned)
XBc32StrictBoundNeTrap = 224
trapif(addr >= *(bound_ptr + bound_off)) (unsigned)
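Read literally, the non-strict check traps when the unsigned address exceeds bound_ptr - size. A sketch, assuming size <= bound so the subtraction does not wrap (Trap is an illustrative stand-in):

struct Trap;

// trapif(addr > bound - size), all unsigned
fn xbc32_bound_trap(addr: u64, bound: u64, size: u64) -> Result<(), Trap> {
    if addr > bound - size { Err(Trap) } else { Ok(()) }
}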
ExtendedOp = 225
The extended-op opcode. An ExtendedOpcode follows this opcode.
Implementations

impl Opcode

pub fn new(byte: u8) -> Option<Self>
Create a new Opcode from the given byte.
Returns None if byte is not a valid opcode.

pub unsafe fn unchecked_new(byte: u8) -> Self
Like new but does not check whether byte is a valid opcode.

Safety
It is unsafe to pass a byte that is not a valid opcode.
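A usage sketch of the two constructors, assuming the enum lives in the pulley_interpreter crate and derives PartialEq/Debug as is typical for such types:

use pulley_interpreter::Opcode;

fn main() {
    // `new` validates the byte against the defined opcode range.
    assert_eq!(Opcode::new(0), Some(Opcode::Ret));
    assert!(Opcode::new(0xFF).is_none()); // only 226 opcodes are defined

    // `unchecked_new` skips validation; the caller must guarantee the
    // byte is a valid opcode, hence the unsafe block.
    let op = unsafe { Opcode::unchecked_new(7) };
    assert_eq!(op, Opcode::Jump);
}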