#[repr(u32)]pub enum Instruction {
// 141 variants
trap = 0,
fallthrough = 17,
jump_indirect(RawReg, u32),
load_imm(RawReg, u32),
load_u8(RawReg, u32),
load_i8(RawReg, u32),
load_u16(RawReg, u32),
load_i16(RawReg, u32),
load_i32(RawReg, u32),
load_u32(RawReg, u32),
load_u64(RawReg, u32),
store_u8(RawReg, u32),
store_u16(RawReg, u32),
store_u32(RawReg, u32),
store_u64(RawReg, u32),
load_imm_and_jump(RawReg, u32, u32),
branch_eq_imm(RawReg, u32, u32),
branch_not_eq_imm(RawReg, u32, u32),
branch_less_unsigned_imm(RawReg, u32, u32),
branch_less_signed_imm(RawReg, u32, u32),
branch_greater_or_equal_unsigned_imm(RawReg, u32, u32),
branch_greater_or_equal_signed_imm(RawReg, u32, u32),
branch_less_or_equal_signed_imm(RawReg, u32, u32),
branch_less_or_equal_unsigned_imm(RawReg, u32, u32),
branch_greater_signed_imm(RawReg, u32, u32),
branch_greater_unsigned_imm(RawReg, u32, u32),
store_imm_indirect_u8(RawReg, u32, u32),
store_imm_indirect_u16(RawReg, u32, u32),
store_imm_indirect_u32(RawReg, u32, u32),
store_imm_indirect_u64(RawReg, u32, u32),
store_indirect_u8(RawReg, RawReg, u32),
store_indirect_u16(RawReg, RawReg, u32),
store_indirect_u32(RawReg, RawReg, u32),
store_indirect_u64(RawReg, RawReg, u32),
load_indirect_u8(RawReg, RawReg, u32),
load_indirect_i8(RawReg, RawReg, u32),
load_indirect_u16(RawReg, RawReg, u32),
load_indirect_i16(RawReg, RawReg, u32),
load_indirect_i32(RawReg, RawReg, u32),
load_indirect_u32(RawReg, RawReg, u32),
load_indirect_u64(RawReg, RawReg, u32),
add_imm_32(RawReg, RawReg, u32),
add_imm_64(RawReg, RawReg, u32),
and_imm(RawReg, RawReg, u32),
xor_imm(RawReg, RawReg, u32),
or_imm(RawReg, RawReg, u32),
mul_imm_32(RawReg, RawReg, u32),
mul_imm_64(RawReg, RawReg, u32),
set_less_than_unsigned_imm(RawReg, RawReg, u32),
set_less_than_signed_imm(RawReg, RawReg, u32),
shift_logical_left_imm_32(RawReg, RawReg, u32),
shift_logical_left_imm_64(RawReg, RawReg, u32),
shift_logical_right_imm_32(RawReg, RawReg, u32),
shift_logical_right_imm_64(RawReg, RawReg, u32),
shift_arithmetic_right_imm_32(RawReg, RawReg, u32),
shift_arithmetic_right_imm_64(RawReg, RawReg, u32),
negate_and_add_imm_32(RawReg, RawReg, u32),
negate_and_add_imm_64(RawReg, RawReg, u32),
set_greater_than_unsigned_imm(RawReg, RawReg, u32),
set_greater_than_signed_imm(RawReg, RawReg, u32),
shift_logical_right_imm_alt_32(RawReg, RawReg, u32),
shift_logical_right_imm_alt_64(RawReg, RawReg, u32),
shift_arithmetic_right_imm_alt_32(RawReg, RawReg, u32),
shift_arithmetic_right_imm_alt_64(RawReg, RawReg, u32),
shift_logical_left_imm_alt_32(RawReg, RawReg, u32),
shift_logical_left_imm_alt_64(RawReg, RawReg, u32),
cmov_if_zero_imm(RawReg, RawReg, u32),
cmov_if_not_zero_imm(RawReg, RawReg, u32),
rotate_right_32_imm(RawReg, RawReg, u32),
rotate_right_32_imm_alt(RawReg, RawReg, u32),
rotate_right_64_imm(RawReg, RawReg, u32),
rotate_right_64_imm_alt(RawReg, RawReg, u32),
branch_eq(RawReg, RawReg, u32),
branch_not_eq(RawReg, RawReg, u32),
branch_less_unsigned(RawReg, RawReg, u32),
branch_less_signed(RawReg, RawReg, u32),
branch_greater_or_equal_unsigned(RawReg, RawReg, u32),
branch_greater_or_equal_signed(RawReg, RawReg, u32),
add_32(RawReg, RawReg, RawReg),
add_64(RawReg, RawReg, RawReg),
sub_32(RawReg, RawReg, RawReg),
sub_64(RawReg, RawReg, RawReg),
and(RawReg, RawReg, RawReg),
xor(RawReg, RawReg, RawReg),
or(RawReg, RawReg, RawReg),
mul_32(RawReg, RawReg, RawReg),
mul_64(RawReg, RawReg, RawReg),
mul_upper_signed_signed(RawReg, RawReg, RawReg),
mul_upper_unsigned_unsigned(RawReg, RawReg, RawReg),
mul_upper_signed_unsigned(RawReg, RawReg, RawReg),
set_less_than_unsigned(RawReg, RawReg, RawReg),
set_less_than_signed(RawReg, RawReg, RawReg),
shift_logical_left_32(RawReg, RawReg, RawReg),
shift_logical_left_64(RawReg, RawReg, RawReg),
shift_logical_right_32(RawReg, RawReg, RawReg),
shift_logical_right_64(RawReg, RawReg, RawReg),
shift_arithmetic_right_32(RawReg, RawReg, RawReg),
shift_arithmetic_right_64(RawReg, RawReg, RawReg),
div_unsigned_32(RawReg, RawReg, RawReg),
div_unsigned_64(RawReg, RawReg, RawReg),
div_signed_32(RawReg, RawReg, RawReg),
div_signed_64(RawReg, RawReg, RawReg),
rem_unsigned_32(RawReg, RawReg, RawReg),
rem_unsigned_64(RawReg, RawReg, RawReg),
rem_signed_32(RawReg, RawReg, RawReg),
rem_signed_64(RawReg, RawReg, RawReg),
cmov_if_zero(RawReg, RawReg, RawReg),
cmov_if_not_zero(RawReg, RawReg, RawReg),
and_inverted(RawReg, RawReg, RawReg),
or_inverted(RawReg, RawReg, RawReg),
xnor(RawReg, RawReg, RawReg),
maximum(RawReg, RawReg, RawReg),
maximum_unsigned(RawReg, RawReg, RawReg),
minimum(RawReg, RawReg, RawReg),
minimum_unsigned(RawReg, RawReg, RawReg),
rotate_left_32(RawReg, RawReg, RawReg),
rotate_left_64(RawReg, RawReg, RawReg),
rotate_right_32(RawReg, RawReg, RawReg),
rotate_right_64(RawReg, RawReg, RawReg),
jump(u32),
ecalli(u32),
store_imm_u8(u32, u32),
store_imm_u16(u32, u32),
store_imm_u32(u32, u32),
store_imm_u64(u32, u32),
move_reg(RawReg, RawReg),
sbrk(RawReg, RawReg),
count_leading_zero_bits_32(RawReg, RawReg),
count_leading_zero_bits_64(RawReg, RawReg),
count_trailing_zero_bits_32(RawReg, RawReg),
count_trailing_zero_bits_64(RawReg, RawReg),
count_set_bits_32(RawReg, RawReg),
count_set_bits_64(RawReg, RawReg),
sign_extend_8(RawReg, RawReg),
sign_extend_16(RawReg, RawReg),
zero_extend_16(RawReg, RawReg),
or_combine_byte(RawReg, RawReg),
reverse_byte(RawReg, RawReg),
load_imm_and_jump_indirect(RawReg, RawReg, u32, u32),
load_imm64(RawReg, u64),
invalid = 256,
}
Variants§
trap = 0
fallthrough = 17
jump_indirect(RawReg, u32)
load_imm(RawReg, u32)
load_u8(RawReg, u32)
load_i8(RawReg, u32)
load_u16(RawReg, u32)
load_i16(RawReg, u32)
load_i32(RawReg, u32)
load_u32(RawReg, u32)
load_u64(RawReg, u32)
store_u8(RawReg, u32)
store_u16(RawReg, u32)
store_u32(RawReg, u32)
store_u64(RawReg, u32)
load_imm_and_jump(RawReg, u32, u32)
branch_eq_imm(RawReg, u32, u32)
branch_not_eq_imm(RawReg, u32, u32)
branch_less_unsigned_imm(RawReg, u32, u32)
branch_less_signed_imm(RawReg, u32, u32)
branch_greater_or_equal_unsigned_imm(RawReg, u32, u32)
branch_greater_or_equal_signed_imm(RawReg, u32, u32)
branch_less_or_equal_signed_imm(RawReg, u32, u32)
branch_less_or_equal_unsigned_imm(RawReg, u32, u32)
branch_greater_signed_imm(RawReg, u32, u32)
branch_greater_unsigned_imm(RawReg, u32, u32)
store_imm_indirect_u8(RawReg, u32, u32)
store_imm_indirect_u16(RawReg, u32, u32)
store_imm_indirect_u32(RawReg, u32, u32)
store_imm_indirect_u64(RawReg, u32, u32)
store_indirect_u8(RawReg, RawReg, u32)
store_indirect_u16(RawReg, RawReg, u32)
store_indirect_u32(RawReg, RawReg, u32)
store_indirect_u64(RawReg, RawReg, u32)
load_indirect_u8(RawReg, RawReg, u32)
load_indirect_i8(RawReg, RawReg, u32)
load_indirect_u16(RawReg, RawReg, u32)
load_indirect_i16(RawReg, RawReg, u32)
load_indirect_i32(RawReg, RawReg, u32)
load_indirect_u32(RawReg, RawReg, u32)
load_indirect_u64(RawReg, RawReg, u32)
add_imm_32(RawReg, RawReg, u32)
add_imm_64(RawReg, RawReg, u32)
and_imm(RawReg, RawReg, u32)
xor_imm(RawReg, RawReg, u32)
or_imm(RawReg, RawReg, u32)
mul_imm_32(RawReg, RawReg, u32)
mul_imm_64(RawReg, RawReg, u32)
set_less_than_unsigned_imm(RawReg, RawReg, u32)
set_less_than_signed_imm(RawReg, RawReg, u32)
shift_logical_left_imm_32(RawReg, RawReg, u32)
shift_logical_left_imm_64(RawReg, RawReg, u32)
shift_logical_right_imm_32(RawReg, RawReg, u32)
shift_logical_right_imm_64(RawReg, RawReg, u32)
shift_arithmetic_right_imm_32(RawReg, RawReg, u32)
shift_arithmetic_right_imm_64(RawReg, RawReg, u32)
negate_and_add_imm_32(RawReg, RawReg, u32)
negate_and_add_imm_64(RawReg, RawReg, u32)
set_greater_than_unsigned_imm(RawReg, RawReg, u32)
set_greater_than_signed_imm(RawReg, RawReg, u32)
shift_logical_right_imm_alt_32(RawReg, RawReg, u32)
shift_logical_right_imm_alt_64(RawReg, RawReg, u32)
shift_arithmetic_right_imm_alt_32(RawReg, RawReg, u32)
shift_arithmetic_right_imm_alt_64(RawReg, RawReg, u32)
shift_logical_left_imm_alt_32(RawReg, RawReg, u32)
shift_logical_left_imm_alt_64(RawReg, RawReg, u32)
cmov_if_zero_imm(RawReg, RawReg, u32)
cmov_if_not_zero_imm(RawReg, RawReg, u32)
rotate_right_32_imm(RawReg, RawReg, u32)
rotate_right_32_imm_alt(RawReg, RawReg, u32)
rotate_right_64_imm(RawReg, RawReg, u32)
rotate_right_64_imm_alt(RawReg, RawReg, u32)
branch_eq(RawReg, RawReg, u32)
branch_not_eq(RawReg, RawReg, u32)
branch_less_unsigned(RawReg, RawReg, u32)
branch_less_signed(RawReg, RawReg, u32)
branch_greater_or_equal_unsigned(RawReg, RawReg, u32)
branch_greater_or_equal_signed(RawReg, RawReg, u32)
add_32(RawReg, RawReg, RawReg)
add_64(RawReg, RawReg, RawReg)
sub_32(RawReg, RawReg, RawReg)
sub_64(RawReg, RawReg, RawReg)
and(RawReg, RawReg, RawReg)
xor(RawReg, RawReg, RawReg)
or(RawReg, RawReg, RawReg)
mul_32(RawReg, RawReg, RawReg)
mul_64(RawReg, RawReg, RawReg)
mul_upper_signed_signed(RawReg, RawReg, RawReg)
mul_upper_unsigned_unsigned(RawReg, RawReg, RawReg)
mul_upper_signed_unsigned(RawReg, RawReg, RawReg)
set_less_than_unsigned(RawReg, RawReg, RawReg)
set_less_than_signed(RawReg, RawReg, RawReg)
shift_logical_left_32(RawReg, RawReg, RawReg)
shift_logical_left_64(RawReg, RawReg, RawReg)
shift_logical_right_32(RawReg, RawReg, RawReg)
shift_logical_right_64(RawReg, RawReg, RawReg)
shift_arithmetic_right_32(RawReg, RawReg, RawReg)
shift_arithmetic_right_64(RawReg, RawReg, RawReg)
div_unsigned_32(RawReg, RawReg, RawReg)
div_unsigned_64(RawReg, RawReg, RawReg)
div_signed_32(RawReg, RawReg, RawReg)
div_signed_64(RawReg, RawReg, RawReg)
rem_unsigned_32(RawReg, RawReg, RawReg)
rem_unsigned_64(RawReg, RawReg, RawReg)
rem_signed_32(RawReg, RawReg, RawReg)
rem_signed_64(RawReg, RawReg, RawReg)
cmov_if_zero(RawReg, RawReg, RawReg)
cmov_if_not_zero(RawReg, RawReg, RawReg)
and_inverted(RawReg, RawReg, RawReg)
or_inverted(RawReg, RawReg, RawReg)
xnor(RawReg, RawReg, RawReg)
maximum(RawReg, RawReg, RawReg)
maximum_unsigned(RawReg, RawReg, RawReg)
minimum(RawReg, RawReg, RawReg)
minimum_unsigned(RawReg, RawReg, RawReg)
rotate_left_32(RawReg, RawReg, RawReg)
rotate_left_64(RawReg, RawReg, RawReg)
rotate_right_32(RawReg, RawReg, RawReg)
rotate_right_64(RawReg, RawReg, RawReg)
jump(u32)
ecalli(u32)
store_imm_u8(u32, u32)
store_imm_u16(u32, u32)
store_imm_u32(u32, u32)
store_imm_u64(u32, u32)
move_reg(RawReg, RawReg)
sbrk(RawReg, RawReg)
count_leading_zero_bits_32(RawReg, RawReg)
count_leading_zero_bits_64(RawReg, RawReg)
count_trailing_zero_bits_32(RawReg, RawReg)
count_trailing_zero_bits_64(RawReg, RawReg)
count_set_bits_32(RawReg, RawReg)
count_set_bits_64(RawReg, RawReg)
sign_extend_8(RawReg, RawReg)
sign_extend_16(RawReg, RawReg)
zero_extend_16(RawReg, RawReg)
or_combine_byte(RawReg, RawReg)
reverse_byte(RawReg, RawReg)
load_imm_and_jump_indirect(RawReg, RawReg, u32, u32)
load_imm64(RawReg, u64)
invalid = 256
Implementations§
Source§impl Instruction
impl Instruction
Source§impl Instruction
impl Instruction
pub fn display<'a>(self, format: &'a InstructionFormat<'a>) -> impl Display + 'a
pub fn starts_new_basic_block(self) -> bool
Trait Implementations§
Source§impl Clone for Instruction
impl Clone for Instruction
Source§fn clone(&self) -> Instruction
fn clone(&self) -> Instruction
Returns a copy of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from `source`. Read more
Source§impl Debug for Instruction
impl Debug for Instruction
Source§impl Display for Instruction
impl Display for Instruction
Source§impl PartialEq for Instruction
impl PartialEq for Instruction
impl Copy for Instruction
impl Eq for Instruction
impl StructuralPartialEq for Instruction
Auto Trait Implementations§
impl Freeze for Instruction
impl RefUnwindSafe for Instruction
impl Send for Instruction
impl Sync for Instruction
impl Unpin for Instruction
impl UnwindSafe for Instruction
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
    T: ?Sized,
impl<T> BorrowMut<T> for T where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more