pub enum Inst {
Show 109 variants
Nop {
len: u8,
},
AluRmiR {
size: OperandSize,
op: AluRmiROpcode,
src1: Gpr,
src2: GprMemImm,
dst: WritableGpr,
},
AluRM {
size: OperandSize,
op: AluRmiROpcode,
src1_dst: SyntheticAmode,
src2: Gpr,
lock: bool,
},
AluRmRVex {
size: OperandSize,
op: AluRmROpcode,
src1: Gpr,
src2: GprMem,
dst: WritableGpr,
},
AluConstOp {
op: AluRmiROpcode,
size: OperandSize,
dst: WritableGpr,
},
UnaryRmR {
size: OperandSize,
op: UnaryRmROpcode,
src: GprMem,
dst: WritableGpr,
},
UnaryRmRVex {
size: OperandSize,
op: UnaryRmRVexOpcode,
src: GprMem,
dst: WritableGpr,
},
UnaryRmRImmVex {
size: OperandSize,
op: UnaryRmRImmVexOpcode,
src: GprMem,
dst: WritableGpr,
imm: u8,
},
Not {
size: OperandSize,
src: Gpr,
dst: WritableGpr,
},
Neg {
size: OperandSize,
src: Gpr,
dst: WritableGpr,
},
Div {
size: OperandSize,
sign: DivSignedness,
trap: TrapCode,
divisor: GprMem,
dividend_lo: Gpr,
dividend_hi: Gpr,
dst_quotient: WritableGpr,
dst_remainder: WritableGpr,
},
Div8 {
sign: DivSignedness,
trap: TrapCode,
divisor: GprMem,
dividend: Gpr,
dst: WritableGpr,
},
Mul {
size: OperandSize,
signed: bool,
src1: Gpr,
src2: GprMem,
dst_lo: WritableGpr,
dst_hi: WritableGpr,
},
MulX {
size: OperandSize,
src1: Gpr,
src2: GprMem,
dst_lo: WritableGpr,
dst_hi: WritableGpr,
},
Mul8 {
signed: bool,
src1: Gpr,
src2: GprMem,
dst: WritableGpr,
},
IMul {
size: OperandSize,
src1: Gpr,
src2: GprMem,
dst: WritableGpr,
},
IMulImm {
size: OperandSize,
src1: GprMem,
src2: i32,
dst: WritableGpr,
},
CheckedSRemSeq {
size: OperandSize,
dividend_lo: Gpr,
dividend_hi: Gpr,
divisor: Gpr,
dst_quotient: WritableGpr,
dst_remainder: WritableGpr,
},
CheckedSRemSeq8 {
dividend: Gpr,
divisor: Gpr,
dst: WritableGpr,
},
SignExtendData {
size: OperandSize,
src: Gpr,
dst: WritableGpr,
},
Imm {
dst_size: OperandSize,
simm64: u64,
dst: WritableGpr,
},
MovRR {
size: OperandSize,
src: Gpr,
dst: WritableGpr,
},
MovFromPReg {
src: PReg,
dst: WritableGpr,
},
MovToPReg {
src: Gpr,
dst: PReg,
},
MovzxRmR {
ext_mode: ExtMode,
src: GprMem,
dst: WritableGpr,
},
Mov64MR {
src: SyntheticAmode,
dst: WritableGpr,
},
LoadEffectiveAddress {
addr: SyntheticAmode,
dst: WritableGpr,
size: OperandSize,
},
MovsxRmR {
ext_mode: ExtMode,
src: GprMem,
dst: WritableGpr,
},
MovImmM {
size: OperandSize,
simm32: i32,
dst: SyntheticAmode,
},
MovRM {
size: OperandSize,
src: Gpr,
dst: SyntheticAmode,
},
ShiftR {
size: OperandSize,
kind: ShiftKind,
src: Gpr,
num_bits: Imm8Gpr,
dst: WritableGpr,
},
XmmRmiReg {
opcode: SseOpcode,
src1: Xmm,
src2: XmmMemAlignedImm,
dst: WritableXmm,
},
CmpRmiR {
size: OperandSize,
opcode: CmpOpcode,
src1: Gpr,
src2: GprMemImm,
},
Setcc {
cc: CC,
dst: WritableGpr,
},
Bswap {
size: OperandSize,
src: Gpr,
dst: WritableGpr,
},
Cmove {
size: OperandSize,
cc: CC,
consequent: GprMem,
alternative: Gpr,
dst: WritableGpr,
},
XmmCmove {
ty: Type,
cc: CC,
consequent: Xmm,
alternative: Xmm,
dst: WritableXmm,
},
Push64 {
src: GprMemImm,
},
Pop64 {
dst: WritableGpr,
},
StackProbeLoop {
tmp: Writable<Reg>,
frame_size: u32,
guard_size: u32,
},
XmmRmR {
op: SseOpcode,
src1: Xmm,
src2: XmmMemAligned,
dst: WritableXmm,
},
XmmRmRUnaligned {
op: SseOpcode,
src1: Xmm,
src2: XmmMem,
dst: WritableXmm,
},
XmmRmRBlend {
op: SseOpcode,
src1: Xmm,
src2: XmmMemAligned,
mask: Xmm,
dst: WritableXmm,
},
XmmRmiRVex {
op: AvxOpcode,
src1: Xmm,
src2: XmmMemImm,
dst: WritableXmm,
},
XmmRmRImmVex {
op: AvxOpcode,
src1: Xmm,
src2: XmmMem,
dst: WritableXmm,
imm: u8,
},
XmmVexPinsr {
op: AvxOpcode,
src1: Xmm,
src2: GprMem,
dst: WritableXmm,
imm: u8,
},
XmmRmRVex3 {
op: AvxOpcode,
src1: Xmm,
src2: Xmm,
src3: XmmMem,
dst: WritableXmm,
},
XmmRmRBlendVex {
op: AvxOpcode,
src1: Xmm,
src2: XmmMem,
mask: Xmm,
dst: WritableXmm,
},
XmmUnaryRmRVex {
op: AvxOpcode,
src: XmmMem,
dst: WritableXmm,
},
XmmUnaryRmRImmVex {
op: AvxOpcode,
src: XmmMem,
dst: WritableXmm,
imm: u8,
},
XmmMovRMVex {
op: AvxOpcode,
src: Xmm,
dst: SyntheticAmode,
},
XmmMovRMImmVex {
op: AvxOpcode,
src: Xmm,
dst: SyntheticAmode,
imm: u8,
},
XmmToGprImmVex {
op: AvxOpcode,
src: Xmm,
dst: WritableGpr,
imm: u8,
},
GprToXmmVex {
op: AvxOpcode,
src: GprMem,
dst: WritableXmm,
src_size: OperandSize,
},
XmmToGprVex {
op: AvxOpcode,
src: Xmm,
dst: WritableGpr,
dst_size: OperandSize,
},
XmmCmpRmRVex {
op: AvxOpcode,
src1: Xmm,
src2: XmmMem,
},
XmmRmREvex {
op: Avx512Opcode,
src1: Xmm,
src2: XmmMem,
dst: WritableXmm,
},
XmmUnaryRmRImmEvex {
op: Avx512Opcode,
src: XmmMem,
dst: WritableXmm,
imm: u8,
},
XmmRmREvex3 {
op: Avx512Opcode,
src1: Xmm,
src2: Xmm,
src3: XmmMem,
dst: WritableXmm,
},
XmmUnaryRmR {
op: SseOpcode,
src: XmmMemAligned,
dst: WritableXmm,
},
XmmUnaryRmRUnaligned {
op: SseOpcode,
src: XmmMem,
dst: WritableXmm,
},
XmmUnaryRmRImm {
op: SseOpcode,
src: XmmMemAligned,
imm: u8,
dst: WritableXmm,
},
XmmUnaryRmREvex {
op: Avx512Opcode,
src: XmmMem,
dst: WritableXmm,
},
XmmMovRM {
op: SseOpcode,
src: Xmm,
dst: SyntheticAmode,
},
XmmMovRMImm {
op: SseOpcode,
src: Xmm,
dst: SyntheticAmode,
imm: u8,
},
XmmToGpr {
op: SseOpcode,
src: Xmm,
dst: WritableGpr,
dst_size: OperandSize,
},
XmmToGprImm {
op: SseOpcode,
src: Xmm,
dst: WritableGpr,
imm: u8,
},
GprToXmm {
op: SseOpcode,
src: GprMem,
dst: WritableXmm,
src_size: OperandSize,
},
CvtIntToFloat {
op: SseOpcode,
src1: Xmm,
src2: GprMem,
dst: WritableXmm,
src2_size: OperandSize,
},
CvtIntToFloatVex {
op: AvxOpcode,
src1: Xmm,
src2: GprMem,
dst: WritableXmm,
src2_size: OperandSize,
},
CvtUint64ToFloatSeq {
dst_size: OperandSize,
src: Gpr,
dst: WritableXmm,
tmp_gpr1: WritableGpr,
tmp_gpr2: WritableGpr,
},
CvtFloatToSintSeq {
dst_size: OperandSize,
src_size: OperandSize,
is_saturating: bool,
src: Xmm,
dst: WritableGpr,
tmp_gpr: WritableGpr,
tmp_xmm: WritableXmm,
},
CvtFloatToUintSeq {
dst_size: OperandSize,
src_size: OperandSize,
is_saturating: bool,
src: Xmm,
dst: WritableGpr,
tmp_gpr: WritableGpr,
tmp_xmm: WritableXmm,
tmp_xmm2: WritableXmm,
},
XmmMinMaxSeq {
size: OperandSize,
is_min: bool,
lhs: Xmm,
rhs: Xmm,
dst: WritableXmm,
},
XmmCmpRmR {
op: SseOpcode,
src1: Xmm,
src2: XmmMemAligned,
},
XmmRmRImm {
op: SseOpcode,
src1: Reg,
src2: RegMem,
dst: Writable<Reg>,
imm: u8,
size: OperandSize,
},
CallKnown {
info: Box<CallInfo<ExternalName>>,
},
CallUnknown {
info: Box<CallInfo<RegMem>>,
},
ReturnCallKnown {
info: Box<ReturnCallInfo<ExternalName>>,
},
ReturnCallUnknown {
info: Box<ReturnCallInfo<Reg>>,
},
Args {
args: Vec<ArgPair>,
},
Rets {
rets: Vec<RetPair>,
},
Ret {
stack_bytes_to_pop: u32,
},
StackSwitchBasic {
store_context_ptr: Gpr,
load_context_ptr: Gpr,
in_payload0: Gpr,
out_payload0: WritableGpr,
},
JmpKnown {
dst: MachLabel,
},
JmpIf {
cc: CC,
taken: MachLabel,
},
JmpCond {
cc: CC,
taken: MachLabel,
not_taken: MachLabel,
},
JmpTableSeq {
idx: Reg,
tmp1: Writable<Reg>,
tmp2: Writable<Reg>,
default_target: MachLabel,
targets: Box<Vec<MachLabel>>,
},
JmpUnknown {
target: RegMem,
},
TrapIf {
cc: CC,
trap_code: TrapCode,
},
TrapIfAnd {
cc1: CC,
cc2: CC,
trap_code: TrapCode,
},
TrapIfOr {
cc1: CC,
cc2: CC,
trap_code: TrapCode,
},
Hlt,
Ud2 {
trap_code: TrapCode,
},
LoadExtName {
dst: Writable<Reg>,
name: Box<ExternalName>,
offset: i64,
distance: RelocDistance,
},
LockCmpxchg {
ty: Type,
replacement: Reg,
expected: Reg,
mem: SyntheticAmode,
dst_old: Writable<Reg>,
},
LockCmpxchg16b {
replacement_low: Reg,
replacement_high: Reg,
expected_low: Reg,
expected_high: Reg,
mem: Box<SyntheticAmode>,
dst_old_low: Writable<Reg>,
dst_old_high: Writable<Reg>,
},
LockXadd {
size: OperandSize,
operand: Reg,
mem: SyntheticAmode,
dst_old: Writable<Reg>,
},
Xchg {
size: OperandSize,
operand: Reg,
mem: SyntheticAmode,
dst_old: Writable<Reg>,
},
AtomicRmwSeq {
ty: Type,
op: AtomicRmwSeqOp,
mem: SyntheticAmode,
operand: Reg,
temp: Writable<Reg>,
dst_old: Writable<Reg>,
},
Atomic128RmwSeq {
op: Atomic128RmwSeqOp,
mem: Box<SyntheticAmode>,
operand_low: Reg,
operand_high: Reg,
temp_low: Writable<Reg>,
temp_high: Writable<Reg>,
dst_old_low: Writable<Reg>,
dst_old_high: Writable<Reg>,
},
Atomic128XchgSeq {
mem: SyntheticAmode,
operand_low: Reg,
operand_high: Reg,
dst_old_low: Writable<Reg>,
dst_old_high: Writable<Reg>,
},
Fence {
kind: FenceKind,
},
XmmUninitializedValue {
dst: WritableXmm,
},
ElfTlsGetAddr {
symbol: ExternalName,
dst: WritableGpr,
},
MachOTlsGetAddr {
symbol: ExternalName,
dst: WritableGpr,
},
CoffTlsGetAddr {
symbol: ExternalName,
dst: WritableGpr,
tmp: WritableGpr,
},
Unwind {
inst: UnwindInst,
},
DummyUse {
reg: Reg,
},
}
Available on crate feature
`x86`
only.
Internal type `MInst`: defined at src/isa/x64/inst.isle line 7.
Variants§
Nop
AluRmiR
AluRM
AluRmRVex
AluConstOp
UnaryRmR
UnaryRmRVex
UnaryRmRImmVex
Not
Neg
Div
Div8
Mul
MulX
Mul8
IMul
IMulImm
CheckedSRemSeq
CheckedSRemSeq8
SignExtendData
Imm
MovRR
MovFromPReg
MovToPReg
MovzxRmR
Mov64MR
LoadEffectiveAddress
MovsxRmR
MovImmM
MovRM
ShiftR
XmmRmiReg
CmpRmiR
Setcc
Bswap
Cmove
XmmCmove
Push64
Pop64
Fields
§
dst: WritableGpr
StackProbeLoop
XmmRmR
XmmRmRUnaligned
XmmRmRBlend
XmmRmiRVex
XmmRmRImmVex
XmmVexPinsr
XmmRmRVex3
XmmRmRBlendVex
XmmUnaryRmRVex
XmmUnaryRmRImmVex
XmmMovRMVex
XmmMovRMImmVex
XmmToGprImmVex
GprToXmmVex
XmmToGprVex
XmmCmpRmRVex
XmmRmREvex
XmmUnaryRmRImmEvex
XmmRmREvex3
XmmUnaryRmR
XmmUnaryRmRUnaligned
XmmUnaryRmRImm
XmmUnaryRmREvex
XmmMovRM
XmmMovRMImm
XmmToGpr
XmmToGprImm
GprToXmm
CvtIntToFloat
CvtIntToFloatVex
CvtUint64ToFloatSeq
CvtFloatToSintSeq
Fields
§
dst_size: OperandSize
§
src_size: OperandSize
§
dst: WritableGpr
§
tmp_gpr: WritableGpr
§
tmp_xmm: WritableXmm
CvtFloatToUintSeq
Fields
§
dst_size: OperandSize
§
src_size: OperandSize
§
dst: WritableGpr
§
tmp_gpr: WritableGpr
§
tmp_xmm: WritableXmm
§
tmp_xmm2: WritableXmm
XmmMinMaxSeq
XmmCmpRmR
XmmRmRImm
CallKnown
Fields
§
info: Box<CallInfo<ExternalName>>
CallUnknown
ReturnCallKnown
Fields
§
info: Box<ReturnCallInfo<ExternalName>>
ReturnCallUnknown
Args
Rets
Ret
StackSwitchBasic
JmpKnown
JmpIf
JmpCond
JmpTableSeq
Fields
JmpUnknown
TrapIf
TrapIfAnd
TrapIfOr
Hlt
Ud2
LoadExtName
LockCmpxchg
LockCmpxchg16b
Fields
§
mem: Box<SyntheticAmode>
LockXadd
Xchg
AtomicRmwSeq
Atomic128RmwSeq
Atomic128XchgSeq
Fields
§
mem: SyntheticAmode
Fence
XmmUninitializedValue
Fields
§
dst: WritableXmm
ElfTlsGetAddr
MachOTlsGetAddr
CoffTlsGetAddr
Unwind
Fields
§
inst: UnwindInst
DummyUse
Trait Implementations§
Source§impl MachInst for Inst
impl MachInst for Inst
Source§const TRAP_OPCODE: &'static [u8] = _
const TRAP_OPCODE: &'static [u8] = _
Byte representation of a trap opcode which is inserted by
MachBuffer
during its defer_trap
method.Source§type ABIMachineSpec = X64ABIMachineSpec
type ABIMachineSpec = X64ABIMachineSpec
The ABI machine spec for this
MachInst
.Source§type LabelUse = LabelUse
type LabelUse = LabelUse
A label-use kind: a type that describes the types of label references that
can occur in an instruction.
Source§fn get_operands(&mut self, collector: &mut impl OperandVisitor)
fn get_operands(&mut self, collector: &mut impl OperandVisitor)
Return the registers referenced by this machine instruction along with
the modes of reference (use, def, modify).
Source§fn is_move(&self) -> Option<(Writable<Reg>, Reg)>
fn is_move(&self) -> Option<(Writable<Reg>, Reg)>
If this is a simple move, return the (source, destination) tuple of registers.
Source§fn is_included_in_clobbers(&self) -> bool
fn is_included_in_clobbers(&self) -> bool
Should this instruction be included in the clobber-set?
Source§fn is_term(&self) -> MachTerminator
fn is_term(&self) -> MachTerminator
Is this a terminator (branch or ret)? If so, return its type
(ret/uncond/cond) and target if applicable.
Source§fn is_mem_access(&self) -> bool
fn is_mem_access(&self) -> bool
Does this instruction access memory?
Source§fn gen_nop(preferred_size: usize) -> Inst
fn gen_nop(preferred_size: usize) -> Inst
Generate a NOP. The
preferred_size
parameter allows the caller to
request a NOP of that size, or as close to it as possible. The machine
backend may return a NOP whose binary encoding is smaller than the
preferred size, but must not return a NOP that is larger. However,
the instruction must have a nonzero size if preferred_size is nonzero.Source§fn rc_for_type(
ty: Type,
) -> CodegenResult<(&'static [RegClass], &'static [Type])>
fn rc_for_type( ty: Type, ) -> CodegenResult<(&'static [RegClass], &'static [Type])>
Determine register class(es) to store the given Cranelift type, and the
Cranelift type actually stored in the underlying register(s). May return
an error if the type isn’t supported by this backend. Read more
Source§fn canonical_type_for_rc(rc: RegClass) -> Type
fn canonical_type_for_rc(rc: RegClass) -> Type
Get an appropriate type that can fully hold a value in a given
register class. This may not be the only type that maps to
that class, but when used with
gen_move()
or the ABI trait’s
load/spill constructors, it should produce instruction(s) that
move the entire register contents.Source§fn gen_jump(label: MachLabel) -> Inst
fn gen_jump(label: MachLabel) -> Inst
Generate a jump to another target. Used during lowering of
control flow.
Source§fn gen_imm_u64(value: u64, dst: Writable<Reg>) -> Option<Self>
fn gen_imm_u64(value: u64, dst: Writable<Reg>) -> Option<Self>
Generate a store of an immediate 64-bit integer to a register. Used by
the control plane to generate random instructions.
Source§fn gen_imm_f64(
value: f64,
tmp: Writable<Reg>,
dst: Writable<Reg>,
) -> SmallVec<[Self; 2]>
fn gen_imm_f64( value: f64, tmp: Writable<Reg>, dst: Writable<Reg>, ) -> SmallVec<[Self; 2]>
Generate a store of an immediate 64-bit floating-point value to a register. Used by
the control plane to generate random instructions. The tmp register may
be used by architectures which don’t support writing immediate values to
floating point registers directly.
Source§fn gen_dummy_use(reg: Reg) -> Self
fn gen_dummy_use(reg: Reg) -> Self
Generate a dummy instruction that will keep a value alive but
has no other purpose.
Source§fn worst_case_size() -> CodeOffset
fn worst_case_size() -> CodeOffset
What is the worst-case instruction size emitted by this instruction type?
Source§fn ref_type_regclass(_: &Flags) -> RegClass
fn ref_type_regclass(_: &Flags) -> RegClass
What is the register class used for reference types (GC-observable pointers)? Can
be dependent on compilation flags.
Source§fn is_safepoint(&self) -> bool
fn is_safepoint(&self) -> bool
Is this a safepoint?
Source§fn function_alignment() -> FunctionAlignment
fn function_alignment() -> FunctionAlignment
Returns a description of the alignment required for functions for this
architecture.
Source§fn align_basic_block(offset: CodeOffset) -> CodeOffset
fn align_basic_block(offset: CodeOffset) -> CodeOffset
Align a basic block offset (from start of function). By default, no
alignment occurs.
Source§impl MachInstEmit for Inst
impl MachInstEmit for Inst
Source§impl MachInstEmitState<MInst> for EmitState
impl MachInstEmitState<MInst> for EmitState
Source§fn new(abi: &Callee<X64ABIMachineSpec>, ctrl_plane: ControlPlane) -> Self
fn new(abi: &Callee<X64ABIMachineSpec>, ctrl_plane: ControlPlane) -> Self
Create a new emission state given the ABI object.
Source§fn pre_safepoint(&mut self, user_stack_map: Option<UserStackMap>)
fn pre_safepoint(&mut self, user_stack_map: Option<UserStackMap>)
Update the emission state before emitting an instruction that is a
safepoint.
Source§fn ctrl_plane_mut(&mut self) -> &mut ControlPlane
fn ctrl_plane_mut(&mut self) -> &mut ControlPlane
The emission state holds ownership of a control plane, so it doesn’t
have to be passed around explicitly too much.
ctrl_plane_mut
may
be used if temporary access to the control plane is needed by some
other function that doesn’t have access to the emission state.Source§fn take_ctrl_plane(self) -> ControlPlane
fn take_ctrl_plane(self) -> ControlPlane
Used to continue using a control plane after the emission state is
not needed anymore.
Source§fn frame_layout(&self) -> &FrameLayout
fn frame_layout(&self) -> &FrameLayout
The [
FrameLayout
] for the function currently being compiled.Source§fn on_new_block(&mut self)
fn on_new_block(&mut self)
A hook that triggers when first emitting a new block.
It is guaranteed to be called before any instructions are emitted.
Auto Trait Implementations§
impl Freeze for MInst
impl RefUnwindSafe for MInst
impl Send for MInst
impl Sync for MInst
impl Unpin for MInst
impl UnwindSafe for MInst
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more