1pub use emit_state::EmitState;
4
5use crate::binemit::{Addend, CodeOffset, Reloc};
6use crate::ir::{types, ExternalName, LibCall, TrapCode, Type};
7use crate::isa::x64::abi::X64ABIMachineSpec;
8use crate::isa::x64::inst::regs::{pretty_print_reg, show_ireg_sized};
9use crate::isa::x64::settings as x64_settings;
10use crate::isa::{CallConv, FunctionAlignment};
11use crate::{machinst::*, trace};
12use crate::{settings, CodegenError, CodegenResult};
13use alloc::boxed::Box;
14use smallvec::{smallvec, SmallVec};
15use std::fmt::{self, Write};
16use std::string::{String, ToString};
17
18pub mod args;
19mod emit;
20mod emit_state;
21#[cfg(test)]
22mod emit_tests;
23pub mod external;
24pub mod regs;
25mod stack_switch;
26pub mod unwind;
27
28use args::*;
29
30pub use super::lower::isle::generated_code::AtomicRmwSeqOp;
35pub use super::lower::isle::generated_code::MInst as Inst;
36
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    /// The destination of the return (tail) call.
    pub dest: T,

    /// The size of the stack-argument area for the return-call target.
    /// NOTE(review): presumably in bytes — confirm against the ABI code.
    pub new_stack_arg_size: u32,

    /// The arguments (and their register constraints) for the call.
    pub uses: CallArgList,

    /// A scratch GPR available while emitting the return call.
    pub tmp: WritableGpr,
}
53
#[test]
#[cfg(target_pointer_width = "64")]
fn inst_size_test() {
    // Guard against `Inst` growing by accident: a larger variant anywhere in
    // the enum would inflate every instruction we allocate.
    let inst_bytes = std::mem::size_of::<Inst>();
    assert_eq!(inst_bytes, 48);
}
61
/// Returns true if sign-extending the low 32 bits of `x` reproduces the full
/// 64-bit value, i.e. the value can be encoded as a 32-bit immediate that the
/// hardware will sign-extend to 64 bits.
pub(crate) fn low32_will_sign_extend_to_64(x: u64) -> bool {
    // Truncate to the low 32 bits, then sign-extend back up: the cast chain
    // u64 -> u32 -> i32 -> i64 performs exactly that round trip.
    let low32_sign_extended = x as u32 as i32 as i64;
    low32_sign_extended == x as i64
}
66
impl Inst {
    /// Returns which x64 ISA extensions are required to emit this
    /// instruction. An empty vector means the instruction needs no extension
    /// beyond the baseline instruction set.
    fn available_in_any_isa(&self) -> SmallVec<[InstructionSet; 2]> {
        match self {
            // Instructions available in the baseline instruction set: no
            // extra feature bits are required.
            Inst::AluRmiR { .. }
            | Inst::AluRM { .. }
            | Inst::AtomicRmwSeq { .. }
            | Inst::Bswap { .. }
            | Inst::CallKnown { .. }
            | Inst::CallUnknown { .. }
            | Inst::ReturnCallKnown { .. }
            | Inst::ReturnCallUnknown { .. }
            | Inst::CheckedSRemSeq { .. }
            | Inst::CheckedSRemSeq8 { .. }
            | Inst::Cmove { .. }
            | Inst::CmpRmiR { .. }
            | Inst::CvtFloatToSintSeq { .. }
            | Inst::CvtFloatToUintSeq { .. }
            | Inst::CvtUint64ToFloatSeq { .. }
            | Inst::Div { .. }
            | Inst::Div8 { .. }
            | Inst::Fence { .. }
            | Inst::Hlt
            | Inst::Imm { .. }
            | Inst::JmpCond { .. }
            | Inst::JmpCondOr { .. }
            | Inst::WinchJmpIf { .. }
            | Inst::JmpKnown { .. }
            | Inst::JmpTableSeq { .. }
            | Inst::JmpUnknown { .. }
            | Inst::LoadEffectiveAddress { .. }
            | Inst::LoadExtName { .. }
            | Inst::LockCmpxchg { .. }
            | Inst::LockXadd { .. }
            | Inst::Xchg { .. }
            | Inst::Mov64MR { .. }
            | Inst::MovImmM { .. }
            | Inst::MovRM { .. }
            | Inst::MovRR { .. }
            | Inst::MovFromPReg { .. }
            | Inst::MovToPReg { .. }
            | Inst::MovsxRmR { .. }
            | Inst::MovzxRmR { .. }
            | Inst::Mul { .. }
            | Inst::Mul8 { .. }
            | Inst::IMul { .. }
            | Inst::IMulImm { .. }
            | Inst::Neg { .. }
            | Inst::Not { .. }
            | Inst::Nop { .. }
            | Inst::Pop64 { .. }
            | Inst::Push64 { .. }
            | Inst::StackProbeLoop { .. }
            | Inst::Args { .. }
            | Inst::Rets { .. }
            | Inst::Ret { .. }
            | Inst::Setcc { .. }
            | Inst::ShiftR { .. }
            | Inst::SignExtendData { .. }
            | Inst::StackSwitchBasic { .. }
            | Inst::TrapIf { .. }
            | Inst::TrapIfAnd { .. }
            | Inst::TrapIfOr { .. }
            | Inst::Ud2 { .. }
            | Inst::XmmCmove { .. }
            | Inst::XmmCmpRmR { .. }
            | Inst::XmmMinMaxSeq { .. }
            | Inst::XmmUninitializedValue { .. }
            | Inst::ElfTlsGetAddr { .. }
            | Inst::MachOTlsGetAddr { .. }
            | Inst::CoffTlsGetAddr { .. }
            | Inst::Unwind { .. }
            | Inst::DummyUse { .. }
            | Inst::AluConstOp { .. } => smallvec![],

            // 128-bit atomic sequences require the CMPXCHG16B extension.
            Inst::LockCmpxchg16b { .. }
            | Inst::Atomic128RmwSeq { .. }
            | Inst::Atomic128XchgSeq { .. } => smallvec![InstructionSet::CMPXCHG16b],

            // Availability is a property of the specific opcode carried by
            // the instruction; delegate to it.
            Inst::AluRmRVex { op, .. } => op.available_from(),
            Inst::UnaryRmR { op, .. } => op.available_from(),
            Inst::UnaryRmRVex { op, .. } => op.available_from(),
            Inst::UnaryRmRImmVex { op, .. } => op.available_from(),

            // SSE-family opcodes report a single required extension; wrap it
            // into the vector shape this function returns.
            Inst::GprToXmm { op, .. }
            | Inst::XmmMovRM { op, .. }
            | Inst::XmmMovRMImm { op, .. }
            | Inst::XmmRmiReg { opcode: op, .. }
            | Inst::XmmRmR { op, .. }
            | Inst::XmmRmRUnaligned { op, .. }
            | Inst::XmmRmRBlend { op, .. }
            | Inst::XmmRmRImm { op, .. }
            | Inst::XmmToGpr { op, .. }
            | Inst::XmmToGprImm { op, .. }
            | Inst::XmmUnaryRmRImm { op, .. }
            | Inst::XmmUnaryRmRUnaligned { op, .. }
            | Inst::XmmUnaryRmR { op, .. }
            | Inst::CvtIntToFloat { op, .. } => smallvec![op.available_from()],

            // EVEX-encoded opcodes know their own availability.
            Inst::XmmUnaryRmREvex { op, .. }
            | Inst::XmmRmREvex { op, .. }
            | Inst::XmmRmREvex3 { op, .. }
            | Inst::XmmUnaryRmRImmEvex { op, .. } => op.available_from(),

            // VEX-encoded opcodes know their own availability.
            Inst::XmmRmiRVex { op, .. }
            | Inst::XmmRmRVex3 { op, .. }
            | Inst::XmmRmRImmVex { op, .. }
            | Inst::XmmRmRBlendVex { op, .. }
            | Inst::XmmVexPinsr { op, .. }
            | Inst::XmmUnaryRmRVex { op, .. }
            | Inst::XmmUnaryRmRImmVex { op, .. }
            | Inst::XmmMovRMVex { op, .. }
            | Inst::XmmMovRMImmVex { op, .. }
            | Inst::XmmToGprImmVex { op, .. }
            | Inst::XmmToGprVex { op, .. }
            | Inst::GprToXmmVex { op, .. }
            | Inst::CvtIntToFloatVex { op, .. }
            | Inst::XmmCmpRmRVex { op, .. } => op.available_from(),

            Inst::MulX { .. } => smallvec![InstructionSet::BMI2],

            // Instructions from the external assembler crate carry their own
            // feature list. Only always-available features appear today, so
            // nothing is accumulated; adding a new `Feature` variant makes
            // this inner match non-exhaustive and forces an update here.
            Inst::External { inst } => {
                use cranelift_assembler_x64::Feature::*;
                let features = smallvec![];
                for f in inst.features() {
                    match f {
                        _64b | compat => {}
                    }
                }
                features
            }
        }
    }
}
207
impl Inst {
    /// Create a `nop` instruction of `len` bytes; x64 nops encode at most 15
    /// bytes per instruction.
    pub(crate) fn nop(len: u8) -> Self {
        debug_assert!(len <= 15);
        Self::Nop { len }
    }

    /// Integer ALU operation in two-operand form: `dst <- dst op src`.
    pub(crate) fn alu_rmi_r(
        size: OperandSize,
        op: AluRmiROpcode,
        src: RegMemImm,
        dst: Writable<Reg>,
    ) -> Self {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Self::AluRmiR {
            size,
            op,
            // `dst` doubles as the first source in this two-operand form.
            src1: Gpr::unwrap_new(dst.to_reg()),
            src2: GprMemImm::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Unary GPR operation (reg or mem source); only 16/32/64-bit widths are
    /// supported.
    #[allow(dead_code)]
    pub(crate) fn unary_rm_r(
        size: OperandSize,
        op: UnaryRmROpcode,
        src: RegMem,
        dst: Writable<Reg>,
    ) -> Self {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        debug_assert!(size.is_one_of(&[
            OperandSize::Size16,
            OperandSize::Size32,
            OperandSize::Size64
        ]));
        Self::UnaryRmR {
            size,
            op,
            src: GprMem::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Bitwise NOT; `src` is read and overwritten in place.
    pub(crate) fn not(size: OperandSize, src: Writable<Reg>) -> Inst {
        debug_assert_eq!(src.to_reg().class(), RegClass::Int);
        Inst::Not {
            size,
            src: Gpr::unwrap_new(src.to_reg()),
            dst: WritableGpr::from_writable_reg(src).unwrap(),
        }
    }

    /// Integer division producing both quotient and remainder; traps with
    /// `trap` (e.g. on divide-by-zero).
    pub(crate) fn div(
        size: OperandSize,
        sign: DivSignedness,
        trap: TrapCode,
        divisor: RegMem,
        dividend_lo: Gpr,
        dividend_hi: Gpr,
        dst_quotient: WritableGpr,
        dst_remainder: WritableGpr,
    ) -> Inst {
        divisor.assert_regclass_is(RegClass::Int);
        Inst::Div {
            size,
            sign,
            trap,
            divisor: GprMem::unwrap_new(divisor),
            dividend_lo,
            dividend_hi,
            dst_quotient,
            dst_remainder,
        }
    }

    /// 8-bit integer division; quotient and remainder land in the single
    /// `dst` register.
    pub(crate) fn div8(
        sign: DivSignedness,
        trap: TrapCode,
        divisor: RegMem,
        dividend: Gpr,
        dst: WritableGpr,
    ) -> Inst {
        divisor.assert_regclass_is(RegClass::Int);
        Inst::Div8 {
            sign,
            trap,
            divisor: GprMem::unwrap_new(divisor),
            dividend,
            dst,
        }
    }

    /// Load the immediate `simm64` into `dst`.
    pub(crate) fn imm(dst_size: OperandSize, simm64: u64, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        // Use the 32-bit encoding whenever the value fits in 32 bits; the
        // 64-bit form is only kept when the value actually needs it.
        let dst_size = match dst_size {
            OperandSize::Size64 if simm64 > u32::max_value() as u64 => OperandSize::Size64,
            _ => OperandSize::Size32,
        };
        Inst::Imm {
            dst_size,
            simm64,
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Register-to-register move of a 32- or 64-bit GPR.
    pub(crate) fn mov_r_r(size: OperandSize, src: Reg, dst: Writable<Reg>) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(src.class() == RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = Gpr::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovRR { size, src, dst }
    }

    /// Unary SSE operation; a memory source must satisfy the (aligned)
    /// `XmmMemAligned` constraint.
    pub(crate) fn xmm_unary_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmUnaryRmR {
            op,
            src: XmmMemAligned::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Binary SSE operation in two-operand form: `dst <- dst op src`.
    pub(crate) fn xmm_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Self {
        src.assert_regclass_is(RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmRmR {
            op,
            // `dst` doubles as the first source in this two-operand form.
            src1: Xmm::unwrap_new(dst.to_reg()),
            src2: XmmMemAligned::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Three-operand AVX operation (test-only helper); `dst` doubles as the
    /// first source.
    #[cfg(test)]
    pub(crate) fn xmm_rmr_vex3(op: AvxOpcode, src3: RegMem, src2: Reg, dst: Writable<Reg>) -> Self {
        src3.assert_regclass_is(RegClass::Float);
        debug_assert!(src2.class() == RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::XmmRmRVex3 {
            op,
            src3: XmmMem::unwrap_new(src3),
            src2: Xmm::unwrap_new(src2),
            src1: Xmm::unwrap_new(dst.to_reg()),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Store an XMM register to memory with the given SSE store opcode.
    pub(crate) fn xmm_mov_r_m(op: SseOpcode, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
        debug_assert!(src.class() == RegClass::Float);
        Inst::XmmMovRM {
            op,
            src: Xmm::unwrap_new(src),
            dst: dst.into(),
        }
    }

    /// Move from an XMM register into a 32- or 64-bit GPR.
    pub(crate) fn xmm_to_gpr(
        op: SseOpcode,
        src: Reg,
        dst: Writable<Reg>,
        dst_size: OperandSize,
    ) -> Inst {
        debug_assert!(src.class() == RegClass::Float);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        Inst::XmmToGpr {
            op,
            src: Xmm::unwrap_new(src),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
            dst_size,
        }
    }

    /// Move from a 32- or 64-bit GPR (or memory) into an XMM register.
    pub(crate) fn gpr_to_xmm(
        op: SseOpcode,
        src: RegMem,
        src_size: OperandSize,
        dst: Writable<Reg>,
    ) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert!(dst.to_reg().class() == RegClass::Float);
        Inst::GprToXmm {
            op,
            src: GprMem::unwrap_new(src),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
            src_size,
        }
    }

    /// Floating-point compare setting flags (no register result).
    pub(crate) fn xmm_cmp_rm_r(op: SseOpcode, src1: Reg, src2: RegMem) -> Inst {
        src2.assert_regclass_is(RegClass::Float);
        debug_assert!(src1.class() == RegClass::Float);
        let src2 = XmmMemAligned::unwrap_new(src2);
        let src1 = Xmm::unwrap_new(src1);
        Inst::XmmCmpRmR { op, src1, src2 }
    }

    /// Scalar float min/max pseudo-instruction (expanded at emission) for
    /// 32- or 64-bit floats.
    #[allow(dead_code)]
    pub(crate) fn xmm_min_max_seq(
        size: OperandSize,
        is_min: bool,
        lhs: Reg,
        rhs: Reg,
        dst: Writable<Reg>,
    ) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        debug_assert_eq!(lhs.class(), RegClass::Float);
        debug_assert_eq!(rhs.class(), RegClass::Float);
        debug_assert_eq!(dst.to_reg().class(), RegClass::Float);
        Inst::XmmMinMaxSeq {
            size,
            is_min,
            lhs: Xmm::unwrap_new(lhs),
            rhs: Xmm::unwrap_new(rhs),
            dst: WritableXmm::from_writable_reg(dst).unwrap(),
        }
    }

    /// Zero-extending load/move (`movzx`) with the given extension mode.
    pub(crate) fn movzx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = GprMem::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovzxRmR { ext_mode, src, dst }
    }

    /// Sign-extending load/move (`movsx`) with the given extension mode.
    pub(crate) fn movsx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let src = GprMem::unwrap_new(src);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::MovsxRmR { ext_mode, src, dst }
    }

    /// Full 64-bit load from memory into a GPR.
    pub(crate) fn mov64_m_r(src: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::Mov64MR {
            src: src.into(),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Store a GPR to memory at the given width.
    pub(crate) fn mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
        debug_assert!(src.class() == RegClass::Int);
        Inst::MovRM {
            size,
            src: Gpr::unwrap_new(src),
            dst: dst.into(),
        }
    }

    /// Load-effective-address; always computes a full 64-bit address.
    pub(crate) fn lea(addr: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::LoadEffectiveAddress {
            addr: addr.into(),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
            size: OperandSize::Size64,
        }
    }

    /// Shift `src` by `num_bits` (immediate or register amount) into `dst`.
    pub(crate) fn shift_r(
        size: OperandSize,
        kind: ShiftKind,
        num_bits: Imm8Gpr,
        src: Reg,
        dst: Writable<Reg>,
    ) -> Inst {
        // An immediate shift amount must be strictly less than the operand
        // width.
        if let &Imm8Reg::Imm8 { imm: num_bits } = num_bits.as_imm8_reg() {
            debug_assert!(num_bits < size.to_bits());
        }
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::ShiftR {
            size,
            kind,
            src: Gpr::unwrap_new(src),
            num_bits,
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Integer compare setting flags; uses the `Cmp` opcode (as opposed to
    /// `Test`).
    pub(crate) fn cmp_rmi_r(size: OperandSize, src1: Reg, src2: RegMemImm) -> Inst {
        src2.assert_regclass_is(RegClass::Int);
        debug_assert_eq!(src1.class(), RegClass::Int);
        Inst::CmpRmiR {
            size,
            src1: Gpr::unwrap_new(src1),
            src2: GprMemImm::unwrap_new(src2),
            opcode: CmpOpcode::Cmp,
        }
    }

    /// Unconditional trap (`ud2`) with the given trap code.
    pub(crate) fn trap(trap_code: TrapCode) -> Inst {
        Inst::Ud2 { trap_code }
    }

    /// Trap if the condition code `cc` holds.
    pub(crate) fn trap_if(cc: CC, trap_code: TrapCode) -> Inst {
        Inst::TrapIf { cc, trap_code }
    }

    /// Conditional move: `dst <- src` when `cc` holds, else `dst` keeps its
    /// value (`dst` doubles as the alternative source).
    pub(crate) fn cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
        debug_assert!(size.is_one_of(&[
            OperandSize::Size16,
            OperandSize::Size32,
            OperandSize::Size64
        ]));
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        Inst::Cmove {
            size,
            cc,
            consequent: GprMem::unwrap_new(src),
            alternative: Gpr::unwrap_new(dst.to_reg()),
            dst: WritableGpr::from_writable_reg(dst).unwrap(),
        }
    }

    /// Push a 64-bit value onto the machine stack.
    pub(crate) fn push64(src: RegMemImm) -> Inst {
        src.assert_regclass_is(RegClass::Int);
        let src = GprMemImm::unwrap_new(src);
        Inst::Push64 { src }
    }

    /// Pop a 64-bit value from the machine stack into `dst`.
    pub(crate) fn pop64(dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().class() == RegClass::Int);
        let dst = WritableGpr::from_writable_reg(dst).unwrap();
        Inst::Pop64 { dst }
    }

    /// Direct call to a known external name.
    pub(crate) fn call_known(info: Box<CallInfo<ExternalName>>) -> Inst {
        Inst::CallKnown { info }
    }

    /// Indirect call through a register or memory operand.
    pub(crate) fn call_unknown(info: Box<CallInfo<RegMem>>) -> Inst {
        info.dest.assert_regclass_is(RegClass::Int);
        Inst::CallUnknown { info }
    }

    /// Return, popping `stack_bytes_to_pop` bytes of stack arguments.
    pub(crate) fn ret(stack_bytes_to_pop: u32) -> Inst {
        Inst::Ret { stack_bytes_to_pop }
    }

    /// Unconditional jump to a known label.
    pub(crate) fn jmp_known(dst: MachLabel) -> Inst {
        Inst::JmpKnown { dst }
    }

    /// Indirect jump through a register or memory operand.
    pub(crate) fn jmp_unknown(target: RegMem) -> Inst {
        target.assert_regclass_is(RegClass::Int);
        Inst::JmpUnknown { target }
    }

    /// Choose which load instruction to use for loading a value of type `ty`
    /// into `to_reg`: integer loads narrower than 8 bytes extend according
    /// to `ext_kind`; float/vector loads pick an SSE opcode from the type.
    pub(crate) fn load(
        ty: Type,
        from_addr: impl Into<SyntheticAmode>,
        to_reg: Writable<Reg>,
        ext_kind: ExtKind,
    ) -> Inst {
        let rc = to_reg.to_reg().class();
        match rc {
            RegClass::Int => {
                // Sub-8-byte integer loads must extend to fill the register.
                let ext_mode = match ty.bytes() {
                    1 => Some(ExtMode::BQ),
                    2 => Some(ExtMode::WQ),
                    4 => Some(ExtMode::LQ),
                    8 => None,
                    _ => unreachable!("the type should never use a scalar load: {}", ty),
                };
                if let Some(ext_mode) = ext_mode {
                    match ext_kind {
                        ExtKind::SignExtend => {
                            Inst::movsx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
                        }
                        ExtKind::ZeroExtend => {
                            Inst::movzx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
                        }
                        // Narrow loads without an extension kind are a caller
                        // bug.
                        ExtKind::None => {
                            panic!("expected an extension kind for extension mode: {ext_mode:?}")
                        }
                    }
                } else {
                    // 8-byte loads need no extension.
                    Inst::mov64_m_r(from_addr, to_reg)
                }
            }
            RegClass::Float => {
                let opcode = match ty {
                    types::F16 => panic!("loading a f16 requires multiple instructions"),
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to load type: {}", ty),
                };
                Inst::xmm_unary_rm_r(opcode, RegMem::mem(from_addr), to_reg)
            }
            RegClass::Vector => unreachable!(),
        }
    }

    /// Choose which store instruction to use for storing a value of type
    /// `ty` from `from_reg`; mirrors the opcode selection in `load`.
    pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
        let rc = from_reg.class();
        match rc {
            RegClass::Int => Inst::mov_r_m(OperandSize::from_ty(ty), from_reg, to_addr),
            RegClass::Float => {
                let opcode = match ty {
                    types::F16 => panic!("storing a f16 requires multiple instructions"),
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to store type: {}", ty),
                };
                Inst::xmm_mov_r_m(opcode, from_reg, to_addr)
            }
            RegClass::Vector => unreachable!(),
        }
    }
}
644
645impl PrettyPrint for Inst {
649 fn pretty_print(&self, _size: u8) -> String {
650 fn ljustify(s: String) -> String {
651 let w = 7;
652 if s.len() >= w {
653 s
654 } else {
655 let need = usize::min(w, w - s.len());
656 s + &format!("{nil: <width$}", nil = "", width = need)
657 }
658 }
659
660 fn ljustify2(s1: String, s2: String) -> String {
661 ljustify(s1 + &s2)
662 }
663
664 fn suffix_lq(size: OperandSize) -> String {
665 match size {
666 OperandSize::Size32 => "l",
667 OperandSize::Size64 => "q",
668 _ => unreachable!(),
669 }
670 .to_string()
671 }
672
673 #[allow(dead_code)]
674 fn suffix_lqb(size: OperandSize) -> String {
675 match size {
676 OperandSize::Size32 => "l",
677 OperandSize::Size64 => "q",
678 _ => unreachable!(),
679 }
680 .to_string()
681 }
682
683 fn suffix_bwlq(size: OperandSize) -> String {
684 match size {
685 OperandSize::Size8 => "b".to_string(),
686 OperandSize::Size16 => "w".to_string(),
687 OperandSize::Size32 => "l".to_string(),
688 OperandSize::Size64 => "q".to_string(),
689 }
690 }
691
692 match self {
693 Inst::Nop { len } => format!("{} len={}", ljustify("nop".to_string()), len),
694
695 Inst::AluRmiR {
696 size,
697 op,
698 src1,
699 src2,
700 dst,
701 } => {
702 let size_bytes = size.to_bytes();
703 let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
704 let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
705 let src2 = src2.pretty_print(size_bytes);
706 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
707 format!("{op} {src1}, {src2}, {dst}")
708 }
709 Inst::AluConstOp { op, dst, size } => {
710 let size_bytes = size.to_bytes();
711 let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
712 let op = ljustify2(op.to_string(), suffix_lqb(*size));
713 format!("{op} {dst}, {dst}, {dst}")
714 }
715 Inst::AluRM {
716 size,
717 op,
718 src1_dst,
719 src2,
720 lock,
721 } => {
722 let size_bytes = size.to_bytes();
723 let src2 = pretty_print_reg(src2.to_reg(), size_bytes);
724 let src1_dst = src1_dst.pretty_print(size_bytes);
725 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
726 let prefix = if *lock { "lock " } else { "" };
727 format!("{prefix}{op} {src2}, {src1_dst}")
728 }
729 Inst::AluRmRVex {
730 size,
731 op,
732 src1,
733 src2,
734 dst,
735 } => {
736 let size_bytes = size.to_bytes();
737 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
738 let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
739 let src2 = src2.pretty_print(size_bytes);
740 let op = ljustify2(op.to_string(), String::new());
741 format!("{op} {src2}, {src1}, {dst}")
742 }
743 Inst::UnaryRmR { src, dst, op, size } => {
744 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
745 let src = src.pretty_print(size.to_bytes());
746 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
747 format!("{op} {src}, {dst}")
748 }
749
750 Inst::UnaryRmRVex { src, dst, op, size } => {
751 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
752 let src = src.pretty_print(size.to_bytes());
753 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
754 format!("{op} {src}, {dst}")
755 }
756
757 Inst::UnaryRmRImmVex {
758 src,
759 dst,
760 op,
761 size,
762 imm,
763 } => {
764 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
765 let src = src.pretty_print(size.to_bytes());
766 format!(
767 "{} ${imm}, {src}, {dst}",
768 ljustify2(op.to_string(), suffix_bwlq(*size))
769 )
770 }
771
772 Inst::Not { size, src, dst } => {
773 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
774 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
775 let op = ljustify2("not".to_string(), suffix_bwlq(*size));
776 format!("{op} {src}, {dst}")
777 }
778
779 Inst::Neg { size, src, dst } => {
780 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
781 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
782 let op = ljustify2("neg".to_string(), suffix_bwlq(*size));
783 format!("{op} {src}, {dst}")
784 }
785
786 Inst::Div {
787 size,
788 sign,
789 trap,
790 divisor,
791 dividend_lo,
792 dividend_hi,
793 dst_quotient,
794 dst_remainder,
795 } => {
796 let divisor = divisor.pretty_print(size.to_bytes());
797 let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
798 let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
799 let dst_quotient =
800 pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
801 let dst_remainder =
802 pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
803 let op = ljustify(match sign {
804 DivSignedness::Signed => "idiv".to_string(),
805 DivSignedness::Unsigned => "div".to_string(),
806 });
807 format!(
808 "{op} {dividend_lo}, {dividend_hi}, {divisor}, {dst_quotient}, {dst_remainder} ; trap={trap}"
809 )
810 }
811
812 Inst::Div8 {
813 sign,
814 trap,
815 divisor,
816 dividend,
817 dst,
818 } => {
819 let divisor = divisor.pretty_print(1);
820 let dividend = pretty_print_reg(dividend.to_reg(), 1);
821 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
822 let op = ljustify(match sign {
823 DivSignedness::Signed => "idiv".to_string(),
824 DivSignedness::Unsigned => "div".to_string(),
825 });
826 format!("{op} {dividend}, {divisor}, {dst} ; trap={trap}")
827 }
828
829 Inst::Mul {
830 size,
831 signed,
832 src1,
833 src2,
834 dst_lo,
835 dst_hi,
836 } => {
837 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
838 let dst_lo = pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes());
839 let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
840 let src2 = src2.pretty_print(size.to_bytes());
841 let suffix = suffix_bwlq(*size);
842 let op = ljustify(if *signed {
843 format!("imul{suffix}")
844 } else {
845 format!("mul{suffix}")
846 });
847 format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
848 }
849
850 Inst::MulX {
851 size,
852 src1,
853 src2,
854 dst_lo,
855 dst_hi,
856 } => {
857 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
858 let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
859 let dst_lo = if dst_lo.to_reg().is_invalid_sentinel() {
860 dst_hi.clone()
861 } else {
862 pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes())
863 };
864 let src2 = src2.pretty_print(size.to_bytes());
865 let suffix = suffix_bwlq(*size);
866 let op = ljustify(format!("mulx{suffix}"));
867 format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
868 }
869
870 Inst::Mul8 {
871 signed,
872 src1,
873 src2,
874 dst,
875 } => {
876 let src1 = pretty_print_reg(src1.to_reg(), 1);
877 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
878 let src2 = src2.pretty_print(1);
879 let op = ljustify(if *signed {
880 "imulb".to_string()
881 } else {
882 "mulb".to_string()
883 });
884 format!("{op} {src1}, {src2}, {dst}")
885 }
886
887 Inst::IMul {
888 size,
889 src1,
890 src2,
891 dst,
892 } => {
893 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
894 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
895 let src2 = src2.pretty_print(size.to_bytes());
896 let suffix = suffix_bwlq(*size);
897 let op = ljustify(format!("imul{suffix}"));
898 format!("{op} {src1}, {src2}, {dst}")
899 }
900
901 Inst::IMulImm {
902 size,
903 src1,
904 src2,
905 dst,
906 } => {
907 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
908 let src1 = src1.pretty_print(size.to_bytes());
909 let suffix = suffix_bwlq(*size);
910 let op = ljustify(format!("imul{suffix}"));
911 format!("{op} {src1}, {src2:#x}, {dst}")
912 }
913
914 Inst::CheckedSRemSeq {
915 size,
916 divisor,
917 dividend_lo,
918 dividend_hi,
919 dst_quotient,
920 dst_remainder,
921 } => {
922 let divisor = pretty_print_reg(divisor.to_reg(), size.to_bytes());
923 let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
924 let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
925 let dst_quotient =
926 pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
927 let dst_remainder =
928 pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
929 format!(
930 "checked_srem_seq {dividend_lo}, {dividend_hi}, \
931 {divisor}, {dst_quotient}, {dst_remainder}",
932 )
933 }
934
935 Inst::CheckedSRemSeq8 {
936 divisor,
937 dividend,
938 dst,
939 } => {
940 let divisor = pretty_print_reg(divisor.to_reg(), 1);
941 let dividend = pretty_print_reg(dividend.to_reg(), 1);
942 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
943 format!("checked_srem_seq {dividend}, {divisor}, {dst}")
944 }
945
946 Inst::SignExtendData { size, src, dst } => {
947 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
948 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
949 let op = match size {
950 OperandSize::Size8 => "cbw",
951 OperandSize::Size16 => "cwd",
952 OperandSize::Size32 => "cdq",
953 OperandSize::Size64 => "cqo",
954 };
955 format!("{op} {src}, {dst}")
956 }
957
958 Inst::XmmUnaryRmR { op, src, dst, .. } => {
959 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
960 let src = src.pretty_print(op.src_size());
961 let op = ljustify(op.to_string());
962 format!("{op} {src}, {dst}")
963 }
964
965 Inst::XmmUnaryRmRUnaligned { op, src, dst, .. } => {
966 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
967 let src = src.pretty_print(op.src_size());
968 let op = ljustify(op.to_string());
969 format!("{op} {src}, {dst}")
970 }
971
972 Inst::XmmUnaryRmRImm {
973 op, src, dst, imm, ..
974 } => {
975 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
976 let src = src.pretty_print(op.src_size());
977 let op = ljustify(op.to_string());
978 format!("{op} ${imm}, {src}, {dst}")
979 }
980
981 Inst::XmmUnaryRmRVex { op, src, dst, .. } => {
982 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
983 let src = src.pretty_print(8);
984 let op = ljustify(op.to_string());
985 format!("{op} {src}, {dst}")
986 }
987
988 Inst::XmmUnaryRmRImmVex {
989 op, src, dst, imm, ..
990 } => {
991 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
992 let src = src.pretty_print(8);
993 let op = ljustify(op.to_string());
994 format!("{op} ${imm}, {src}, {dst}")
995 }
996
997 Inst::XmmUnaryRmREvex { op, src, dst, .. } => {
998 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
999 let src = src.pretty_print(8);
1000 let op = ljustify(op.to_string());
1001 format!("{op} {src}, {dst}")
1002 }
1003
1004 Inst::XmmUnaryRmRImmEvex {
1005 op, src, dst, imm, ..
1006 } => {
1007 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1008 let src = src.pretty_print(8);
1009 let op = ljustify(op.to_string());
1010 format!("{op} ${imm}, {src}, {dst}")
1011 }
1012
1013 Inst::XmmMovRM { op, src, dst, .. } => {
1014 let src = pretty_print_reg(src.to_reg(), 8);
1015 let dst = dst.pretty_print(8);
1016 let op = ljustify(op.to_string());
1017 format!("{op} {src}, {dst}")
1018 }
1019
1020 Inst::XmmMovRMVex { op, src, dst, .. } => {
1021 let src = pretty_print_reg(src.to_reg(), 8);
1022 let dst = dst.pretty_print(8);
1023 let op = ljustify(op.to_string());
1024 format!("{op} {src}, {dst}")
1025 }
1026
1027 Inst::XmmMovRMImm {
1028 op, src, dst, imm, ..
1029 } => {
1030 let src = pretty_print_reg(src.to_reg(), 8);
1031 let dst = dst.pretty_print(8);
1032 let op = ljustify(op.to_string());
1033 format!("{op} ${imm}, {src}, {dst}")
1034 }
1035
1036 Inst::XmmMovRMImmVex {
1037 op, src, dst, imm, ..
1038 } => {
1039 let src = pretty_print_reg(src.to_reg(), 8);
1040 let dst = dst.pretty_print(8);
1041 let op = ljustify(op.to_string());
1042 format!("{op} ${imm}, {src}, {dst}")
1043 }
1044
1045 Inst::XmmRmR {
1046 op,
1047 src1,
1048 src2,
1049 dst,
1050 ..
1051 } => {
1052 let src1 = pretty_print_reg(src1.to_reg(), 8);
1053 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1054 let src2 = src2.pretty_print(8);
1055 let op = ljustify(op.to_string());
1056 format!("{op} {src1}, {src2}, {dst}")
1057 }
1058
1059 Inst::XmmRmRUnaligned {
1060 op,
1061 src1,
1062 src2,
1063 dst,
1064 ..
1065 } => {
1066 let src1 = pretty_print_reg(src1.to_reg(), 8);
1067 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1068 let src2 = src2.pretty_print(8);
1069 let op = ljustify(op.to_string());
1070 format!("{op} {src1}, {src2}, {dst}")
1071 }
1072
1073 Inst::XmmRmRBlend {
1074 op,
1075 src1,
1076 src2,
1077 mask,
1078 dst,
1079 } => {
1080 let src1 = pretty_print_reg(src1.to_reg(), 8);
1081 let mask = mask.to_reg();
1082 let mask = if mask.is_virtual() {
1083 format!(" <{}>", show_ireg_sized(mask, 8))
1084 } else {
1085 debug_assert_eq!(mask, regs::xmm0());
1086 String::new()
1087 };
1088 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1089 let src2 = src2.pretty_print(8);
1090 let op = ljustify(op.to_string());
1091 format!("{op} {src1}, {src2}, {dst}{mask}")
1092 }
1093
1094 Inst::XmmRmiRVex {
1095 op,
1096 src1,
1097 src2,
1098 dst,
1099 ..
1100 } => {
1101 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1102 let src1 = pretty_print_reg(src1.to_reg(), 8);
1103 let src2 = src2.pretty_print(8);
1104 let op = ljustify(op.to_string());
1105 format!("{op} {src1}, {src2}, {dst}")
1106 }
1107
1108 Inst::XmmRmRImmVex {
1109 op,
1110 src1,
1111 src2,
1112 dst,
1113 imm,
1114 ..
1115 } => {
1116 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1117 let src1 = pretty_print_reg(src1.to_reg(), 8);
1118 let src2 = src2.pretty_print(8);
1119 let op = ljustify(op.to_string());
1120 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1121 }
1122
1123 Inst::XmmVexPinsr {
1124 op,
1125 src1,
1126 src2,
1127 dst,
1128 imm,
1129 ..
1130 } => {
1131 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1132 let src1 = pretty_print_reg(src1.to_reg(), 8);
1133 let src2 = src2.pretty_print(8);
1134 let op = ljustify(op.to_string());
1135 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1136 }
1137
1138 Inst::XmmRmRVex3 {
1139 op,
1140 src1,
1141 src2,
1142 src3,
1143 dst,
1144 ..
1145 } => {
1146 let src1 = pretty_print_reg(src1.to_reg(), 8);
1147 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1148 let src2 = pretty_print_reg(src2.to_reg(), 8);
1149 let src3 = src3.pretty_print(8);
1150 let op = ljustify(op.to_string());
1151 format!("{op} {src1}, {src2}, {src3}, {dst}")
1152 }
1153
1154 Inst::XmmRmRBlendVex {
1155 op,
1156 src1,
1157 src2,
1158 mask,
1159 dst,
1160 ..
1161 } => {
1162 let src1 = pretty_print_reg(src1.to_reg(), 8);
1163 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1164 let src2 = src2.pretty_print(8);
1165 let mask = pretty_print_reg(mask.to_reg(), 8);
1166 let op = ljustify(op.to_string());
1167 format!("{op} {src1}, {src2}, {mask}, {dst}")
1168 }
1169
1170 Inst::XmmRmREvex {
1171 op,
1172 src1,
1173 src2,
1174 dst,
1175 ..
1176 } => {
1177 let src1 = pretty_print_reg(src1.to_reg(), 8);
1178 let src2 = src2.pretty_print(8);
1179 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1180 let op = ljustify(op.to_string());
1181 format!("{op} {src2}, {src1}, {dst}")
1182 }
1183
1184 Inst::XmmRmREvex3 {
1185 op,
1186 src1,
1187 src2,
1188 src3,
1189 dst,
1190 ..
1191 } => {
1192 let src1 = pretty_print_reg(src1.to_reg(), 8);
1193 let src2 = pretty_print_reg(src2.to_reg(), 8);
1194 let src3 = src3.pretty_print(8);
1195 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1196 let op = ljustify(op.to_string());
1197 format!("{op} {src3}, {src2}, {src1}, {dst}")
1198 }
1199
1200 Inst::XmmMinMaxSeq {
1201 lhs,
1202 rhs,
1203 dst,
1204 is_min,
1205 size,
1206 } => {
1207 let rhs = pretty_print_reg(rhs.to_reg(), 8);
1208 let lhs = pretty_print_reg(lhs.to_reg(), 8);
1209 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1210 let op = ljustify2(
1211 if *is_min {
1212 "xmm min seq ".to_string()
1213 } else {
1214 "xmm max seq ".to_string()
1215 },
1216 format!("f{}", size.to_bits()),
1217 );
1218 format!("{op} {lhs}, {rhs}, {dst}")
1219 }
1220
1221 Inst::XmmRmRImm {
1222 op,
1223 src1,
1224 src2,
1225 dst,
1226 imm,
1227 size,
1228 ..
1229 } => {
1230 let src1 = pretty_print_reg(*src1, 8);
1231 let dst = pretty_print_reg(dst.to_reg(), 8);
1232 let src2 = src2.pretty_print(8);
1233 let op = ljustify(format!(
1234 "{}{}",
1235 op.to_string(),
1236 if *size == OperandSize::Size64 {
1237 ".w"
1238 } else {
1239 ""
1240 }
1241 ));
1242 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1243 }
1244
1245 Inst::XmmUninitializedValue { dst } => {
1246 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1247 let op = ljustify("uninit".into());
1248 format!("{op} {dst}")
1249 }
1250
1251 Inst::XmmToGpr {
1252 op,
1253 src,
1254 dst,
1255 dst_size,
1256 } => {
1257 let dst_size = dst_size.to_bytes();
1258 let src = pretty_print_reg(src.to_reg(), 8);
1259 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1260 let op = ljustify(op.to_string());
1261 format!("{op} {src}, {dst}")
1262 }
1263
1264 Inst::XmmToGprVex {
1265 op,
1266 src,
1267 dst,
1268 dst_size,
1269 } => {
1270 let dst_size = dst_size.to_bytes();
1271 let src = pretty_print_reg(src.to_reg(), 8);
1272 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1273 let op = ljustify(op.to_string());
1274 format!("{op} {src}, {dst}")
1275 }
1276
1277 Inst::XmmToGprImm { op, src, dst, imm } => {
1278 let src = pretty_print_reg(src.to_reg(), 8);
1279 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1280 let op = ljustify(op.to_string());
1281 format!("{op} ${imm}, {src}, {dst}")
1282 }
1283
1284 Inst::XmmToGprImmVex { op, src, dst, imm } => {
1285 let src = pretty_print_reg(src.to_reg(), 8);
1286 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1287 let op = ljustify(op.to_string());
1288 format!("{op} ${imm}, {src}, {dst}")
1289 }
1290
1291 Inst::GprToXmm {
1292 op,
1293 src,
1294 src_size,
1295 dst,
1296 } => {
1297 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1298 let src = src.pretty_print(src_size.to_bytes());
1299 let op = ljustify(op.to_string());
1300 format!("{op} {src}, {dst}")
1301 }
1302
1303 Inst::GprToXmmVex {
1304 op,
1305 src,
1306 src_size,
1307 dst,
1308 } => {
1309 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1310 let src = src.pretty_print(src_size.to_bytes());
1311 let op = ljustify(op.to_string());
1312 format!("{op} {src}, {dst}")
1313 }
1314
1315 Inst::XmmCmpRmR { op, src1, src2 } => {
1316 let src1 = pretty_print_reg(src1.to_reg(), 8);
1317 let src2 = src2.pretty_print(8);
1318 let op = ljustify(op.to_string());
1319 format!("{op} {src2}, {src1}")
1320 }
1321
1322 Inst::CvtIntToFloat {
1323 op,
1324 src1,
1325 src2,
1326 dst,
1327 src2_size,
1328 } => {
1329 let src1 = pretty_print_reg(src1.to_reg(), 8);
1330 let dst = pretty_print_reg(*dst.to_reg(), 8);
1331 let src2 = src2.pretty_print(src2_size.to_bytes());
1332 let op = ljustify(op.to_string());
1333 format!("{op} {src1}, {src2}, {dst}")
1334 }
1335
1336 Inst::CvtIntToFloatVex {
1337 op,
1338 src1,
1339 src2,
1340 dst,
1341 src2_size,
1342 } => {
1343 let dst = pretty_print_reg(*dst.to_reg(), 8);
1344 let src1 = pretty_print_reg(src1.to_reg(), 8);
1345 let src2 = src2.pretty_print(src2_size.to_bytes());
1346 let op = ljustify(op.to_string());
1347 format!("{op} {src1}, {src2}, {dst}")
1348 }
1349
1350 Inst::XmmCmpRmRVex { op, src1, src2 } => {
1351 let src1 = pretty_print_reg(src1.to_reg(), 8);
1352 let src2 = src2.pretty_print(8);
1353 format!("{} {src2}, {src1}", ljustify(op.to_string()))
1354 }
1355
1356 Inst::CvtUint64ToFloatSeq {
1357 src,
1358 dst,
1359 dst_size,
1360 tmp_gpr1,
1361 tmp_gpr2,
1362 ..
1363 } => {
1364 let src = pretty_print_reg(src.to_reg(), 8);
1365 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1366 let tmp_gpr1 = pretty_print_reg(tmp_gpr1.to_reg().to_reg(), 8);
1367 let tmp_gpr2 = pretty_print_reg(tmp_gpr2.to_reg().to_reg(), 8);
1368 let op = ljustify(format!(
1369 "u64_to_{}_seq",
1370 if *dst_size == OperandSize::Size64 {
1371 "f64"
1372 } else {
1373 "f32"
1374 }
1375 ));
1376 format!("{op} {src}, {dst}, {tmp_gpr1}, {tmp_gpr2}")
1377 }
1378
1379 Inst::CvtFloatToSintSeq {
1380 src,
1381 dst,
1382 src_size,
1383 dst_size,
1384 tmp_xmm,
1385 tmp_gpr,
1386 is_saturating,
1387 } => {
1388 let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
1389 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1390 let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
1391 let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
1392 let op = ljustify(format!(
1393 "cvt_float{}_to_sint{}{}_seq",
1394 src_size.to_bits(),
1395 dst_size.to_bits(),
1396 if *is_saturating { "_sat" } else { "" },
1397 ));
1398 format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}")
1399 }
1400
1401 Inst::CvtFloatToUintSeq {
1402 src,
1403 dst,
1404 src_size,
1405 dst_size,
1406 tmp_gpr,
1407 tmp_xmm,
1408 tmp_xmm2,
1409 is_saturating,
1410 } => {
1411 let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
1412 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1413 let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
1414 let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
1415 let tmp_xmm2 = pretty_print_reg(tmp_xmm2.to_reg().to_reg(), 8);
1416 let op = ljustify(format!(
1417 "cvt_float{}_to_uint{}{}_seq",
1418 src_size.to_bits(),
1419 dst_size.to_bits(),
1420 if *is_saturating { "_sat" } else { "" },
1421 ));
1422 format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}, {tmp_xmm2}")
1423 }
1424
1425 Inst::Imm {
1426 dst_size,
1427 simm64,
1428 dst,
1429 } => {
1430 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1431 if *dst_size == OperandSize::Size64 {
1432 let op = ljustify("movabsq".to_string());
1433 let imm = *simm64 as i64;
1434 format!("{op} ${imm}, {dst}")
1435 } else {
1436 let op = ljustify("movl".to_string());
1437 let imm = (*simm64 as u32) as i32;
1438 format!("{op} ${imm}, {dst}")
1439 }
1440 }
1441
1442 Inst::MovImmM { size, simm32, dst } => {
1443 let dst = dst.pretty_print(size.to_bytes());
1444 let suffix = suffix_bwlq(*size);
1445 let imm = match *size {
1446 OperandSize::Size8 => ((*simm32 as u8) as i8).to_string(),
1447 OperandSize::Size16 => ((*simm32 as u16) as i16).to_string(),
1448 OperandSize::Size32 => simm32.to_string(),
1449 OperandSize::Size64 => (*simm32 as i64).to_string(),
1450 };
1451 let op = ljustify2("mov".to_string(), suffix);
1452 format!("{op} ${imm}, {dst}")
1453 }
1454
1455 Inst::MovRR { size, src, dst } => {
1456 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1457 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1458 let op = ljustify2("mov".to_string(), suffix_lq(*size));
1459 format!("{op} {src}, {dst}")
1460 }
1461
1462 Inst::MovFromPReg { src, dst } => {
1463 let src: Reg = (*src).into();
1464 let src = regs::show_ireg_sized(src, 8);
1465 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1466 let op = ljustify("movq".to_string());
1467 format!("{op} {src}, {dst}")
1468 }
1469
1470 Inst::MovToPReg { src, dst } => {
1471 let src = pretty_print_reg(src.to_reg(), 8);
1472 let dst: Reg = (*dst).into();
1473 let dst = regs::show_ireg_sized(dst, 8);
1474 let op = ljustify("movq".to_string());
1475 format!("{op} {src}, {dst}")
1476 }
1477
1478 Inst::MovzxRmR {
1479 ext_mode, src, dst, ..
1480 } => {
1481 let dst_size = if *ext_mode == ExtMode::LQ {
1482 4
1483 } else {
1484 ext_mode.dst_size()
1485 };
1486 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1487 let src = src.pretty_print(ext_mode.src_size());
1488
1489 if *ext_mode == ExtMode::LQ {
1490 let op = ljustify("movl".to_string());
1491 format!("{op} {src}, {dst}")
1492 } else {
1493 let op = ljustify2("movz".to_string(), ext_mode.to_string());
1494 format!("{op} {src}, {dst}")
1495 }
1496 }
1497
1498 Inst::Mov64MR { src, dst, .. } => {
1499 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1500 let src = src.pretty_print(8);
1501 let op = ljustify("movq".to_string());
1502 format!("{op} {src}, {dst}")
1503 }
1504
1505 Inst::LoadEffectiveAddress { addr, dst, size } => {
1506 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1507 let addr = addr.pretty_print(8);
1508 let op = ljustify("lea".to_string());
1509 format!("{op} {addr}, {dst}")
1510 }
1511
1512 Inst::MovsxRmR {
1513 ext_mode, src, dst, ..
1514 } => {
1515 let dst = pretty_print_reg(dst.to_reg().to_reg(), ext_mode.dst_size());
1516 let src = src.pretty_print(ext_mode.src_size());
1517 let op = ljustify2("movs".to_string(), ext_mode.to_string());
1518 format!("{op} {src}, {dst}")
1519 }
1520
1521 Inst::MovRM { size, src, dst, .. } => {
1522 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1523 let dst = dst.pretty_print(size.to_bytes());
1524 let op = ljustify2("mov".to_string(), suffix_bwlq(*size));
1525 format!("{op} {src}, {dst}")
1526 }
1527
1528 Inst::ShiftR {
1529 size,
1530 kind,
1531 num_bits,
1532 src,
1533 dst,
1534 ..
1535 } => {
1536 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1537 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1538 match num_bits.as_imm8_reg() {
1539 &Imm8Reg::Reg { reg } => {
1540 let reg = pretty_print_reg(reg, 1);
1541 let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
1542 format!("{op} {reg}, {src}, {dst}")
1543 }
1544
1545 &Imm8Reg::Imm8 { imm: num_bits } => {
1546 let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
1547 format!("{op} ${num_bits}, {src}, {dst}")
1548 }
1549 }
1550 }
1551
1552 Inst::XmmRmiReg {
1553 opcode,
1554 src1,
1555 src2,
1556 dst,
1557 ..
1558 } => {
1559 let src1 = pretty_print_reg(src1.to_reg(), 8);
1560 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1561 let src2 = src2.pretty_print(8);
1562 let op = ljustify(opcode.to_string());
1563 format!("{op} {src1}, {src2}, {dst}")
1564 }
1565
1566 Inst::CmpRmiR {
1567 size,
1568 src1,
1569 src2,
1570 opcode,
1571 } => {
1572 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
1573 let src2 = src2.pretty_print(size.to_bytes());
1574 let op = match opcode {
1575 CmpOpcode::Cmp => "cmp",
1576 CmpOpcode::Test => "test",
1577 };
1578 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
1579 format!("{op} {src2}, {src1}")
1580 }
1581
1582 Inst::Setcc { cc, dst } => {
1583 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
1584 let op = ljustify2("set".to_string(), cc.to_string());
1585 format!("{op} {dst}")
1586 }
1587
1588 Inst::Bswap { size, src, dst } => {
1589 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1590 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1591 let op = ljustify2("bswap".to_string(), suffix_bwlq(*size));
1592 format!("{op} {src}, {dst}")
1593 }
1594
1595 Inst::Cmove {
1596 size,
1597 cc,
1598 consequent,
1599 alternative,
1600 dst,
1601 } => {
1602 let alternative = pretty_print_reg(alternative.to_reg(), size.to_bytes());
1603 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1604 let consequent = consequent.pretty_print(size.to_bytes());
1605 let op = ljustify(format!("cmov{}{}", cc.to_string(), suffix_bwlq(*size)));
1606 format!("{op} {consequent}, {alternative}, {dst}")
1607 }
1608
1609 Inst::XmmCmove {
1610 ty,
1611 cc,
1612 consequent,
1613 alternative,
1614 dst,
1615 ..
1616 } => {
1617 let size = u8::try_from(ty.bytes()).unwrap();
1618 let alternative = pretty_print_reg(alternative.to_reg(), size);
1619 let dst = pretty_print_reg(dst.to_reg().to_reg(), size);
1620 let consequent = pretty_print_reg(consequent.to_reg(), size);
1621 let suffix = match *ty {
1622 types::F64 => "sd",
1623 types::F32 => "ss",
1624 types::F16 => "ss",
1625 types::F32X4 => "aps",
1626 types::F64X2 => "apd",
1627 _ => "dqa",
1628 };
1629 let cc = cc.invert();
1630 format!(
1631 "mov{suffix} {alternative}, {dst}; \
1632 j{cc} $next; \
1633 mov{suffix} {consequent}, {dst}; \
1634 $next:"
1635 )
1636 }
1637
1638 Inst::Push64 { src } => {
1639 let src = src.pretty_print(8);
1640 let op = ljustify("pushq".to_string());
1641 format!("{op} {src}")
1642 }
1643
1644 Inst::StackProbeLoop {
1645 tmp,
1646 frame_size,
1647 guard_size,
1648 } => {
1649 let tmp = pretty_print_reg(tmp.to_reg(), 8);
1650 let op = ljustify("stack_probe_loop".to_string());
1651 format!("{op} {tmp}, frame_size={frame_size}, guard_size={guard_size}")
1652 }
1653
1654 Inst::Pop64 { dst } => {
1655 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1656 let op = ljustify("popq".to_string());
1657 format!("{op} {dst}")
1658 }
1659
1660 Inst::CallKnown { info } => {
1661 let op = ljustify("call".to_string());
1662 format!("{op} {:?}", info.dest)
1663 }
1664
1665 Inst::CallUnknown { info } => {
1666 let dest = info.dest.pretty_print(8);
1667 let op = ljustify("call".to_string());
1668 format!("{op} *{dest}")
1669 }
1670
1671 Inst::ReturnCallKnown { info } => {
1672 let ReturnCallInfo {
1673 uses,
1674 new_stack_arg_size,
1675 tmp,
1676 dest,
1677 } = &**info;
1678 let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
1679 let mut s = format!("return_call_known {dest:?} ({new_stack_arg_size}) tmp={tmp}");
1680 for ret in uses {
1681 let preg = regs::show_reg(ret.preg);
1682 let vreg = pretty_print_reg(ret.vreg, 8);
1683 write!(&mut s, " {vreg}={preg}").unwrap();
1684 }
1685 s
1686 }
1687
1688 Inst::ReturnCallUnknown { info } => {
1689 let ReturnCallInfo {
1690 uses,
1691 new_stack_arg_size,
1692 tmp,
1693 dest,
1694 } = &**info;
1695 let callee = pretty_print_reg(*dest, 8);
1696 let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
1697 let mut s =
1698 format!("return_call_unknown {callee} ({new_stack_arg_size}) tmp={tmp}");
1699 for ret in uses {
1700 let preg = regs::show_reg(ret.preg);
1701 let vreg = pretty_print_reg(ret.vreg, 8);
1702 write!(&mut s, " {vreg}={preg}").unwrap();
1703 }
1704 s
1705 }
1706
1707 Inst::Args { args } => {
1708 let mut s = "args".to_string();
1709 for arg in args {
1710 let preg = regs::show_reg(arg.preg);
1711 let def = pretty_print_reg(arg.vreg.to_reg(), 8);
1712 write!(&mut s, " {def}={preg}").unwrap();
1713 }
1714 s
1715 }
1716
1717 Inst::Rets { rets } => {
1718 let mut s = "rets".to_string();
1719 for ret in rets {
1720 let preg = regs::show_reg(ret.preg);
1721 let vreg = pretty_print_reg(ret.vreg, 8);
1722 write!(&mut s, " {vreg}={preg}").unwrap();
1723 }
1724 s
1725 }
1726
1727 Inst::Ret { stack_bytes_to_pop } => {
1728 let mut s = "ret".to_string();
1729 if *stack_bytes_to_pop != 0 {
1730 write!(&mut s, " {stack_bytes_to_pop}").unwrap();
1731 }
1732 s
1733 }
1734
1735 Inst::StackSwitchBasic {
1736 store_context_ptr,
1737 load_context_ptr,
1738 in_payload0,
1739 out_payload0,
1740 } => {
1741 let store_context_ptr = pretty_print_reg(**store_context_ptr, 8);
1742 let load_context_ptr = pretty_print_reg(**load_context_ptr, 8);
1743 let in_payload0 = pretty_print_reg(**in_payload0, 8);
1744 let out_payload0 = pretty_print_reg(*out_payload0.to_reg(), 8);
1745 format!("{out_payload0} = stack_switch_basic {store_context_ptr}, {load_context_ptr}, {in_payload0}")
1746 }
1747
1748 Inst::JmpKnown { dst } => {
1749 let op = ljustify("jmp".to_string());
1750 let dst = dst.to_string();
1751 format!("{op} {dst}")
1752 }
1753
1754 Inst::WinchJmpIf { cc, taken } => {
1755 let taken = taken.to_string();
1756 let op = ljustify2("j".to_string(), cc.to_string());
1757 format!("{op} {taken}")
1758 }
1759
1760 Inst::JmpCondOr {
1761 cc1,
1762 cc2,
1763 taken,
1764 not_taken,
1765 } => {
1766 let taken = taken.to_string();
1767 let not_taken = not_taken.to_string();
1768 let op = ljustify(format!("j{cc1},{cc2}"));
1769 format!("{op} {taken}; j {not_taken}")
1770 }
1771
1772 Inst::JmpCond {
1773 cc,
1774 taken,
1775 not_taken,
1776 } => {
1777 let taken = taken.to_string();
1778 let not_taken = not_taken.to_string();
1779 let op = ljustify2("j".to_string(), cc.to_string());
1780 format!("{op} {taken}; j {not_taken}")
1781 }
1782
1783 Inst::JmpTableSeq {
1784 idx, tmp1, tmp2, ..
1785 } => {
1786 let idx = pretty_print_reg(*idx, 8);
1787 let tmp1 = pretty_print_reg(tmp1.to_reg(), 8);
1788 let tmp2 = pretty_print_reg(tmp2.to_reg(), 8);
1789 let op = ljustify("br_table".into());
1790 format!("{op} {idx}, {tmp1}, {tmp2}")
1791 }
1792
1793 Inst::JmpUnknown { target } => {
1794 let target = target.pretty_print(8);
1795 let op = ljustify("jmp".to_string());
1796 format!("{op} *{target}")
1797 }
1798
1799 Inst::TrapIf { cc, trap_code, .. } => {
1800 format!("j{cc} #trap={trap_code}")
1801 }
1802
1803 Inst::TrapIfAnd {
1804 cc1,
1805 cc2,
1806 trap_code,
1807 ..
1808 } => {
1809 let cc1 = cc1.invert();
1810 let cc2 = cc2.invert();
1811 format!("trap_if_and {cc1}, {cc2}, {trap_code}")
1812 }
1813
1814 Inst::TrapIfOr {
1815 cc1,
1816 cc2,
1817 trap_code,
1818 ..
1819 } => {
1820 let cc2 = cc2.invert();
1821 format!("trap_if_or {cc1}, {cc2}, {trap_code}")
1822 }
1823
1824 Inst::LoadExtName {
1825 dst, name, offset, ..
1826 } => {
1827 let dst = pretty_print_reg(dst.to_reg(), 8);
1828 let name = name.display(None);
1829 let op = ljustify("load_ext_name".into());
1830 format!("{op} {name}+{offset}, {dst}")
1831 }
1832
1833 Inst::LockCmpxchg {
1834 ty,
1835 replacement,
1836 expected,
1837 mem,
1838 dst_old,
1839 ..
1840 } => {
1841 let size = ty.bytes() as u8;
1842 let replacement = pretty_print_reg(*replacement, size);
1843 let expected = pretty_print_reg(*expected, size);
1844 let dst_old = pretty_print_reg(dst_old.to_reg(), size);
1845 let mem = mem.pretty_print(size);
1846 let suffix = suffix_bwlq(OperandSize::from_bytes(size as u32));
1847 format!(
1848 "lock cmpxchg{suffix} {replacement}, {mem}, expected={expected}, dst_old={dst_old}"
1849 )
1850 }
1851
1852 Inst::LockCmpxchg16b {
1853 replacement_low,
1854 replacement_high,
1855 expected_low,
1856 expected_high,
1857 mem,
1858 dst_old_low,
1859 dst_old_high,
1860 ..
1861 } => {
1862 let replacement_low = pretty_print_reg(*replacement_low, 8);
1863 let replacement_high = pretty_print_reg(*replacement_high, 8);
1864 let expected_low = pretty_print_reg(*expected_low, 8);
1865 let expected_high = pretty_print_reg(*expected_high, 8);
1866 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1867 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1868 let mem = mem.pretty_print(16);
1869 format!(
1870 "lock cmpxchg16b {mem}, replacement={replacement_high}:{replacement_low}, expected={expected_high}:{expected_low}, dst_old={dst_old_high}:{dst_old_low}"
1871 )
1872 }
1873
1874 Inst::LockXadd {
1875 size,
1876 operand,
1877 mem,
1878 dst_old,
1879 } => {
1880 let operand = pretty_print_reg(*operand, size.to_bytes());
1881 let dst_old = pretty_print_reg(dst_old.to_reg(), size.to_bytes());
1882 let mem = mem.pretty_print(size.to_bytes());
1883 let suffix = suffix_bwlq(*size);
1884 format!("lock xadd{suffix} {operand}, {mem}, dst_old={dst_old}")
1885 }
1886
1887 Inst::Xchg {
1888 size,
1889 operand,
1890 mem,
1891 dst_old,
1892 } => {
1893 let operand = pretty_print_reg(*operand, size.to_bytes());
1894 let dst_old = pretty_print_reg(dst_old.to_reg(), size.to_bytes());
1895 let mem = mem.pretty_print(size.to_bytes());
1896 let suffix = suffix_bwlq(*size);
1897 format!("xchg{suffix} {operand}, {mem}, dst_old={dst_old}")
1898 }
1899
1900 Inst::AtomicRmwSeq { ty, op, .. } => {
1901 let ty = ty.bits();
1902 format!(
1903 "atomically {{ {ty}_bits_at_[%r9] {op:?}= %r10; %rax = old_value_at_[%r9]; %r11, %rflags = trash }}"
1904 )
1905 }
1906
1907 Inst::Atomic128RmwSeq {
1908 op,
1909 mem,
1910 operand_low,
1911 operand_high,
1912 temp_low,
1913 temp_high,
1914 dst_old_low,
1915 dst_old_high,
1916 } => {
1917 let operand_low = pretty_print_reg(*operand_low, 8);
1918 let operand_high = pretty_print_reg(*operand_high, 8);
1919 let temp_low = pretty_print_reg(temp_low.to_reg(), 8);
1920 let temp_high = pretty_print_reg(temp_high.to_reg(), 8);
1921 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1922 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1923 let mem = mem.pretty_print(16);
1924 format!("atomically {{ {dst_old_high}:{dst_old_low} = {mem}; {temp_high}:{temp_low} = {dst_old_high}:{dst_old_low} {op:?} {operand_high}:{operand_low}; {mem} = {temp_high}:{temp_low} }}")
1925 }
1926
1927 Inst::Atomic128XchgSeq {
1928 mem,
1929 operand_low,
1930 operand_high,
1931 dst_old_low,
1932 dst_old_high,
1933 } => {
1934 let operand_low = pretty_print_reg(*operand_low, 8);
1935 let operand_high = pretty_print_reg(*operand_high, 8);
1936 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1937 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1938 let mem = mem.pretty_print(16);
1939 format!("atomically {{ {dst_old_high}:{dst_old_low} = {mem}; {mem} = {operand_high}:{operand_low} }}")
1940 }
1941
1942 Inst::Fence { kind } => match kind {
1943 FenceKind::MFence => "mfence".to_string(),
1944 FenceKind::LFence => "lfence".to_string(),
1945 FenceKind::SFence => "sfence".to_string(),
1946 },
1947
1948 Inst::Hlt => "hlt".into(),
1949
1950 Inst::Ud2 { trap_code } => format!("ud2 {trap_code}"),
1951
1952 Inst::ElfTlsGetAddr { symbol, dst } => {
1953 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1954 format!("{dst} = elf_tls_get_addr {symbol:?}")
1955 }
1956
1957 Inst::MachOTlsGetAddr { symbol, dst } => {
1958 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1959 format!("{dst} = macho_tls_get_addr {symbol:?}")
1960 }
1961
1962 Inst::CoffTlsGetAddr { symbol, dst, tmp } => {
1963 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1964 let tmp = tmp.to_reg().to_reg();
1965
1966 let mut s = format!("{dst} = coff_tls_get_addr {symbol:?}");
1967 if tmp.is_virtual() {
1968 let tmp = show_ireg_sized(tmp, 8);
1969 write!(&mut s, ", {tmp}").unwrap();
1970 };
1971
1972 s
1973 }
1974
1975 Inst::Unwind { inst } => format!("unwind {inst:?}"),
1976
1977 Inst::DummyUse { reg } => {
1978 let reg = pretty_print_reg(*reg, 8);
1979 format!("dummy_use {reg}")
1980 }
1981
1982 Inst::External { inst } => {
1983 format!("{inst}")
1984 }
1985 }
1986 }
1987}
1988
1989impl fmt::Debug for Inst {
1990 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1991 write!(fmt, "{}", self.pretty_print_inst(&mut Default::default()))
1992 }
1993}
1994
1995fn x64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
1996 match inst {
2006 Inst::AluRmiR {
2007 src1, src2, dst, ..
2008 } => {
2009 collector.reg_use(src1);
2010 collector.reg_reuse_def(dst, 0);
2011 src2.get_operands(collector);
2012 }
2013 Inst::AluConstOp { dst, .. } => collector.reg_def(dst),
2014 Inst::AluRM { src1_dst, src2, .. } => {
2015 collector.reg_use(src2);
2016 src1_dst.get_operands(collector);
2017 }
2018 Inst::AluRmRVex {
2019 src1, src2, dst, ..
2020 } => {
2021 collector.reg_def(dst);
2022 collector.reg_use(src1);
2023 src2.get_operands(collector);
2024 }
2025 Inst::Not { src, dst, .. } => {
2026 collector.reg_use(src);
2027 collector.reg_reuse_def(dst, 0);
2028 }
2029 Inst::Neg { src, dst, .. } => {
2030 collector.reg_use(src);
2031 collector.reg_reuse_def(dst, 0);
2032 }
2033 Inst::Div {
2034 divisor,
2035 dividend_lo,
2036 dividend_hi,
2037 dst_quotient,
2038 dst_remainder,
2039 ..
2040 } => {
2041 divisor.get_operands(collector);
2042 collector.reg_fixed_use(dividend_lo, regs::rax());
2043 collector.reg_fixed_use(dividend_hi, regs::rdx());
2044 collector.reg_fixed_def(dst_quotient, regs::rax());
2045 collector.reg_fixed_def(dst_remainder, regs::rdx());
2046 }
2047 Inst::CheckedSRemSeq {
2048 divisor,
2049 dividend_lo,
2050 dividend_hi,
2051 dst_quotient,
2052 dst_remainder,
2053 ..
2054 } => {
2055 collector.reg_use(divisor);
2056 collector.reg_fixed_use(dividend_lo, regs::rax());
2057 collector.reg_fixed_use(dividend_hi, regs::rdx());
2058 collector.reg_fixed_def(dst_quotient, regs::rax());
2059 collector.reg_fixed_def(dst_remainder, regs::rdx());
2060 }
2061 Inst::Div8 {
2062 divisor,
2063 dividend,
2064 dst,
2065 ..
2066 } => {
2067 divisor.get_operands(collector);
2068 collector.reg_fixed_use(dividend, regs::rax());
2069 collector.reg_fixed_def(dst, regs::rax());
2070 }
2071 Inst::CheckedSRemSeq8 {
2072 divisor,
2073 dividend,
2074 dst,
2075 ..
2076 } => {
2077 collector.reg_use(divisor);
2078 collector.reg_fixed_use(dividend, regs::rax());
2079 collector.reg_fixed_def(dst, regs::rax());
2080 }
2081 Inst::Mul {
2082 src1,
2083 src2,
2084 dst_lo,
2085 dst_hi,
2086 ..
2087 } => {
2088 collector.reg_fixed_use(src1, regs::rax());
2089 collector.reg_fixed_def(dst_lo, regs::rax());
2090 collector.reg_fixed_def(dst_hi, regs::rdx());
2091 src2.get_operands(collector);
2092 }
2093 Inst::Mul8 {
2094 src1, src2, dst, ..
2095 } => {
2096 collector.reg_fixed_use(src1, regs::rax());
2097 collector.reg_fixed_def(dst, regs::rax());
2098 src2.get_operands(collector);
2099 }
2100 Inst::IMul {
2101 src1, src2, dst, ..
2102 } => {
2103 collector.reg_use(src1);
2104 collector.reg_reuse_def(dst, 0);
2105 src2.get_operands(collector);
2106 }
2107 Inst::IMulImm { src1, dst, .. } => {
2108 collector.reg_def(dst);
2109 src1.get_operands(collector);
2110 }
2111 Inst::MulX {
2112 src1,
2113 src2,
2114 dst_lo,
2115 dst_hi,
2116 ..
2117 } => {
2118 if !dst_lo.to_reg().is_invalid_sentinel() {
2119 collector.reg_def(dst_lo);
2120 }
2121 collector.reg_def(dst_hi);
2122 collector.reg_fixed_use(src1, regs::rdx());
2123 src2.get_operands(collector);
2124 }
2125 Inst::SignExtendData { size, src, dst } => {
2126 match size {
2127 OperandSize::Size8 => {
2128 collector.reg_fixed_use(src, regs::rax());
2131 collector.reg_fixed_def(dst, regs::rax());
2132 }
2133 _ => {
2134 collector.reg_fixed_use(src, regs::rax());
2137 collector.reg_fixed_def(dst, regs::rdx());
2138 }
2139 }
2140 }
2141 Inst::UnaryRmR { src, dst, .. }
2142 | Inst::UnaryRmRVex { src, dst, .. }
2143 | Inst::UnaryRmRImmVex { src, dst, .. } => {
2144 collector.reg_def(dst);
2145 src.get_operands(collector);
2146 }
2147 Inst::XmmUnaryRmR { src, dst, .. } | Inst::XmmUnaryRmRImm { src, dst, .. } => {
2148 collector.reg_def(dst);
2149 src.get_operands(collector);
2150 }
2151 Inst::XmmUnaryRmREvex { src, dst, .. }
2152 | Inst::XmmUnaryRmRImmEvex { src, dst, .. }
2153 | Inst::XmmUnaryRmRUnaligned { src, dst, .. }
2154 | Inst::XmmUnaryRmRVex { src, dst, .. }
2155 | Inst::XmmUnaryRmRImmVex { src, dst, .. } => {
2156 collector.reg_def(dst);
2157 src.get_operands(collector);
2158 }
2159 Inst::XmmRmR {
2160 src1, src2, dst, ..
2161 } => {
2162 collector.reg_use(src1);
2163 collector.reg_reuse_def(dst, 0);
2164 src2.get_operands(collector);
2165 }
2166 Inst::XmmRmRUnaligned {
2167 src1, src2, dst, ..
2168 } => {
2169 collector.reg_use(src1);
2170 collector.reg_reuse_def(dst, 0);
2171 src2.get_operands(collector);
2172 }
2173 Inst::XmmRmRBlend {
2174 src1,
2175 src2,
2176 mask,
2177 dst,
2178 op,
2179 } => {
2180 assert!(matches!(
2181 op,
2182 SseOpcode::Blendvpd | SseOpcode::Blendvps | SseOpcode::Pblendvb
2183 ));
2184 collector.reg_use(src1);
2185 collector.reg_fixed_use(mask, regs::xmm0());
2186 collector.reg_reuse_def(dst, 0);
2187 src2.get_operands(collector);
2188 }
2189 Inst::XmmRmiRVex {
2190 src1, src2, dst, ..
2191 } => {
2192 collector.reg_def(dst);
2193 collector.reg_use(src1);
2194 src2.get_operands(collector);
2195 }
2196 Inst::XmmRmRImmVex {
2197 src1, src2, dst, ..
2198 } => {
2199 collector.reg_def(dst);
2200 collector.reg_use(src1);
2201 src2.get_operands(collector);
2202 }
2203 Inst::XmmVexPinsr {
2204 src1, src2, dst, ..
2205 } => {
2206 collector.reg_def(dst);
2207 collector.reg_use(src1);
2208 src2.get_operands(collector);
2209 }
2210 Inst::XmmRmRVex3 {
2211 src1,
2212 src2,
2213 src3,
2214 dst,
2215 ..
2216 } => {
2217 collector.reg_use(src1);
2218 collector.reg_reuse_def(dst, 0);
2219 collector.reg_use(src2);
2220 src3.get_operands(collector);
2221 }
2222 Inst::XmmRmRBlendVex {
2223 src1,
2224 src2,
2225 mask,
2226 dst,
2227 ..
2228 } => {
2229 collector.reg_def(dst);
2230 collector.reg_use(src1);
2231 src2.get_operands(collector);
2232 collector.reg_use(mask);
2233 }
2234 Inst::XmmRmREvex {
2235 op,
2236 src1,
2237 src2,
2238 dst,
2239 ..
2240 } => {
2241 assert_ne!(*op, Avx512Opcode::Vpermi2b);
2242 collector.reg_use(src1);
2243 src2.get_operands(collector);
2244 collector.reg_def(dst);
2245 }
2246 Inst::XmmRmREvex3 {
2247 op,
2248 src1,
2249 src2,
2250 src3,
2251 dst,
2252 ..
2253 } => {
2254 assert_eq!(*op, Avx512Opcode::Vpermi2b);
2255 collector.reg_use(src1);
2256 collector.reg_use(src2);
2257 src3.get_operands(collector);
2258 collector.reg_reuse_def(dst, 0); }
2260 Inst::XmmRmRImm {
2261 src1, src2, dst, ..
2262 } => {
2263 collector.reg_use(src1);
2264 collector.reg_reuse_def(dst, 0);
2265 src2.get_operands(collector);
2266 }
2267 Inst::XmmUninitializedValue { dst } => collector.reg_def(dst),
2268 Inst::XmmMinMaxSeq { lhs, rhs, dst, .. } => {
2269 collector.reg_use(rhs);
2270 collector.reg_use(lhs);
2271 collector.reg_reuse_def(dst, 0); }
2273 Inst::XmmRmiReg {
2274 src1, src2, dst, ..
2275 } => {
2276 collector.reg_use(src1);
2277 collector.reg_reuse_def(dst, 0); src2.get_operands(collector);
2279 }
2280 Inst::XmmMovRM { src, dst, .. }
2281 | Inst::XmmMovRMVex { src, dst, .. }
2282 | Inst::XmmMovRMImm { src, dst, .. }
2283 | Inst::XmmMovRMImmVex { src, dst, .. } => {
2284 collector.reg_use(src);
2285 dst.get_operands(collector);
2286 }
2287 Inst::XmmCmpRmR { src1, src2, .. } => {
2288 collector.reg_use(src1);
2289 src2.get_operands(collector);
2290 }
2291 Inst::XmmCmpRmRVex { src1, src2, .. } => {
2292 collector.reg_use(src1);
2293 src2.get_operands(collector);
2294 }
2295 Inst::Imm { dst, .. } => {
2296 collector.reg_def(dst);
2297 }
2298 Inst::MovRR { src, dst, .. } => {
2299 collector.reg_use(src);
2300 collector.reg_def(dst);
2301 }
2302 Inst::MovFromPReg { dst, src } => {
2303 debug_assert!(dst.to_reg().to_reg().is_virtual());
2304 collector.reg_fixed_nonallocatable(*src);
2305 collector.reg_def(dst);
2306 }
2307 Inst::MovToPReg { dst, src } => {
2308 debug_assert!(src.to_reg().is_virtual());
2309 collector.reg_use(src);
2310 collector.reg_fixed_nonallocatable(*dst);
2311 }
2312 Inst::XmmToGpr { src, dst, .. }
2313 | Inst::XmmToGprVex { src, dst, .. }
2314 | Inst::XmmToGprImm { src, dst, .. }
2315 | Inst::XmmToGprImmVex { src, dst, .. } => {
2316 collector.reg_use(src);
2317 collector.reg_def(dst);
2318 }
2319 Inst::GprToXmm { src, dst, .. } | Inst::GprToXmmVex { src, dst, .. } => {
2320 collector.reg_def(dst);
2321 src.get_operands(collector);
2322 }
2323 Inst::CvtIntToFloat {
2324 src1, src2, dst, ..
2325 } => {
2326 collector.reg_use(src1);
2327 collector.reg_reuse_def(dst, 0);
2328 src2.get_operands(collector);
2329 }
2330 Inst::CvtIntToFloatVex {
2331 src1, src2, dst, ..
2332 } => {
2333 collector.reg_def(dst);
2334 collector.reg_use(src1);
2335 src2.get_operands(collector);
2336 }
2337 Inst::CvtUint64ToFloatSeq {
2338 src,
2339 dst,
2340 tmp_gpr1,
2341 tmp_gpr2,
2342 ..
2343 } => {
2344 collector.reg_use(src);
2345 collector.reg_early_def(dst);
2346 collector.reg_early_def(tmp_gpr1);
2347 collector.reg_early_def(tmp_gpr2);
2348 }
2349 Inst::CvtFloatToSintSeq {
2350 src,
2351 dst,
2352 tmp_xmm,
2353 tmp_gpr,
2354 ..
2355 } => {
2356 collector.reg_use(src);
2357 collector.reg_early_def(dst);
2358 collector.reg_early_def(tmp_gpr);
2359 collector.reg_early_def(tmp_xmm);
2360 }
2361 Inst::CvtFloatToUintSeq {
2362 src,
2363 dst,
2364 tmp_gpr,
2365 tmp_xmm,
2366 tmp_xmm2,
2367 ..
2368 } => {
2369 collector.reg_use(src);
2370 collector.reg_early_def(dst);
2371 collector.reg_early_def(tmp_gpr);
2372 collector.reg_early_def(tmp_xmm);
2373 collector.reg_early_def(tmp_xmm2);
2374 }
2375
2376 Inst::MovImmM { dst, .. } => {
2377 dst.get_operands(collector);
2378 }
2379
2380 Inst::MovzxRmR { src, dst, .. } => {
2381 collector.reg_def(dst);
2382 src.get_operands(collector);
2383 }
2384 Inst::Mov64MR { src, dst, .. } => {
2385 collector.reg_def(dst);
2386 src.get_operands(collector);
2387 }
2388 Inst::LoadEffectiveAddress { addr: src, dst, .. } => {
2389 collector.reg_def(dst);
2390 src.get_operands(collector);
2391 }
2392 Inst::MovsxRmR { src, dst, .. } => {
2393 collector.reg_def(dst);
2394 src.get_operands(collector);
2395 }
2396 Inst::MovRM { src, dst, .. } => {
2397 collector.reg_use(src);
2398 dst.get_operands(collector);
2399 }
2400 Inst::ShiftR {
2401 num_bits, src, dst, ..
2402 } => {
2403 collector.reg_use(src);
2404 collector.reg_reuse_def(dst, 0);
2405 if let Imm8Reg::Reg { reg } = num_bits.as_imm8_reg_mut() {
2406 collector.reg_fixed_use(reg, regs::rcx());
2407 }
2408 }
2409 Inst::CmpRmiR { src1, src2, .. } => {
2410 collector.reg_use(src1);
2411 src2.get_operands(collector);
2412 }
2413 Inst::Setcc { dst, .. } => {
2414 collector.reg_def(dst);
2415 }
2416 Inst::Bswap { src, dst, .. } => {
2417 collector.reg_use(src);
2418 collector.reg_reuse_def(dst, 0);
2419 }
2420 Inst::Cmove {
2421 consequent,
2422 alternative,
2423 dst,
2424 ..
2425 } => {
2426 collector.reg_use(alternative);
2427 collector.reg_reuse_def(dst, 0);
2428 consequent.get_operands(collector);
2429 }
2430 Inst::XmmCmove {
2431 consequent,
2432 alternative,
2433 dst,
2434 ..
2435 } => {
2436 collector.reg_use(alternative);
2437 collector.reg_reuse_def(dst, 0);
2438 collector.reg_use(consequent);
2439 }
2440 Inst::Push64 { src } => {
2441 src.get_operands(collector);
2442 }
2443 Inst::Pop64 { dst } => {
2444 collector.reg_def(dst);
2445 }
2446 Inst::StackProbeLoop { tmp, .. } => {
2447 collector.reg_early_def(tmp);
2448 }
2449
2450 Inst::CallKnown { info } => {
2451 let CallInfo {
2456 uses,
2457 defs,
2458 clobbers,
2459 dest,
2460 ..
2461 } = &mut **info;
2462 debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
2463 for CallArgPair { vreg, preg } in uses {
2464 collector.reg_fixed_use(vreg, *preg);
2465 }
2466 for CallRetPair { vreg, preg } in defs {
2467 collector.reg_fixed_def(vreg, *preg);
2468 }
2469 collector.reg_clobbers(*clobbers);
2470 }
2471
2472 Inst::CallUnknown { info } => {
2473 let CallInfo {
2474 uses,
2475 defs,
2476 clobbers,
2477 callee_conv,
2478 dest,
2479 ..
2480 } = &mut **info;
2481 match dest {
2482 RegMem::Reg { reg } if *callee_conv == CallConv::Winch => {
2483 collector.reg_fixed_use(reg, regs::r10())
2487 }
2488 _ => dest.get_operands(collector),
2489 }
2490 for CallArgPair { vreg, preg } in uses {
2491 collector.reg_fixed_use(vreg, *preg);
2492 }
2493 for CallRetPair { vreg, preg } in defs {
2494 collector.reg_fixed_def(vreg, *preg);
2495 }
2496 collector.reg_clobbers(*clobbers);
2497 }
2498 Inst::StackSwitchBasic {
2499 store_context_ptr,
2500 load_context_ptr,
2501 in_payload0,
2502 out_payload0,
2503 } => {
2504 collector.reg_use(load_context_ptr);
2505 collector.reg_use(store_context_ptr);
2506 collector.reg_fixed_use(in_payload0, stack_switch::payload_register());
2507 collector.reg_fixed_def(out_payload0, stack_switch::payload_register());
2508
2509 let mut clobbers = crate::isa::x64::abi::ALL_CLOBBERS;
2510 clobbers.remove(
2512 stack_switch::payload_register()
2513 .to_real_reg()
2514 .unwrap()
2515 .into(),
2516 );
2517 collector.reg_clobbers(clobbers);
2518 }
2519
2520 Inst::ReturnCallKnown { info } => {
2521 let ReturnCallInfo {
2522 dest, uses, tmp, ..
2523 } = &mut **info;
2524 collector.reg_fixed_def(tmp, regs::r11());
2525 debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
2527 for CallArgPair { vreg, preg } in uses {
2528 collector.reg_fixed_use(vreg, *preg);
2529 }
2530 }
2531
2532 Inst::ReturnCallUnknown { info } => {
2533 let ReturnCallInfo {
2534 dest, uses, tmp, ..
2535 } = &mut **info;
2536
2537 collector.reg_fixed_use(dest, regs::r10());
2543
2544 collector.reg_fixed_def(tmp, regs::r11());
2545 for CallArgPair { vreg, preg } in uses {
2546 collector.reg_fixed_use(vreg, *preg);
2547 }
2548 }
2549
2550 Inst::JmpTableSeq {
2551 idx, tmp1, tmp2, ..
2552 } => {
2553 collector.reg_use(idx);
2554 collector.reg_early_def(tmp1);
2555 collector.reg_def(tmp2);
2559 }
2560
2561 Inst::JmpUnknown { target } => {
2562 target.get_operands(collector);
2563 }
2564
2565 Inst::LoadExtName { dst, .. } => {
2566 collector.reg_def(dst);
2567 }
2568
2569 Inst::LockCmpxchg {
2570 replacement,
2571 expected,
2572 mem,
2573 dst_old,
2574 ..
2575 } => {
2576 collector.reg_use(replacement);
2577 collector.reg_fixed_use(expected, regs::rax());
2578 collector.reg_fixed_def(dst_old, regs::rax());
2579 mem.get_operands(collector);
2580 }
2581
2582 Inst::LockCmpxchg16b {
2583 replacement_low,
2584 replacement_high,
2585 expected_low,
2586 expected_high,
2587 mem,
2588 dst_old_low,
2589 dst_old_high,
2590 ..
2591 } => {
2592 collector.reg_fixed_use(replacement_low, regs::rbx());
2593 collector.reg_fixed_use(replacement_high, regs::rcx());
2594 collector.reg_fixed_use(expected_low, regs::rax());
2595 collector.reg_fixed_use(expected_high, regs::rdx());
2596 collector.reg_fixed_def(dst_old_low, regs::rax());
2597 collector.reg_fixed_def(dst_old_high, regs::rdx());
2598 mem.get_operands(collector);
2599 }
2600
2601 Inst::LockXadd {
2602 operand,
2603 mem,
2604 dst_old,
2605 ..
2606 } => {
2607 collector.reg_use(operand);
2608 collector.reg_reuse_def(dst_old, 0);
2609 mem.get_operands(collector);
2610 }
2611
2612 Inst::Xchg {
2613 operand,
2614 mem,
2615 dst_old,
2616 ..
2617 } => {
2618 collector.reg_use(operand);
2619 collector.reg_reuse_def(dst_old, 0);
2620 mem.get_operands(collector);
2621 }
2622
2623 Inst::AtomicRmwSeq {
2624 operand,
2625 temp,
2626 dst_old,
2627 mem,
2628 ..
2629 } => {
2630 collector.reg_late_use(operand);
2631 collector.reg_early_def(temp);
2632 collector.reg_fixed_def(dst_old, regs::rax());
2635 mem.get_operands_late(collector)
2636 }
2637
2638 Inst::Atomic128RmwSeq {
2639 operand_low,
2640 operand_high,
2641 temp_low,
2642 temp_high,
2643 dst_old_low,
2644 dst_old_high,
2645 mem,
2646 ..
2647 } => {
2648 collector.reg_late_use(operand_low);
2650 collector.reg_late_use(operand_high);
2651 collector.reg_fixed_def(temp_low, regs::rbx());
2652 collector.reg_fixed_def(temp_high, regs::rcx());
2653 collector.reg_fixed_def(dst_old_low, regs::rax());
2654 collector.reg_fixed_def(dst_old_high, regs::rdx());
2655 mem.get_operands_late(collector)
2656 }
2657
2658 Inst::Atomic128XchgSeq {
2659 operand_low,
2660 operand_high,
2661 dst_old_low,
2662 dst_old_high,
2663 mem,
2664 ..
2665 } => {
2666 collector.reg_fixed_late_use(operand_low, regs::rbx());
2668 collector.reg_fixed_late_use(operand_high, regs::rcx());
2669 collector.reg_fixed_def(dst_old_low, regs::rax());
2670 collector.reg_fixed_def(dst_old_high, regs::rdx());
2671 mem.get_operands_late(collector)
2672 }
2673
2674 Inst::Args { args } => {
2675 for ArgPair { vreg, preg } in args {
2676 collector.reg_fixed_def(vreg, *preg);
2677 }
2678 }
2679
2680 Inst::Rets { rets } => {
2681 for RetPair { vreg, preg } in rets {
2684 collector.reg_fixed_use(vreg, *preg);
2685 }
2686 }
2687
2688 Inst::JmpKnown { .. }
2689 | Inst::WinchJmpIf { .. }
2690 | Inst::JmpCond { .. }
2691 | Inst::JmpCondOr { .. }
2692 | Inst::Ret { .. }
2693 | Inst::Nop { .. }
2694 | Inst::TrapIf { .. }
2695 | Inst::TrapIfAnd { .. }
2696 | Inst::TrapIfOr { .. }
2697 | Inst::Hlt
2698 | Inst::Ud2 { .. }
2699 | Inst::Fence { .. } => {
2700 }
2702
2703 Inst::ElfTlsGetAddr { dst, .. } | Inst::MachOTlsGetAddr { dst, .. } => {
2704 collector.reg_fixed_def(dst, regs::rax());
2705 let mut clobbers = X64ABIMachineSpec::get_regs_clobbered_by_call(CallConv::SystemV);
2712 clobbers.remove(regs::gpr_preg(regs::ENC_RAX));
2713 collector.reg_clobbers(clobbers);
2714 }
2715
2716 Inst::CoffTlsGetAddr { dst, tmp, .. } => {
2717 collector.reg_fixed_def(dst, regs::rax());
2722
2723 collector.reg_fixed_def(tmp, regs::rcx());
2725 }
2726
2727 Inst::Unwind { .. } => {}
2728
2729 Inst::DummyUse { reg } => {
2730 collector.reg_use(reg);
2731 }
2732
2733 Inst::External { inst } => {
2734 inst.visit(&mut external::RegallocVisitor { collector });
2735 }
2736 }
2737}
2738
2739impl MachInst for Inst {
2743 type ABIMachineSpec = X64ABIMachineSpec;
2744
2745 fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
2746 x64_get_operands(self, collector)
2747 }
2748
2749 fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
2750 match self {
2751 Self::MovRR { size, src, dst, .. } if *size == OperandSize::Size64 => {
2756 Some((dst.to_writable_reg(), src.to_reg()))
2757 }
2758 Self::XmmUnaryRmR { op, src, dst, .. }
2763 if *op == SseOpcode::Movss
2764 || *op == SseOpcode::Movsd
2765 || *op == SseOpcode::Movaps
2766 || *op == SseOpcode::Movapd
2767 || *op == SseOpcode::Movups
2768 || *op == SseOpcode::Movupd
2769 || *op == SseOpcode::Movdqa
2770 || *op == SseOpcode::Movdqu =>
2771 {
2772 if let RegMem::Reg { reg } = src.clone().to_reg_mem() {
2773 Some((dst.to_writable_reg(), reg))
2774 } else {
2775 None
2776 }
2777 }
2778 _ => None,
2779 }
2780 }
2781
2782 fn is_included_in_clobbers(&self) -> bool {
2783 match self {
2784 &Inst::Args { .. } => false,
2785 _ => true,
2786 }
2787 }
2788
2789 fn is_trap(&self) -> bool {
2790 match self {
2791 Self::Ud2 { .. } => true,
2792 _ => false,
2793 }
2794 }
2795
2796 fn is_args(&self) -> bool {
2797 match self {
2798 Self::Args { .. } => true,
2799 _ => false,
2800 }
2801 }
2802
2803 fn is_term(&self) -> MachTerminator {
2804 match self {
2805 &Self::Rets { .. } => MachTerminator::Ret,
2807 &Self::ReturnCallKnown { .. } | &Self::ReturnCallUnknown { .. } => {
2808 MachTerminator::RetCall
2809 }
2810 &Self::JmpKnown { .. } => MachTerminator::Uncond,
2811 &Self::JmpCond { .. } => MachTerminator::Cond,
2812 &Self::JmpCondOr { .. } => MachTerminator::Cond,
2813 &Self::JmpTableSeq { .. } => MachTerminator::Indirect,
2814 _ => MachTerminator::None,
2816 }
2817 }
2818
2819 fn is_low_level_branch(&self) -> bool {
2820 match self {
2821 &Self::WinchJmpIf { .. } => true,
2822 _ => false,
2823 }
2824 }
2825
2826 fn is_mem_access(&self) -> bool {
2827 panic!("TODO FILL ME OUT")
2828 }
2829
2830 fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst {
2831 trace!(
2832 "Inst::gen_move {:?} -> {:?} (type: {:?})",
2833 src_reg,
2834 dst_reg.to_reg(),
2835 ty
2836 );
2837 let rc_dst = dst_reg.to_reg().class();
2838 let rc_src = src_reg.class();
2839 debug_assert!(rc_dst == rc_src);
2841 match rc_dst {
2842 RegClass::Int => Inst::mov_r_r(OperandSize::Size64, src_reg, dst_reg),
2843 RegClass::Float => {
2844 let opcode = match ty {
2849 types::F16 | types::F32 | types::F64 | types::F32X4 => SseOpcode::Movaps,
2850 types::F64X2 => SseOpcode::Movapd,
2851 _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqa,
2852 _ => unimplemented!("unable to move type: {}", ty),
2853 };
2854 Inst::xmm_unary_rm_r(opcode, RegMem::reg(src_reg), dst_reg)
2855 }
2856 RegClass::Vector => unreachable!(),
2857 }
2858 }
2859
2860 fn gen_nop(preferred_size: usize) -> Inst {
2861 Inst::nop(std::cmp::min(preferred_size, 15) as u8)
2862 }
2863
2864 fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
2865 match ty {
2866 types::I8 => Ok((&[RegClass::Int], &[types::I8])),
2867 types::I16 => Ok((&[RegClass::Int], &[types::I16])),
2868 types::I32 => Ok((&[RegClass::Int], &[types::I32])),
2869 types::I64 => Ok((&[RegClass::Int], &[types::I64])),
2870 types::F16 => Ok((&[RegClass::Float], &[types::F16])),
2871 types::F32 => Ok((&[RegClass::Float], &[types::F32])),
2872 types::F64 => Ok((&[RegClass::Float], &[types::F64])),
2873 types::F128 => Ok((&[RegClass::Float], &[types::F128])),
2874 types::I128 => Ok((&[RegClass::Int, RegClass::Int], &[types::I64, types::I64])),
2875 _ if ty.is_vector() => {
2876 assert!(ty.bits() <= 128);
2877 Ok((&[RegClass::Float], &[types::I8X16]))
2878 }
2879 _ => Err(CodegenError::Unsupported(format!(
2880 "Unexpected SSA-value type: {ty}"
2881 ))),
2882 }
2883 }
2884
2885 fn canonical_type_for_rc(rc: RegClass) -> Type {
2886 match rc {
2887 RegClass::Float => types::I8X16,
2888 RegClass::Int => types::I64,
2889 RegClass::Vector => unreachable!(),
2890 }
2891 }
2892
2893 fn gen_jump(label: MachLabel) -> Inst {
2894 Inst::jmp_known(label)
2895 }
2896
2897 fn gen_imm_u64(value: u64, dst: Writable<Reg>) -> Option<Self> {
2898 Some(Inst::imm(OperandSize::Size64, value, dst))
2899 }
2900
2901 fn gen_imm_f64(value: f64, tmp: Writable<Reg>, dst: Writable<Reg>) -> SmallVec<[Self; 2]> {
2902 let imm_to_gpr = Inst::imm(OperandSize::Size64, value.to_bits(), tmp);
2903 let gpr_to_xmm = Self::gpr_to_xmm(
2904 SseOpcode::Movd,
2905 tmp.to_reg().into(),
2906 OperandSize::Size64,
2907 dst,
2908 );
2909 smallvec![imm_to_gpr, gpr_to_xmm]
2910 }
2911
2912 fn gen_dummy_use(reg: Reg) -> Self {
2913 Inst::DummyUse { reg }
2914 }
2915
2916 fn worst_case_size() -> CodeOffset {
2917 15
2918 }
2919
2920 fn ref_type_regclass(_: &settings::Flags) -> RegClass {
2921 RegClass::Int
2922 }
2923
2924 fn is_safepoint(&self) -> bool {
2925 match self {
2926 Inst::CallKnown { .. } | Inst::CallUnknown { .. } => true,
2927 _ => false,
2928 }
2929 }
2930
2931 fn function_alignment() -> FunctionAlignment {
2932 FunctionAlignment {
2933 minimum: 1,
2934 preferred: 32,
2937 }
2938 }
2939
2940 type LabelUse = LabelUse;
2941
2942 const TRAP_OPCODE: &'static [u8] = &[0x0f, 0x0b];
2943}
2944
/// Constant state used while emitting instructions: the shared compiler flags
/// plus the x64-specific ISA flags.
pub struct EmitInfo {
    // Shared (target-independent) compiler settings.
    pub(super) flags: settings::Flags,
    // x64-specific ISA settings.
    isa_flags: x64_settings::Flags,
}
2950
2951impl EmitInfo {
2952 pub fn new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
2954 Self { flags, isa_flags }
2955 }
2956}
2957
2958impl MachInstEmit for Inst {
2959 type State = EmitState;
2960 type Info = EmitInfo;
2961
2962 fn emit(&self, sink: &mut MachBuffer<Inst>, info: &Self::Info, state: &mut Self::State) {
2963 emit::emit(self, sink, info, state);
2964 }
2965
2966 fn pretty_print_inst(&self, _: &mut Self::State) -> String {
2967 PrettyPrint::pretty_print(self, 0)
2968 }
2969}
2970
/// A reference to a not-yet-resolved label from a use site, to be patched into
/// the code stream once the label's offset is known.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// A 32-bit PC-relative offset with an implicit `-4` applied at patch time
    /// (i.e. relative to the end of the 4-byte displacement field), as x64
    /// jump/call encodings expect.
    JmpRel32,

    /// A plain 32-bit PC-relative offset from the start of the displacement
    /// field, with no implicit adjustment.
    PCRel32,
}
2983
2984impl MachInstLabelUse for LabelUse {
2985 const ALIGN: CodeOffset = 1;
2986
2987 fn max_pos_range(self) -> CodeOffset {
2988 match self {
2989 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x7fff_ffff,
2990 }
2991 }
2992
2993 fn max_neg_range(self) -> CodeOffset {
2994 match self {
2995 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x8000_0000,
2996 }
2997 }
2998
2999 fn patch_size(self) -> CodeOffset {
3000 match self {
3001 LabelUse::JmpRel32 | LabelUse::PCRel32 => 4,
3002 }
3003 }
3004
3005 fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
3006 let pc_rel = (label_offset as i64) - (use_offset as i64);
3007 debug_assert!(pc_rel <= self.max_pos_range() as i64);
3008 debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
3009 let pc_rel = pc_rel as u32;
3010 match self {
3011 LabelUse::JmpRel32 => {
3012 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3013 let value = pc_rel.wrapping_add(addend).wrapping_sub(4);
3014 buffer.copy_from_slice(&value.to_le_bytes()[..]);
3015 }
3016 LabelUse::PCRel32 => {
3017 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3018 let value = pc_rel.wrapping_add(addend);
3019 buffer.copy_from_slice(&value.to_le_bytes()[..]);
3020 }
3021 }
3022 }
3023
3024 fn supports_veneer(self) -> bool {
3025 match self {
3026 LabelUse::JmpRel32 | LabelUse::PCRel32 => false,
3027 }
3028 }
3029
3030 fn veneer_size(self) -> CodeOffset {
3031 match self {
3032 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0,
3033 }
3034 }
3035
3036 fn worst_case_veneer_size() -> CodeOffset {
3037 0
3038 }
3039
3040 fn generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse) {
3041 match self {
3042 LabelUse::JmpRel32 | LabelUse::PCRel32 => {
3043 panic!("Veneer not supported for JumpRel32 label-use.");
3044 }
3045 }
3046 }
3047
3048 fn from_reloc(reloc: Reloc, addend: Addend) -> Option<Self> {
3049 match (reloc, addend) {
3050 (Reloc::X86CallPCRel4, -4) => Some(LabelUse::JmpRel32),
3051 _ => None,
3052 }
3053 }
3054}