cranelift_codegen/machinst/lower.rs
//! This module implements lowering (instruction selection) from Cranelift IR
//! to machine instructions with virtual registers. This is *almost* the final
//! machine code, except for register allocation.

// TODO: separate the IR-query core of `Lower` from the lowering logic built on
// top of it, e.g. the side-effect/coloring analysis and the scan support.

use crate::entity::SecondaryMap;
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
use crate::ir::{
    ArgumentPurpose, Block, Constant, ConstantData, DataFlowGraph, ExternalName, Function,
    GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags, RelSourceLoc, Type,
    Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
};
use crate::machinst::valueregs::InvalidSentinel;
use crate::machinst::{
    writable_value_regs, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, Callee, InsnIndex,
    LoweredBlock, MachLabel, Reg, SigSet, VCode, VCodeBuilder, VCodeConstant, VCodeConstantData,
    VCodeConstants, VCodeInst, ValueRegs, Writable,
};
use crate::settings::Flags;
use crate::{trace, CodegenError, CodegenResult};
use alloc::vec::Vec;
use cranelift_control::ControlPlane;
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{smallvec, SmallVec};
use std::fmt::Debug;

use super::{VCodeBuildDirection, VRegAllocator};

/// A vector of ValueRegs, used to represent the outputs of an instruction.
pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;

/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same "color" are guaranteed not to be separated by
/// any side-effecting op (for this purpose, loads are also considered
/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
/// furthermore, it is guaranteed that for any two instructions A and B such
/// that color(A) == color(B), either A dominates B and B postdominates A, or
/// vice-versa. (For now, in practice, only ops in the same basic block can ever
/// have the same color, trivially providing the second condition.) Intuitively,
/// this means that the ops of the same color must always execute "together", as
/// part of one atomic contiguous section of the dynamic execution trace, and
/// they can be freely permuted (modulo true dataflow dependencies) without
/// affecting program behavior.
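///
/// An illustrative sketch (assuming the load and store are the only
/// side-effecting ops in this block; the color shown is each instruction's
/// *entry* color):
///
/// ```text
/// v1 = iadd v0, v0      ; color 1
/// v2 = load.i64 v1      ; color 1 (side-effecting: next color is 2)
/// v3 = iadd v2, v2      ; color 2
/// store v3, v1          ; color 2 (side-effecting: next color is 3)
/// ```
///
/// The `iadd` defining `v1` shares color 1 with the load, so the two may be
/// reordered or merged; the `iadd` defining `v3` has color 2 and thus may not
/// move above the load.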
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
    fn new(n: u32) -> InstColor {
        InstColor(n)
    }

    /// Get an arbitrary index representing this color. The index is unique
    /// *within a single function compilation*, but indices may be reused across
    /// functions.
    pub fn get(self) -> u32 {
        self.0
    }
}

/// A representation of all of the ways in which a value is available, aside
/// from as a direct register.
///
/// - An instruction, if it would be allowed to occur at the current location
///   instead (see [Lower::get_input_as_source_or_const()] for more details).
///
/// - A constant, if the value is known to be a constant.
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// An instruction produces this value (as the given output), and its
    /// computation (and side-effect if applicable) could occur at the
    /// current instruction's location instead.
    ///
    /// If this instruction's operation is merged into the current instruction,
    /// the backend must call [Lower::sink_inst()].
    ///
    /// This enum indicates whether this use of the source instruction
    /// is unique or not.
    pub inst: InputSourceInst,
    /// The value is a known constant.
    pub constant: Option<u64>,
}

/// When examining an input to an instruction, this enum provides one
/// of several options: there is or isn't a single instruction (that
/// we can see and merge with) that produces that input's value, and
/// we are or aren't the single user of that instruction.
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
    /// The input in question is the single, unique use of the given
    /// instruction and output index, and it can be sunk to the
    /// location of this input.
    UniqueUse(Inst, usize),
    /// The input in question is one of multiple uses of the given
    /// instruction. It can still be sunk to the location of this
    /// input.
    Use(Inst, usize),
    /// We cannot determine which instruction produced the input, or
    /// it is one of several instructions (e.g., due to a control-flow
    /// merge and blockparam), or the source instruction cannot be
    /// allowed to sink to the current location due to side-effects.
    None,
}

impl InputSourceInst {
    /// Get the instruction and output index for this source, whether
    /// we are its single or one of many users.
    pub fn as_inst(&self) -> Option<(Inst, usize)> {
        match self {
            &InputSourceInst::UniqueUse(inst, output_idx)
            | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
            &InputSourceInst::None => None,
        }
    }
}

/// A machine backend.
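///
/// An illustrative, much-simplified sketch of an implementation (real backends
/// generate most of `lower` and `lower_branch` from ISLE; `MyInst` here is a
/// hypothetical machine-instruction type):
///
/// ```ignore
/// struct MyBackend;
///
/// impl LowerBackend for MyBackend {
///     type MInst = MyInst;
///     type FactFlowState = ();
///
///     fn lower(&self, ctx: &mut Lower<MyInst>, inst: Inst) -> Option<InstOutput> {
///         // Match on `ctx.data(inst)`, emit machine insts via `ctx.emit(..)`,
///         // and return the registers holding the results. Returning `None`
///         // reports "no lowering found" and becomes a codegen error.
///         None
///     }
///
///     fn lower_branch(
///         &self,
///         ctx: &mut Lower<MyInst>,
///         inst: Inst,
///         targets: &[MachLabel],
///     ) -> Option<()> {
///         // Emit the actual branch (or N-way branch group) here.
///         None
///     }
/// }
/// ```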
pub trait LowerBackend {
    /// The machine instruction type.
    type MInst: VCodeInst;

    /// Lower a single instruction.
    ///
    /// For a branch, this function should not generate the actual branch
    /// instruction. However, it must force any values it needs for the branch
    /// edge (block-param actuals) into registers, because the actual branch
    /// generation (`lower_branch()`) happens *after* any possible merged
    /// out-edge.
    ///
    /// Returns `None` if no lowering for the instruction was found.
    fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;

    /// Lower a block-terminating group of branches (which together can be seen
    /// as one N-way branch), given a vcode MachLabel for each target.
    ///
    /// Returns `None` if no lowering for the branch was found.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Self::MInst>,
        inst: Inst,
        targets: &[MachLabel],
    ) -> Option<()>;

    /// A bit of a hack: give a fixed register that always holds the result of a
    /// `get_pinned_reg` instruction, if known. This allows elision of moves
    /// into the associated vreg, instead using the real reg directly.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        None
    }

    /// The type of state carried between `check_fact` invocations.
    type FactFlowState: Default + Clone + Debug;

    /// Check any facts about an instruction, given VCode with facts
    /// on VRegs. Takes mutable `VCode` so that it can propagate some
    /// kinds of facts automatically.
    fn check_fact(
        &self,
        _ctx: &FactContext<'_>,
        _vcode: &mut VCode<Self::MInst>,
        _inst: InsnIndex,
        _state: &mut Self::FactFlowState,
    ) -> PccResult<()> {
        Err(PccError::UnimplementedBackend)
    }
}

/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    f: &'func Function,

    /// Lowered machine instructions.
    vcode: VCodeBuilder<I>,

    /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
    vregs: VRegAllocator<I>,

    /// Mapping from `Value` (SSA value in IR) to virtual register.
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

    /// sret registers, if needed.
    sret_reg: Option<ValueRegs<Reg>>,

    /// Instruction colors at block exits. From this map, we can recover all
    /// instruction colors by scanning backward from the block end and
    /// decrementing on any color-changing (side-effecting) instruction.
    block_end_colors: SecondaryMap<Block, InstColor>,

    /// Instruction colors at side-effecting ops. This is the *entry* color,
    /// i.e., the version of global state that exists before an instruction
    /// executes. For each side-effecting instruction, the *exit* color is its
    /// entry color plus one.
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,

    /// Current color as we scan during lowering. While we are lowering an
    /// instruction, this is equal to the color *at entry to* the instruction.
    cur_scan_entry_color: Option<InstColor>,

    /// Current instruction as we scan during lowering.
    cur_inst: Option<Inst>,

    /// Instruction constant values, if known.
    inst_constants: FxHashMap<Inst, u64>,

    /// Use-counts per SSA value, as counted in the input IR. These
    /// are "coarsened", in the abstract-interpretation sense: we only
    /// care about "0, 1, many" states, as this is all we need and
    /// this lets us do an efficient fixpoint analysis.
    ///
    /// See doc comment on `ValueUseState` for more details.
    value_ir_uses: SecondaryMap<Value, ValueUseState>,

    /// Actual uses of each SSA value so far, incremented while lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,

    /// Effectful instructions that have been sunk; they are not codegen'd at
    /// their original locations.
    inst_sunk: FxHashSet<Inst>,

    /// Instructions collected for the CLIF inst in progress, in forward order.
    ir_insts: Vec<I>,

    /// The register to use for GetPinnedReg, if any, on this architecture.
    pinned_reg: Option<Reg>,

    /// Compilation flags.
    flags: Flags,
}

/// How is a value used in the IR?
///
/// This can be seen as a coarsening of an integer count. We only need
/// distinct states for zero, one, or many.
///
/// This analysis deserves further explanation. The basic idea is that
/// we want to allow instruction lowering to know whether a value that
/// an instruction references is *only* referenced by that one use, or
/// by others as well. This is necessary to know when we might want to
/// move a side-effect: we cannot, for example, duplicate a load, so
/// we cannot let instruction lowering match a load as part of a
/// subpattern and potentially incorporate it.
///
/// Note that a lot of subtlety comes into play once we have
/// *indirect* uses. The classical example of this in our development
/// history was the x86 compare instruction, which is incorporated
/// into flags users (e.g. `selectif`, `trueif`, branches) and can
/// subsequently incorporate loads, or at least we would like it
/// to. However, danger awaits: the compare might be the only user of
/// a load, so we might think we can just move the load (and nothing
/// is duplicated -- success!), except that the compare itself is
/// codegen'd in multiple places, where it is incorporated as a
/// subpattern itself.
///
/// So we really want a notion of "unique all the way along the
/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
/// to the semantics that we want here: we want to know when we've
/// matched a unique use of an instruction, and that instruction's
/// unique use of another instruction, etc, just as `&mut T` can only
/// be obtained by going through a chain of `&mut T`. If one has a
/// `&T` to a struct containing `&mut T` (one of several uses of an
/// instruction that itself has a unique use of an instruction), one
/// can only get a `&T` (one can only get a "I am one of several users
/// of this instruction" result).
///
/// We could track these paths, either dynamically as one "looks up the operand
/// tree" or precomputed. But the former requires state and means that the
/// `Lower` API carries that state implicitly, which we'd like to avoid if we
/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
/// inst `i` unique from the point of view of inst `j`?).
///
/// To make matters even a little more complex still, a value that is
/// not uniquely used when initially viewing the IR can *become*
/// uniquely used, at least as a root allowing further unique uses of
/// e.g. loads to merge, if no other instruction actually merges
/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
/// point of view of lowering `v4` or `v3`, we cannot merge the load
/// at `v1`. But if we decide just to use the assigned register for
/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
/// once, so it *is* a unique root at that point and we *can* merge
/// the load.
///
/// Note also that the color scheme is not sufficient to give us this
/// information, for various reasons: reasoning about side-effects
/// does not tell us about potential duplication of uses through pure
/// ops.
///
/// To keep things simple and avoid error-prone lowering APIs that
/// would extract more information about whether instruction merging
/// happens or not (we don't have that info now, and it would be
/// difficult to refactor to get it and make that refactor 100%
/// correct), we give up on the above "can become unique if not
/// actually merged" point. Instead, we compute a
/// transitive-uniqueness. That is what this enum represents.
///
/// There is one final caveat as well to the result of this analysis. Notably,
/// we define some instructions to be "root" instructions, which means that we
/// assume they will always be codegen'd at the root of a matching tree, and not
/// matched. (This comes with the caveat that we actually enforce this property
/// by making them "opaque" to subtree matching in
/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
/// they in some sense "reset" multiplicity: these root instructions can be used
/// many times, but because their result(s) are only computed once, they only
/// use their inputs once.
///
/// We currently define all multi-result instructions to be "root" instructions,
/// because it is too complex to reason about matching through them, and they
/// cause too-coarse-grained approximation of multiplicity otherwise: the
/// analysis would have to assume (as it used to!) that they are always
/// multiply-used, simply because they have multiple outputs even if those
/// outputs are used only once.
///
317/// as well, if we make the corresponding change to get_value_as_source_or_const
318/// as well.
319///
///
/// To define `ValueUseState` more plainly: a value is `Unused` if no references
/// exist to it; `Once` if only one other op refers to it, *and* that other op
/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
/// is contagious (except through root instructions): even if an op's result
/// value is directly used only once in the CLIF, that value is `Multiple` if
/// the op that uses it is itself used multiple times (hence could be codegen'd
/// multiple times). In brief, this analysis tells us whether, if every op
/// merged all of its operand tree, a given op could be codegen'd in more than
/// one place.
///
/// To compute this, we first consider direct uses. At this point
/// `Unused` answers are correct, `Multiple` answers are correct, but
/// some `Once`s may change to `Multiple`s. Then we propagate
/// `Multiple` transitively using a workqueue/fixpoint algorithm.
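///
/// An illustrative sketch of the propagation:
///
/// ```text
/// v1 = load ...         ; Multiple (propagated from v2 by the fixpoint)
/// v2 = iadd v1, v1      ; Multiple (used directly by both v3 and v4)
/// v3 = iadd v2, v2      ; Once (used only by v4)
/// v4 = iadd v3, v2      ; Once (used only by the return)
/// return v4
/// ```
///
/// Even though `v1` is used directly only once (by `v2`), it is `Multiple`
/// because `v2` could be merged into two places; hence the load at `v1` may
/// not be merged into a user's lowering.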
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
    /// Not used at all.
    Unused,
    /// Used exactly once.
    Once,
    /// Used multiple times.
    Multiple,
}

impl ValueUseState {
    /// Add one use.
    fn inc(&mut self) {
        let new = match self {
            Self::Unused => Self::Once,
            Self::Once | Self::Multiple => Self::Multiple,
        };
        *self = new;
    }
}

/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
/// reference.
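///
/// For example (an illustrative note, not an exhaustive rule): a call to
/// another function in the same compiled module can typically use a `Near`,
/// PC-relative reference, while the address of an imported symbol may live
/// anywhere in the address space and so must be handled as `Far`.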
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
    /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
    /// 128MB offset. If unsure, use `Far` instead.
    Near,
    /// Target of relocation could be anywhere in the address space.
    Far,
}

impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Prepare a new lowering context for the given IR function.
    pub fn new(
        f: &'func Function,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        sigs: SigSet,
        flags: Flags,
    ) -> CodegenResult<Self> {
        let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
        let vcode = VCodeBuilder::new(
            sigs,
            abi,
            emit_info,
            block_order,
            constants,
            VCodeBuildDirection::Backward,
            flags.log2_min_function_alignment(),
        );

        // We usually need two VRegs per instruction result, plus extras for
        // various temporaries, but two per Value is a good starting point.
        let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);

        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());

        // Assign a vreg to each block param and each inst result.
        for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
                let ty = f.dfg.value_type(param);
                if value_regs[param].is_invalid() {
                    let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
                    value_regs[param] = regs;
                    trace!("bb {} param {}: regs {:?}", bb, param, regs);
                }
            }
            for inst in f.layout.block_insts(bb) {
                for &result in f.dfg.inst_results(inst) {
                    let ty = f.dfg.value_type(result);
                    if value_regs[result].is_invalid() && !ty.is_invalid() {
                        let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
                        value_regs[result] = regs;
                        trace!(
                            "bb {} inst {} ({:?}): result {} regs {:?}",
                            bb,
                            inst,
                            f.dfg.insts[inst],
                            result,
                            regs,
                        );
                    }
                }
            }
        }

        // Find the sret register, if it's used.
        let mut sret_param = None;
        for ret in vcode.abi().signature().returns.iter() {
            if ret.purpose == ArgumentPurpose::StructReturn {
                let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
                    .dfg
                    .block_params(entry_bb)
                    .iter()
                    .zip(vcode.abi().signature().params.iter())
                {
                    if sig_param.purpose == ArgumentPurpose::StructReturn {
                        assert!(sret_param.is_none());
                        sret_param = Some(param);
                    }
                }

                assert!(sret_param.is_some());
            }
        }

        let sret_reg = sret_param.map(|param| {
            let regs = value_regs[param];
            assert!(regs.len() == 1);
            regs
        });

        // Compute instruction colors, find constant instructions, and find instructions with
        // side-effects, in one combined pass.
        let mut cur_color = 0;
        let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
        let mut side_effect_inst_entry_colors = FxHashMap::default();
        let mut inst_constants = FxHashMap::default();
        for bb in f.layout.blocks() {
            cur_color += 1;
            for inst in f.layout.block_insts(bb) {
                let side_effect = has_lowering_side_effect(f, inst);

                trace!("bb {} inst {} has color {}", bb, inst, cur_color);
                if side_effect {
                    side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
                    trace!(" -> side-effecting; incrementing color for next inst");
                    cur_color += 1;
                }

                // Determine if this is a constant; if so, add to the table.
                if let Some(c) = is_constant_64bit(f, inst) {
                    trace!(" -> constant: {}", c);
                    inst_constants.insert(inst, c);
                }
            }

            block_end_colors[bb] = InstColor::new(cur_color);
        }

        let value_ir_uses = compute_use_states(f, sret_param);

        Ok(Lower {
            f,
            vcode,
            vregs,
            value_regs,
            sret_reg,
            block_end_colors,
            side_effect_inst_entry_colors,
            inst_constants,
            value_ir_uses,
            value_lowered_uses: SecondaryMap::default(),
            inst_sunk: FxHashSet::default(),
            cur_scan_entry_color: None,
            cur_inst: None,
            ir_insts: vec![],
            pinned_reg: None,
            flags,
        })
    }

    /// Get the `SigSet` of ABI signatures for this function's calls.
    pub fn sigs(&self) -> &SigSet {
        self.vcode.sigs()
    }

    /// Get the `SigSet` of ABI signatures for this function's calls, mutably.
    pub fn sigs_mut(&mut self) -> &mut SigSet {
        self.vcode.sigs_mut()
    }

    fn gen_arg_setup(&mut self) {
        if let Some(entry_bb) = self.f.layout.entry_block() {
            trace!(
                "gen_arg_setup: entry BB {} args are:\n{:?}",
                entry_bb,
                self.f.dfg.block_params(entry_bb)
            );

            for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
                if self.value_ir_uses[*param] == ValueUseState::Unused {
                    continue;
                }
                let regs = writable_value_regs(self.value_regs[*param]);
                for insn in self
                    .vcode
                    .vcode
                    .abi
                    .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
                    .into_iter()
                {
                    self.emit(insn);
                }
            }
            if let Some(insn) = self
                .vcode
                .vcode
                .abi
                .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
            {
                self.emit(insn);
            }

            // The `args` instruction below must come first. Finish
            // the current "IR inst" (with a default source location,
            // as for other special instructions inserted during
            // lowering) and continue the scan backward.
            self.finish_ir_inst(Default::default());

            if let Some(insn) = self.vcode.vcode.abi.take_args() {
                self.emit(insn);
            }
        }
    }

    /// Generate the return instruction.
    pub fn gen_return(&mut self, rets: Vec<ValueRegs<Reg>>) {
        let mut out_rets = vec![];

        let mut rets = rets.into_iter();
        for (i, ret) in self
            .abi()
            .signature()
            .returns
            .clone()
            .into_iter()
            .enumerate()
        {
            let regs = if ret.purpose == ArgumentPurpose::StructReturn {
                self.sret_reg.unwrap()
            } else {
                rets.next().unwrap()
            };

            let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
                self.vcode.sigs(),
                i,
                regs,
                &mut self.vregs,
            );
            out_rets.extend(regs);
            for insn in insns {
                self.emit(insn);
            }
        }

        // Hack: generate a virtual instruction that uses vmctx in
        // order to keep it alive for the duration of the function,
        // for the benefit of debuginfo.
        if self.f.dfg.values_labels.is_some() {
            if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
                if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
                    let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
                    self.emit(I::gen_dummy_use(vmctx_reg));
                }
            }
        }

        let inst = self.abi().gen_rets(out_rets);
        self.emit(inst);
    }

    /// Has this instruction been sunk to a use-site (i.e., away from its
    /// original location)?
    fn is_inst_sunk(&self, inst: Inst) -> bool {
        self.inst_sunk.contains(&inst)
    }

    // Is any result of this instruction needed?
    fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
        self.f
            .dfg
            .inst_results(inst)
            .iter()
            .any(|&result| self.value_lowered_uses[result] > 0)
    }

    fn lower_clif_block<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        block: Block,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<()> {
        self.cur_scan_entry_color = Some(self.block_end_colors[block]);
        // Lowering loop:
        // - For each non-branch instruction, in reverse order:
        //   - If side-effecting (load, store, branch/call/return,
        //     possible trap), or if used outside of this block, or if
        //     demanded by another inst, then lower.
        //
        // That's it! Lowering of side-effecting ops will force all *needed*
        // (live) non-side-effecting ops to be lowered at the right places, via
        // the `use_input_reg()` callback on the `Lower` (that's us). That's
        // because `use_input_reg()` sets the eager/demand bit for any insts
        // whose result registers are used.
        //
        // We set the VCodeBuilder to "backward" mode, so we emit
        // blocks in reverse order wrt the BlockIndex sequence, and
        // emit instructions in reverse order within blocks. Because
        // the machine backend calls `ctx.emit()` in forward order, we
        // collect per-IR-inst lowered instructions in `ir_insts`,
        // then reverse these and append to the VCode at the end of
        // each IR instruction.
        for inst in self.f.layout.block_insts(block).rev() {
            let data = &self.f.dfg.insts[inst];
            let has_side_effect = has_lowering_side_effect(self.f, inst);
            // If inst has been sunk to another location, skip it.
            if self.is_inst_sunk(inst) {
                continue;
            }
            // Are any outputs used at least once?
            let value_needed = self.is_any_inst_result_needed(inst);
            trace!(
                "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
                block,
                inst,
                data,
                data.opcode().is_branch(),
                has_side_effect,
                value_needed,
            );

            // Update scan state to color prior to this inst (as we are scanning
            // backward).
            self.cur_inst = Some(inst);
            if has_side_effect {
                let entry_color = *self
                    .side_effect_inst_entry_colors
                    .get(&inst)
                    .expect("every side-effecting inst should have a color-map entry");
                self.cur_scan_entry_color = Some(entry_color);
            }

            // Skip lowering branches; these are handled separately
            // (see `lower_clif_branch()` below).
            if self.f.dfg.insts[inst].opcode().is_branch() {
                continue;
            }

            // Normal instruction: codegen if the instruction is side-effecting
            // or any of its outputs is used.
            if has_side_effect || value_needed {
                trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
                let temp_regs = match backend.lower(self, inst) {
                    Some(regs) => regs,
                    None => {
                        let ty = if self.num_outputs(inst) > 0 {
                            Some(self.output_ty(inst, 0))
                        } else {
                            None
                        };
                        return Err(CodegenError::Unsupported(format!(
                            "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
                            self.f.dfg.display_inst(inst),
                            ty
                        )));
                    }
                };

                // The ISLE generated code emits its own registers to define the
                // instruction's lowered values in. However, other instructions
                // that use this SSA value will be lowered assuming that the value
                // is generated into a pre-assigned, different, register.
                //
                // To connect the two, we set up "aliases" in the VCodeBuilder
                // that apply when it is building the Operand table for the
                // regalloc to use. These aliases effectively rewrite any use of
                // the pre-assigned register to the register that was returned by
                // the ISLE lowering logic.
                let results = self.f.dfg.inst_results(inst);
                debug_assert_eq!(temp_regs.len(), results.len());
                for (regs, &result) in temp_regs.iter().zip(results) {
                    let dsts = self.value_regs[result];
                    let mut regs = regs.regs().iter();
                    for &dst in dsts.regs().iter() {
                        let temp = regs.next().copied().unwrap_or(Reg::invalid_sentinel());
                        trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
                        self.vregs.set_vreg_alias(dst, temp);
                    }
                }
            }

            let start = self.vcode.vcode.num_insts();
            let loc = self.srcloc(inst);
            self.finish_ir_inst(loc);

            // If the instruction had a user stack map, forward it from the CLIF
            // to the vcode.
            if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
                let end = self.vcode.vcode.num_insts();
                debug_assert!(end > start);
                debug_assert_eq!(
                    (start..end)
                        .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
                        .count(),
                    1
                );
                for i in start..end {
                    let iix = InsnIndex::new(i);
                    if self.vcode.vcode[iix].is_safepoint() {
                        trace!(
                            "Adding user stack map from clif\n\n\
                             {inst:?} `{}`\n\n\
                             to vcode\n\n\
                             {iix:?} `{}`",
                            self.f.dfg.display_inst(inst),
                            &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
                        );
                        self.vcode
                            .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
                        break;
                    }
                }
            }

            // Maybe insert a random instruction (control-plane fuzzing).
            if ctrl_plane.get_decision() {
                if ctrl_plane.get_decision() {
                    let imm: u64 = ctrl_plane.get_arbitrary();
                    let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
                } else {
                    let imm: f64 = ctrl_plane.get_arbitrary();
                    let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
                    for inst in I::gen_imm_f64(imm, tmp, reg) {
                        self.emit(inst);
                    }
                }
            }

            // Emit value-label markers if needed, to later recover
            // debug mappings. This must happen before the instruction
            // (so after we emit, in bottom-to-top pass).
            self.emit_value_label_markers_for_inst(inst);
        }

        // Add the block params to this block.
        self.add_block_params(block)?;

        self.cur_scan_entry_color = None;
        Ok(())
    }

    fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
        for &param in self.f.dfg.block_params(block) {
            for &reg in self.value_regs[param].regs() {
                let vreg = reg.to_virtual_reg().unwrap();
                self.vcode.add_block_param(vreg);
            }
        }
        Ok(())
    }

    fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
        if let Some(ref values_labels) = self.f.dfg.values_labels {
            debug_assert!(self.f.dfg.value_is_real(val));
            trace!(
                "get_value_labels: val {} -> {:?}",
                val,
                values_labels.get(&val)
            );
            match values_labels.get(&val) {
                Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
                Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
                    self.get_value_labels(value, depth + 1)
                }
                _ => None,
            }
        } else {
            None
        }
    }

    fn emit_value_label_marks_for_value(&mut self, val: Value) {
        let regs = self.value_regs[val];
        if regs.len() > 1 {
            return;
        }
        let reg = regs.only_reg().unwrap();

        if let Some(label_starts) = self.get_value_labels(val, 0) {
            let labels = label_starts
                .iter()
                .map(|&ValueLabelStart { label, .. }| label)
                .collect::<FxHashSet<_>>();
            for label in labels {
                trace!(
                    "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
                    val,
                    reg,
                    label,
                );
                self.vcode.add_value_label(reg, label);
            }
        }
    }

    fn emit_value_label_markers_for_inst(&mut self, inst: Inst) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!(
            "value labeling: srcloc {}: inst {}",
            self.srcloc(inst),
            inst
        );
        for &val in self.f.dfg.inst_results(inst) {
            self.emit_value_label_marks_for_value(val);
        }
    }

    fn emit_value_label_markers_for_block_args(&mut self, block: Block) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!("value labeling: block {}", block);
        for &arg in self.f.dfg.block_params(block) {
            self.emit_value_label_marks_for_value(arg);
        }
        self.finish_ir_inst(Default::default());
    }

    fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
        // The VCodeBuilder builds in reverse order (and reverses at
        // the end), but `ir_insts` is in forward order, so reverse
        // it.
        for inst in self.ir_insts.drain(..).rev() {
            self.vcode.push(inst, loc);
        }
    }

    fn finish_bb(&mut self) {
        self.vcode.end_bb();
    }

    fn lower_clif_branch<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        // Lowered block index:
        bindex: BlockIndex,
        // Original CLIF block:
        block: Block,
        branch: Inst,
        targets: &[MachLabel],
    ) -> CodegenResult<()> {
        trace!(
            "lower_clif_branch: block {} branch {:?} targets {:?}",
            block,
            branch,
            targets,
        );
        // When considering code-motion opportunities, consider the current
        // program point to be this branch.
        self.cur_inst = Some(branch);

        // Lower the branch in ISLE.
        backend
            .lower_branch(self, branch, targets)
            .unwrap_or_else(|| {
                panic!(
                    "should be implemented in ISLE: branch = `{}`",
                    self.f.dfg.display_inst(branch),
                )
            });
        let loc = self.srcloc(branch);
        self.finish_ir_inst(loc);
        // Add block param outputs for current block.
        self.lower_branch_blockparam_args(bindex);
        Ok(())
    }

    fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
        let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];

        // TODO: why not make `block_order` public?
        for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
            branch_arg_vregs.clear();
            let (succ, args) = self.collect_block_call(block, succ_idx, &mut branch_arg_vregs);
            self.vcode.add_succ(succ, args);
        }
    }

    fn collect_branch_and_targets(
        &self,
        bindex: BlockIndex,
        _bb: Block,
        targets: &mut SmallVec<[MachLabel; 2]>,
    ) -> Option<Inst> {
        targets.clear();
        let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
        targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
        opt_inst
    }

    /// Collect the outgoing block-call arguments for a given edge out
    /// of a lowered block.
    fn collect_block_call<'a>(
        &mut self,
        block: BlockIndex,
        succ_idx: usize,
        buffer: &'a mut SmallVec<[Reg; 16]>,
    ) -> (BlockIndex, &'a [Reg]) {
        let block_order = self.vcode.block_order();
        let (_, succs) = block_order.succ_indices(block);
        let succ = succs[succ_idx];
        let this_lb = block_order.lowered_order()[block.index()];
        let succ_lb = block_order.lowered_order()[succ.index()];

        let (branch_inst, succ_idx) = match (this_lb, succ_lb) {
            (_, LoweredBlock::CriticalEdge { .. }) => {
                // The successor is a split-critical-edge block. In this
                // case, this block-call has no arguments, and the
                // arguments go on the critical edge block's unconditional
                // branch instead.
                return (succ, &[]);
            }
            (LoweredBlock::CriticalEdge { pred, succ_idx, .. }, _) => {
                // This is a split-critical-edge block. In this case, our
                // block-call has the arguments that in the CLIF appear in
                // the predecessor's branch to this edge.
                let branch_inst = self.f.layout.last_inst(pred).unwrap();
                (branch_inst, succ_idx as usize)
            }

            (this, _) => {
                let block = this.orig_block().unwrap();
                // Ordinary block, with an ordinary block as
                // successor. Take the arguments from the branch.
                let branch_inst = self.f.layout.last_inst(block).unwrap();
                (branch_inst, succ_idx)
            }
        };

        let block_call =
            self.f.dfg.insts[branch_inst].branch_destination(&self.f.dfg.jump_tables)[succ_idx];
        let args = block_call.args_slice(&self.f.dfg.value_lists);
        for &arg in args {
            debug_assert!(self.f.dfg.value_is_real(arg));
            let regs = self.put_value_in_regs(arg);
            buffer.extend_from_slice(regs.regs());
        }
        (succ, &buffer[..])
    }

    /// Lower the function.
    pub fn lower<B: LowerBackend<MInst = I>>(
        mut self,
        backend: &B,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<VCode<I>> {
        trace!("about to lower function: {:?}", self.f);

        self.vcode.init_retval_area(&mut self.vregs)?;

        // Get the pinned reg here (we only parameterize this function on `B`,
        // not the whole `Lower` impl).
        self.pinned_reg = backend.maybe_pinned_reg();

        self.vcode.set_entry(BlockIndex::new(0));

        // Reused vectors for branch lowering.
        let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();

        // Get a copy of the lowered order; we hold this separately because we
        // need a mut ref to the vcode to mutate it below.
        let lowered_order: SmallVec<[LoweredBlock; 64]> = self
            .vcode
            .block_order()
            .lowered_order()
            .iter()
            .cloned()
            .collect();

        // Main lowering loop over lowered blocks.
        for (bindex, lb) in lowered_order.iter().enumerate().rev() {
            let bindex = BlockIndex::new(bindex);

            // Lower the block body in reverse order (see comment in
            // `lower_clif_block()` for rationale).

            // End branch.
            if let Some(bb) = lb.orig_block() {
                if let Some(branch) = self.collect_branch_and_targets(bindex, bb, &mut targets) {
                    self.lower_clif_branch(backend, bindex, bb, branch, &targets)?;
                    self.finish_ir_inst(self.srcloc(branch));
                }
            } else {
                // If no orig block, this must be a pure edge block;
                // get the successor and emit a jump. This block has
                // no block params; and this jump's block-call args
                // will be filled in by
                // `lower_branch_blockparam_args`.
                let succ = self.vcode.block_order().succ_indices(bindex).1[0];
                self.emit(I::gen_jump(MachLabel::from_block(succ)));
                self.finish_ir_inst(Default::default());
                self.lower_branch_blockparam_args(bindex);
            }

            // Original block body.
            if let Some(bb) = lb.orig_block() {
                self.lower_clif_block(backend, bb, ctrl_plane)?;
                self.emit_value_label_markers_for_block_args(bb);
            }

            if bindex.index() == 0 {
                // Set up the function with arg vreg inits.
                self.gen_arg_setup();
                self.finish_ir_inst(Default::default());
            }

            self.finish_bb();

            // Check for any deferred vreg-temp allocation errors, and
            // bubble one up at this time if it exists.
            if let Some(e) = self.vregs.take_deferred_error() {
                return Err(e);
            }
        }

        // Now that we've emitted all instructions into the
        // VCodeBuilder, let's build the VCode.
        trace!(
            "built vcode:\n{:?}Backwards {:?}",
            &self.vregs,
            &self.vcode.vcode
        );
        let vcode = self.vcode.build(self.vregs);

        Ok(vcode)
    }

    /// Returns whether the given `Value` is never used in the input IR.
    pub fn value_is_unused(&self, val: Value) -> bool {
        match self.value_ir_uses[val] {
            ValueUseState::Unused => true,
            _ => false,
        }
    }
}

/// Pre-analysis: compute `value_ir_uses`. See comment on
/// `ValueUseState` for a description of what this analysis
/// computes.
fn compute_use_states(
    f: &Function,
    sret_param: Option<Value>,
) -> SecondaryMap<Value, ValueUseState> {
    // We perform the analysis without recursion, so we don't
    // overflow the stack on long chains of ops in the input.
    //
    // This is sort of a hybrid of a "shallow use-count" pass and
    // a DFS. We iterate over all instructions and mark their args
    // as used. However, when we increment a use-count to
    // "Multiple" we push its args onto the stack and do a DFS,
    // immediately marking the whole dependency tree as
    // Multiple. Doing both (shallow use-counting over all insts,
    // and deep Multiple propagation) lets us trim both
    // traversals, stopping recursion when a node is already at
    // the appropriate state.
    //
    // In particular, note that the *coarsening* into {Unused,
    // Once, Multiple} is part of what makes this pass more
    // efficient than a full indirect-use-counting pass.

    let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);

    if let Some(sret_param) = sret_param {
        // There's an implicit use of the struct-return parameter in each
        // copy of the function epilogue, which we count here.
        value_ir_uses[sret_param] = ValueUseState::Multiple;
    }

    // Stack of iterators over Values as we do DFS to mark
    // Multiple-state subtrees. The iterator type is whatever is
    // returned by `uses` below.
    let mut stack: SmallVec<[_; 16]> = smallvec![];

    // Find the args for the inst corresponding to the given value.
    //
    // Note that "root" instructions are skipped here. This means that multiple
    // uses of any result of a multi-result instruction are not considered
    // multiple uses of the operands of a multi-result instruction. This
    // requires tight coupling with `get_value_as_source_or_const` above which
    // is the consumer of the map that this function is producing.
    let uses = |value| {
        trace!(" -> pushing args for {} onto stack", value);
        if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
            if is_value_use_root(f, src_inst) {
                None
            } else {
                Some(f.dfg.inst_values(src_inst))
            }
        } else {
            None
        }
    };

    // Do a DFS through `value_ir_uses` to mark a subtree as
    // Multiple.
    for inst in f
        .layout
        .blocks()
        .flat_map(|block| f.layout.block_insts(block))
    {
        // Iterate over all values used by all instructions, noting an
        // additional use on each operand.
        for arg in f.dfg.inst_values(inst) {
            debug_assert!(f.dfg.value_is_real(arg));
            let old = value_ir_uses[arg];
            value_ir_uses[arg].inc();
            let new = value_ir_uses[arg];
            trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);

            // On transition to Multiple, do DFS.
            if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
                continue;
            }
            if let Some(iter) = uses(arg) {
                stack.push(iter);
            }
            while let Some(iter) = stack.last_mut() {
                if let Some(value) = iter.next() {
                    debug_assert!(f.dfg.value_is_real(value));
                    trace!(" -> DFS reaches {}", value);
                    if value_ir_uses[value] == ValueUseState::Multiple {
                        // Truncate DFS here: no need to go further,
                        // as whole subtree must already be Multiple.
                        // With debug asserts, check one level of
                        // that invariant at least.
                        debug_assert!(uses(value).into_iter().flatten().all(|arg| {
                            debug_assert!(f.dfg.value_is_real(arg));
                            value_ir_uses[arg] == ValueUseState::Multiple
                        }));
                        continue;
                    }
                    value_ir_uses[value] = ValueUseState::Multiple;
                    trace!(" -> became Multiple");
                    if let Some(iter) = uses(value) {
                        stack.push(iter);
                    }
                } else {
                    // Empty iterator, discard.
                    stack.pop();
                }
            }
        }
    }

    value_ir_uses
}

/// Definition of a "root" instruction for the calculation of `ValueUseState`.
///
/// This function calculates whether `inst` is considered a "root" for value-use
/// information. This concept is used to forcibly prevent looking-through the
/// instruction during `get_value_as_source_or_const` as it additionally
/// prevents propagating `Multiple`-used results of the `inst` here to the
/// operands of the instruction.
///
/// Currently this is defined as multi-result instructions. That means that
/// lowerings are never allowed to look through a multi-result instruction to
/// generate patterns. Note that this isn't possible in ISLE today anyway so
/// this isn't currently much of a loss.
///
/// The main purpose of this function is to prevent the operands of a
/// multi-result instruction from being forcibly considered `Multiple`-used
/// regardless of circumstances.
fn is_value_use_root(f: &Function, inst: Inst) -> bool {
    f.dfg.inst_results(inst).len() > 1
}

/// Function-level queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the `DataFlowGraph` of the function being lowered.
    pub fn dfg(&self) -> &DataFlowGraph {
        &self.f.dfg
    }

    /// Get the `Callee`.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        self.vcode.abi()
    }

    /// Get the `Callee`, mutably.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        self.vcode.abi_mut()
    }
}

/// Instruction input/output queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the instdata for a given IR instruction.
    pub fn data(&self, ir_inst: Inst) -> &InstructionData {
        &self.f.dfg.insts[ir_inst]
    }

    /// Get the symbol name, relocation distance estimate, and offset for a
    /// `GlobalValue`, if it is a symbolic value.
    pub fn symbol_value_data<'b>(
        &'b self,
        global_value: GlobalValue,
    ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
        let gvdata = &self.f.global_values[global_value];
        match gvdata {
            &GlobalValueData::Symbol {
                ref name,
                ref offset,
                colocated,
                ..
            } => {
                let offset = offset.bits();
                let dist = if colocated {
                    RelocDistance::Near
                } else {
                    RelocDistance::Far
                };
                Some((name, dist, offset))
            }
            _ => None,
        }
    }

    /// Returns the memory flags of a given memory access.
    pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
        match &self.f.dfg.insts[ir_inst] {
            &InstructionData::AtomicCas { flags, .. } => Some(flags),
            &InstructionData::AtomicRmw { flags, .. } => Some(flags),
            &InstructionData::Load { flags, .. }
            | &InstructionData::LoadNoOffset { flags, .. }
            | &InstructionData::Store { flags, .. } => Some(flags),
            &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
            _ => None,
        }
    }

    /// Get the source location for a given instruction.
    pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
        self.f.rel_srclocs()[ir_inst]
    }

    /// Get the number of inputs to the given IR instruction. This is a count only of the Value
    /// arguments to the instruction: block arguments will not be included in this count.
    pub fn num_inputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_args(ir_inst).len()
    }

    /// Get the number of outputs to the given IR instruction.
    pub fn num_outputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_results(ir_inst).len()
    }

    /// Get the type for an instruction's input.
    pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.value_ty(self.input_as_value(ir_inst, idx))
    }

    /// Get the type for a value.
    pub fn value_ty(&self, val: Value) -> Type {
        self.f.dfg.value_type(val)
    }

    /// Get the type for an instruction's output.
    pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
    }

    /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
    /// value, if possible.
    pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
        self.inst_constants.get(&ir_inst).map(|&c| {
            // The upper bits must be zero, enforced during legalization and by
            // the CLIF verifier.
            debug_assert_eq!(c, {
                let input_size = self.output_ty(ir_inst, 0).bits() as u64;
                let shift = 64 - input_size;
                (c << shift) >> shift
            });
            c
        })
    }

    /// Get the input as one of two options other than a direct register:
    ///
    /// - An instruction, given that it is effect-free or able to sink its
    ///   effect to the current instruction being lowered, and given it has only
    ///   one output, and if effect-ful, given that this is the only use;
    /// - A constant, if the value is a constant.
    ///
    /// The instruction input may be available in either of these forms. It may
    /// be available in neither form, if the conditions are not met; if so, use
    /// `put_input_in_regs()` instead to get it in a register.
    ///
    /// If the backend merges the effect of a side-effecting instruction, it
    /// must call `sink_inst()`. When this is called, it indicates that the
    /// effect has been sunk to the current scan location. The sunk
    /// instruction's result(s) must have *no* uses remaining, because it will
    /// not be codegen'd (it has been integrated into the current instruction).
    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        debug_assert!(self.f.dfg.value_is_real(val));
        val
    }

    /// Resolves a particular input of an instruction to the `Value` that it is
    /// represented with.
    ///
    /// For more information see [`Lower::get_value_as_source_or_const`].
    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
        let val = self.input_as_value(ir_inst, idx);
        self.get_value_as_source_or_const(val)
    }

    /// Resolves a `Value` definition to the source instruction it came from
    /// plus whether it's a unique-use of that instruction.
    ///
    /// This function is the workhorse of pattern-matching in ISLE which enables
    /// combining multiple instructions together. This is used implicitly in
    /// patterns such as `(iadd x (iconst y))` where this function is used to
    /// extract the `(iconst y)` operand.
    ///
    /// At its core this function is a wrapper around
    /// [`DataFlowGraph::value_def`]. This function applies a filter on top of
    /// that, however, to determine when it is actually safe to "look through"
    /// the `val` definition here and view the underlying instruction. This
    /// protects against duplicating side effects, such as loads, for example.
    ///
    /// Internally this uses the data computed from `compute_use_states` along
    /// with other instruction properties to know what to return.
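    ///
    /// An illustrative, hand-written sketch of how a backend might consume
    /// the result (ISLE-generated matchers do the equivalent internally):
    ///
    /// ```ignore
    /// let input = ctx.get_value_as_source_or_const(val);
    /// if let Some(c) = input.constant {
    ///     // Use `c` in an immediate form of the instruction.
    /// } else if let InputSourceInst::UniqueUse(src, _) = input.inst {
    ///     // Safe to merge `src` into the current lowering; if `src` is
    ///     // side-effecting (e.g. a load), `ctx.sink_inst(src)` must be
    ///     // called so it is not also codegen'd at its original location.
    /// }
    /// ```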
    pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
        trace!(
            "get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
            val,
            self.cur_inst,
            self.cur_scan_entry_color,
        );
        let inst = match self.f.dfg.value_def(val) {
            // OK to merge source instruction if we have a source
            // instruction, and one of these two conditions holds:
            //
            // - It has no side-effects and this instruction is not a "value-use
            //   root" instruction. Instructions which are considered "roots"
            //   for value-use calculations do not have accurate information
            //   known about the `ValueUseState` of their operands. This is
            //   currently done for multi-result instructions to prevent a use
            //   of each result from forcing all operands of the multi-result
            //   instruction to also be `Multiple`. This in turn means that the
            //   `ValueUseState` for operands of a "root" instruction would be
            //   a lie if pattern matching were to look through the multi-result
            //   instruction. As a result the "look through this instruction"
            //   logic only succeeds if it's not a root instruction.
            //
            // - It has a side-effect, has one output value, that one
            //   output has only one use, directly or indirectly (so
            //   cannot be duplicated -- see comment on
            //   `ValueUseState`), and the instruction's color is *one
            //   less than* the current scan color.
            //
            //   This latter set of conditions is testing whether a
            //   side-effecting instruction can sink to the current scan
            //   location; this is possible if the in-color of this inst is
            //   equal to the out-color of the producing inst, so no other
            //   side-effecting ops occur between them (which will only be true
            //   if they are in the same BB, because color increments at each BB
            //   start).
            //
            // If it is actually sunk, then in `merge_inst()`, we update the
            // scan color so that as we scan over the range past which the
            // instruction was sunk, we allow other instructions (that came
            // prior to the sunk instruction) to sink.
            ValueDef::Result(src_inst, result_idx) => {
                let src_side_effect = has_lowering_side_effect(self.f, src_inst);
                trace!(" -> src inst {}", src_inst);
                trace!(" -> has lowering side effect: {}", src_side_effect);
                if is_value_use_root(self.f, src_inst) {
                    // If this instruction is a "root instruction" then it's
                    // required that we can't look through it to see the
                    // definition. This means that the `ValueUseState` for the
                    // operands of this result assume that this instruction is
                    // generated exactly once, which might get violated were we
                    // to allow looking through it.
                    trace!(" -> is a root instruction");
                    InputSourceInst::None
                } else if !src_side_effect {
                    // Otherwise if this instruction has no side effects and the
                    // value is used only once then we can look through it with
                    // a "unique" tag. A non-unique `Use` can be shown for other
                    // values ensuring consumers know how it's computed but that
                    // it's not available to omit.
                    if self.value_ir_uses[val] == ValueUseState::Once {
                        InputSourceInst::UniqueUse(src_inst, result_idx)
                    } else {
                        InputSourceInst::Use(src_inst, result_idx)
                    }
                } else {
                    // Side-effect: test whether this is the only use of the
                    // only result of the instruction, and whether colors allow
                    // the code-motion.
                    trace!(
                        " -> side-effecting op {} for val {}: use state {:?}",
                        src_inst,
                        val,
                        self.value_ir_uses[val]
                    );
                    if self.cur_scan_entry_color.is_some()
                        && self.value_ir_uses[val] == ValueUseState::Once
                        && self.num_outputs(src_inst) == 1
                        && self
                            .side_effect_inst_entry_colors
                            .get(&src_inst)
                            .unwrap()
                            .get()
                            + 1
                            == self.cur_scan_entry_color.unwrap().get()
                    {
                        InputSourceInst::UniqueUse(src_inst, 0)
                    } else {
                        InputSourceInst::None
                    }
                }
            }
            _ => InputSourceInst::None,
        };
        let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));

        NonRegInput { inst, constant }
    }

    /// Increment the reference count for the Value, ensuring that it gets lowered.
    pub fn increment_lowered_uses(&mut self, val: Value) {
        self.value_lowered_uses[val] += 1
    }

    /// Put the `idx`th input into register(s) and return the assigned register.
    pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        self.put_value_in_regs(val)
    }

    /// Put the given value into register(s) and return the assigned register.
    pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
        debug_assert!(self.f.dfg.value_is_real(val));
        trace!("put_value_in_regs: val {}", val);

        if let Some(inst) = self.f.dfg.value_def(val).inst() {
            assert!(!self.inst_sunk.contains(&inst));
        }

        let regs = self.value_regs[val];
        trace!(" -> regs {:?}", regs);
        assert!(regs.is_valid());

        self.value_lowered_uses[val] += 1;

        regs
    }
}

/// Codegen primitives: allocate temps, emit instructions, set result registers,
/// ask for an input to be gen'd into a register.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get a new temp.
    pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
        writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
    }

    /// Emit a machine instruction.
    pub fn emit(&mut self, mach_inst: I) {
        trace!("emit: {:?}", mach_inst);
        self.ir_insts.push(mach_inst);
    }

    /// Indicate that the side-effect of an instruction has been sunk to the
    /// current scan location. This should only be done when the instruction's
    /// original results are not used (i.e., `put_input_in_regs` is not invoked
    /// for the input produced by the sunk instruction); otherwise the
    /// side-effect will occur twice.
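    ///
    /// An illustrative sketch of merging a unique load into the current
    /// lowering (hypothetical backend code):
    ///
    /// ```ignore
    /// if let InputSourceInst::UniqueUse(load_inst, 0) =
    ///     ctx.get_input_as_source_or_const(inst, 0).inst
    /// {
    ///     // ...emit an instruction that embeds the load's addressing mode...
    ///     ctx.sink_inst(load_inst);
    /// }
    /// ```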
    pub fn sink_inst(&mut self, ir_inst: Inst) {
        assert!(has_lowering_side_effect(self.f, ir_inst));
        assert!(self.cur_scan_entry_color.is_some());

        for result in self.dfg().inst_results(ir_inst) {
            assert!(self.value_lowered_uses[*result] == 0);
        }

        let sunk_inst_entry_color = self
            .side_effect_inst_entry_colors
            .get(&ir_inst)
            .cloned()
            .unwrap();
        let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
        assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
        self.cur_scan_entry_color = Some(sunk_inst_entry_color);
        self.inst_sunk.insert(ir_inst);
    }

    /// Retrieve immediate data given a handle.
    pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
        self.f.dfg.immediates.get(imm).unwrap()
    }

    /// Retrieve constant data given a handle.
    pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
        self.f.dfg.constants.get(constant_handle)
    }

    /// Indicate that a constant should be emitted.
    pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
        self.vcode.constants().insert(constant)
    }

    /// Cause the value in `reg` to be in a virtual reg, by copying it into a
    /// new virtual reg if `reg` is a real reg. `ty` describes the type of the
    /// value in `reg`.
    pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
        if reg.to_virtual_reg().is_some() {
            reg
        } else {
            let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
            self.emit(I::gen_move(new_reg, reg, ty));
            new_reg.to_reg()
        }
    }

    /// Add a range fact to a register, if no other fact is present.
    pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
        if self.flags.enable_pcc() {
            self.vregs.set_fact_if_missing(
                reg.to_virtual_reg().unwrap(),
                Fact::Range {
                    bit_width,
                    min,
                    max,
                },
            );
        }
    }
}

#[cfg(test)]
mod tests {
    use super::ValueUseState;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types;
    use crate::ir::{Function, InstBuilder};

    #[test]
    fn multi_result_use_once() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v5]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Once);
        assert_eq!(uses[v5], ValueUseState::Once);
    }

    #[test]
    fn results_used_twice_but_not_operands() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v4]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Multiple);
        assert_eq!(uses[v5], ValueUseState::Unused);
    }
}
1623}