cranelift_codegen/machinst/lower.rs
1//! This module implements lowering (instruction selection) from Cranelift IR
2//! to machine instructions with virtual registers. This is *almost* the final
3//! machine code, except for register allocation.
4
5// TODO: separate the IR-query core of `Lower` from the lowering logic built on
6// top of it, e.g. the side-effect/coloring analysis and the scan support.
7
8use crate::entity::SecondaryMap;
9use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
10use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
11use crate::ir::{
12 ArgumentPurpose, Block, Constant, ConstantData, DataFlowGraph, ExternalName, Function,
13 GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags, RelSourceLoc, Type,
14 Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
15};
16use crate::machinst::{
17 writable_value_regs, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, Callee, InsnIndex,
18 LoweredBlock, MachLabel, Reg, SigSet, VCode, VCodeBuilder, VCodeConstant, VCodeConstantData,
19 VCodeConstants, VCodeInst, ValueRegs, Writable,
20};
21use crate::settings::Flags;
22use crate::{trace, CodegenResult};
23use alloc::vec::Vec;
24use cranelift_control::ControlPlane;
25use rustc_hash::{FxHashMap, FxHashSet};
26use smallvec::{smallvec, SmallVec};
27use std::fmt::Debug;
28
29use super::{VCodeBuildDirection, VRegAllocator};
30
31/// A vector of ValueRegs, used to represent the outputs of an instruction.
32pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;
33
34/// An "instruction color" partitions CLIF instructions by side-effecting ops.
35/// All instructions with the same "color" are guaranteed not to be separated by
36/// any side-effecting op (for this purpose, loads are also considered
37/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
38/// furthermore, it is guaranteed that for any two instructions A and B such
39/// that color(A) == color(B), either A dominates B and B postdominates A, or
40/// vice-versa. (For now, in practice, only ops in the same basic block can ever
41/// have the same color, trivially providing the second condition.) Intuitively,
42/// this means that the ops of the same color must always execute "together", as
43/// part of one atomic contiguous section of the dynamic execution trace, and
44/// they can be freely permuted (modulo true dataflow dependencies) without
45/// affecting program behavior.
46#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
47struct InstColor(u32);
48impl InstColor {
49 fn new(n: u32) -> InstColor {
50 InstColor(n)
51 }
52
53 /// Get an arbitrary index representing this color. The index is unique
54 /// *within a single function compilation*, but indices may be reused across
55 /// functions.
56 pub fn get(self) -> u32 {
57 self.0
58 }
59}
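
// A hypothetical illustration of the coloring (schematic CLIF; the actual
// assignment happens in `Lower::new()` below). Each side-effecting op, loads
// included, records its entry color and bumps the color for everything after
// it:
//
//     v1 = iconst.i64 16     ; color 1
//     v2 = load.i64 v0       ; entry color 1, exit color 2 (side-effecting)
//     v3 = iadd v1, v2       ; color 2
//     store v3, v0           ; entry color 2, exit color 3 (side-effecting)
//     v4 = iadd v3, v1       ; color 3
//
// The load's exit color matches the color of the `iadd` defining `v3`, so no
// other side-effect separates them and the load may be sunk into that
// lowering; it could not be sunk past the store into `v4`'s lowering.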
60
61/// A representation of all of the ways in which a value is available, aside
62/// from as a direct register.
63///
64/// - An instruction, if it would be allowed to occur at the current location
65/// instead (see [Lower::get_input_as_source_or_const()] for more details).
66///
67/// - A constant, if the value is known to be a constant.
68#[derive(Clone, Copy, Debug)]
69pub struct NonRegInput {
70 /// An instruction produces this value (as the given output), and its
71 /// computation (and side-effect if applicable) could occur at the
72 /// current instruction's location instead.
73 ///
74 /// If this instruction's operation is merged into the current instruction,
75 /// the backend must call [Lower::sink_inst()].
76 ///
77 /// This enum indicates whether this use of the source instruction
78 /// is unique or not.
79 pub inst: InputSourceInst,
80 /// The value is a known constant.
81 pub constant: Option<u64>,
82}
83
84/// When examining an input to an instruction, this enum provides one
85/// of several options: there is or isn't a single instruction (that
86/// we can see and merge with) that produces that input's value, and
87/// we are or aren't the single user of that instruction.
88#[derive(Clone, Copy, Debug)]
89pub enum InputSourceInst {
90 /// The input in question is the single, unique use of the given
91 /// instruction and output index, and it can be sunk to the
92 /// location of this input.
93 UniqueUse(Inst, usize),
94 /// The input in question is one of multiple uses of the given
95 /// instruction. It can still be sunk to the location of this
96 /// input.
97 Use(Inst, usize),
98 /// We cannot determine which instruction produced the input, or
99 /// it is one of several instructions (e.g., due to a control-flow
100 /// merge and blockparam), or the source instruction cannot be
101 /// allowed to sink to the current location due to side-effects.
102 None,
103}
104
105impl InputSourceInst {
106 /// Get the instruction and output index for this source, whether
107 /// we are its single or one of many users.
108 pub fn as_inst(&self) -> Option<(Inst, usize)> {
109 match self {
110 &InputSourceInst::UniqueUse(inst, output_idx)
111 | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
112 &InputSourceInst::None => None,
113 }
114 }
115}
116
117/// A machine backend.
118pub trait LowerBackend {
119 /// The machine instruction type.
120 type MInst: VCodeInst;
121
122 /// Lower a single instruction.
123 ///
124 /// For a branch, this function should not generate the actual branch
125 /// instruction. However, it must force any values it needs for the branch
126 /// edge (block-param actuals) into registers, because the actual branch
127 /// generation (`lower_branch()`) happens *after* any possible merged
128 /// out-edge.
129 ///
130 /// Returns `None` if no lowering for the instruction was found.
131 fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;
132
133 /// Lower a block-terminating group of branches (which together can be seen
134 /// as one N-way branch), given a vcode MachLabel for each target.
135 ///
136 /// Returns `None` if no lowering for the branch was found.
137 fn lower_branch(
138 &self,
139 ctx: &mut Lower<Self::MInst>,
140 inst: Inst,
141 targets: &[MachLabel],
142 ) -> Option<()>;
143
144 /// A bit of a hack: give a fixed register that always holds the result of a
145 /// `get_pinned_reg` instruction, if known. This allows elision of moves
146 /// into the associated vreg, instead using the real reg directly.
147 fn maybe_pinned_reg(&self) -> Option<Reg> {
148 None
149 }
150
151 /// The type of state carried between `check_fact` invocations.
152 type FactFlowState: Default + Clone + Debug;
153
154 /// Check any facts about an instruction, given VCode with facts
155 /// on VRegs. Takes mutable `VCode` so that it can propagate some
156 /// kinds of facts automatically.
157 fn check_fact(
158 &self,
159 _ctx: &FactContext<'_>,
160 _vcode: &mut VCode<Self::MInst>,
161 _inst: InsnIndex,
162 _state: &mut Self::FactFlowState,
163 ) -> PccResult<()> {
164 Err(PccError::UnimplementedBackend)
165 }
166}
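
// A minimal sketch of the shape of a `LowerBackend` implementation, assuming a
// hypothetical `MyInst` type that implements `VCodeInst`. Real backends derive
// the bodies of `lower()` and `lower_branch()` from ISLE rules rather than
// writing them by hand:
//
//     struct MyBackend;
//
//     impl LowerBackend for MyBackend {
//         type MInst = MyInst;
//         type FactFlowState = ();
//
//         fn lower(&self, ctx: &mut Lower<MyInst>, inst: Inst) -> Option<InstOutput> {
//             // Match on `ctx.data(inst)`, emit machine code via `ctx.emit(..)`,
//             // and return the temporary registers that hold each result
//             // (or `None` if the instruction is not supported).
//             None
//         }
//
//         fn lower_branch(
//             &self,
//             ctx: &mut Lower<MyInst>,
//             inst: Inst,
//             targets: &[MachLabel],
//         ) -> Option<()> {
//             // Emit the final branch(es) toward `targets`.
//             None
//         }
//     }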
167
168/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
169/// from original Inst to MachInsts.
170pub struct Lower<'func, I: VCodeInst> {
171 /// The function to lower.
172 f: &'func Function,
173
174 /// Lowered machine instructions.
175 vcode: VCodeBuilder<I>,
176
177 /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
178 vregs: VRegAllocator<I>,
179
180 /// Mapping from `Value` (SSA value in IR) to virtual register.
181 value_regs: SecondaryMap<Value, ValueRegs<Reg>>,
182
183 /// sret registers, if needed.
184 sret_reg: Option<ValueRegs<Reg>>,
185
186 /// Instruction colors at block exits. From this map, we can recover all
187 /// instruction colors by scanning backward from the block end and
188 /// decrementing on any color-changing (side-effecting) instruction.
189 block_end_colors: SecondaryMap<Block, InstColor>,
190
191 /// Instruction colors at side-effecting ops. This is the *entry* color,
192 /// i.e., the version of global state that exists before an instruction
193 /// executes. For each side-effecting instruction, the *exit* color is its
194 /// entry color plus one.
195 side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,
196
197 /// Current color as we scan during lowering. While we are lowering an
198 /// instruction, this is equal to the color *at entry to* the instruction.
199 cur_scan_entry_color: Option<InstColor>,
200
201 /// Current instruction as we scan during lowering.
202 cur_inst: Option<Inst>,
203
204 /// Instruction constant values, if known.
205 inst_constants: FxHashMap<Inst, u64>,
206
207 /// Use-counts per SSA value, as counted in the input IR. These
208 /// are "coarsened", in the abstract-interpretation sense: we only
209 /// care about "0, 1, many" states, as this is all we need and
210 /// this lets us do an efficient fixpoint analysis.
211 ///
212 /// See doc comment on `ValueUseState` for more details.
213 value_ir_uses: SecondaryMap<Value, ValueUseState>,
214
215 /// Actual uses of each SSA value so far, incremented while lowering.
216 value_lowered_uses: SecondaryMap<Value, u32>,
217
218 /// Effectful instructions that have been sunk; they are not codegen'd at
219 /// their original locations.
220 inst_sunk: FxHashSet<Inst>,
221
222 /// Instructions collected for the CLIF inst in progress, in forward order.
223 ir_insts: Vec<I>,
224
225 /// The register to use for GetPinnedReg, if any, on this architecture.
226 pinned_reg: Option<Reg>,
227
228 /// Compilation flags.
229 flags: Flags,
230}
231
232/// How is a value used in the IR?
233///
234/// This can be seen as a coarsening of an integer count. We only need
235/// distinct states for zero, one, or many.
236///
237/// This analysis deserves further explanation. The basic idea is that
238/// we want to allow instruction lowering to know whether a value that
239/// an instruction references is *only* referenced by that one use, or
240/// by others as well. This is necessary to know when we might want to
241/// move a side-effect: we cannot, for example, duplicate a load, so
242/// we cannot let instruction lowering match a load as part of a
243/// subpattern and potentially incorporate it.
244///
245/// Note that a lot of subtlety comes into play once we have
246/// *indirect* uses. The classical example of this in our development
247/// history was the x86 compare instruction, which is incorporated
248/// into flags users (e.g. `selectif`, `trueif`, branches) and can
249/// subsequently incorporate loads, or at least we would like it
250/// to. However, danger awaits: the compare might be the only user of
251/// a load, so we might think we can just move the load (and nothing
252/// is duplicated -- success!), except that the compare itself is
253/// codegen'd in multiple places, where it is incorporated as a
254/// subpattern itself.
255///
256/// So we really want a notion of "unique all the way along the
257/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
258/// to the semantics that we want here: we want to know when we've
259/// matched a unique use of an instruction, and that instruction's
260/// unique use of another instruction, etc, just as `&mut T` can only
261/// be obtained by going through a chain of `&mut T`. If one has a
262/// `&T` to a struct containing `&mut T` (one of several uses of an
263/// instruction that itself has a unique use of an instruction), one
264/// can only get a `&T` (one can only get a "I am one of several users
265/// of this instruction" result).
266///
267/// We could track these paths, either dynamically as one "looks up the operand
268/// tree" or precomputed. But the former requires state and means that the
269/// `Lower` API carries that state implicitly, which we'd like to avoid if we
270/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
271/// inst `i` unique from the point of view of `j`).
272///
273/// To make matters even a little more complex still, a value that is
274/// not uniquely used when initially viewing the IR can *become*
275/// uniquely used, at least as a root allowing further unique uses of
276/// e.g. loads to merge, if no other instruction actually merges
277/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
278/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
279/// point of view of lowering `v4` or `v3`, we cannot merge the load
280/// at `v1`. But if we decide just to use the assigned register for
281/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
282/// once, so it *is* a unique root at that point and we *can* merge
283/// the load.
284///
285/// Note also that the color scheme is not sufficient to give us this
286/// information, for various reasons: reasoning about side-effects
287/// does not tell us about potential duplication of uses through pure
288/// ops.
289///
290/// To keep things simple and avoid error-prone lowering APIs that
291/// would extract more information about whether instruction merging
292/// happens or not (we don't have that info now, and it would be
293/// difficult to refactor to get it and make that refactor 100%
294/// correct), we give up on the above "can become unique if not
295/// actually merged" point. Instead, we compute a
296/// transitive-uniqueness. That is what this enum represents.
297///
298/// To define it plainly: a value is `Unused` if no references exist
299/// to it; `Once` if only one other op refers to it, *and* that other
300/// op is `Unused` or `Once`; and `Multiple` otherwise. In other
301/// words, `Multiple` is contagious: even if an op's result value is
302/// directly used only once in the CLIF, that value is `Multiple` if
303/// the op that uses it is itself used multiple times (hence could be
304/// codegen'd multiple times). In brief, this analysis tells us
305/// whether, if every op merged all of its operand tree, a given op
306/// could be codegen'd in more than one place.
307///
308/// To compute this, we first consider direct uses. At this point
309/// `Unused` answers are correct, `Multiple` answers are correct, but
310/// some `Once`s may change to `Multiple`s. Then we propagate
311/// `Multiple` transitively using a workqueue/fixpoint algorithm.
312#[derive(Clone, Copy, Debug, PartialEq, Eq)]
313enum ValueUseState {
314 /// Not used at all.
315 Unused,
316 /// Used exactly once.
317 Once,
318 /// Used multiple times.
319 Multiple,
320}
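
// A small hypothetical example of the transitive rule. In
//
//     v1 := load; v2 := iadd v1, v1
//
// `v1` has two direct uses, so it is `Multiple`. In
//
//     v1 := load; v2 := ineg v1; v3 := iadd v2, v2
//
// `v1` has only one direct use, but its user's result `v2` is `Multiple`, so
// the fixpoint pass below also marks `v1` as `Multiple`: if the `ineg` were
// merged into both of its uses, the load could end up codegen'd twice.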
321
322impl ValueUseState {
323 /// Add one use.
324 fn inc(&mut self) {
325 let new = match self {
326 Self::Unused => Self::Once,
327 Self::Once | Self::Multiple => Self::Multiple,
328 };
329 *self = new;
330 }
331}
332
333/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
334/// reference.
335#[derive(Clone, Copy, Debug, PartialEq, Eq)]
336pub enum RelocDistance {
337 /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
338 /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
339 /// 128MB offset. If unsure, use `Far` instead.
340 Near,
341 /// Target of relocation could be anywhere in the address space.
342 Far,
343}
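
// For example, a backend might emit a direct PC-relative call to a `Near`
// callee, but materialize the full absolute address and call indirectly for a
// `Far` one; the precise choice is backend-specific.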
344
345impl<'func, I: VCodeInst> Lower<'func, I> {
346 /// Prepare a new lowering context for the given IR function.
347 pub fn new(
348 f: &'func Function,
349 abi: Callee<I::ABIMachineSpec>,
350 emit_info: I::Info,
351 block_order: BlockLoweringOrder,
352 sigs: SigSet,
353 flags: Flags,
354 ) -> CodegenResult<Self> {
355 let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
356 let vcode = VCodeBuilder::new(
357 sigs,
358 abi,
359 emit_info,
360 block_order,
361 constants,
362 VCodeBuildDirection::Backward,
363 );
364
365 // We usually need two VRegs per instruction result, plus extras for
366 // various temporaries, but two per Value is a good starting point.
367 let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);
368
369 let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
370
371 // Assign a vreg to each block param and each inst result.
372 for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
374 let ty = f.dfg.value_type(param);
375 if value_regs[param].is_invalid() {
376 let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
377 value_regs[param] = regs;
378 trace!("bb {} param {}: regs {:?}", bb, param, regs);
379 }
380 }
381 for inst in f.layout.block_insts(bb) {
382 for &result in f.dfg.inst_results(inst) {
383 let ty = f.dfg.value_type(result);
384 if value_regs[result].is_invalid() && !ty.is_invalid() {
385 let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
386 value_regs[result] = regs;
387 trace!(
388 "bb {} inst {} ({:?}): result {} regs {:?}",
389 bb,
390 inst,
391 f.dfg.insts[inst],
392 result,
393 regs,
394 );
395 }
396 }
397 }
398 }
399
400 // Find the sret register, if it's used.
401 let mut sret_param = None;
402 for ret in vcode.abi().signature().returns.iter() {
403 if ret.purpose == ArgumentPurpose::StructReturn {
404 let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
406 .dfg
407 .block_params(entry_bb)
408 .iter()
409 .zip(vcode.abi().signature().params.iter())
410 {
411 if sig_param.purpose == ArgumentPurpose::StructReturn {
412 assert!(sret_param.is_none());
413 sret_param = Some(param);
414 }
415 }
416
417 assert!(sret_param.is_some());
418 }
419 }
420
421 let sret_reg = sret_param.map(|param| {
422 let regs = value_regs[param];
423 assert!(regs.len() == 1);
424 regs
425 });
426
427 // Compute instruction colors, find constant instructions, and find instructions with
428 // side-effects, in one combined pass.
429 let mut cur_color = 0;
430 let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
431 let mut side_effect_inst_entry_colors = FxHashMap::default();
432 let mut inst_constants = FxHashMap::default();
433 for bb in f.layout.blocks() {
434 cur_color += 1;
435 for inst in f.layout.block_insts(bb) {
436 let side_effect = has_lowering_side_effect(f, inst);
437
438 trace!("bb {} inst {} has color {}", bb, inst, cur_color);
439 if side_effect {
440 side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
441 trace!(" -> side-effecting; incrementing color for next inst");
442 cur_color += 1;
443 }
444
445 // Determine if this is a constant; if so, add to the table.
446 if let Some(c) = is_constant_64bit(f, inst) {
447 trace!(" -> constant: {}", c);
448 inst_constants.insert(inst, c);
449 }
450 }
451
452 block_end_colors[bb] = InstColor::new(cur_color);
453 }
454
455 let value_ir_uses = Self::compute_use_states(f, sret_param);
456
457 Ok(Lower {
458 f,
459 vcode,
460 vregs,
461 value_regs,
462 sret_reg,
463 block_end_colors,
464 side_effect_inst_entry_colors,
465 inst_constants,
466 value_ir_uses,
467 value_lowered_uses: SecondaryMap::default(),
468 inst_sunk: FxHashSet::default(),
469 cur_scan_entry_color: None,
470 cur_inst: None,
471 ir_insts: vec![],
472 pinned_reg: None,
473 flags,
474 })
475 }
476
477 pub fn sigs(&self) -> &SigSet {
478 self.vcode.sigs()
479 }
480
481 pub fn sigs_mut(&mut self) -> &mut SigSet {
482 self.vcode.sigs_mut()
483 }
484
485 /// Pre-analysis: compute `value_ir_uses`. See comment on
486 /// `ValueUseState` for a description of what this analysis
487 /// computes.
488 fn compute_use_states(
489 f: &Function,
490 sret_param: Option<Value>,
491 ) -> SecondaryMap<Value, ValueUseState> {
492 // We perform the analysis without recursion, so we don't
493 // overflow the stack on long chains of ops in the input.
494 //
495 // This is sort of a hybrid of a "shallow use-count" pass and
496 // a DFS. We iterate over all instructions and mark their args
497 // as used. However when we increment a use-count to
498 // "Multiple" we push its args onto the stack and do a DFS,
499 // immediately marking the whole dependency tree as
500 // Multiple. Doing both (shallow use-counting over all insts,
501 // and deep Multiple propagation) lets us trim both
502 // traversals, stopping recursion when a node is already at
503 // the appropriate state.
504 //
505 // In particular, note that the *coarsening* into {Unused,
506 // Once, Multiple} is part of what makes this pass more
507 // efficient than a full indirect-use-counting pass.
508
509 let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);
510
511 if let Some(sret_param) = sret_param {
512 // There's an implicit use of the struct-return parameter in each
513 // copy of the function epilogue, which we count here.
514 value_ir_uses[sret_param] = ValueUseState::Multiple;
515 }
516
517 // Stack of iterators over Values as we do DFS to mark
518 // Multiple-state subtrees. The iterator type is whatever is
519 // returned by `uses` below.
520 let mut stack: SmallVec<[_; 16]> = smallvec![];
521
522 // Find the args for the inst corresponding to the given value.
523 let uses = |value| {
524 trace!(" -> pushing args for {} onto stack", value);
525 if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
526 Some(f.dfg.inst_values(src_inst))
527 } else {
528 None
529 }
530 };
531
532 // Do a DFS through `value_ir_uses` to mark a subtree as
533 // Multiple.
534 for inst in f
535 .layout
536 .blocks()
537 .flat_map(|block| f.layout.block_insts(block))
538 {
539 // If this inst produces multiple values, we must mark all
540 // of its args as Multiple, because otherwise two uses
541 // could come in as Once on our two different results.
542 let force_multiple = f.dfg.inst_results(inst).len() > 1;
543
544 // Iterate over all values used by all instructions, noting an
545 // additional use on each operand.
546 for arg in f.dfg.inst_values(inst) {
547 debug_assert!(f.dfg.value_is_real(arg));
548 let old = value_ir_uses[arg];
549 if force_multiple {
550 trace!(
551 "forcing arg {} to Multiple because of multiple results of user inst",
552 arg
553 );
554 value_ir_uses[arg] = ValueUseState::Multiple;
555 } else {
556 value_ir_uses[arg].inc();
557 }
558 let new = value_ir_uses[arg];
559 trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);
560
561 // On transition to Multiple, do DFS.
562 if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
563 continue;
564 }
565 if let Some(iter) = uses(arg) {
566 stack.push(iter);
567 }
568 while let Some(iter) = stack.last_mut() {
569 if let Some(value) = iter.next() {
570 debug_assert!(f.dfg.value_is_real(value));
571 trace!(" -> DFS reaches {}", value);
572 if value_ir_uses[value] == ValueUseState::Multiple {
573 // Truncate DFS here: no need to go further,
574 // as whole subtree must already be Multiple.
575 // With debug asserts, check one level of
576 // that invariant at least.
577 debug_assert!(uses(value).into_iter().flatten().all(|arg| {
578 debug_assert!(f.dfg.value_is_real(arg));
579 value_ir_uses[arg] == ValueUseState::Multiple
580 }));
581 continue;
582 }
583 value_ir_uses[value] = ValueUseState::Multiple;
584 trace!(" -> became Multiple");
585 if let Some(iter) = uses(value) {
586 stack.push(iter);
587 }
588 } else {
589 // Empty iterator, discard.
590 stack.pop();
591 }
592 }
593 }
594 }
595
596 value_ir_uses
597 }
598
599 fn gen_arg_setup(&mut self) {
600 if let Some(entry_bb) = self.f.layout.entry_block() {
601 trace!(
602 "gen_arg_setup: entry BB {} args are:\n{:?}",
603 entry_bb,
604 self.f.dfg.block_params(entry_bb)
605 );
606
607 for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
608 if self.value_ir_uses[*param] == ValueUseState::Unused {
609 continue;
610 }
611 let regs = writable_value_regs(self.value_regs[*param]);
612 for insn in self
613 .vcode
614 .vcode
615 .abi
616 .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
617 .into_iter()
618 {
619 self.emit(insn);
620 }
621 }
622 if let Some(insn) = self
623 .vcode
624 .vcode
625 .abi
626 .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
627 {
628 self.emit(insn);
629 }
630
631 // The `args` instruction below must come first. Finish
632 // the current "IR inst" (with a default source location,
633 // as for other special instructions inserted during
634 // lowering) and continue the scan backward.
635 self.finish_ir_inst(Default::default());
636
637 if let Some(insn) = self.vcode.vcode.abi.take_args() {
638 self.emit(insn);
639 }
640 }
641 }
642
643 /// Generate the return instruction.
644 pub fn gen_return(&mut self, rets: Vec<ValueRegs<Reg>>) {
645 let mut out_rets = vec![];
646
647 let mut rets = rets.into_iter();
648 for (i, ret) in self
649 .abi()
650 .signature()
651 .returns
652 .clone()
653 .into_iter()
654 .enumerate()
655 {
656 let regs = if ret.purpose == ArgumentPurpose::StructReturn {
657 self.sret_reg.unwrap()
658 } else {
659 rets.next().unwrap()
660 };
661
662 let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
663 self.vcode.sigs(),
664 i,
665 regs,
666 &mut self.vregs,
667 );
668 out_rets.extend(regs);
669 for insn in insns {
670 self.emit(insn);
671 }
672 }
673
674 // Hack: generate a virtual instruction that uses vmctx in
675 // order to keep it alive for the duration of the function,
676 // for the benefit of debuginfo.
677 if self.f.dfg.values_labels.is_some() {
678 if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
679 if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
680 let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
681 self.emit(I::gen_dummy_use(vmctx_reg));
682 }
683 }
684 }
685
686 let inst = self.abi().gen_rets(out_rets);
687 self.emit(inst);
688 }
689
690 /// Has this instruction been sunk to a use-site (i.e., away from its
691 /// original location)?
692 fn is_inst_sunk(&self, inst: Inst) -> bool {
693 self.inst_sunk.contains(&inst)
694 }
695
696 // Is any result of this instruction needed?
697 fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
698 self.f
699 .dfg
700 .inst_results(inst)
701 .iter()
702 .any(|&result| self.value_lowered_uses[result] > 0)
703 }
704
705 fn lower_clif_block<B: LowerBackend<MInst = I>>(
706 &mut self,
707 backend: &B,
708 block: Block,
709 ctrl_plane: &mut ControlPlane,
710 ) -> CodegenResult<()> {
711 self.cur_scan_entry_color = Some(self.block_end_colors[block]);
712 // Lowering loop:
713 // - For each non-branch instruction, in reverse order:
714 // - If side-effecting (load, store, branch/call/return,
715 // possible trap), or if used outside of this block, or if
716 // demanded by another inst, then lower.
717 //
718 // That's it! Lowering of side-effecting ops will force all *needed*
719 // (live) non-side-effecting ops to be lowered at the right places, via
720 // the `use_input_reg()` callback on the `Lower` (that's us). That's
721 // because `use_input_reg()` sets the eager/demand bit for any insts
722 // whose result registers are used.
723 //
724 // We set the VCodeBuilder to "backward" mode, so we emit
725 // blocks in reverse order wrt the BlockIndex sequence, and
726 // emit instructions in reverse order within blocks. Because
727 // the machine backend calls `ctx.emit()` in forward order, we
728 // collect per-IR-inst lowered instructions in `ir_insts`,
729 // then reverse these and append to the VCode at the end of
730 // each IR instruction.
731 for inst in self.f.layout.block_insts(block).rev() {
732 let data = &self.f.dfg.insts[inst];
733 let has_side_effect = has_lowering_side_effect(self.f, inst);
734 // If inst has been sunk to another location, skip it.
735 if self.is_inst_sunk(inst) {
736 continue;
737 }
738 // Are any outputs used at least once?
739 let value_needed = self.is_any_inst_result_needed(inst);
740 trace!(
741 "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
742 block,
743 inst,
744 data,
745 data.opcode().is_branch(),
746 has_side_effect,
747 value_needed,
748 );
749
750 // Update scan state to color prior to this inst (as we are scanning
751 // backward).
752 self.cur_inst = Some(inst);
753 if has_side_effect {
754 let entry_color = *self
755 .side_effect_inst_entry_colors
756 .get(&inst)
757 .expect("every side-effecting inst should have a color-map entry");
758 self.cur_scan_entry_color = Some(entry_color);
759 }
760
761 // Skip lowering branches; these are handled separately
762 // (see `lower_clif_branches()` below).
763 if self.f.dfg.insts[inst].opcode().is_branch() {
764 continue;
765 }
766
767 // Normal instruction: codegen if the instruction is side-effecting
768 // or any of its outputs is used.
769 if has_side_effect || value_needed {
770 trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
771 let temp_regs = backend.lower(self, inst).unwrap_or_else(|| {
772 let ty = if self.num_outputs(inst) > 0 {
773 Some(self.output_ty(inst, 0))
774 } else {
775 None
776 };
777 panic!(
778 "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
779 self.f.dfg.display_inst(inst),
780 ty
781 )
782 });
783
784 // The ISLE generated code emits its own registers to define the
785 // instruction's lowered values in. However, other instructions
786 // that use this SSA value will be lowered assuming that the value
787 // is generated into a pre-assigned, different, register.
788 //
789 // To connect the two, we set up "aliases" in the VCodeBuilder
790 // that apply when it is building the Operand table for the
791 // regalloc to use. These aliases effectively rewrite any use of
792 // the pre-assigned register to the register that was returned by
793 // the ISLE lowering logic.
794 let results = self.f.dfg.inst_results(inst);
795 debug_assert_eq!(temp_regs.len(), results.len());
796 for (regs, &result) in temp_regs.iter().zip(results) {
797 let dsts = self.value_regs[result];
798 debug_assert_eq!(regs.len(), dsts.len());
799 for (&dst, &temp) in dsts.regs().iter().zip(regs.regs()) {
800 trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
801 self.vregs.set_vreg_alias(dst, temp);
802 }
803 }
804 }
805
806 let start = self.vcode.vcode.num_insts();
807 let loc = self.srcloc(inst);
808 self.finish_ir_inst(loc);
809
810 // If the instruction had a user stack map, forward it from the CLIF
811 // to the vcode.
812 if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
813 let end = self.vcode.vcode.num_insts();
814 debug_assert!(end > start);
815 debug_assert_eq!(
816 (start..end)
817 .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
818 .count(),
819 1
820 );
821 for i in start..end {
822 let iix = InsnIndex::new(i);
823 if self.vcode.vcode[iix].is_safepoint() {
824 trace!(
825 "Adding user stack map from clif\n\n\
826 {inst:?} `{}`\n\n\
827 to vcode\n\n\
828 {iix:?} `{}`",
829 self.f.dfg.display_inst(inst),
830 &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
831 );
832 self.vcode
833 .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
834 break;
835 }
836 }
837 }
838
839 // maybe insert random instruction
840 if ctrl_plane.get_decision() {
841 if ctrl_plane.get_decision() {
842 let imm: u64 = ctrl_plane.get_arbitrary();
843 let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
844 I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
845 } else {
846 let imm: f64 = ctrl_plane.get_arbitrary();
847 let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
848 let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
849 for inst in I::gen_imm_f64(imm, tmp, reg) {
850 self.emit(inst);
851 }
852 }
853 }
854
855 // Emit value-label markers if needed, to later recover
856 // debug mappings. This must happen before the instruction
857 // (so after we emit, in bottom-to-top pass).
858 self.emit_value_label_markers_for_inst(inst);
859 }
860
861 // Add the block params to this block.
862 self.add_block_params(block)?;
863
864 self.cur_scan_entry_color = None;
865 Ok(())
866 }
867
868 fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
        for &param in self.f.dfg.block_params(block) {
            for &reg in self.value_regs[param].regs() {
871 let vreg = reg.to_virtual_reg().unwrap();
872 self.vcode.add_block_param(vreg);
873 }
874 }
875 Ok(())
876 }
877
878 fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
879 if let Some(ref values_labels) = self.f.dfg.values_labels {
880 debug_assert!(self.f.dfg.value_is_real(val));
881 trace!(
882 "get_value_labels: val {} -> {:?}",
883 val,
884 values_labels.get(&val)
885 );
886 match values_labels.get(&val) {
887 Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
888 Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
889 self.get_value_labels(value, depth + 1)
890 }
891 _ => None,
892 }
893 } else {
894 None
895 }
896 }
897
898 fn emit_value_label_marks_for_value(&mut self, val: Value) {
899 let regs = self.value_regs[val];
900 if regs.len() > 1 {
901 return;
902 }
903 let reg = regs.only_reg().unwrap();
904
905 if let Some(label_starts) = self.get_value_labels(val, 0) {
906 let labels = label_starts
907 .iter()
908 .map(|&ValueLabelStart { label, .. }| label)
909 .collect::<FxHashSet<_>>();
910 for label in labels {
911 trace!(
912 "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
913 val,
914 reg,
915 label,
916 );
917 self.vcode.add_value_label(reg, label);
918 }
919 }
920 }
921
922 fn emit_value_label_markers_for_inst(&mut self, inst: Inst) {
923 if self.f.dfg.values_labels.is_none() {
924 return;
925 }
926
927 trace!(
928 "value labeling: srcloc {}: inst {}",
929 self.srcloc(inst),
930 inst
931 );
932 for &val in self.f.dfg.inst_results(inst) {
933 self.emit_value_label_marks_for_value(val);
934 }
935 }
936
937 fn emit_value_label_markers_for_block_args(&mut self, block: Block) {
938 if self.f.dfg.values_labels.is_none() {
939 return;
940 }
941
942 trace!("value labeling: block {}", block);
943 for &arg in self.f.dfg.block_params(block) {
944 self.emit_value_label_marks_for_value(arg);
945 }
946 self.finish_ir_inst(Default::default());
947 }
948
949 fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
950 // The VCodeBuilder builds in reverse order (and reverses at
951 // the end), but `ir_insts` is in forward order, so reverse
952 // it.
953 for inst in self.ir_insts.drain(..).rev() {
954 self.vcode.push(inst, loc);
955 }
956 }
957
958 fn finish_bb(&mut self) {
959 self.vcode.end_bb();
960 }
961
962 fn lower_clif_branches<B: LowerBackend<MInst = I>>(
963 &mut self,
964 backend: &B,
965 // Lowered block index:
966 bindex: BlockIndex,
967 // Original CLIF block:
968 block: Block,
969 branch: Inst,
970 targets: &[MachLabel],
971 ) -> CodegenResult<()> {
972 trace!(
973 "lower_clif_branches: block {} branch {:?} targets {:?}",
974 block,
975 branch,
976 targets,
977 );
978 // When considering code-motion opportunities, consider the current
979 // program point to be this branch.
980 self.cur_inst = Some(branch);
981
982 // Lower the branch in ISLE.
983 backend
984 .lower_branch(self, branch, targets)
985 .unwrap_or_else(|| {
986 panic!(
987 "should be implemented in ISLE: branch = `{}`",
988 self.f.dfg.display_inst(branch),
989 )
990 });
991 let loc = self.srcloc(branch);
992 self.finish_ir_inst(loc);
993 // Add block param outputs for current block.
994 self.lower_branch_blockparam_args(bindex);
995 Ok(())
996 }
997
998 fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
999 // TODO: why not make `block_order` public?
1000 for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
1001 // Avoid immutable borrow by explicitly indexing.
1002 let (opt_inst, succs) = self.vcode.block_order().succ_indices(block);
1003 let inst = opt_inst.expect("lower_branch_blockparam_args called on a critical edge!");
1004 let succ = succs[succ_idx];
1005
1006 // The use of `succ_idx` to index `branch_destination` is valid on the assumption that
1007 // the traversal order defined in `visit_block_succs` mirrors the order returned by
1008 // `branch_destination`. If that assumption is violated, the branch targets returned
1009 // here will not match the clif.
1010 let branches = self.f.dfg.insts[inst].branch_destination(&self.f.dfg.jump_tables);
1011 let branch_args = branches[succ_idx].args_slice(&self.f.dfg.value_lists);
1012
1013 let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
1014 for &arg in branch_args {
1015 debug_assert!(self.f.dfg.value_is_real(arg));
1016 let regs = self.put_value_in_regs(arg);
1017 branch_arg_vregs.extend_from_slice(regs.regs());
1018 }
1019 self.vcode.add_succ(succ, &branch_arg_vregs[..]);
1020 }
1021 }
1022
1023 fn collect_branches_and_targets(
1024 &self,
1025 bindex: BlockIndex,
1026 _bb: Block,
1027 targets: &mut SmallVec<[MachLabel; 2]>,
1028 ) -> Option<Inst> {
1029 targets.clear();
1030 let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
1031 targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
1032 opt_inst
1033 }
1034
1035 /// Lower the function.
1036 pub fn lower<B: LowerBackend<MInst = I>>(
1037 mut self,
1038 backend: &B,
1039 ctrl_plane: &mut ControlPlane,
1040 ) -> CodegenResult<VCode<I>> {
1041 trace!("about to lower function: {:?}", self.f);
1042
1043 self.vcode.init_retval_area(&mut self.vregs)?;
1044
1045 // Get the pinned reg here (we only parameterize this function on `B`,
1046 // not the whole `Lower` impl).
1047 self.pinned_reg = backend.maybe_pinned_reg();
1048
1049 self.vcode.set_entry(BlockIndex::new(0));
1050
1051 // Reused vectors for branch lowering.
1052 let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();
1053
1054 // get a copy of the lowered order; we hold this separately because we
1055 // need a mut ref to the vcode to mutate it below.
1056 let lowered_order: SmallVec<[LoweredBlock; 64]> = self
1057 .vcode
1058 .block_order()
1059 .lowered_order()
1060 .iter()
1061 .cloned()
1062 .collect();
1063
1064 // Main lowering loop over lowered blocks.
1065 for (bindex, lb) in lowered_order.iter().enumerate().rev() {
1066 let bindex = BlockIndex::new(bindex);
1067
1068 // Lower the block body in reverse order (see comment in
1069 // `lower_clif_block()` for rationale).
1070
1071 // End branches.
1072 if let Some(bb) = lb.orig_block() {
1073 if let Some(branch) = self.collect_branches_and_targets(bindex, bb, &mut targets) {
1074 self.lower_clif_branches(backend, bindex, bb, branch, &targets)?;
1075 self.finish_ir_inst(self.srcloc(branch));
1076 }
1077 } else {
1078 // If no orig block, this must be a pure edge block;
1079 // get the successor and emit a jump. Add block params
1080 // according to the one successor, and pass them
1081 // through; note that the successor must have an
1082 // original block.
1083 let (_, succs) = self.vcode.block_order().succ_indices(bindex);
1084 let succ = succs[0];
1085
1086 let orig_succ = lowered_order[succ.index()];
1087 let orig_succ = orig_succ
1088 .orig_block()
1089 .expect("Edge block succ must be body block");
1090
1091 let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
1092 for ty in self.f.dfg.block_param_types(orig_succ) {
1093 let regs = self.vregs.alloc(ty)?;
                    for &reg in regs.regs() {
1095 branch_arg_vregs.push(reg);
1096 let vreg = reg.to_virtual_reg().unwrap();
1097 self.vcode.add_block_param(vreg);
1098 }
1099 }
1100 self.vcode.add_succ(succ, &branch_arg_vregs[..]);
1101
1102 self.emit(I::gen_jump(MachLabel::from_block(succ)));
1103 self.finish_ir_inst(Default::default());
1104 }
1105
1106 // Original block body.
1107 if let Some(bb) = lb.orig_block() {
1108 self.lower_clif_block(backend, bb, ctrl_plane)?;
1109 self.emit_value_label_markers_for_block_args(bb);
1110 }
1111
1112 if bindex.index() == 0 {
1113 // Set up the function with arg vreg inits.
1114 self.gen_arg_setup();
1115 self.finish_ir_inst(Default::default());
1116 }
1117
1118 self.finish_bb();
1119
1120 // Check for any deferred vreg-temp allocation errors, and
1121 // bubble one up at this time if it exists.
1122 if let Some(e) = self.vregs.take_deferred_error() {
1123 return Err(e);
1124 }
1125 }
1126
1127 // Now that we've emitted all instructions into the
1128 // VCodeBuilder, let's build the VCode.
1129 trace!(
1130 "built vcode:\n{:?}Backwards {:?}",
1131 &self.vregs,
1132 &self.vcode.vcode
1133 );
1134 let vcode = self.vcode.build(self.vregs);
1135
1136 Ok(vcode)
1137 }
1138}
1139
1140/// Function-level queries.
1141impl<'func, I: VCodeInst> Lower<'func, I> {
1142 pub fn dfg(&self) -> &DataFlowGraph {
1143 &self.f.dfg
1144 }
1145
1146 /// Get the `Callee`.
1147 pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
1148 self.vcode.abi()
1149 }
1150
1151 /// Get the `Callee`.
1152 pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
1153 self.vcode.abi_mut()
1154 }
1155}
1156
1157/// Instruction input/output queries.
1158impl<'func, I: VCodeInst> Lower<'func, I> {
1159 /// Get the instdata for a given IR instruction.
1160 pub fn data(&self, ir_inst: Inst) -> &InstructionData {
1161 &self.f.dfg.insts[ir_inst]
1162 }
1163
1164 /// Likewise, but starting with a GlobalValue identifier.
1165 pub fn symbol_value_data<'b>(
1166 &'b self,
1167 global_value: GlobalValue,
1168 ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
1169 let gvdata = &self.f.global_values[global_value];
1170 match gvdata {
1171 &GlobalValueData::Symbol {
1172 ref name,
1173 ref offset,
1174 colocated,
1175 ..
1176 } => {
1177 let offset = offset.bits();
1178 let dist = if colocated {
1179 RelocDistance::Near
1180 } else {
1181 RelocDistance::Far
1182 };
1183 Some((name, dist, offset))
1184 }
1185 _ => None,
1186 }
1187 }
1188
1189 /// Returns the memory flags of a given memory access.
1190 pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
1191 match &self.f.dfg.insts[ir_inst] {
1192 &InstructionData::AtomicCas { flags, .. } => Some(flags),
1193 &InstructionData::AtomicRmw { flags, .. } => Some(flags),
1194 &InstructionData::Load { flags, .. }
1195 | &InstructionData::LoadNoOffset { flags, .. }
1196 | &InstructionData::Store { flags, .. } => Some(flags),
1197 &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
1198 _ => None,
1199 }
1200 }
1201
1202 /// Get the source location for a given instruction.
1203 pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
1204 self.f.rel_srclocs()[ir_inst]
1205 }
1206
1207 /// Get the number of inputs to the given IR instruction. This is a count only of the Value
1208 /// arguments to the instruction: block arguments will not be included in this count.
1209 pub fn num_inputs(&self, ir_inst: Inst) -> usize {
1210 self.f.dfg.inst_args(ir_inst).len()
1211 }
1212
1213 /// Get the number of outputs to the given IR instruction.
1214 pub fn num_outputs(&self, ir_inst: Inst) -> usize {
1215 self.f.dfg.inst_results(ir_inst).len()
1216 }
1217
1218 /// Get the type for an instruction's input.
1219 pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1220 self.value_ty(self.input_as_value(ir_inst, idx))
1221 }
1222
1223 /// Get the type for a value.
1224 pub fn value_ty(&self, val: Value) -> Type {
1225 self.f.dfg.value_type(val)
1226 }
1227
1228 /// Get the type for an instruction's output.
1229 pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1230 self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
1231 }
1232
1233 /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
1234 /// value, if possible.
1235 pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
1236 self.inst_constants.get(&ir_inst).map(|&c| {
1237 // The upper bits must be zero, enforced during legalization and by
1238 // the CLIF verifier.
1239 debug_assert_eq!(c, {
1240 let input_size = self.output_ty(ir_inst, 0).bits() as u64;
1241 let shift = 64 - input_size;
1242 (c << shift) >> shift
1243 });
1244 c
1245 })
1246 }
1247
    /// Resolves a particular input of an instruction to the `Value` that it is
    /// represented with.
    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        debug_assert!(self.f.dfg.value_is_real(val));
        val
    }

    /// Get the input as one of two options other than a direct register:
    ///
    /// - An instruction, given that it is effect-free or able to sink its
    ///   effect to the current instruction being lowered, and given it has only
    ///   one output, and if effect-ful, given that this is the only use;
    /// - A constant, if the value is a constant.
    ///
    /// The instruction input may be available in either of these forms. It may
    /// be available in neither form, if the conditions are not met; if so, use
    /// `put_input_in_regs()` instead to get it in a register.
    ///
    /// If the backend merges the effect of a side-effecting instruction, it
    /// must call `sink_inst()`. When this is called, it indicates that the
    /// effect has been sunk to the current scan location. The sunk
    /// instruction's result(s) must have *no* uses remaining, because it will
    /// not be codegen'd (it has been integrated into the current instruction).
    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
        let val = self.input_as_value(ir_inst, idx);
        self.get_value_as_source_or_const(val)
    }

    /// Like `get_input_as_source_or_const` but with a `Value`.
1278 pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
1279 trace!(
1280 "get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
1281 val,
1282 self.cur_inst,
1283 self.cur_scan_entry_color,
1284 );
1285 let inst = match self.f.dfg.value_def(val) {
1286 // OK to merge source instruction if (i) we have a source
1287 // instruction, and:
1288 // - It has no side-effects, OR
1289 // - It has a side-effect, has one output value, that one
1290 // output has only one use, directly or indirectly (so
1291 // cannot be duplicated -- see comment on
1292 // `ValueUseState`), and the instruction's color is *one
1293 // less than* the current scan color.
1294 //
1295 // This latter set of conditions is testing whether a
1296 // side-effecting instruction can sink to the current scan
1297 // location; this is possible if the in-color of this inst is
1298 // equal to the out-color of the producing inst, so no other
1299 // side-effecting ops occur between them (which will only be true
1300 // if they are in the same BB, because color increments at each BB
1301 // start).
1302 //
1303 // If it is actually sunk, then in `merge_inst()`, we update the
1304 // scan color so that as we scan over the range past which the
1305 // instruction was sunk, we allow other instructions (that came
1306 // prior to the sunk instruction) to sink.
1307 ValueDef::Result(src_inst, result_idx) => {
1308 let src_side_effect = has_lowering_side_effect(self.f, src_inst);
1309 trace!(" -> src inst {}", src_inst);
1310 trace!(" -> has lowering side effect: {}", src_side_effect);
1311 if !src_side_effect {
1312 // Pure instruction: always possible to
1313 // sink. Let's determine whether we are the only
1314 // user or not.
1315 if self.value_ir_uses[val] == ValueUseState::Once {
1316 InputSourceInst::UniqueUse(src_inst, result_idx)
1317 } else {
1318 InputSourceInst::Use(src_inst, result_idx)
1319 }
1320 } else {
1321 // Side-effect: test whether this is the only use of the
1322 // only result of the instruction, and whether colors allow
1323 // the code-motion.
1324 trace!(
1325 " -> side-effecting op {} for val {}: use state {:?}",
1326 src_inst,
1327 val,
1328 self.value_ir_uses[val]
1329 );
1330 if self.cur_scan_entry_color.is_some()
1331 && self.value_ir_uses[val] == ValueUseState::Once
1332 && self.num_outputs(src_inst) == 1
1333 && self
1334 .side_effect_inst_entry_colors
1335 .get(&src_inst)
1336 .unwrap()
1337 .get()
1338 + 1
1339 == self.cur_scan_entry_color.unwrap().get()
1340 {
1341 InputSourceInst::UniqueUse(src_inst, 0)
1342 } else {
1343 InputSourceInst::None
1344 }
1345 }
1346 }
1347 _ => InputSourceInst::None,
1348 };
1349 let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));
1350
1351 NonRegInput { inst, constant }
1352 }
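
    // A sketch of typical backend-side usage, assuming a hypothetical consumer
    // instruction `add_inst` whose operand 0 we are examining (hand-written
    // here for illustration; in practice this pattern comes from generated
    // ISLE code):
    //
    //     let input = ctx.get_input_as_source_or_const(add_inst, 0);
    //     if let Some(c) = input.constant {
    //         // Use an immediate form of the operation.
    //     } else if let InputSourceInst::UniqueUse(src_inst, 0) = input.inst {
    //         // If `src_inst` is, say, a load, fold it into a load-op form; a
    //         // sunk side-effecting instruction must then be marked so it is
    //         // not also codegen'd at its original location.
    //         ctx.sink_inst(src_inst);
    //     } else {
    //         // Fall back to using the value in its register(s).
    //         let val = ctx.input_as_value(add_inst, 0);
    //         let regs = ctx.put_value_in_regs(val);
    //     }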
1353
1354 /// Increment the reference count for the Value, ensuring that it gets lowered.
1355 pub fn increment_lowered_uses(&mut self, val: Value) {
1356 self.value_lowered_uses[val] += 1
1357 }
1358
1359 /// Put the `idx`th input into register(s) and return the assigned register.
1360 pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
1361 let val = self.f.dfg.inst_args(ir_inst)[idx];
1362 self.put_value_in_regs(val)
1363 }
1364
1365 /// Put the given value into register(s) and return the assigned register.
1366 pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
1367 debug_assert!(self.f.dfg.value_is_real(val));
1368 trace!("put_value_in_regs: val {}", val);
1369
1370 if let Some(inst) = self.f.dfg.value_def(val).inst() {
1371 assert!(!self.inst_sunk.contains(&inst));
1372 }
1373
1374 let regs = self.value_regs[val];
1375 trace!(" -> regs {:?}", regs);
1376 assert!(regs.is_valid());
1377
1378 self.value_lowered_uses[val] += 1;
1379
1380 regs
1381 }
1382}
1383
1384/// Codegen primitives: allocate temps, emit instructions, set result registers,
1385/// ask for an input to be gen'd into a register.
1386impl<'func, I: VCodeInst> Lower<'func, I> {
1387 /// Get a new temp.
1388 pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
1389 writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
1390 }
1391
1392 /// Emit a machine instruction.
1393 pub fn emit(&mut self, mach_inst: I) {
1394 trace!("emit: {:?}", mach_inst);
1395 self.ir_insts.push(mach_inst);
1396 }
1397
    /// Indicate that the side-effect of an instruction has been sunk to the
    /// current scan location. This should only be done when the instruction's
    /// original results are not used (i.e., `put_input_in_regs` is not invoked
    /// for the input produced by the sunk instruction); otherwise the
    /// side-effect will occur twice.
1403 pub fn sink_inst(&mut self, ir_inst: Inst) {
1404 assert!(has_lowering_side_effect(self.f, ir_inst));
1405 assert!(self.cur_scan_entry_color.is_some());
1406
1407 for result in self.dfg().inst_results(ir_inst) {
1408 assert!(self.value_lowered_uses[*result] == 0);
1409 }
1410
1411 let sunk_inst_entry_color = self
1412 .side_effect_inst_entry_colors
1413 .get(&ir_inst)
1414 .cloned()
1415 .unwrap();
1416 let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
1417 assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
1418 self.cur_scan_entry_color = Some(sunk_inst_entry_color);
1419 self.inst_sunk.insert(ir_inst);
1420 }
1421
1422 /// Retrieve immediate data given a handle.
1423 pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
1424 self.f.dfg.immediates.get(imm).unwrap()
1425 }
1426
1427 /// Retrieve constant data given a handle.
1428 pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
1429 self.f.dfg.constants.get(constant_handle)
1430 }
1431
1432 /// Indicate that a constant should be emitted.
1433 pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
1434 self.vcode.constants().insert(constant)
1435 }
1436
1437 /// Cause the value in `reg` to be in a virtual reg, by copying it into a new virtual reg
1438 /// if `reg` is a real reg. `ty` describes the type of the value in `reg`.
1439 pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
1440 if reg.to_virtual_reg().is_some() {
1441 reg
1442 } else {
1443 let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
1444 self.emit(I::gen_move(new_reg, reg, ty));
1445 new_reg.to_reg()
1446 }
1447 }
1448
1449 /// Add a range fact to a register, if no other fact is present.
1450 pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
1451 if self.flags.enable_pcc() {
1452 self.vregs.set_fact_if_missing(
1453 reg.to_virtual_reg().unwrap(),
1454 Fact::Range {
1455 bit_width,
1456 min,
1457 max,
1458 },
1459 );
1460 }
1461 }
1462}