use crate::entity::SecondaryMap;
use crate::fx::{FxHashMap, FxHashSet};
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::{
types::{FFLAGS, IFLAGS},
ArgumentPurpose, Block, Constant, ConstantData, DataFlowGraph, ExternalName, Function,
GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags, Opcode, RelSourceLoc,
Type, Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
};
use crate::machinst::{
non_writable_value_regs, writable_value_regs, BlockIndex, BlockLoweringOrder, Callee,
LoweredBlock, MachLabel, Reg, SigSet, VCode, VCodeBuilder, VCodeConstant, VCodeConstantData,
VCodeConstants, VCodeInst, ValueRegs, Writable,
};
use crate::{trace, CodegenError, CodegenResult};
use alloc::vec::Vec;
use regalloc2::VReg;
use smallvec::{smallvec, SmallVec};
use std::fmt::Debug;
use super::{first_user_vreg_index, VCodeBuildDirection};
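/// An "instruction color" partitions the instructions of a block: the color is
/// incremented at every side-effecting instruction, so two program points share
/// a color exactly when no side effect occurs between them. Colors are used to
/// decide when a side-effecting producer may be merged (sunk) into its user.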
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
fn new(n: u32) -> InstColor {
InstColor(n)
}
pub fn get(self) -> u32 {
self.0
}
}
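/// A non-register source for an instruction input, as returned by
/// `get_value_as_source_or_const`: the producing instruction (if it may be
/// merged into the current lowering) and/or a known 64-bit constant.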
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
pub inst: InputSourceInst,
pub constant: Option<u64>,
}
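/// The instruction producing an input value, if known. `UniqueUse` means this
/// is the value's only use, so the producer may be merged and its own lowering
/// skipped; `Use` means the producer may be merged but must still be lowered
/// separately; `None` means the value must be taken from a register. The
/// `usize` is the producer's result index.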
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
UniqueUse(Inst, usize),
Use(Inst, usize),
None,
}
impl InputSourceInst {
pub fn as_inst(&self) -> Option<(Inst, usize)> {
match self {
&InputSourceInst::UniqueUse(inst, output_idx)
| &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
&InputSourceInst::None => None,
}
}
}
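/// A machine backend's lowering entry points: `lower` translates one CLIF
/// instruction into machine instructions via the `Lower` context, and
/// `lower_branch_group` lowers the branch(es) terminating a block given their
/// resolved `MachLabel` targets.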
pub trait LowerBackend {
type MInst: VCodeInst;
fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> CodegenResult<()>;
fn lower_branch_group(
&self,
ctx: &mut Lower<Self::MInst>,
insts: &[Inst],
targets: &[MachLabel],
) -> CodegenResult<()>;
fn maybe_pinned_reg(&self) -> Option<Reg> {
None
}
}
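/// Machine-independent lowering driver. Walks the CLIF function backward, block
/// by block, invoking the backend on each instruction and collecting the
/// resulting machine instructions into a `VCodeBuilder`.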
pub struct Lower<'func, I: VCodeInst> {
f: &'func Function,
vcode: VCodeBuilder<I>,
value_regs: SecondaryMap<Value, ValueRegs<Reg>>,
retval_regs: Vec<ValueRegs<Reg>>,
block_end_colors: SecondaryMap<Block, InstColor>,
side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,
cur_scan_entry_color: Option<InstColor>,
cur_inst: Option<Inst>,
inst_constants: FxHashMap<Inst, u64>,
value_ir_uses: SecondaryMap<Value, ValueUseState>,
value_lowered_uses: SecondaryMap<Value, u32>,
inst_sunk: FxHashSet<Inst>,
next_vreg: usize,
ir_insts: Vec<I>,
pinned_reg: Option<Reg>,
}
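/// How many times a value is used in the CLIF input, saturating at `Multiple`.
/// A producer is only eligible for unique-use merging when its result is `Once`.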
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
Unused,
Once,
Multiple,
}
impl ValueUseState {
fn inc(&mut self) {
let new = match self {
Self::Unused => Self::Once,
Self::Once | Self::Multiple => Self::Multiple,
};
*self = new;
}
}
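/// Whether a symbol reference is known to be within near call/branch range of
/// the generated code, or may be arbitrarily far away.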
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
Near,
Far,
}
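/// Allocate the virtual register(s) needed to hold a value of type `ty`,
/// recording each vreg's register-class type in the `VCodeBuilder`.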
fn alloc_vregs<I: VCodeInst>(
ty: Type,
next_vreg: &mut usize,
vcode: &mut VCodeBuilder<I>,
) -> CodegenResult<ValueRegs<Reg>> {
let v = *next_vreg;
let (regclasses, tys) = I::rc_for_type(ty)?;
*next_vreg += regclasses.len();
if *next_vreg >= VReg::MAX {
return Err(CodegenError::CodeTooLarge);
}
let regs: ValueRegs<Reg> = match regclasses {
&[rc0] => ValueRegs::one(VReg::new(v, rc0).into()),
&[rc0, rc1] => ValueRegs::two(VReg::new(v, rc0).into(), VReg::new(v + 1, rc1).into()),
_ => panic!("Value must reside in 1 or 2 registers"),
};
for (&reg_ty, &reg) in tys.iter().zip(regs.regs().iter()) {
vcode.set_vreg_type(reg.to_virtual_reg().unwrap(), reg_ty);
}
Ok(regs)
}
impl<'func, I: VCodeInst> Lower<'func, I> {
pub fn new(
f: &'func Function,
abi: Callee<I::ABIMachineSpec>,
emit_info: I::Info,
block_order: BlockLoweringOrder,
sigs: SigSet,
) -> CodegenResult<Lower<'func, I>> {
let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
let mut vcode = VCodeBuilder::new(
sigs,
abi,
emit_info,
block_order,
constants,
VCodeBuildDirection::Backward,
);
let mut next_vreg: usize = first_user_vreg_index();
let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
for bb in f.layout.blocks() {
for &param in f.dfg.block_params(bb) {
let ty = f.dfg.value_type(param);
if value_regs[param].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[param] = regs;
trace!("bb {} param {}: regs {:?}", bb, param, regs);
}
}
for inst in f.layout.block_insts(bb) {
for &result in f.dfg.inst_results(inst) {
let ty = f.dfg.value_type(result);
if value_regs[result].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[result] = regs;
trace!(
"bb {} inst {} ({:?}): result {} regs {:?}",
bb,
inst,
f.dfg[inst],
result,
regs,
);
}
}
}
}
let mut retval_regs = vec![];
for ret in &vcode.abi().signature().returns.clone() {
let regs = alloc_vregs(ret.value_type, &mut next_vreg, &mut vcode)?;
retval_regs.push(regs);
trace!("retval gets regs {:?}", regs);
}
let mut cur_color = 0;
let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
let mut side_effect_inst_entry_colors = FxHashMap::default();
let mut inst_constants = FxHashMap::default();
for bb in f.layout.blocks() {
cur_color += 1;
for inst in f.layout.block_insts(bb) {
let side_effect = has_lowering_side_effect(f, inst);
trace!("bb {} inst {} has color {}", bb, inst, cur_color);
if side_effect {
side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
trace!(" -> side-effecting; incrementing color for next inst");
cur_color += 1;
}
if let Some(c) = is_constant_64bit(f, inst) {
trace!(" -> constant: {}", c);
inst_constants.insert(inst, c);
}
}
block_end_colors[bb] = InstColor::new(cur_color);
}
let value_ir_uses = Self::compute_use_states(f);
Ok(Lower {
f,
vcode,
value_regs,
retval_regs,
block_end_colors,
side_effect_inst_entry_colors,
inst_constants,
next_vreg,
value_ir_uses,
value_lowered_uses: SecondaryMap::default(),
inst_sunk: FxHashSet::default(),
cur_scan_entry_color: None,
cur_inst: None,
ir_insts: vec![],
pinned_reg: None,
})
}
pub fn sigs(&self) -> &SigSet {
self.vcode.sigs()
}
pub fn sigs_mut(&mut self) -> &mut SigSet {
self.vcode.sigs_mut()
}
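/// Pre-analysis: compute a `ValueUseState` for every value. Arguments of
/// multi-result instructions are forced to `Multiple`, and when a value first
/// becomes `Multiple`, the transitive tree of producers feeding it is marked
/// `Multiple` as well, since merging any of them into more than one user would
/// duplicate their computation.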
fn compute_use_states<'a>(f: &'a Function) -> SecondaryMap<Value, ValueUseState> {
let mut value_ir_uses: SecondaryMap<Value, ValueUseState> =
SecondaryMap::with_default(ValueUseState::Unused);
type StackVec<'a> = SmallVec<[std::slice::Iter<'a, Value>; 16]>;
let mut stack: StackVec = smallvec![];
let push_args_on_stack = |stack: &mut StackVec<'a>, value| {
trace!(" -> pushing args for {} onto stack", value);
if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
stack.push(f.dfg.inst_args(src_inst).iter());
}
};
let mark_all_uses_as_multiple =
|value_ir_uses: &mut SecondaryMap<Value, ValueUseState>, stack: &mut StackVec<'a>| {
while let Some(iter) = stack.last_mut() {
if let Some(&value) = iter.next() {
let value = f.dfg.resolve_aliases(value);
trace!(" -> DFS reaches {}", value);
if value_ir_uses[value] == ValueUseState::Multiple {
#[cfg(debug_assertions)]
{
if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
debug_assert!(f.dfg.inst_args(src_inst).iter().all(|&arg| {
let arg = f.dfg.resolve_aliases(arg);
value_ir_uses[arg] == ValueUseState::Multiple
}));
}
}
continue;
}
value_ir_uses[value] = ValueUseState::Multiple;
trace!(" -> became Multiple");
push_args_on_stack(stack, value);
} else {
stack.pop();
}
}
};
for inst in f
.layout
.blocks()
.flat_map(|block| f.layout.block_insts(block))
{
let force_multiple = f.dfg.inst_results(inst).len() > 1;
for &arg in f.dfg.inst_args(inst) {
let arg = f.dfg.resolve_aliases(arg);
let old = value_ir_uses[arg];
if force_multiple {
trace!(
"forcing arg {} to Multiple because of multiple results of user inst",
arg
);
value_ir_uses[arg] = ValueUseState::Multiple;
} else {
value_ir_uses[arg].inc();
}
let new = value_ir_uses[arg];
trace!("arg {} used, old state {:?}, new {:?}", arg, old, new,);
if old != ValueUseState::Multiple && new == ValueUseState::Multiple {
push_args_on_stack(&mut stack, arg);
mark_all_uses_as_multiple(&mut value_ir_uses, &mut stack);
}
}
}
value_ir_uses
}
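/// Emit the argument setup for the entry block: copy incoming ABI arguments
/// into the vregs assigned to the entry block's parameters, forward the
/// struct-return pointer into the corresponding return-value vreg, and set up
/// the return-value area if the ABI requires one.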
fn gen_arg_setup(&mut self) {
if let Some(entry_bb) = self.f.layout.entry_block() {
trace!(
"gen_arg_setup: entry BB {} args are:\n{:?}",
entry_bb,
self.f.dfg.block_params(entry_bb)
);
if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
self.emit_value_label_marks_for_value(vmctx_val);
}
for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
if !self.vcode.abi().arg_is_needed_in_body(i) {
continue;
}
let regs = writable_value_regs(self.value_regs[*param]);
for insn in self
.vcode
.vcode
.abi
.gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs)
.into_iter()
{
self.emit(insn);
}
if self.abi().signature().params[i].purpose == ArgumentPurpose::StructReturn {
assert!(regs.len() == 1);
let ty = self.abi().signature().params[i].value_type;
let struct_ret_idx = self
.abi()
.signature()
.returns
.iter()
.position(|ret| ret.purpose == ArgumentPurpose::StructReturn)
.expect("StructReturn return value not present!");
self.emit(I::gen_move(
Writable::from_reg(self.retval_regs[struct_ret_idx].regs()[0]),
regs.regs()[0].to_reg(),
ty,
));
}
}
if let Some(insn) = self
.vcode
.vcode
.abi
.gen_retval_area_setup(&self.vcode.vcode.sigs)
{
self.emit(insn);
}
self.finish_ir_inst(Default::default());
if let Some(insn) = self.vcode.vcode.abi.take_args() {
self.emit(insn);
}
}
}
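/// Copy return values from their vregs into the ABI-specified locations and
/// emit the return instruction.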
fn gen_retval_setup(&mut self) {
let retval_regs = self.retval_regs.clone();
for (i, regs) in retval_regs.into_iter().enumerate() {
let regs = writable_value_regs(regs);
for insn in self
.vcode
.abi()
.gen_copy_regs_to_retval(self.sigs(), i, regs)
.into_iter()
{
self.emit(insn);
}
}
let inst = self.vcode.abi().gen_ret(self.sigs());
self.emit(inst);
if self.f.dfg.values_labels.is_some() {
if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
self.emit(I::gen_dummy_use(vmctx_reg));
}
}
}
fn is_inst_sunk(&self, inst: Inst) -> bool {
self.inst_sunk.contains(&inst)
}
fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
self.f
.dfg
.inst_results(inst)
.iter()
.any(|&result| self.value_lowered_uses[result] > 0)
}
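/// Lower a single CLIF block: scan its instructions in reverse, lowering each
/// non-branch instruction that has a side effect or whose results are still
/// needed, then declare the block's parameters to the VCode builder.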
fn lower_clif_block<B: LowerBackend<MInst = I>>(
&mut self,
backend: &B,
block: Block,
) -> CodegenResult<()> {
self.cur_scan_entry_color = Some(self.block_end_colors[block]);
for inst in self.f.layout.block_insts(block).rev() {
let data = &self.f.dfg[inst];
let has_side_effect = has_lowering_side_effect(self.f, inst);
if self.is_inst_sunk(inst) {
continue;
}
let value_needed = self.is_any_inst_result_needed(inst);
trace!(
"lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
block,
inst,
data,
data.opcode().is_branch(),
has_side_effect,
value_needed,
);
self.cur_inst = Some(inst);
if has_side_effect {
let entry_color = *self
.side_effect_inst_entry_colors
.get(&inst)
.expect("every side-effecting inst should have a color-map entry");
self.cur_scan_entry_color = Some(entry_color);
}
if self.f.dfg[inst].opcode().is_branch() {
continue;
}
if has_side_effect || value_needed {
trace!("lowering: inst {}: {:?}", inst, self.f.dfg[inst]);
backend.lower(self, inst)?;
}
if data.opcode().is_return() {
self.gen_retval_setup();
}
let loc = self.srcloc(inst);
self.finish_ir_inst(loc);
self.emit_value_label_markers_for_inst(inst);
}
self.add_block_params(block)?;
self.cur_scan_entry_color = None;
Ok(())
}
fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
for &param in self.f.dfg.block_params(block) {
let ty = self.f.dfg.value_type(param);
let (_reg_rcs, reg_tys) = I::rc_for_type(ty)?;
debug_assert_eq!(reg_tys.len(), self.value_regs[param].len());
for (&reg, &rty) in self.value_regs[param].regs().iter().zip(reg_tys.iter()) {
self.vcode
.add_block_param(reg.to_virtual_reg().unwrap(), rty);
}
}
Ok(())
}
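/// Look up the debug value-label assignments for `val`, following value-label
/// aliases up to a small depth limit.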
fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
if let Some(ref values_labels) = self.f.dfg.values_labels {
trace!(
"get_value_labels: val {} -> {} -> {:?}",
val,
self.f.dfg.resolve_aliases(val),
values_labels.get(&self.f.dfg.resolve_aliases(val))
);
let val = self.f.dfg.resolve_aliases(val);
match values_labels.get(&val) {
Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
self.get_value_labels(value, depth + 1)
}
_ => None,
}
} else {
None
}
}
fn emit_value_label_marks_for_value(&mut self, val: Value) {
let regs = self.value_regs[val];
if regs.len() > 1 {
return;
}
let reg = regs.only_reg().unwrap();
if let Some(label_starts) = self.get_value_labels(val, 0) {
let labels = label_starts
.iter()
.map(|&ValueLabelStart { label, .. }| label)
.collect::<FxHashSet<_>>();
for label in labels {
trace!(
"value labeling: defines val {:?} -> reg {:?} -> label {:?}",
val,
reg,
label,
);
self.vcode.add_value_label(reg, label);
}
}
}
fn emit_value_label_markers_for_inst(&mut self, inst: Inst) {
if self.f.dfg.values_labels.is_none() {
return;
}
trace!(
"value labeling: srcloc {}: inst {}",
self.srcloc(inst),
inst
);
for &val in self.f.dfg.inst_results(inst) {
self.emit_value_label_marks_for_value(val);
}
}
fn emit_value_label_markers_for_block_args(&mut self, block: Block) {
if self.f.dfg.values_labels.is_none() {
return;
}
trace!("value labeling: block {}", block);
for &arg in self.f.dfg.block_params(block) {
self.emit_value_label_marks_for_value(arg);
}
self.finish_ir_inst(Default::default());
}
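/// Flush the machine instructions buffered for the current CLIF instruction
/// into the VCode builder. They are pushed in reverse so that, once the
/// backward-built VCode is reversed at the end, they appear in emission order.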
fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
self.vcode.set_srcloc(loc);
for inst in self.ir_insts.drain(..).rev() {
self.vcode.push(inst);
}
}
fn finish_bb(&mut self) {
self.vcode.end_bb();
}
fn lower_clif_branches<B: LowerBackend<MInst = I>>(
&mut self,
backend: &B,
bindex: BlockIndex,
block: Block,
branches: &SmallVec<[Inst; 2]>,
targets: &SmallVec<[MachLabel; 2]>,
) -> CodegenResult<()> {
trace!(
"lower_clif_branches: block {} branches {:?} targets {:?}",
block,
branches,
targets,
);
self.cur_inst = Some(branches[0]);
backend.lower_branch_group(self, branches, targets)?;
let loc = self.srcloc(branches[0]);
self.finish_ir_inst(loc);
self.lower_branch_blockparam_args(bindex);
Ok(())
}
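/// For each successor of `block`, collect the vregs carrying the branch's
/// block-parameter arguments and register them on the successor edge.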
fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
for succ_idx in 0..self.vcode.block_order().succ_indices(block).len() {
let (inst, succ) = self.vcode.block_order().succ_indices(block)[succ_idx];
let branch_args = self.f.dfg.inst_variable_args(inst);
let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
for &arg in branch_args {
let arg = self.f.dfg.resolve_aliases(arg);
let regs = self.put_value_in_regs(arg);
for &vreg in regs.regs() {
let vreg = self.vcode.resolve_vreg_alias(vreg.into());
branch_arg_vregs.push(vreg.into());
}
}
self.vcode.add_succ(succ, &branch_arg_vregs[..]);
}
self.finish_ir_inst(Default::default());
}
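/// Gather the branch instruction(s) terminating this lowered block and one
/// `MachLabel` per successor; a `br_table` contributes a single instruction
/// with multiple targets.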
fn collect_branches_and_targets(
&self,
bindex: BlockIndex,
_bb: Block,
branches: &mut SmallVec<[Inst; 2]>,
targets: &mut SmallVec<[MachLabel; 2]>,
) {
branches.clear();
targets.clear();
let mut last_inst = None;
for &(inst, succ) in self.vcode.block_order().succ_indices(bindex) {
if last_inst != Some(inst) {
branches.push(inst);
} else {
debug_assert!(self.f.dfg[inst].opcode() == Opcode::BrTable);
debug_assert!(branches.len() == 1);
}
last_inst = Some(inst);
targets.push(MachLabel::from_block(succ));
}
}
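/// Main entry point: lower the entire function. Visits the lowered block order
/// in reverse, lowering branches, block bodies, and (for the entry block) the
/// argument setup, then asks the builder to produce the final `VCode`.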
pub fn lower<B: LowerBackend<MInst = I>>(mut self, backend: &B) -> CodegenResult<VCode<I>> {
trace!("about to lower function: {:?}", self.f);
let temps = self
.vcode
.abi()
.temps_needed(self.sigs())
.into_iter()
.map(|temp_ty| self.alloc_tmp(temp_ty).only_reg().unwrap())
.collect::<Vec<_>>();
self.vcode.init_abi(temps);
self.pinned_reg = backend.maybe_pinned_reg();
self.vcode.set_entry(BlockIndex::new(0));
let mut branches: SmallVec<[Inst; 2]> = SmallVec::new();
let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();
let lowered_order: SmallVec<[LoweredBlock; 64]> = self
.vcode
.block_order()
.lowered_order()
.iter()
.cloned()
.collect();
for (bindex, lb) in lowered_order.iter().enumerate().rev() {
let bindex = BlockIndex::new(bindex);
if let Some(bb) = lb.orig_block() {
self.collect_branches_and_targets(bindex, bb, &mut branches, &mut targets);
if branches.len() > 0 {
self.lower_clif_branches(backend, bindex, bb, &branches, &targets)?;
self.finish_ir_inst(self.srcloc(branches[0]));
}
} else {
let (_, succ) = self.vcode.block_order().succ_indices(bindex)[0];
let orig_succ = lowered_order[succ.index()];
let orig_succ = orig_succ
.orig_block()
.expect("Edge block succ must be body block");
let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
for ty in self.f.dfg.block_param_types(orig_succ) {
let regs = alloc_vregs(ty, &mut self.next_vreg, &mut self.vcode)?;
for &reg in regs.regs() {
branch_arg_vregs.push(reg);
let vreg = reg.to_virtual_reg().unwrap();
self.vcode
.add_block_param(vreg, self.vcode.get_vreg_type(vreg));
}
}
self.vcode.add_succ(succ, &branch_arg_vregs[..]);
self.emit(I::gen_jump(MachLabel::from_block(succ)));
self.finish_ir_inst(Default::default());
}
if let Some(bb) = lb.orig_block() {
self.lower_clif_block(backend, bb)?;
self.emit_value_label_markers_for_block_args(bb);
}
if bindex.index() == 0 {
self.gen_arg_setup();
self.finish_ir_inst(Default::default());
}
self.finish_bb();
}
let vcode = self.vcode.build();
trace!("built vcode: {:?}", vcode);
Ok(vcode)
}
}
impl<'func, I: VCodeInst> Lower<'func, I> {
pub fn dfg(&self) -> &DataFlowGraph {
&self.f.dfg
}
pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
self.vcode.abi()
}
pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
self.vcode.abi_mut()
}
pub fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>> {
writable_value_regs(self.retval_regs[idx])
}
}
impl<'func, I: VCodeInst> Lower<'func, I> {
pub fn data(&self, ir_inst: Inst) -> &InstructionData {
&self.f.dfg[ir_inst]
}
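/// If `global_value` refers to a symbol, return its name, relocation distance,
/// and offset.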
pub fn symbol_value_data<'b>(
&'b self,
global_value: GlobalValue,
) -> Option<(&'b ExternalName, RelocDistance, i64)> {
let gvdata = &self.f.global_values[global_value];
match gvdata {
&GlobalValueData::Symbol {
ref name,
ref offset,
..
} => {
let offset = offset.bits();
let dist = gvdata.maybe_reloc_distance().unwrap();
Some((name, dist, offset))
}
_ => None,
}
}
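/// Return the `MemFlags` attached to a memory-accessing instruction, if any.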
pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
match &self.f.dfg[ir_inst] {
&InstructionData::AtomicCas { flags, .. } => Some(flags),
&InstructionData::AtomicRmw { flags, .. } => Some(flags),
&InstructionData::Load { flags, .. }
| &InstructionData::LoadNoOffset { flags, .. }
| &InstructionData::Store { flags, .. } => Some(flags),
&InstructionData::StoreNoOffset { flags, .. } => Some(flags),
_ => None,
}
}
pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
self.f.rel_srclocs()[ir_inst]
}
pub fn num_inputs(&self, ir_inst: Inst) -> usize {
self.f.dfg.inst_args(ir_inst).len()
}
pub fn num_outputs(&self, ir_inst: Inst) -> usize {
self.f.dfg.inst_results(ir_inst).len()
}
pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
self.value_ty(self.input_as_value(ir_inst, idx))
}
pub fn value_ty(&self, val: Value) -> Type {
self.f.dfg.value_type(val)
}
pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
}
pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
self.inst_constants.get(&ir_inst).cloned()
}
pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
let val = self.f.dfg.inst_args(ir_inst)[idx];
self.f.dfg.resolve_aliases(val)
}
pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
let val = self.input_as_value(ir_inst, idx);
self.get_value_as_source_or_const(val)
}
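/// Resolve a value to its producing instruction and/or constant, if the
/// producer may be merged into the current lowering. Pure producers are always
/// eligible (uniquely so when used once); a side-effecting producer is eligible
/// only when the value is used once, the producer has a single result, and no
/// other side effect separates it from the current instruction (checked via
/// instruction colors).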
pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
trace!(
"get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
val,
self.cur_inst,
self.cur_scan_entry_color,
);
let inst = match self.f.dfg.value_def(val) {
ValueDef::Result(src_inst, result_idx) => {
let src_side_effect = has_lowering_side_effect(self.f, src_inst);
trace!(" -> src inst {}", src_inst);
trace!(" -> has lowering side effect: {}", src_side_effect);
if !src_side_effect {
if self.value_ir_uses[val] == ValueUseState::Once {
InputSourceInst::UniqueUse(src_inst, result_idx)
} else {
InputSourceInst::Use(src_inst, result_idx)
}
} else {
trace!(
" -> side-effecting op {} for val {}: use state {:?}",
src_inst,
val,
self.value_ir_uses[val]
);
if self.cur_scan_entry_color.is_some()
&& self.value_ir_uses[val] == ValueUseState::Once
&& self.num_outputs(src_inst) == 1
&& self
.side_effect_inst_entry_colors
.get(&src_inst)
.unwrap()
.get()
+ 1
== self.cur_scan_entry_color.unwrap().get()
{
InputSourceInst::UniqueUse(src_inst, 0)
} else {
InputSourceInst::None
}
}
}
_ => InputSourceInst::None,
};
let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));
NonRegInput { inst, constant }
}
pub fn increment_lowered_uses(&mut self, val: Value) {
self.value_lowered_uses[val] += 1
}
pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
let val = self.f.dfg.inst_args(ir_inst)[idx];
self.put_value_in_regs(val)
}
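/// Ensure `val` is available in registers and return them. Known constants are
/// rematerialized into fresh temporaries; otherwise the value's pre-assigned
/// vregs are returned and its lowered-use count is incremented, with the pinned
/// register substituted for `get_pinned_reg` results.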
pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
let val = self.f.dfg.resolve_aliases(val);
trace!("put_value_in_regs: val {}", val);
let ty = self.f.dfg.value_type(val);
assert!(ty != IFLAGS && ty != FFLAGS);
if let Some(inst) = self.f.dfg.value_def(val).inst() {
assert!(!self.inst_sunk.contains(&inst));
}
if let Some(c) = self
.f
.dfg
.value_def(val)
.inst()
.and_then(|inst| self.get_constant(inst))
{
let regs = self.alloc_tmp(ty);
trace!(" -> regs {:?}", regs);
assert!(regs.is_valid());
let insts = I::gen_constant(regs, c.into(), ty, |ty| {
self.alloc_tmp(ty).only_reg().unwrap()
});
for inst in insts {
self.emit(inst);
}
return non_writable_value_regs(regs);
}
let mut regs = self.value_regs[val];
trace!(" -> regs {:?}", regs);
assert!(regs.is_valid());
self.value_lowered_uses[val] += 1;
if let ValueDef::Result(i, 0) = self.f.dfg.value_def(val) {
if self.f.dfg[i].opcode() == Opcode::GetPinnedReg {
if let Some(pr) = self.pinned_reg {
regs = ValueRegs::one(pr);
}
}
}
regs
}
pub fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>> {
let val = self.f.dfg.inst_results(ir_inst)[idx];
writable_value_regs(self.value_regs[val])
}
}
impl<'func, I: VCodeInst> Lower<'func, I> {
pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
writable_value_regs(alloc_vregs(ty, &mut self.next_vreg, &mut self.vcode).unwrap())
}
pub fn emit(&mut self, mach_inst: I) {
trace!("emit: {:?}", mach_inst);
self.ir_insts.push(mach_inst);
}
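/// Mark a side-effecting instruction as sunk into the instruction currently
/// being lowered: it will be skipped by the block scan, and the current scan
/// color is rewound to the sunk instruction's entry color.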
pub fn sink_inst(&mut self, ir_inst: Inst) {
assert!(has_lowering_side_effect(self.f, ir_inst));
assert!(self.cur_scan_entry_color.is_some());
for result in self.dfg().inst_results(ir_inst) {
assert!(self.value_lowered_uses[*result] == 0);
}
let sunk_inst_entry_color = self
.side_effect_inst_entry_colors
.get(&ir_inst)
.cloned()
.unwrap();
let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
self.cur_scan_entry_color = Some(sunk_inst_entry_color);
self.inst_sunk.insert(ir_inst);
}
pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
self.f.dfg.immediates.get(imm).unwrap()
}
pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
self.f.dfg.constants.get(constant_handle)
}
pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
self.vcode.constants().insert(constant)
}
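/// Return `reg` itself if it is already virtual; otherwise copy it into a fresh
/// temporary vreg so downstream code can treat it uniformly.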
pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
if reg.to_virtual_reg().is_some() {
reg
} else {
let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
self.emit(I::gen_move(new_reg, reg, ty));
new_reg.to_reg()
}
}
pub fn set_vreg_alias(&mut self, from: Reg, to: Reg) {
trace!("set vreg alias: from {:?} to {:?}", from, to);
self.vcode.set_vreg_alias(from, to);
}
}