use alloc::vec::Vec;
use core::fmt::{self, Display, Formatter};
use core::ops::{Deref, DerefMut};
use core::str::FromStr;
#[cfg(feature = "enable-serde")]
use serde_derive::{Deserialize, Serialize};
use crate::bitset::BitSet;
use crate::entity;
use crate::ir::{
self,
condcodes::{FloatCC, IntCC},
trapcode::TrapCode,
types, Block, FuncRef, MemFlags, SigRef, StackSlot, Type, Value,
};
/// A list of `Value` operands stored out-of-line in a `ValueListPool`, used by instruction
/// formats whose operands do not fit inline in `InstructionData`.
pub type ValueList = entity::EntityList<Value>;
/// Memory pool that backs `ValueList` storage.
pub type ValueListPool = entity::ListPool<Value>;
/// A block destination together with its branch arguments.
///
/// Internally the destination `Block` is encoded as the first element of `values`
/// (see `BlockCall::new` / `block_to_value`), and the branch arguments follow it.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct BlockCall {
    // Element 0 is the encoded destination block; elements 1.. are the arguments.
    values: entity::EntityList<Value>,
}
impl BlockCall {
    // Reinterpret a value handle as a block handle. Only meaningful for handles produced
    // by `block_to_value`, which is why both helpers stay private to this impl.
    fn value_to_block(val: Value) -> Block {
        Block::from_u32(val.as_u32())
    }

    // Encode a block handle as a value handle so it can be stored in the value list.
    fn block_to_value(block: Block) -> Value {
        Value::from_u32(block.as_u32())
    }

    /// Construct a `BlockCall` for `block` with the given branch arguments.
    pub fn new(block: Block, args: &[Value], pool: &mut ValueListPool) -> Self {
        let mut list = ValueList::default();
        // The encoded destination always occupies slot 0.
        list.push(Self::block_to_value(block), pool);
        list.extend(args.iter().copied(), pool);
        Self { values: list }
    }

    /// Return the destination block of this call.
    pub fn block(&self, pool: &ValueListPool) -> Block {
        Self::value_to_block(self.values.first(pool).unwrap())
    }

    /// Replace the destination block of this call.
    pub fn set_block(&mut self, block: Block, pool: &mut ValueListPool) {
        let slot = self.values.get_mut(0, pool).unwrap();
        *slot = Self::block_to_value(block);
    }

    /// Append a single argument to the branch arguments.
    pub fn append_argument(&mut self, arg: Value, pool: &mut ValueListPool) {
        self.values.push(arg, pool);
    }

    /// Borrow the branch arguments as a slice.
    pub fn args_slice<'a>(&self, pool: &'a ValueListPool) -> &'a [Value] {
        // Skip slot 0, which holds the encoded destination block.
        &self.values.as_slice(pool)[1..]
    }

    /// Borrow the branch arguments as a mutable slice.
    pub fn args_slice_mut<'a>(&'a mut self, pool: &'a mut ValueListPool) -> &'a mut [Value] {
        &mut self.values.as_mut_slice(pool)[1..]
    }

    /// Remove the argument at index `ix` from the argument list.
    pub fn remove(&mut self, ix: usize, pool: &mut ValueListPool) {
        // Offset by one to account for the encoded block in slot 0.
        self.values.remove(ix + 1, pool)
    }

    /// Drop all arguments, keeping only the encoded destination block.
    pub fn clear(&mut self, pool: &mut ValueListPool) {
        self.values.truncate(1, pool)
    }

    /// Append multiple arguments to the argument list.
    pub fn extend<I>(&mut self, elements: I, pool: &mut ValueListPool)
    where
        I: IntoIterator<Item = Value>,
    {
        self.values.extend(elements, pool)
    }

    /// Return a wrapper that can `Display` this block call with its arguments.
    pub fn display<'a>(&self, pool: &'a ValueListPool) -> DisplayBlockCall<'a> {
        DisplayBlockCall { block: *self, pool }
    }

    /// Deep-clone this `BlockCall`, duplicating its backing list in `pool`.
    pub fn deep_clone(&self, pool: &mut ValueListPool) -> Self {
        Self {
            values: self.values.deep_clone(pool),
        }
    }
}
/// Wrapper carrying the context needed to `Display` a `BlockCall`.
pub struct DisplayBlockCall<'a> {
    // The block call being displayed.
    block: BlockCall,
    // The pool that backs the call's value list.
    pool: &'a ValueListPool,
}
impl<'a> Display for DisplayBlockCall<'a> {
    /// Format as `blockN` when there are no arguments, or `blockN(v1, v2, ...)` otherwise.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // `self.pool` is already a `&ValueListPool`; taking `&self.pool` as before created a
        // needless `&&ValueListPool` borrow (clippy::needless_borrow).
        write!(f, "{}", self.block.block(self.pool))?;
        let args = self.block.args_slice(self.pool);
        if !args.is_empty() {
            write!(f, "(")?;
            for (ix, arg) in args.iter().enumerate() {
                // Comma-separate everything after the first argument.
                if ix > 0 {
                    write!(f, ", ")?;
                }
                write!(f, "{}", arg)?;
            }
            write!(f, ")")?;
        }
        Ok(())
    }
}
include!(concat!(env!("OUT_DIR"), "/opcodes.rs"));
impl Display for Opcode {
    /// Write the canonical textual opcode name (e.g. `iadd_imm`).
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.write_str(opcode_name(*self))
    }
}
impl Opcode {
    /// Get the instruction format for this opcode.
    pub fn format(self) -> InstructionFormat {
        // Generated tables are indexed by `opcode - 1`.
        OPCODE_FORMAT[self as usize - 1]
    }

    /// Get the constraint descriptor for this opcode.
    pub fn constraints(self) -> OpcodeConstraints {
        OPCODE_CONSTRAINTS[self as usize - 1]
    }

    /// Returns true if this opcode is a resumable trap
    /// (`resumable_trap` / `resumable_trapnz`).
    pub fn is_resumable_trap(&self) -> bool {
        // `matches!` replaces the previous two-arm bool `match`
        // (clippy::match_like_matches_macro).
        matches!(self, Opcode::ResumableTrap | Opcode::ResumableTrapnz)
    }
}
impl FromStr for Opcode {
    type Err = &'static str;

    /// Parse an opcode from its textual name (e.g. `"iadd_imm"`).
    fn from_str(s: &str) -> Result<Self, &'static str> {
        use crate::constant_hash::{probe, simple_hash, Table};

        // Adapt the generated `OPCODE_HASH_TABLE` to the generic probing interface.
        impl<'a> Table<&'a str> for [Option<Opcode>] {
            fn len(&self) -> usize {
                // Delegates to the inherent slice `len`, not a recursive call.
                self.len()
            }

            fn key(&self, idx: usize) -> Option<&'a str> {
                // Empty hash-table slots are `None` and therefore have no key.
                self[idx].map(opcode_name)
            }
        }

        match probe::<&str, [Option<Self>]>(&OPCODE_HASH_TABLE, s, simple_hash(s)) {
            Err(_) => Err("Unknown opcode"),
            // A successful probe should only name an occupied slot, so the `unwrap`
            // presumably cannot fire — TODO confirm against `constant_hash::probe`.
            Ok(i) => Ok(OPCODE_HASH_TABLE[i].unwrap()),
        }
    }
}
/// A growable list of `Value` operands, convertible into a pooled `ValueList`
/// via `into_value_list`.
#[derive(Clone, Debug)]
pub struct VariableArgs(Vec<Value>);
impl VariableArgs {
    /// Create an empty argument list.
    pub fn new() -> Self {
        Self(Vec::new())
    }

    /// Add an argument to the end of the list.
    pub fn push(&mut self, v: Value) {
        self.0.push(v)
    }

    /// Check if the list is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Convert this into a pooled `ValueList` with the `fixed` arguments prepended.
    pub fn into_value_list(self, fixed: &[Value], pool: &mut ValueListPool) -> ValueList {
        let mut vlist = ValueList::default();
        // `Value` is `Copy` (see `BlockCall::new`), so use `copied()` instead of
        // the previous `cloned()` (clippy prefers `copied` for `Copy` items).
        vlist.extend(fixed.iter().copied(), pool);
        vlist.extend(self.0, pool);
        vlist
    }
}
// Coerce `VariableArgs` into a `&[Value]` slice.
impl Deref for VariableArgs {
    type Target = [Value];

    fn deref(&self) -> &[Value] {
        &self.0
    }
}
// Coerce `VariableArgs` into a mutable `&mut [Value]` slice.
impl DerefMut for VariableArgs {
    fn deref_mut(&mut self) -> &mut [Value] {
        &mut self.0
    }
}
impl Display for VariableArgs {
    /// Format the arguments as a comma-separated list, e.g. `v1, v2, v3`.
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        let mut first = true;
        for val in &self.0 {
            if first {
                write!(fmt, "{}", val)?;
                first = false;
            } else {
                write!(fmt, ", {}", val)?;
            }
        }
        Ok(())
    }
}
impl Default for VariableArgs {
fn default() -> Self {
Self::new()
}
}
impl InstructionData {
    /// Get the destinations of this instruction, if it's a branch.
    ///
    /// Non-branch instructions return the empty slice (with a debug assertion that the
    /// opcode is indeed not a branch).
    pub fn branch_destination<'a>(&'a self, jump_tables: &'a ir::JumpTables) -> &[BlockCall] {
        match self {
            Self::Jump {
                ref destination, ..
            // `core::slice`, not `std::slice`: this module otherwise imports only
            // `core`/`alloc`, and `std` is unavailable in `no_std` builds.
            } => core::slice::from_ref(destination),
            Self::Brif { blocks, .. } => blocks.as_slice(),
            Self::BranchTable { table, .. } => jump_tables.get(*table).unwrap().all_branches(),
            _ => {
                debug_assert!(!self.opcode().is_branch());
                &[]
            }
        }
    }

    /// Get mutable access to the destinations of this instruction, if it's a branch.
    ///
    /// Non-branch instructions return the empty slice.
    pub fn branch_destination_mut<'a>(
        &'a mut self,
        jump_tables: &'a mut ir::JumpTables,
    ) -> &mut [BlockCall] {
        match self {
            Self::Jump {
                ref mut destination,
                ..
            } => core::slice::from_mut(destination),
            Self::Brif { blocks, .. } => blocks.as_mut_slice(),
            Self::BranchTable { table, .. } => {
                jump_tables.get_mut(*table).unwrap().all_branches_mut()
            }
            _ => {
                debug_assert!(!self.opcode().is_branch());
                &mut []
            }
        }
    }

    /// If this is a trapping instruction, get its trap code. Otherwise, return `None`.
    pub fn trap_code(&self) -> Option<TrapCode> {
        match *self {
            Self::CondTrap { code, .. } | Self::Trap { code, .. } => Some(code),
            _ => None,
        }
    }

    /// If this is an integer comparison (`icmp` / `icmp_imm`), get its condition code.
    /// Otherwise, return `None`.
    pub fn cond_code(&self) -> Option<IntCC> {
        match self {
            &InstructionData::IntCompare { cond, .. }
            | &InstructionData::IntCompareImm { cond, .. } => Some(cond),
            _ => None,
        }
    }

    /// If this is a floating-point comparison (`fcmp`), get its condition code.
    /// Otherwise, return `None`.
    pub fn fp_cond_code(&self) -> Option<FloatCC> {
        match self {
            &InstructionData::FloatCompare { cond, .. } => Some(cond),
            _ => None,
        }
    }

    /// If this is a trapping instruction, get mutable access to its trap code.
    /// Otherwise, return `None`.
    pub fn trap_code_mut(&mut self) -> Option<&mut TrapCode> {
        match self {
            Self::CondTrap { code, .. } | Self::Trap { code, .. } => Some(code),
            _ => None,
        }
    }

    /// If this is an atomic read-modify-write instruction, get its operation.
    /// Otherwise, return `None`.
    pub fn atomic_rmw_op(&self) -> Option<ir::AtomicRmwOp> {
        match self {
            &InstructionData::AtomicRmw { op, .. } => Some(op),
            _ => None,
        }
    }

    /// If this is a load or store with an offset field, get that offset.
    /// Otherwise, return `None`.
    pub fn load_store_offset(&self) -> Option<i32> {
        match self {
            &InstructionData::Load { offset, .. }
            | &InstructionData::StackLoad { offset, .. }
            | &InstructionData::Store { offset, .. }
            | &InstructionData::StackStore { offset, .. } => Some(offset.into()),
            _ => None,
        }
    }

    /// If this instruction carries memory flags, get them. Otherwise, return `None`.
    pub fn memflags(&self) -> Option<MemFlags> {
        match self {
            &InstructionData::Load { flags, .. }
            | &InstructionData::LoadNoOffset { flags, .. }
            | &InstructionData::Store { flags, .. }
            | &InstructionData::StoreNoOffset { flags, .. }
            | &InstructionData::AtomicCas { flags, .. }
            | &InstructionData::AtomicRmw { flags, .. } => Some(flags),
            _ => None,
        }
    }

    /// If this instruction references a stack slot, get it. Otherwise, return `None`.
    pub fn stack_slot(&self) -> Option<StackSlot> {
        match self {
            &InstructionData::StackStore { stack_slot, .. }
            | &InstructionData::StackLoad { stack_slot, .. } => Some(stack_slot),
            _ => None,
        }
    }

    /// Analyze this instruction as a call, returning callee and argument information.
    ///
    /// For indirect calls, the returned slice skips the first list element
    /// (presumably the callee pointer — see the `[1..]` slice below).
    pub fn analyze_call<'a>(&'a self, pool: &'a ValueListPool) -> CallInfo<'a> {
        match *self {
            Self::Call {
                func_ref, ref args, ..
            } => CallInfo::Direct(func_ref, args.as_slice(pool)),
            Self::CallIndirect {
                sig_ref, ref args, ..
            } => CallInfo::Indirect(sig_ref, &args.as_slice(pool)[1..]),
            _ => {
                debug_assert!(!self.opcode().is_call());
                CallInfo::NotACall
            }
        }
    }

    /// Sign-extend immediates whose semantics are signed, using the width of the
    /// controlling type variable. No-op when `ctrl_typevar` is invalid.
    #[inline]
    pub(crate) fn sign_extend_immediates(&mut self, ctrl_typevar: Type) {
        if ctrl_typevar.is_invalid() {
            return;
        }

        let bit_width = ctrl_typevar.bits();

        match self {
            Self::BinaryImm64 {
                opcode,
                arg: _,
                imm,
            } => {
                // Only the signed division/remainder immediates are sign-sensitive here.
                if *opcode == Opcode::SdivImm || *opcode == Opcode::SremImm {
                    imm.sign_extend_from_width(bit_width);
                }
            }
            Self::IntCompareImm {
                opcode,
                arg: _,
                cond,
                imm,
            } => {
                debug_assert_eq!(*opcode, Opcode::IcmpImm);
                // A condition that differs from its unsigned form is signed, so the
                // immediate must be sign-extended to compare correctly.
                if cond.unsigned() != *cond {
                    imm.sign_extend_from_width(bit_width);
                }
            }
            _ => {}
        }
    }
}
/// Information about a call instruction, as returned by `InstructionData::analyze_call`.
pub enum CallInfo<'a> {
    /// The instruction is not a call.
    NotACall,
    /// Direct call to the given function reference, with all call arguments.
    Direct(FuncRef, &'a [Value]),
    /// Indirect call with the given signature; the slice excludes the first list
    /// element (skipped via `[1..]` in `analyze_call`).
    Indirect(SigRef, &'a [Value]),
}
/// Value type constraints for a given opcode, decoded by the accessor methods below.
#[derive(Clone, Copy)]
pub struct OpcodeConstraints {
    // Bit-packed flags (layout per the accessors):
    // - bits 0-2: number of fixed results (`num_fixed_results`),
    // - bit 3: typevar comes from an operand (`use_typevar_operand`),
    // - bit 4: typevar operand is required (`requires_typevar_operand`),
    // - bits 5-7: number of fixed value arguments (`num_fixed_value_arguments`).
    flags: u8,
    // Index into `TYPE_SETS` for the controlling typevar; an out-of-range value
    // means the opcode is not polymorphic (see `typeset_offset`).
    typeset_offset: u8,
    // Offset into `OPERAND_CONSTRAINTS` where this opcode's constraints begin.
    constraint_offset: u16,
}
impl OpcodeConstraints {
    /// Can the controlling type variable be inferred from a designated value operand?
    pub fn use_typevar_operand(self) -> bool {
        self.flags & 0b0000_1000 != 0
    }

    /// Must the controlling type variable be taken from the typevar operand?
    pub fn requires_typevar_operand(self) -> bool {
        self.flags & 0b0001_0000 != 0
    }

    /// Number of fixed results produced (not counting variable results).
    pub fn num_fixed_results(self) -> usize {
        usize::from(self.flags & 0b0000_0111)
    }

    /// Number of fixed value arguments consumed (not counting variable arguments).
    pub fn num_fixed_value_arguments(self) -> usize {
        usize::from((self.flags >> 5) & 0b0000_0111)
    }

    // Offset into `TYPE_SETS` for the controlling typevar, or `None` when the
    // stored offset is out of range (i.e. the opcode is not polymorphic).
    fn typeset_offset(self) -> Option<usize> {
        let offset = usize::from(self.typeset_offset);
        if offset >= TYPE_SETS.len() {
            None
        } else {
            Some(offset)
        }
    }

    // Offset into `OPERAND_CONSTRAINTS` for this opcode.
    fn constraint_offset(self) -> usize {
        self.constraint_offset as usize
    }

    /// Resolve the type of fixed result number `n`, given the controlling type.
    pub fn result_type(self, n: usize, ctrl_type: Type) -> Type {
        debug_assert!(n < self.num_fixed_results(), "Invalid result index");
        match OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type) {
            ResolvedConstraint::Bound(t) => t,
            ResolvedConstraint::Free(ts) => panic!("Result constraints can't be free: {:?}", ts),
        }
    }

    /// Resolve the constraint on fixed value argument number `n`, given the controlling type.
    pub fn value_argument_constraint(self, n: usize, ctrl_type: Type) -> ResolvedConstraint {
        debug_assert!(
            n < self.num_fixed_value_arguments(),
            "Invalid value argument index"
        );
        // Argument constraints are stored after the result constraints.
        let base = self.constraint_offset() + self.num_fixed_results();
        OPERAND_CONSTRAINTS[base + n].resolve(ctrl_type)
    }

    /// Get the type set permitted for the controlling type variable, if polymorphic.
    pub fn ctrl_typeset(self) -> Option<ValueTypeSet> {
        self.typeset_offset().map(|idx| TYPE_SETS[idx])
    }

    /// Is this opcode polymorphic (i.e. does it have a controlling type set)?
    pub fn is_polymorphic(self) -> bool {
        self.ctrl_typeset().is_some()
    }
}
// Bit set backed by a `u8` (8 bits of capacity).
type BitSet8 = BitSet<u8>;
// Bit set backed by a `u16` (16 bits of capacity).
type BitSet16 = BitSet<u16>;
/// A set of permitted value types for a type variable.
///
/// All bit sets are indexed by the base-2 logarithm of the quantity they describe
/// (see `is_base_type` and `contains`).
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct ValueTypeSet {
    /// Allowed lane counts, indexed by log2(lane count).
    pub lanes: BitSet16,
    /// Allowed integer lane widths, indexed by log2(bit width).
    pub ints: BitSet8,
    /// Allowed floating-point lane widths, indexed by log2(bit width).
    pub floats: BitSet8,
    /// Allowed reference lane widths, indexed by log2(bit width).
    pub refs: BitSet8,
    /// Allowed minimum lane counts for dynamic vectors, indexed by log2(min lane count).
    pub dynamic_lanes: BitSet16,
}
impl ValueTypeSet {
    // Is `scalar` a member of this set when lane counts are ignored? Only the base
    // (lane) type is checked against the per-class width sets.
    fn is_base_type(self, scalar: Type) -> bool {
        let width = scalar.log2_lane_bits();
        let class = if scalar.is_int() {
            self.ints
        } else if scalar.is_float() {
            self.floats
        } else if scalar.is_ref() {
            self.refs
        } else {
            return false;
        };
        class.contains(width)
    }

    /// Does `typ` belong to this type set?
    pub fn contains(self, typ: Type) -> bool {
        // The base type must be permitted regardless of lane count.
        if !self.is_base_type(typ.lane_type()) {
            return false;
        }
        if typ.is_dynamic_vector() {
            self.dynamic_lanes.contains(typ.log2_min_lane_count())
        } else {
            self.lanes.contains(typ.log2_lane_count())
        }
    }

    /// Produce an example member of this set, for use in error messages and similar.
    pub fn example(self) -> Type {
        // Pick a scalar: i32 when the set's widest int exceeds log2 width 5,
        // otherwise f32 under the same test on floats, otherwise i8.
        let scalar = if self.ints.max().unwrap_or(0) > 5 {
            types::I32
        } else if self.floats.max().unwrap_or(0) > 5 {
            types::F32
        } else {
            types::I8
        };
        // Widen to the smallest permitted lane count.
        scalar.by(1 << self.lanes.min().unwrap()).unwrap()
    }
}
/// A constraint on a single value operand or result, relative to the controlling
/// type variable. Resolved by `OperandConstraint::resolve`.
enum OperandConstraint {
    /// The operand has this concrete type.
    Concrete(Type),
    /// The operand may take any type in `TYPE_SETS[index]`.
    Free(u8),
    /// The operand has the same type as the controlling type variable.
    Same,
    /// The operand is `ctrl_type.lane_of()`.
    LaneOf,
    /// The operand is `ctrl_type.as_truthy()`.
    AsTruthy,
    /// The operand is `ctrl_type.half_width()`.
    HalfWidth,
    /// The operand is `ctrl_type.double_width()`.
    DoubleWidth,
    /// The operand is `ctrl_type.split_lanes()` (round-tripped through a fixed
    /// vector type for dynamic vectors).
    SplitLanes,
    /// The operand is `ctrl_type.merge_lanes()` (round-tripped through a fixed
    /// vector type for dynamic vectors).
    MergeLanes,
    /// The operand is `ctrl_type.dynamic_to_vector()`.
    DynamicToVector,
    /// The operand is a scalar narrower than the controlling type.
    Narrower,
    /// The operand is a scalar wider than the controlling type.
    Wider,
}
impl OperandConstraint {
    /// Resolve this constraint against the value of the controlling type variable,
    /// yielding either a single bound type or a free type set.
    pub fn resolve(&self, ctrl_type: Type) -> ResolvedConstraint {
        use self::OperandConstraint::*;
        use self::ResolvedConstraint::Bound;
        match *self {
            Concrete(t) => Bound(t),
            Free(vts) => ResolvedConstraint::Free(TYPE_SETS[vts as usize]),
            Same => Bound(ctrl_type),
            LaneOf => Bound(ctrl_type.lane_of()),
            AsTruthy => Bound(ctrl_type.as_truthy()),
            HalfWidth => Bound(ctrl_type.half_width().expect("invalid type for half_width")),
            DoubleWidth => Bound(
                ctrl_type
                    .double_width()
                    .expect("invalid type for double_width"),
            ),
            SplitLanes => {
                if ctrl_type.is_dynamic_vector() {
                    // Dynamic vectors: go through the fixed vector type, split the
                    // lanes there, then convert back to a dynamic type.
                    Bound(
                        ctrl_type
                            .dynamic_to_vector()
                            .expect("invalid type for dynamic_to_vector")
                            .split_lanes()
                            .expect("invalid type for split_lanes")
                            .vector_to_dynamic()
                            .expect("invalid dynamic type"),
                    )
                } else {
                    Bound(
                        ctrl_type
                            .split_lanes()
                            .expect("invalid type for split_lanes"),
                    )
                }
            }
            MergeLanes => {
                if ctrl_type.is_dynamic_vector() {
                    // Same round-trip as `SplitLanes`, but merging lanes instead.
                    Bound(
                        ctrl_type
                            .dynamic_to_vector()
                            .expect("invalid type for dynamic_to_vector")
                            .merge_lanes()
                            .expect("invalid type for merge_lanes")
                            .vector_to_dynamic()
                            .expect("invalid dynamic type"),
                    )
                } else {
                    Bound(
                        ctrl_type
                            .merge_lanes()
                            .expect("invalid type for merge_lanes"),
                    )
                }
            }
            DynamicToVector => Bound(
                ctrl_type
                    .dynamic_to_vector()
                    .expect("invalid type for dynamic_to_vector"),
            ),
            Narrower => {
                let ctrl_type_bits = ctrl_type.log2_lane_bits();
                let mut tys = ValueTypeSet::default();

                // Only single-lane (scalar) types are permitted.
                tys.lanes = BitSet::from_range(0, 1);

                if ctrl_type.is_int() {
                    // Integer widths from i8 (log2 = 3) up to the control type's width.
                    tys.ints = BitSet8::from_range(3, ctrl_type_bits as u8);
                } else if ctrl_type.is_float() {
                    // Float widths from f32 (log2 = 5) up to the control type's width.
                    tys.floats = BitSet8::from_range(5, ctrl_type_bits as u8);
                } else {
                    panic!("The Narrower constraint only operates on floats or ints");
                }
                ResolvedConstraint::Free(tys)
            }
            Wider => {
                let ctrl_type_bits = ctrl_type.log2_lane_bits();
                let mut tys = ValueTypeSet::default();

                // Only single-lane (scalar) types are permitted.
                tys.lanes = BitSet::from_range(0, 1);

                if ctrl_type.is_int() {
                    let lower_bound = ctrl_type_bits as u8 + 1;
                    // Guard against the lower bound exceeding the bit set's capacity,
                    // which would leave no valid wider widths.
                    if lower_bound < BitSet8::bits() as u8 {
                        tys.ints = BitSet8::from_range(lower_bound, 8);
                    }
                } else if ctrl_type.is_float() {
                    // Float widths strictly wider than the control type, up to log2 = 7.
                    tys.floats = BitSet8::from_range(ctrl_type_bits as u8 + 1, 7);
                } else {
                    panic!("The Wider constraint only operates on floats or ints");
                }
                ResolvedConstraint::Free(tys)
            }
        }
    }
}
/// The result of resolving an `OperandConstraint` against a controlling type.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ResolvedConstraint {
    /// The operand is bound to one concrete type.
    Bound(Type),
    /// The operand may take any type in the given set.
    Free(ValueTypeSet),
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::ToString;

    /// `InstructionData` must stay `Copy` so instructions can be duplicated cheaply.
    #[test]
    fn inst_data_is_copy() {
        fn is_copy<T: Copy>() {}
        is_copy::<InstructionData>();
    }

    /// `InstructionData` is expected to stay within 16 bytes.
    #[test]
    fn inst_data_size() {
        // `core::mem`, not `std::mem`, for consistency with the other tests in this
        // module and with the file's `core`/`alloc`-only imports.
        assert_eq!(core::mem::size_of::<InstructionData>(), 16);
    }

    /// Basic `Opcode` behavior: equality, formats, Debug/Display, parsing, and layout.
    #[test]
    fn opcodes() {
        use core::mem;

        let x = Opcode::Iadd;
        let mut y = Opcode::Isub;

        assert!(x != y);
        y = Opcode::Iadd;
        assert_eq!(x, y);
        assert_eq!(x.format(), InstructionFormat::Binary);

        // `Debug` prints the variant name; `Display` prints the IR spelling.
        assert_eq!(format!("{:?}", Opcode::IaddImm), "IaddImm");
        assert_eq!(Opcode::IaddImm.to_string(), "iadd_imm");

        // `FromStr` round-trips IR spellings and rejects everything else.
        assert_eq!("iadd".parse::<Opcode>(), Ok(Opcode::Iadd));
        assert_eq!("iadd_imm".parse::<Opcode>(), Ok(Opcode::IaddImm));
        assert_eq!("iadd\0".parse::<Opcode>(), Err("Unknown opcode"));
        assert_eq!("".parse::<Opcode>(), Err("Unknown opcode"));
        assert_eq!("\0".parse::<Opcode>(), Err("Unknown opcode"));

        // The niche optimization should make `Option<Opcode>` cost nothing extra.
        assert_eq!(mem::size_of::<Opcode>(), mem::size_of::<Option<Opcode>>());
    }

    /// NOTE(review): duplicates the size check in `inst_data_size`; kept so neither
    /// test name disappears from CI output.
    #[test]
    fn instruction_data() {
        use core::mem;
        assert_eq!(mem::size_of::<InstructionData>(), 16);
    }

    /// Exercise `OpcodeConstraints` decoding for a representative set of opcodes.
    #[test]
    fn constraints() {
        // `iadd` is polymorphic: same-typed args and result.
        let a = Opcode::Iadd.constraints();
        assert!(a.use_typevar_operand());
        assert!(!a.requires_typevar_operand());
        assert_eq!(a.num_fixed_results(), 1);
        assert_eq!(a.num_fixed_value_arguments(), 2);
        assert_eq!(a.result_type(0, types::I32), types::I32);
        assert_eq!(a.result_type(0, types::I8), types::I8);
        assert_eq!(
            a.value_argument_constraint(0, types::I32),
            ResolvedConstraint::Bound(types::I32)
        );
        assert_eq!(
            a.value_argument_constraint(1, types::I32),
            ResolvedConstraint::Bound(types::I32)
        );

        // `bitcast` has a free input constraint.
        let b = Opcode::Bitcast.constraints();
        assert!(!b.use_typevar_operand());
        assert!(!b.requires_typevar_operand());
        assert_eq!(b.num_fixed_results(), 1);
        assert_eq!(b.num_fixed_value_arguments(), 1);
        assert_eq!(b.result_type(0, types::I32), types::I32);
        assert_eq!(b.result_type(0, types::I8), types::I8);
        match b.value_argument_constraint(0, types::I32) {
            ResolvedConstraint::Free(vts) => assert!(vts.contains(types::F32)),
            _ => panic!("Unexpected constraint from value_argument_constraint"),
        }

        // Calls have no fixed results; indirect calls take the callee as a fixed arg.
        let c = Opcode::Call.constraints();
        assert_eq!(c.num_fixed_results(), 0);
        assert_eq!(c.num_fixed_value_arguments(), 0);

        let i = Opcode::CallIndirect.constraints();
        assert_eq!(i.num_fixed_results(), 0);
        assert_eq!(i.num_fixed_value_arguments(), 1);

        // `icmp` requires the typevar operand and produces an i8 result.
        let cmp = Opcode::Icmp.constraints();
        assert!(cmp.use_typevar_operand());
        assert!(cmp.requires_typevar_operand());
        assert_eq!(cmp.num_fixed_results(), 1);
        assert_eq!(cmp.num_fixed_value_arguments(), 2);
        assert_eq!(cmp.result_type(0, types::I64), types::I8);
    }

    /// Exercise `ValueTypeSet::contains` and `example` across lane/width ranges.
    #[test]
    fn value_set() {
        use crate::ir::types::*;

        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(0, 8),
            ints: BitSet8::from_range(4, 7),
            floats: BitSet8::from_range(0, 0),
            refs: BitSet8::from_range(5, 7),
            dynamic_lanes: BitSet16::from_range(0, 4),
        };
        assert!(!vts.contains(I8));
        assert!(vts.contains(I32));
        assert!(vts.contains(I64));
        assert!(vts.contains(I32X4));
        assert!(vts.contains(I32X4XN));
        assert!(!vts.contains(F32));
        assert!(vts.contains(R32));
        assert!(vts.contains(R64));
        assert_eq!(vts.example().to_string(), "i32");

        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(0, 8),
            ints: BitSet8::from_range(0, 0),
            floats: BitSet8::from_range(5, 7),
            refs: BitSet8::from_range(0, 0),
            dynamic_lanes: BitSet16::from_range(0, 8),
        };
        assert_eq!(vts.example().to_string(), "f32");

        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(1, 8),
            ints: BitSet8::from_range(0, 0),
            floats: BitSet8::from_range(5, 7),
            refs: BitSet8::from_range(0, 0),
            dynamic_lanes: BitSet16::from_range(0, 8),
        };
        assert_eq!(vts.example().to_string(), "f32x2");

        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(2, 8),
            ints: BitSet8::from_range(3, 7),
            floats: BitSet8::from_range(0, 0),
            refs: BitSet8::from_range(0, 0),
            dynamic_lanes: BitSet16::from_range(0, 8),
        };
        assert_eq!(vts.example().to_string(), "i32x4");

        let vts = ValueTypeSet {
            // Cover every scalar and vector lane count.
            lanes: BitSet16::from_range(0, 9),
            ints: BitSet8::from_range(3, 7),
            floats: BitSet8::from_range(0, 0),
            refs: BitSet8::from_range(0, 0),
            dynamic_lanes: BitSet16::from_range(0, 8),
        };
        assert!(vts.contains(I32));
        assert!(vts.contains(I32X4));
        assert!(!vts.contains(R32));
        assert!(!vts.contains(R64));
    }
}