use crate::ir::{self, LibCall, MemFlags, TrapCode};
use crate::isa::CallConv;
use crate::isa::s390x::abi::REG_SAVE_AREA_SIZE;
use crate::isa::s390x::inst::*;
use crate::isa::s390x::settings as s390x_settings;
use cranelift_control::ControlPlane;
/// Debug-only check that (`$hi`, `$lo`) form a valid 64-bit GPR pair:
/// the high half must live in an even-numbered register and the low
/// half in the immediately following (odd) register. Panics if either
/// operand is not a real (physical) register.
macro_rules! debug_assert_valid_regpair {
    ($hi:expr, $lo:expr) => {
        if cfg!(debug_assertions) {
            match ($hi.to_real_reg(), $lo.to_real_reg()) {
                (Some(hi), Some(lo)) => {
                    // Pairs must start on an even register number.
                    assert!(
                        hi.hw_enc() % 2 == 0,
                        "High register is not even: {}",
                        show_reg($hi)
                    );
                    // The low half must be exactly hi + 1.
                    assert_eq!(
                        hi.hw_enc() + 1,
                        lo.hw_enc(),
                        "Low register is not valid: {}, {}",
                        show_reg($hi),
                        show_reg($lo)
                    );
                }
                _ => {
                    panic!(
                        "Expected real registers for {} {}",
                        show_reg($hi),
                        show_reg($lo)
                    );
                }
            }
        }
    };
}
/// Debug-only check that (`$hi`, `$lo`) form a valid floating-point
/// register pair. Unlike GPR pairs, FP pairs are checked as: the high
/// register must have bit 1 (value 2) of its encoding clear — NOT an
/// even/odd test — and the low register is `hi + 2`. Panics if either
/// operand is not a real (physical) register.
macro_rules! debug_assert_valid_fp_regpair {
    ($hi:expr, $lo:expr) => {
        if cfg!(debug_assertions) {
            match ($hi.to_real_reg(), $lo.to_real_reg()) {
                (Some(hi), Some(lo)) => {
                    // Note: `& 2`, not `% 2` — this is intentional; the
                    // valid high halves are those with the 2-bit clear.
                    assert!(
                        hi.hw_enc() & 2 == 0,
                        "High register is not valid: {}",
                        show_reg($hi)
                    );
                    // The low half must be exactly hi + 2.
                    assert_eq!(
                        hi.hw_enc() + 2,
                        lo.hw_enc(),
                        "Low register is not valid: {}, {}",
                        show_reg($hi),
                        show_reg($lo)
                    );
                }
                _ => {
                    panic!(
                        "Expected real registers for {} {}",
                        show_reg($hi),
                        show_reg($lo)
                    );
                }
            }
        }
    };
}
// Raw opcodes for a few instructions referenced directly by the
// emission code below.
const OPCODE_BRAS: u16 = 0xa75; // Branch relative and save.
const OPCODE_BCR: u16 = 0xa74; // Branch on condition (register form).
const OPCODE_LDR: u16 = 0x28; // Load (FPR <- FPR register move).
const OPCODE_VLR: u16 = 0xe756; // Vector load (VR <- VR register move).
/// Description of the addressing-mode capabilities of a memory-accessing
/// instruction, used by `mem_finalize` to rewrite a `MemArg` into a form
/// the instruction can actually encode.
pub struct MemInstType {
    /// Instruction has a variant with a 12-bit unsigned displacement.
    pub have_d12: bool,
    /// Instruction has a variant with a 20-bit signed displacement.
    pub have_d20: bool,
    /// Instruction has a PC-relative (RIL-format) variant.
    pub have_pcrel: bool,
    /// PC-relative variant may be used even for unaligned targets.
    pub have_unaligned_pcrel: bool,
    /// Instruction supports an index register in addition to the base.
    pub have_index: bool,
}
/// Finalize a memory argument for emission.
///
/// Rewrites `mem` into a `MemArg` that an instruction with the
/// capabilities described by `mi` can encode directly, and returns any
/// helper instructions that must be emitted first (e.g. materializing
/// an out-of-range offset into the spill temp register, or computing
/// the full address via `LoadAddr`).
pub fn mem_finalize(
    mem: &MemArg,
    state: &EmitState,
    mi: MemInstType,
) -> (SmallVec<[Inst; 4]>, MemArg) {
    let mut insts = SmallVec::new();

    // Step 1: resolve register/stack-relative offset forms into a
    // concrete base register plus displacement.
    let mem = match mem {
        &MemArg::RegOffset { off, .. }
        | &MemArg::InitialSPOffset { off }
        | &MemArg::IncomingArgOffset { off }
        | &MemArg::OutgoingArgOffset { off }
        | &MemArg::SlotOffset { off }
        | &MemArg::SpillOffset { off } => {
            // All stack-relative forms use the stack pointer as base.
            let base = match mem {
                &MemArg::RegOffset { reg, .. } => reg,
                &MemArg::InitialSPOffset { .. }
                | &MemArg::IncomingArgOffset { .. }
                | &MemArg::OutgoingArgOffset { .. }
                | &MemArg::SlotOffset { .. }
                | &MemArg::SpillOffset { .. } => stack_reg(),
                _ => unreachable!(),
            };
            // Translate each virtual offset into an SP-relative one by
            // adding the sizes of the frame regions lying in between.
            let adj = match mem {
                &MemArg::IncomingArgOffset { .. } => i64::from(
                    state.incoming_args_size
                        + REG_SAVE_AREA_SIZE
                        + state.frame_layout().clobber_size
                        + state.frame_layout().fixed_frame_storage_size
                        + state.frame_layout().outgoing_args_size
                        + state.nominal_sp_offset,
                ),
                &MemArg::InitialSPOffset { .. } => i64::from(
                    state.frame_layout().clobber_size
                        + state.frame_layout().fixed_frame_storage_size
                        + state.frame_layout().outgoing_args_size
                        + state.nominal_sp_offset,
                ),
                &MemArg::SpillOffset { .. } => i64::from(
                    state.frame_layout().stackslots_size
                        + state.frame_layout().outgoing_args_size
                        + state.nominal_sp_offset,
                ),
                &MemArg::SlotOffset { .. } => {
                    i64::from(state.frame_layout().outgoing_args_size + state.nominal_sp_offset)
                }
                &MemArg::OutgoingArgOffset { .. } => {
                    i64::from(REG_SAVE_AREA_SIZE) - i64::from(state.outgoing_sp_offset)
                }
                _ => 0,
            };
            let off = off + adj;
            // Prefer the smallest displacement encoding that fits;
            // otherwise materialize the offset into the spill temp
            // register and use base+index addressing.
            if let Some(disp) = UImm12::maybe_from_u64(off as u64) {
                MemArg::BXD12 {
                    base,
                    index: zero_reg(),
                    disp,
                    flags: mem.get_flags(),
                }
            } else if let Some(disp) = SImm20::maybe_from_i64(off) {
                MemArg::BXD20 {
                    base,
                    index: zero_reg(),
                    disp,
                    flags: mem.get_flags(),
                }
            } else {
                let tmp = writable_spilltmp_reg();
                // The base must not be the temp we are about to clobber.
                assert!(base != tmp.to_reg());
                if let Ok(imm) = i16::try_from(off) {
                    insts.push(Inst::Mov64SImm16 { rd: tmp, imm });
                } else if let Ok(imm) = i32::try_from(off) {
                    insts.push(Inst::Mov64SImm32 { rd: tmp, imm });
                } else {
                    unreachable!();
                }
                MemArg::reg_plus_reg(base, tmp.to_reg(), mem.get_flags())
            }
        }
        _ => mem.clone(),
    };

    // Step 2: if the instruction cannot encode the addressing form we
    // ended up with, compute the address into the spill temp first.
    let need_load_address = match &mem {
        &MemArg::Label { .. } | &MemArg::Constant { .. } if !mi.have_pcrel => true,
        &MemArg::Symbol { .. } if !mi.have_pcrel => true,
        &MemArg::Symbol { flags, .. } if !mi.have_unaligned_pcrel && !flags.aligned() => true,
        &MemArg::BXD20 { .. } if !mi.have_d20 => true,
        &MemArg::BXD12 { index, .. } | &MemArg::BXD20 { index, .. } if !mi.have_index => {
            index != zero_reg()
        }
        _ => false,
    };
    let mem = if need_load_address {
        let flags = mem.get_flags();
        let tmp = writable_spilltmp_reg();
        insts.push(Inst::LoadAddr { rd: tmp, mem });
        MemArg::reg(tmp.to_reg(), flags)
    } else {
        mem
    };

    // Step 3: a 12-bit unsigned displacement always fits the 20-bit
    // signed form, so widen when only the 20-bit encoding is available.
    let mem = match &mem {
        &MemArg::BXD12 {
            base,
            index,
            disp,
            flags,
        } if !mi.have_d12 => {
            assert!(mi.have_d20);
            MemArg::BXD20 {
                base,
                index,
                disp: SImm20::from_uimm12(disp),
                flags,
            }
        }
        _ => mem,
    };

    (insts, mem)
}
/// Emit a memory-accessing instruction with one register operand,
/// choosing among the RX, RXY, and RIL (PC-relative) encodings
/// according to the finalized addressing mode. A `None` opcode means
/// the corresponding encoding does not exist for this instruction;
/// `mem_finalize` is informed and rewrites the address accordingly.
pub fn mem_emit(
    rd: Reg,
    mem: &MemArg,
    opcode_rx: Option<u16>,
    opcode_rxy: Option<u16>,
    opcode_ril: Option<u16>,
    add_trap: bool,
    sink: &mut MachBuffer<Inst>,
    emit_info: &EmitInfo,
    state: &mut EmitState,
) {
    let (mem_insts, mem) = mem_finalize(
        mem,
        state,
        MemInstType {
            have_d12: opcode_rx.is_some(),
            have_d20: opcode_rxy.is_some(),
            have_pcrel: opcode_ril.is_some(),
            // NOTE(review): unaligned PC-relative symbols are only
            // allowed when no trap is attached — confirm the intended
            // invariant with the lowering rules.
            have_unaligned_pcrel: opcode_ril.is_some() && !add_trap,
            have_index: true,
        },
    );
    for inst in mem_insts.into_iter() {
        inst.emit(sink, emit_info, state);
    }
    // Record the trap associated with this memory access, if any,
    // at the offset of the actual memory instruction.
    if add_trap {
        if let Some(trap_code) = mem.get_flags().trap_code() {
            sink.add_trap(trap_code);
        }
    }
    match &mem {
        &MemArg::BXD12 {
            base, index, disp, ..
        } => {
            put(
                sink,
                &enc_rx(opcode_rx.unwrap(), rd, base, index, disp.bits()),
            );
        }
        &MemArg::BXD20 {
            base, index, disp, ..
        } => {
            put(
                sink,
                &enc_rxy(opcode_rxy.unwrap(), rd, base, index, disp.bits()),
            );
        }
        &MemArg::Label { target } => {
            // PC-relative reference to a label: emitted with a zero
            // immediate, fixed up via the label use.
            sink.use_label_at_offset(sink.cur_offset(), target, LabelUse::BranchRIL);
            put(sink, &enc_ril_b(opcode_ril.unwrap(), rd, 0));
        }
        &MemArg::Constant { constant } => {
            let target = sink.get_label_for_constant(constant);
            sink.use_label_at_offset(sink.cur_offset(), target, LabelUse::BranchRIL);
            put(sink, &enc_ril_b(opcode_ril.unwrap(), rd, 0));
        }
        &MemArg::Symbol {
            ref name, offset, ..
        } => {
            // The 32-bit PC-relative field starts 2 bytes into the RIL
            // instruction, hence the +2 on both the relocation offset
            // and the addend.
            let reloc_offset = sink.cur_offset() + 2;
            sink.add_reloc_at_offset(
                reloc_offset,
                Reloc::S390xPCRel32Dbl,
                &**name,
                (offset + 2).into(),
            );
            put(sink, &enc_ril_b(opcode_ril.unwrap(), rd, 0));
        }
        _ => unreachable!(),
    }
}
/// Emit an RS/RSY-format instruction (two register operands plus a
/// base + displacement memory operand). These formats have no index
/// register and no PC-relative form, so `mem_finalize` is told so.
pub fn mem_rs_emit(
    rd: Reg,
    rn: Reg,
    mem: &MemArg,
    opcode_rs: Option<u16>,
    opcode_rsy: Option<u16>,
    add_trap: bool,
    sink: &mut MachBuffer<Inst>,
    emit_info: &EmitInfo,
    state: &mut EmitState,
) {
    let (mem_insts, mem) = mem_finalize(
        mem,
        state,
        MemInstType {
            have_d12: opcode_rs.is_some(),
            have_d20: opcode_rsy.is_some(),
            have_pcrel: false,
            have_unaligned_pcrel: false,
            have_index: false,
        },
    );
    for inst in mem_insts.into_iter() {
        inst.emit(sink, emit_info, state);
    }
    // Record the trap associated with this memory access, if any.
    if add_trap {
        if let Some(trap_code) = mem.get_flags().trap_code() {
            sink.add_trap(trap_code);
        }
    }
    match &mem {
        &MemArg::BXD12 {
            base, index, disp, ..
        } => {
            // `have_index: false` above guarantees no index register.
            assert!(index == zero_reg());
            put(sink, &enc_rs(opcode_rs.unwrap(), rd, rn, base, disp.bits()));
        }
        &MemArg::BXD20 {
            base, index, disp, ..
        } => {
            assert!(index == zero_reg());
            put(
                sink,
                &enc_rsy(opcode_rsy.unwrap(), rd, rn, base, disp.bits()),
            );
        }
        _ => unreachable!(),
    }
}
/// Emit an SI/SIY-format instruction (8-bit immediate to memory).
/// Both the 12-bit (SI) and 20-bit (SIY) displacement forms are
/// assumed to exist; there is no index register or PC-relative form.
pub fn mem_imm8_emit(
    imm: u8,
    mem: &MemArg,
    opcode_si: u16,
    opcode_siy: u16,
    add_trap: bool,
    sink: &mut MachBuffer<Inst>,
    emit_info: &EmitInfo,
    state: &mut EmitState,
) {
    let (mem_insts, mem) = mem_finalize(
        mem,
        state,
        MemInstType {
            have_d12: true,
            have_d20: true,
            have_pcrel: false,
            have_unaligned_pcrel: false,
            have_index: false,
        },
    );
    for inst in mem_insts.into_iter() {
        inst.emit(sink, emit_info, state);
    }
    // Record the trap associated with this memory access, if any.
    if add_trap {
        if let Some(trap_code) = mem.get_flags().trap_code() {
            sink.add_trap(trap_code);
        }
    }
    match &mem {
        &MemArg::BXD12 {
            base, index, disp, ..
        } => {
            assert!(index == zero_reg());
            put(sink, &enc_si(opcode_si, base, disp.bits(), imm));
        }
        &MemArg::BXD20 {
            base, index, disp, ..
        } => {
            assert!(index == zero_reg());
            put(sink, &enc_siy(opcode_siy, base, disp.bits(), imm));
        }
        _ => unreachable!(),
    }
}
/// Emit an SIL-format instruction (16-bit immediate to memory).
/// SIL only has a 12-bit unsigned displacement, no index register,
/// and no PC-relative form, so `mem_finalize` is constrained to
/// produce a BXD12 address.
pub fn mem_imm16_emit(
    imm: i16,
    mem: &MemArg,
    opcode_sil: u16,
    add_trap: bool,
    sink: &mut MachBuffer<Inst>,
    emit_info: &EmitInfo,
    state: &mut EmitState,
) {
    let (mem_insts, mem) = mem_finalize(
        mem,
        state,
        MemInstType {
            have_d12: true,
            have_d20: false,
            have_pcrel: false,
            have_unaligned_pcrel: false,
            have_index: false,
        },
    );
    for inst in mem_insts.into_iter() {
        inst.emit(sink, emit_info, state);
    }
    // Record the trap associated with this memory access, if any.
    if add_trap {
        if let Some(trap_code) = mem.get_flags().trap_code() {
            sink.add_trap(trap_code);
        }
    }
    match &mem {
        &MemArg::BXD12 {
            base, index, disp, ..
        } => {
            assert!(index == zero_reg());
            put(sink, &enc_sil(opcode_sil, base, disp.bits(), imm));
        }
        _ => unreachable!(),
    }
}
/// Emit a VRX-format vector memory instruction. VRX supports an index
/// register but only a 12-bit unsigned displacement and no PC-relative
/// form, so `mem_finalize` is constrained accordingly.
pub fn mem_vrx_emit(
    rd: Reg,
    mem: &MemArg,
    opcode: u16,
    m3: u8,
    add_trap: bool,
    sink: &mut MachBuffer<Inst>,
    emit_info: &EmitInfo,
    state: &mut EmitState,
) {
    let (mem_insts, mem) = mem_finalize(
        mem,
        state,
        MemInstType {
            have_d12: true,
            have_d20: false,
            have_pcrel: false,
            have_unaligned_pcrel: false,
            have_index: true,
        },
    );
    for inst in mem_insts.into_iter() {
        inst.emit(sink, emit_info, state);
    }
    // Record the trap associated with this memory access, if any.
    if add_trap {
        if let Some(trap_code) = mem.get_flags().trap_code() {
            sink.add_trap(trap_code);
        }
    }
    match &mem {
        &MemArg::BXD12 {
            base, index, disp, ..
        } => {
            put(sink, &enc_vrx(opcode, rd, base, index, disp.bits(), m3));
        }
        _ => unreachable!(),
    }
}
/// Hardware encoding of a general-purpose register; asserts the
/// register class is integer and the register is a real register.
fn machreg_to_gpr(m: Reg) -> u8 {
    assert_eq!(m.class(), RegClass::Int);
    let real = m.to_real_reg().unwrap();
    real.hw_enc()
}
/// Hardware encoding of a vector register; asserts the register class
/// is float and the register is a real register.
fn machreg_to_vr(m: Reg) -> u8 {
    assert_eq!(m.class(), RegClass::Float);
    let real = m.to_real_reg().unwrap();
    real.hw_enc()
}
/// Hardware encoding of a floating-point register; asserts `is_fpr`.
fn machreg_to_fpr(m: Reg) -> u8 {
    assert!(is_fpr(m));
    let real = m.to_real_reg().unwrap();
    real.hw_enc()
}
/// Hardware encoding of a register that must fit in a 4-bit field
/// (a GPR, or an FPR in the 0..=15 range).
fn machreg_to_gpr_or_fpr(m: Reg) -> u8 {
    let enc = m.to_real_reg().unwrap().hw_enc();
    assert!(enc < 16);
    enc
}
/// Compute the RXB field of a vector instruction: one bit per vector
/// operand (MSB-first: v1=8, v2=4, v3=2, v4=1), set when the operand
/// is not an FPR (per `is_fpr`), so its high register-number bit is
/// carried here rather than in the 4-bit operand field.
fn rxb(v1: Option<Reg>, v2: Option<Reg>, v3: Option<Reg>, v4: Option<Reg>) -> u8 {
    let bit = |reg: Option<Reg>, mask: u8| -> u8 {
        match reg {
            Some(r) if !is_fpr(r) => mask,
            _ => 0,
        }
    };
    bit(v1, 8) | bit(v2, 4) | bit(v3, 2) | bit(v4, 1)
}
/// Encode an E-format instruction (a bare two-byte opcode).
///
/// The two instruction bytes are exactly the big-endian bytes of
/// `opcode`, so use the standard-library conversion instead of a
/// hand-rolled shift-and-mask split.
fn enc_e(opcode: u16) -> [u8; 2] {
    opcode.to_be_bytes()
}
/// Encode an RI-a format instruction: 12-bit opcode, register r1,
/// 16-bit immediate i2.
fn enc_ri_a(opcode: u16, r1: Reg, i2: u16) -> [u8; 4] {
    let [i2_hi, i2_lo] = i2.to_be_bytes();
    [
        ((opcode >> 4) & 0xff) as u8,
        (machreg_to_gpr(r1) & 0x0f) << 4 | (opcode & 0xf) as u8,
        i2_hi,
        i2_lo,
    ]
}
/// Encode an RI-b format instruction: 12-bit opcode, register r1,
/// 16-bit PC-relative offset ri2 (stored in halfword units).
fn enc_ri_b(opcode: u16, r1: Reg, ri2: i32) -> [u8; 4] {
    // PC-relative offsets are encoded in units of 2 bytes.
    let halfwords = ((ri2 >> 1) & 0xffff) as u16;
    let [off_hi, off_lo] = halfwords.to_be_bytes();
    [
        ((opcode >> 4) & 0xff) as u8,
        (machreg_to_gpr(r1) & 0x0f) << 4 | (opcode & 0xf) as u8,
        off_hi,
        off_lo,
    ]
}
/// Encode an RI-c format instruction: 12-bit opcode, 4-bit mask m1,
/// 16-bit PC-relative offset ri2 (stored in halfword units).
fn enc_ri_c(opcode: u16, m1: u8, ri2: i32) -> [u8; 4] {
    // PC-relative offsets are encoded in units of 2 bytes.
    let halfwords = ((ri2 >> 1) & 0xffff) as u16;
    let [off_hi, off_lo] = halfwords.to_be_bytes();
    [
        ((opcode >> 4) & 0xff) as u8,
        (m1 & 0x0f) << 4 | (opcode & 0xf) as u8,
        off_hi,
        off_lo,
    ]
}
/// Encode an RIE-a format instruction: register r1, 16-bit immediate
/// i2, 4-bit mask m3, 16-bit opcode split across bytes 0 and 5.
fn enc_rie_a(opcode: u16, r1: Reg, i2: u16, m3: u8) -> [u8; 6] {
    let mut enc: [u8; 6] = [0; 6];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr(r1) & 0x0f;
    let m3 = m3 & 0x0f;
    enc[0] = opcode1;
    enc[1] = r1 << 4;
    enc[2..4].copy_from_slice(&i2.to_be_bytes());
    enc[4] = m3 << 4;
    enc[5] = opcode2;
    enc
}
/// Encode an RIE-d format instruction: registers r1 and r3, 16-bit
/// immediate i2, 16-bit opcode split across bytes 0 and 5.
fn enc_rie_d(opcode: u16, r1: Reg, r3: Reg, i2: u16) -> [u8; 6] {
    let mut enc: [u8; 6] = [0; 6];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr(r1) & 0x0f;
    let r3 = machreg_to_gpr(r3) & 0x0f;
    enc[0] = opcode1;
    enc[1] = r1 << 4 | r3;
    enc[2..4].copy_from_slice(&i2.to_be_bytes());
    enc[5] = opcode2;
    enc
}
/// Encode an RIE-f format instruction: registers r1 and r2 plus three
/// 8-bit immediates (i3, i4, i5 — e.g. rotate-then-insert fields).
fn enc_rie_f(opcode: u16, r1: Reg, r2: Reg, i3: u8, i4: u8, i5: u8) -> [u8; 6] {
    let mut enc: [u8; 6] = [0; 6];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr(r1) & 0x0f;
    let r2 = machreg_to_gpr(r2) & 0x0f;
    enc[0] = opcode1;
    enc[1] = r1 << 4 | r2;
    enc[2] = i3;
    enc[3] = i4;
    enc[4] = i5;
    enc[5] = opcode2;
    enc
}
/// Encode an RIE-g format instruction: register r1, 4-bit mask m3
/// packed next to r1, and a 16-bit immediate i2.
fn enc_rie_g(opcode: u16, r1: Reg, i2: u16, m3: u8) -> [u8; 6] {
    let mut enc: [u8; 6] = [0; 6];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr(r1) & 0x0f;
    let m3 = m3 & 0x0f;
    enc[0] = opcode1;
    enc[1] = r1 << 4 | m3;
    enc[2..4].copy_from_slice(&i2.to_be_bytes());
    enc[5] = opcode2;
    enc
}
/// Encode an RIL-a format instruction: 12-bit opcode, register r1,
/// 32-bit immediate i2.
fn enc_ril_a(opcode: u16, r1: Reg, i2: u32) -> [u8; 6] {
    let mut enc: [u8; 6] = [0; 6];
    let opcode1 = ((opcode >> 4) & 0xff) as u8;
    let opcode2 = (opcode & 0xf) as u8;
    let r1 = machreg_to_gpr(r1) & 0x0f;
    enc[0] = opcode1;
    enc[1] = r1 << 4 | opcode2;
    enc[2..].copy_from_slice(&i2.to_be_bytes());
    enc
}
/// Encode an RIL-b format instruction: 12-bit opcode, register r1,
/// 32-bit PC-relative offset ri2 (stored in halfword units).
fn enc_ril_b(opcode: u16, r1: Reg, ri2: u32) -> [u8; 6] {
    let mut enc: [u8; 6] = [0; 6];
    let opcode1 = ((opcode >> 4) & 0xff) as u8;
    let opcode2 = (opcode & 0xf) as u8;
    let r1 = machreg_to_gpr(r1) & 0x0f;
    // PC-relative offsets are encoded in units of 2 bytes.
    let ri2 = ri2 >> 1;
    enc[0] = opcode1;
    enc[1] = r1 << 4 | opcode2;
    enc[2..].copy_from_slice(&ri2.to_be_bytes());
    enc
}
/// Encode an RIL-c format instruction: 12-bit opcode, 4-bit mask m1,
/// 32-bit PC-relative offset ri2 (stored in halfword units).
fn enc_ril_c(opcode: u16, m1: u8, ri2: u32) -> [u8; 6] {
    // PC-relative offsets are encoded in units of 2 bytes.
    let [b2, b3, b4, b5] = (ri2 >> 1).to_be_bytes();
    [
        ((opcode >> 4) & 0xff) as u8,
        (m1 & 0x0f) << 4 | (opcode & 0xf) as u8,
        b2,
        b3,
        b4,
        b5,
    ]
}
/// Encode an RR format instruction: 8-bit opcode and two 4-bit
/// register fields in a single 2-byte instruction.
fn enc_rr(opcode: u16, r1: Reg, r2: Reg) -> [u8; 2] {
    let mut enc: [u8; 2] = [0; 2];
    let opcode = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
    enc[0] = opcode;
    enc[1] = r1 << 4 | r2;
    enc
}
/// Encode an RRD format instruction: three FPR operands; r1 occupies
/// the high nibble of byte 2, and r3/r2 share byte 3.
fn enc_rrd(opcode: u16, r1: Reg, r2: Reg, r3: Reg) -> [u8; 4] {
    let mut enc: [u8; 4] = [0; 4];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_fpr(r1) & 0x0f;
    let r2 = machreg_to_fpr(r2) & 0x0f;
    let r3 = machreg_to_fpr(r3) & 0x0f;
    enc[0] = opcode1;
    enc[1] = opcode2;
    enc[2] = r1 << 4;
    enc[3] = r3 << 4 | r2;
    enc
}
/// Encode an RRE format instruction: 16-bit opcode, byte 2 unused,
/// two 4-bit register fields in byte 3.
fn enc_rre(opcode: u16, r1: Reg, r2: Reg) -> [u8; 4] {
    let mut enc: [u8; 4] = [0; 4];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
    enc[0] = opcode1;
    enc[1] = opcode2;
    enc[3] = r1 << 4 | r2;
    enc
}
/// Encode an RRF-a/b format instruction: three registers plus a 4-bit
/// mask m4; r3 and m4 share byte 2, r1 and r2 share byte 3.
fn enc_rrf_ab(opcode: u16, r1: Reg, r2: Reg, r3: Reg, m4: u8) -> [u8; 4] {
    let mut enc: [u8; 4] = [0; 4];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
    let r3 = machreg_to_gpr_or_fpr(r3) & 0x0f;
    let m4 = m4 & 0x0f;
    enc[0] = opcode1;
    enc[1] = opcode2;
    enc[2] = r3 << 4 | m4;
    enc[3] = r1 << 4 | r2;
    enc
}
/// Encode an RRF-c/d/e format instruction: two registers plus two
/// 4-bit masks; m3 and m4 share byte 2, r1 and r2 share byte 3.
fn enc_rrf_cde(opcode: u16, r1: Reg, r2: Reg, m3: u8, m4: u8) -> [u8; 4] {
    let mut enc: [u8; 4] = [0; 4];
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
    let m3 = m3 & 0x0f;
    let m4 = m4 & 0x0f;
    enc[0] = opcode1;
    enc[1] = opcode2;
    enc[2] = m3 << 4 | m4;
    enc[3] = r1 << 4 | r2;
    enc
}
/// Encode an RS format instruction: registers r1 and r3, base b2 with
/// a 12-bit unsigned displacement d2.
fn enc_rs(opcode: u16, r1: Reg, r3: Reg, b2: Reg, d2: u32) -> [u8; 4] {
    let opcode = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let r3 = machreg_to_gpr_or_fpr(r3) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let d2_lo = (d2 & 0xff) as u8;
    let d2_hi = ((d2 >> 8) & 0x0f) as u8;
    let mut enc: [u8; 4] = [0; 4];
    enc[0] = opcode;
    enc[1] = r1 << 4 | r3;
    enc[2] = b2 << 4 | d2_hi;
    enc[3] = d2_lo;
    enc
}
/// Encode an RSY format instruction: like RS but with a 20-bit signed
/// displacement, stored as a low 12-bit part (DL2) and high 8-bit
/// part (DH2).
fn enc_rsy(opcode: u16, r1: Reg, r3: Reg, b2: Reg, d2: u32) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let r3 = machreg_to_gpr_or_fpr(r3) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let dl2_lo = (d2 & 0xff) as u8;
    let dl2_hi = ((d2 >> 8) & 0x0f) as u8;
    let dh2 = ((d2 >> 12) & 0xff) as u8;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = r1 << 4 | r3;
    enc[2] = b2 << 4 | dl2_hi;
    enc[3] = dl2_lo;
    enc[4] = dh2;
    enc[5] = opcode2;
    enc
}
/// Encode an RX format instruction: register r1, base b2, index x2,
/// 12-bit unsigned displacement d2.
fn enc_rx(opcode: u16, r1: Reg, b2: Reg, x2: Reg, d2: u32) -> [u8; 4] {
    let opcode = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let x2 = machreg_to_gpr(x2) & 0x0f;
    let d2_lo = (d2 & 0xff) as u8;
    let d2_hi = ((d2 >> 8) & 0x0f) as u8;
    let mut enc: [u8; 4] = [0; 4];
    enc[0] = opcode;
    enc[1] = r1 << 4 | x2;
    enc[2] = b2 << 4 | d2_hi;
    enc[3] = d2_lo;
    enc
}
/// Encode an RXY format instruction: like RX but with a 20-bit signed
/// displacement split into DL2 (low 12 bits) and DH2 (high 8 bits).
fn enc_rxy(opcode: u16, r1: Reg, b2: Reg, x2: Reg, d2: u32) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let x2 = machreg_to_gpr(x2) & 0x0f;
    let dl2_lo = (d2 & 0xff) as u8;
    let dl2_hi = ((d2 >> 8) & 0x0f) as u8;
    let dh2 = ((d2 >> 12) & 0xff) as u8;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = r1 << 4 | x2;
    enc[2] = b2 << 4 | dl2_hi;
    enc[3] = dl2_lo;
    enc[4] = dh2;
    enc[5] = opcode2;
    enc
}
/// Encode an SI format instruction: 8-bit immediate i2 stored to the
/// memory operand base b1 + 12-bit unsigned displacement d1.
fn enc_si(opcode: u16, b1: Reg, d1: u32, i2: u8) -> [u8; 4] {
    let opcode = (opcode & 0xff) as u8;
    let b1 = machreg_to_gpr(b1) & 0x0f;
    let d1_lo = (d1 & 0xff) as u8;
    let d1_hi = ((d1 >> 8) & 0x0f) as u8;
    let mut enc: [u8; 4] = [0; 4];
    enc[0] = opcode;
    enc[1] = i2;
    enc[2] = b1 << 4 | d1_hi;
    enc[3] = d1_lo;
    enc
}
/// Encode an SIL format instruction: 16-bit immediate i2 stored to the
/// memory operand base b1 + 12-bit unsigned displacement d1.
fn enc_sil(opcode: u16, b1: Reg, d1: u32, i2: i16) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let b1 = machreg_to_gpr(b1) & 0x0f;
    let d1_lo = (d1 & 0xff) as u8;
    let d1_hi = ((d1 >> 8) & 0x0f) as u8;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = opcode2;
    enc[2] = b1 << 4 | d1_hi;
    enc[3] = d1_lo;
    enc[4..].copy_from_slice(&i2.to_be_bytes());
    enc
}
/// Encode an SIY format instruction: like SI but with a 20-bit signed
/// displacement split into DL1 (low 12 bits) and DH1 (high 8 bits).
fn enc_siy(opcode: u16, b1: Reg, d1: u32, i2: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let b1 = machreg_to_gpr(b1) & 0x0f;
    let dl1_lo = (d1 & 0xff) as u8;
    let dl1_hi = ((d1 >> 8) & 0x0f) as u8;
    let dh1 = ((d1 >> 12) & 0xff) as u8;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = i2;
    enc[2] = b1 << 4 | dl1_hi;
    enc[3] = dl1_lo;
    enc[4] = dh1;
    enc[5] = opcode2;
    enc
}
/// Encode a VRI-a format vector instruction: vector v1, 16-bit
/// immediate i2, 4-bit mask m3. The RXB field (extra register-number
/// bit) is computed before v1 is masked to 4 bits.
fn enc_vri_a(opcode: u16, v1: Reg, i2: u16, m3: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), None, None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let m3 = m3 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4;
    enc[2..4].copy_from_slice(&i2.to_be_bytes());
    enc[4] = m3 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRI-b format vector instruction: vector v1, two 8-bit
/// immediates i2/i3, 4-bit mask m4.
fn enc_vri_b(opcode: u16, v1: Reg, i2: u8, i3: u8, m4: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), None, None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let m4 = m4 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4;
    enc[2] = i2;
    enc[3] = i3;
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRI-c format vector instruction: vectors v1 and v3,
/// 16-bit immediate i2, 4-bit mask m4.
fn enc_vri_c(opcode: u16, v1: Reg, i2: u16, v3: Reg, m4: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v3), None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let m4 = m4 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v3;
    enc[2..4].copy_from_slice(&i2.to_be_bytes());
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRI-k format vector instruction: four vector registers
/// plus an 8-bit immediate i5.
fn enc_vri_k(opcode: u16, i5: u8, v1: Reg, v2: Reg, v3: Reg, v4: Reg) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v2), Some(v3), Some(v4));
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v2 = machreg_to_vr(v2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let v4 = machreg_to_vr(v4) & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v2;
    enc[2] = v3 << 4;
    enc[3] = i5;
    enc[4] = v4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRR-a format vector instruction: vectors v1 and v2 plus
/// three 4-bit masks m3/m4/m5.
fn enc_vrr_a(opcode: u16, v1: Reg, v2: Reg, m3: u8, m4: u8, m5: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v2), None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v2 = machreg_to_vr(v2) & 0x0f;
    let m3 = m3 & 0x0f;
    let m4 = m4 & 0x0f;
    let m5 = m5 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v2;
    enc[2] = 0;
    enc[3] = m5 << 4 | m4;
    enc[4] = m3 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRR-b format vector instruction: vectors v1, v2, v3 plus
/// two 4-bit masks m4/m5.
fn enc_vrr_b(opcode: u16, v1: Reg, v2: Reg, v3: Reg, m4: u8, m5: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v2), Some(v3), None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v2 = machreg_to_vr(v2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let m4 = m4 & 0x0f;
    let m5 = m5 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v2;
    enc[2] = v3 << 4;
    enc[3] = m5 << 4;
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRR-c format vector instruction: vectors v1, v2, v3 plus
/// three 4-bit masks m4/m5/m6.
fn enc_vrr_c(opcode: u16, v1: Reg, v2: Reg, v3: Reg, m4: u8, m5: u8, m6: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v2), Some(v3), None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v2 = machreg_to_vr(v2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let m4 = m4 & 0x0f;
    let m5 = m5 & 0x0f;
    let m6 = m6 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v2;
    enc[2] = v3 << 4;
    enc[3] = m6 << 4 | m5;
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRR-d format vector instruction: four vector registers
/// plus masks m5/m6 (m5 shares byte 2 with v3).
fn enc_vrr_d(opcode: u16, v1: Reg, v2: Reg, v3: Reg, v4: Reg, m5: u8, m6: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v2), Some(v3), Some(v4));
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v2 = machreg_to_vr(v2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let v4 = machreg_to_vr(v4) & 0x0f;
    let m5 = m5 & 0x0f;
    let m6 = m6 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v2;
    enc[2] = v3 << 4 | m5;
    enc[3] = m6 << 4;
    enc[4] = v4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRR-e format vector instruction: four vector registers
/// plus masks m5/m6 (note: m6 shares byte 2 with v3; m5 is in byte 3 —
/// the mask placement differs from VRR-d).
fn enc_vrr_e(opcode: u16, v1: Reg, v2: Reg, v3: Reg, v4: Reg, m5: u8, m6: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v2), Some(v3), Some(v4));
    let v1 = machreg_to_vr(v1) & 0x0f;
    let v2 = machreg_to_vr(v2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let v4 = machreg_to_vr(v4) & 0x0f;
    let m5 = m5 & 0x0f;
    let m6 = m6 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v2;
    enc[2] = v3 << 4 | m6;
    enc[3] = m5;
    enc[4] = v4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRR-f format vector instruction: vector v1 plus two GPRs
/// r2 and r3.
fn enc_vrr_f(opcode: u16, v1: Reg, r2: Reg, r3: Reg) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), None, None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let r2 = machreg_to_gpr(r2) & 0x0f;
    let r3 = machreg_to_gpr(r3) & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | r2;
    enc[2] = r3 << 4;
    enc[4] = rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRS-a format vector instruction: vectors v1 and v3, base
/// b2 with 12-bit unsigned displacement d2, 4-bit mask m4.
fn enc_vrs_a(opcode: u16, v1: Reg, b2: Reg, d2: u32, v3: Reg, m4: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), Some(v3), None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let d2_lo = (d2 & 0xff) as u8;
    let d2_hi = ((d2 >> 8) & 0x0f) as u8;
    let m4 = m4 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | v3;
    enc[2] = b2 << 4 | d2_hi;
    enc[3] = d2_lo;
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRS-b format vector instruction: vector v1, GPR r3, base
/// b2 with 12-bit unsigned displacement d2, 4-bit mask m4.
fn enc_vrs_b(opcode: u16, v1: Reg, b2: Reg, d2: u32, r3: Reg, m4: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), None, None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let r3 = machreg_to_gpr(r3) & 0x0f;
    let d2_lo = (d2 & 0xff) as u8;
    let d2_hi = ((d2 >> 8) & 0x0f) as u8;
    let m4 = m4 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | r3;
    enc[2] = b2 << 4 | d2_hi;
    enc[3] = d2_lo;
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRS-c format vector instruction: GPR r1, vector v3, base
/// b2 with 12-bit unsigned displacement d2, 4-bit mask m4.
fn enc_vrs_c(opcode: u16, r1: Reg, b2: Reg, d2: u32, v3: Reg, m4: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(None, Some(v3), None, None);
    let r1 = machreg_to_gpr(r1) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let v3 = machreg_to_vr(v3) & 0x0f;
    let d2_lo = (d2 & 0xff) as u8;
    let d2_hi = ((d2 >> 8) & 0x0f) as u8;
    let m4 = m4 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = r1 << 4 | v3;
    enc[2] = b2 << 4 | d2_hi;
    enc[3] = d2_lo;
    enc[4] = m4 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Encode a VRX format vector instruction: vector v1, base b2, index
/// x2, 12-bit unsigned displacement d2, 4-bit mask m3.
fn enc_vrx(opcode: u16, v1: Reg, b2: Reg, x2: Reg, d2: u32, m3: u8) -> [u8; 6] {
    let opcode1 = ((opcode >> 8) & 0xff) as u8;
    let opcode2 = (opcode & 0xff) as u8;
    let rxb = rxb(Some(v1), None, None, None);
    let v1 = machreg_to_vr(v1) & 0x0f;
    let b2 = machreg_to_gpr(b2) & 0x0f;
    let x2 = machreg_to_gpr(x2) & 0x0f;
    let d2_lo = (d2 & 0xff) as u8;
    let d2_hi = ((d2 >> 8) & 0x0f) as u8;
    let m3 = m3 & 0x0f;
    let mut enc: [u8; 6] = [0; 6];
    enc[0] = opcode1;
    enc[1] = v1 << 4 | x2;
    enc[2] = b2 << 4 | d2_hi;
    enc[3] = d2_lo;
    enc[4] = m3 << 4 | rxb;
    enc[5] = opcode2;
    enc
}
/// Emit the encoded instruction bytes into the sink.
fn put(sink: &mut MachBuffer<Inst>, enc: &[u8]) {
    enc.iter().for_each(|&byte| sink.put1(byte));
}
/// Emit the encoded instruction bytes, recording a trap entry at the
/// offset of the final byte (all bytes except the last are emitted,
/// then the trap, then the last byte).
fn put_with_trap(sink: &mut MachBuffer<Inst>, enc: &[u8], trap_code: TrapCode) {
    let (last, body) = enc.split_last().unwrap();
    for &byte in body {
        sink.put1(byte);
    }
    sink.add_trap(trap_code);
    sink.put1(*last);
}
/// State carried between instructions while emitting a function body.
#[derive(Default, Clone, Debug)]
pub struct EmitState {
    // Current adjustment added to all stack-relative offsets resolved
    // by `mem_finalize`.
    pub(crate) nominal_sp_offset: u32,
    // Adjustment subtracted when addressing the outgoing argument area.
    pub(crate) outgoing_sp_offset: u32,
    // Size of the incoming argument area (0 under the tail call conv;
    // see `MachInstEmitState::new`).
    pub(crate) incoming_args_size: u32,
    // Stack map to attach at the next safepoint, if any.
    user_stack_map: Option<ir::UserStackMap>,
    // Control plane for chaos/fuzzing-driven emission decisions.
    ctrl_plane: ControlPlane,
    // Cached frame layout of the function being emitted.
    frame_layout: FrameLayout,
}
impl MachInstEmitState<Inst> for EmitState {
    fn new(abi: &Callee<S390xMachineDeps>, ctrl_plane: ControlPlane) -> Self {
        // Under the tail calling convention the incoming argument area
        // size is treated as zero; otherwise take it from the frame
        // layout.
        let incoming_args_size = if abi.call_conv() == CallConv::Tail {
            0
        } else {
            abi.frame_layout().incoming_args_size
        };
        EmitState {
            nominal_sp_offset: 0,
            outgoing_sp_offset: 0,
            incoming_args_size,
            user_stack_map: None,
            ctrl_plane,
            frame_layout: abi.frame_layout().clone(),
        }
    }

    // Remember the stack map to attach to the upcoming safepoint.
    fn pre_safepoint(&mut self, user_stack_map: Option<ir::UserStackMap>) {
        self.user_stack_map = user_stack_map;
    }

    fn ctrl_plane_mut(&mut self) -> &mut ControlPlane {
        &mut self.ctrl_plane
    }

    fn take_ctrl_plane(self) -> ControlPlane {
        self.ctrl_plane
    }

    fn frame_layout(&self) -> &FrameLayout {
        &self.frame_layout
    }
}
impl EmitState {
    /// Take (and clear) the pending safepoint stack map.
    fn take_stack_map(&mut self) -> Option<ir::UserStackMap> {
        self.user_stack_map.take()
    }

    /// Reset per-instruction state after an instruction is emitted.
    fn clear_post_insn(&mut self) {
        self.user_stack_map = None;
    }
}
/// Immutable, per-target information needed during emission.
pub struct EmitInfo {
    // s390x-specific ISA feature flags (facility availability).
    isa_flags: s390x_settings::Flags,
}
impl EmitInfo {
    /// Create emission info from the target's ISA flags.
    pub(crate) fn new(isa_flags: s390x_settings::Flags) -> Self {
        Self { isa_flags }
    }
}
impl MachInstEmit for Inst {
    type State = EmitState;
    type Info = EmitInfo;

    fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
        self.emit_with_alloc_consumer(sink, emit_info, state)
    }

    fn pretty_print_inst(&self, state: &mut EmitState) -> String {
        self.print_with_state(state)
    }
}
impl Inst {
fn emit_with_alloc_consumer(
&self,
sink: &mut MachBuffer<Inst>,
emit_info: &EmitInfo,
state: &mut EmitState,
) {
let matches_isa_flags = |iset_requirement: &InstructionSet| -> bool {
match iset_requirement {
InstructionSet::Base => true,
InstructionSet::MIE3 => emit_info.isa_flags.has_mie3(),
InstructionSet::MIE4 => emit_info.isa_flags.has_mie4(),
InstructionSet::VXRS_EXT2 => emit_info.isa_flags.has_vxrs_ext2(),
InstructionSet::VXRS_EXT3 => emit_info.isa_flags.has_vxrs_ext3(),
}
};
let isa_requirements = self.available_in_isa();
if !matches_isa_flags(&isa_requirements) {
panic!(
"Cannot emit inst '{self:?}' for target; failed to match ISA requirements: {isa_requirements:?}"
)
}
match self {
&Inst::AluRRR { alu_op, rd, rn, rm } => {
let (opcode, have_rr) = match alu_op {
ALUOp::Add32 => (0xb9f8, true), ALUOp::Add64 => (0xb9e8, true), ALUOp::AddLogical32 => (0xb9fa, true), ALUOp::AddLogical64 => (0xb9ea, true), ALUOp::Sub32 => (0xb9f9, true), ALUOp::Sub64 => (0xb9e9, true), ALUOp::SubLogical32 => (0xb9fb, true), ALUOp::SubLogical64 => (0xb9eb, true), ALUOp::Mul32 => (0xb9fd, true), ALUOp::Mul64 => (0xb9ed, true), ALUOp::And32 => (0xb9f4, true), ALUOp::And64 => (0xb9e4, true), ALUOp::Orr32 => (0xb9f6, true), ALUOp::Orr64 => (0xb9e6, true), ALUOp::Xor32 => (0xb9f7, true), ALUOp::Xor64 => (0xb9e7, true), ALUOp::NotAnd32 => (0xb974, false), ALUOp::NotAnd64 => (0xb964, false), ALUOp::NotOrr32 => (0xb976, false), ALUOp::NotOrr64 => (0xb966, false), ALUOp::NotXor32 => (0xb977, false), ALUOp::NotXor64 => (0xb967, false), ALUOp::AndNot32 => (0xb9f5, false), ALUOp::AndNot64 => (0xb9e5, false), ALUOp::OrrNot32 => (0xb975, false), ALUOp::OrrNot64 => (0xb965, false), _ => unreachable!(),
};
if have_rr && rd.to_reg() == rn {
let inst = Inst::AluRR {
alu_op,
rd,
ri: rn,
rm,
};
inst.emit(sink, emit_info, state);
} else {
put(sink, &enc_rrf_ab(opcode, rd.to_reg(), rn, rm, 0));
}
}
&Inst::AluRRSImm16 {
alu_op,
rd,
rn,
imm,
} => {
if rd.to_reg() == rn {
let inst = Inst::AluRSImm16 {
alu_op,
rd,
ri: rn,
imm,
};
inst.emit(sink, emit_info, state);
} else {
let opcode = match alu_op {
ALUOp::Add32 => 0xecd8, ALUOp::Add64 => 0xecd9, _ => unreachable!(),
};
put(sink, &enc_rie_d(opcode, rd.to_reg(), rn, imm as u16));
}
}
&Inst::AluRR { alu_op, rd, ri, rm } => {
debug_assert_eq!(rd.to_reg(), ri);
let (opcode, is_rre) = match alu_op {
ALUOp::Add32 => (0x1a, false), ALUOp::Add64 => (0xb908, true), ALUOp::Add64Ext32 => (0xb918, true), ALUOp::AddLogical32 => (0x1e, false), ALUOp::AddLogical64 => (0xb90a, true), ALUOp::AddLogical64Ext32 => (0xb91a, true), ALUOp::Sub32 => (0x1b, false), ALUOp::Sub64 => (0xb909, true), ALUOp::Sub64Ext32 => (0xb919, true), ALUOp::SubLogical32 => (0x1f, false), ALUOp::SubLogical64 => (0xb90b, true), ALUOp::SubLogical64Ext32 => (0xb91b, true), ALUOp::Mul32 => (0xb252, true), ALUOp::Mul64 => (0xb90c, true), ALUOp::Mul64Ext32 => (0xb91c, true), ALUOp::And32 => (0x14, false), ALUOp::And64 => (0xb980, true), ALUOp::Orr32 => (0x16, false), ALUOp::Orr64 => (0xb981, true), ALUOp::Xor32 => (0x17, false), ALUOp::Xor64 => (0xb982, true), _ => unreachable!(),
};
if is_rre {
put(sink, &enc_rre(opcode, rd.to_reg(), rm));
} else {
put(sink, &enc_rr(opcode, rd.to_reg(), rm));
}
}
&Inst::AluRX {
alu_op,
rd,
ri,
ref mem,
} => {
debug_assert_eq!(rd.to_reg(), ri);
let mem = mem.clone();
let (opcode_rx, opcode_rxy) = match alu_op {
ALUOp::Add32 => (Some(0x5a), Some(0xe35a)), ALUOp::Add32Ext16 => (Some(0x4a), Some(0xe37a)), ALUOp::Add64 => (None, Some(0xe308)), ALUOp::Add64Ext16 => (None, Some(0xe338)), ALUOp::Add64Ext32 => (None, Some(0xe318)), ALUOp::AddLogical32 => (Some(0x5e), Some(0xe35e)), ALUOp::AddLogical64 => (None, Some(0xe30a)), ALUOp::AddLogical64Ext32 => (None, Some(0xe31a)), ALUOp::Sub32 => (Some(0x5b), Some(0xe35b)), ALUOp::Sub32Ext16 => (Some(0x4b), Some(0xe37b)), ALUOp::Sub64 => (None, Some(0xe309)), ALUOp::Sub64Ext16 => (None, Some(0xe339)), ALUOp::Sub64Ext32 => (None, Some(0xe319)), ALUOp::SubLogical32 => (Some(0x5f), Some(0xe35f)), ALUOp::SubLogical64 => (None, Some(0xe30b)), ALUOp::SubLogical64Ext32 => (None, Some(0xe31b)), ALUOp::Mul32 => (Some(0x71), Some(0xe351)), ALUOp::Mul32Ext16 => (Some(0x4c), Some(0xe37c)), ALUOp::Mul64 => (None, Some(0xe30c)), ALUOp::Mul64Ext16 => (None, Some(0xe33c)), ALUOp::Mul64Ext32 => (None, Some(0xe31c)), ALUOp::And32 => (Some(0x54), Some(0xe354)), ALUOp::And64 => (None, Some(0xe380)), ALUOp::Orr32 => (Some(0x56), Some(0xe356)), ALUOp::Orr64 => (None, Some(0xe381)), ALUOp::Xor32 => (Some(0x57), Some(0xe357)), ALUOp::Xor64 => (None, Some(0xe382)), _ => unreachable!(),
};
let rd = rd.to_reg();
mem_emit(
rd, &mem, opcode_rx, opcode_rxy, None, true, sink, emit_info, state,
);
}
&Inst::AluRSImm16 {
    alu_op,
    rd,
    ri,
    imm,
} => {
    // ALU with 16-bit signed immediate (RI-a format): rd = rd <op> imm.
    // Destructive instruction, so `ri` must share rd's physical register.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match alu_op {
        ALUOp::Add32 => 0xa7a, ALUOp::Add64 => 0xa7b, ALUOp::Mul32 => 0xa7c, ALUOp::Mul64 => 0xa7d, _ => unreachable!(),
    };
    // `imm as u16` is a bit-cast of the signed immediate into the I2 field.
    put(sink, &enc_ri_a(opcode, rd.to_reg(), imm as u16));
}
&Inst::AluRSImm32 {
    alu_op,
    rd,
    ri,
    imm,
} => {
    // ALU with 32-bit signed immediate (RIL-a format).
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match alu_op {
        ALUOp::Add32 => 0xc29, ALUOp::Add64 => 0xc28, ALUOp::Mul32 => 0xc21, ALUOp::Mul64 => 0xc20, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm as u32));
}
&Inst::AluRUImm32 {
    alu_op,
    rd,
    ri,
    imm,
} => {
    // Logical (unsigned) add/subtract with 32-bit unsigned immediate
    // (RIL-a format).
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match alu_op {
        ALUOp::AddLogical32 => 0xc2b, ALUOp::AddLogical64 => 0xc2a, ALUOp::SubLogical32 => 0xc25, ALUOp::SubLogical64 => 0xc24, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm));
}
&Inst::AluRUImm16Shifted {
    alu_op,
    rd,
    ri,
    imm,
} => {
    // AND/OR with a 16-bit immediate applied to one 16-bit slice of the
    // register; `imm.shift` (in units of 16 bits) selects the slice, and
    // each slice has its own opcode.  The 32-bit ops only touch the low
    // two slices, so they reuse the corresponding 64-bit opcodes.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match (alu_op, imm.shift) {
        (ALUOp::And32, 0) => 0xa57, (ALUOp::And32, 1) => 0xa56, (ALUOp::And64, 0) => 0xa57, (ALUOp::And64, 1) => 0xa56, (ALUOp::And64, 2) => 0xa55, (ALUOp::And64, 3) => 0xa54, (ALUOp::Orr32, 0) => 0xa5b, (ALUOp::Orr32, 1) => 0xa5a, (ALUOp::Orr64, 0) => 0xa5b, (ALUOp::Orr64, 1) => 0xa5a, (ALUOp::Orr64, 2) => 0xa59, (ALUOp::Orr64, 3) => 0xa58, _ => unreachable!(),
    };
    put(sink, &enc_ri_a(opcode, rd.to_reg(), imm.bits));
}
&Inst::AluRUImm32Shifted {
    alu_op,
    rd,
    ri,
    imm,
} => {
    // AND/OR/XOR with a 32-bit immediate applied to the low (shift 0) or
    // high (shift 1) word of the register; opcode selects the word.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match (alu_op, imm.shift) {
        (ALUOp::And32, 0) => 0xc0b, (ALUOp::And64, 0) => 0xc0b, (ALUOp::And64, 1) => 0xc0a, (ALUOp::Orr32, 0) => 0xc0d, (ALUOp::Orr64, 0) => 0xc0d, (ALUOp::Orr64, 1) => 0xc0c, (ALUOp::Xor32, 0) => 0xc07, (ALUOp::Xor64, 0) => 0xc07, (ALUOp::Xor64, 1) => 0xc06, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm.bits));
}
&Inst::SMulWide { rd, rn, rm } => {
    // Widening signed multiply producing a 128-bit result in an even/odd
    // GPR pair.  Only the even (high) register is encoded; the low half
    // (`rd2`) exists solely so the pair constraint can be asserted.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    let opcode = 0xb9ec;
    put(sink, &enc_rrf_ab(opcode, rd1.to_reg(), rn, rm, 0));
}
&Inst::UMulWide { rd, ri, rn } => {
    // Widening unsigned multiply: the second multiplicand is implicitly
    // the odd register of the pair, so `ri` must be allocated to rd.lo.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    debug_assert_eq!(rd2.to_reg(), ri);
    let opcode = 0xb986;
    put(sink, &enc_rre(opcode, rd1.to_reg(), rn));
}
&Inst::SDivMod32 { rd, ri, rn } => {
    // 32-bit signed divide/modulo into an even/odd pair; the dividend is
    // implicitly in the odd register (`ri` == rd.lo).  The instruction can
    // trap on divide-by-zero, so record a trap site at this offset.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    debug_assert_eq!(rd2.to_reg(), ri);
    let opcode = 0xb91d;
    let trap_code = TrapCode::INTEGER_DIVISION_BY_ZERO;
    put_with_trap(sink, &enc_rre(opcode, rd1.to_reg(), rn), trap_code);
}
&Inst::SDivMod64 { rd, ri, rn } => {
    // 64-bit signed divide/modulo; same pair and trap conventions as the
    // 32-bit variant above.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    debug_assert_eq!(rd2.to_reg(), ri);
    let opcode = 0xb90d;
    let trap_code = TrapCode::INTEGER_DIVISION_BY_ZERO;
    put_with_trap(sink, &enc_rre(opcode, rd1.to_reg(), rn), trap_code);
}
&Inst::UDivMod32 { rd, ri, rn } => {
    // 32-bit unsigned divide/modulo: here the input is a full 64-bit value
    // held in both halves of the pair, so `ri` is itself a pair and both
    // halves must be allocated on top of `rd`'s pair.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    let ri1 = ri.hi;
    let ri2 = ri.lo;
    debug_assert_eq!(rd1.to_reg(), ri1);
    debug_assert_eq!(rd2.to_reg(), ri2);
    let opcode = 0xb997;
    let trap_code = TrapCode::INTEGER_DIVISION_BY_ZERO;
    put_with_trap(sink, &enc_rre(opcode, rd1.to_reg(), rn), trap_code);
}
&Inst::UDivMod64 { rd, ri, rn } => {
    // 64-bit unsigned divide/modulo; same double-width input convention
    // as UDivMod32.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    let ri1 = ri.hi;
    let ri2 = ri.lo;
    debug_assert_eq!(rd1.to_reg(), ri1);
    debug_assert_eq!(rd2.to_reg(), ri2);
    let opcode = 0xb987;
    let trap_code = TrapCode::INTEGER_DIVISION_BY_ZERO;
    put_with_trap(sink, &enc_rre(opcode, rd1.to_reg(), rn), trap_code);
}
&Inst::Flogr { rd, rn } => {
    // Find-leftmost-one: writes the bit position and a residue into an
    // even/odd pair; only the even register is encoded.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_regpair!(rd1.to_reg(), rd2.to_reg());
    let opcode = 0xb983;
    put(sink, &enc_rre(opcode, rd1.to_reg(), rn));
}
&Inst::ShiftRR {
    shift_op,
    rd,
    rn,
    shift_imm,
    shift_reg,
} => {
    // Three-operand shift/rotate (RSY format): rd = rn <shift> by
    // (shift_reg + shift_imm).  The immediate goes in the displacement
    // field and the optional register in the base field.
    let opcode = match shift_op {
        ShiftOp::RotL32 => 0xeb1d, ShiftOp::RotL64 => 0xeb1c, ShiftOp::LShL32 => 0xebdf, ShiftOp::LShL64 => 0xeb0d, ShiftOp::LShR32 => 0xebde, ShiftOp::LShR64 => 0xeb0c, ShiftOp::AShR32 => 0xebdc, ShiftOp::AShR64 => 0xeb0a, };
    put(
        sink,
        &enc_rsy(opcode, rd.to_reg(), rn, shift_reg, shift_imm.into()),
    );
}
&Inst::RxSBG {
    op,
    rd,
    ri,
    rn,
    start_bit,
    end_bit,
    rotate_amt,
} => {
    // Rotate-then-<op>-selected-bits (RIE-f format): rotate rn left by
    // rotate_amt, combine the selected bit range [start_bit, end_bit] into
    // rd.  Destructive: `ri` must share rd's physical register.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match op {
        RxSBGOp::Insert => 0xec59, RxSBGOp::And => 0xec54, RxSBGOp::Or => 0xec56, RxSBGOp::Xor => 0xec57, };
    put(
        sink,
        &enc_rie_f(
            opcode,
            rd.to_reg(),
            rn,
            start_bit,
            end_bit,
            // Rotate amount is taken modulo 64.
            (rotate_amt as u8) & 63,
        ),
    );
}
&Inst::RxSBGTest {
    op,
    rd,
    rn,
    start_bit,
    end_bit,
    rotate_amt,
} => {
    // Test-only variant of the above: setting the top bit of the I3 field
    // (start_bit | 0x80) makes the operation set the condition code
    // without writing the result back — note `rd` is a plain Reg here,
    // not a Writable.  Insert has no test form, hence unreachable.
    let opcode = match op {
        RxSBGOp::And => 0xec54, RxSBGOp::Or => 0xec56, RxSBGOp::Xor => 0xec57, _ => unreachable!(),
    };
    put(
        sink,
        &enc_rie_f(
            opcode,
            rd,
            rn,
            start_bit | 0x80,
            end_bit,
            (rotate_amt as u8) & 63,
        ),
    );
}
&Inst::UnaryRR { op, rd, rn } => {
    // One-operand register ops: rd = op(rn).  2-byte RR format for the
    // legacy 32-bit ops, 4-byte RRE for the rest.  PopcntByte/PopcntReg
    // share one opcode and differ only in the M3 modifier (0 = per-byte
    // counts, 8 = full-register count).
    match op {
        UnaryOp::Abs32 => {
            let opcode = 0x10;
            put(sink, &enc_rr(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Abs64 => {
            let opcode = 0xb900;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Abs64Ext32 => {
            let opcode = 0xb910;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Neg32 => {
            let opcode = 0x13;
            put(sink, &enc_rr(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Neg64 => {
            let opcode = 0xb903;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Neg64Ext32 => {
            let opcode = 0xb913;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::PopcntByte => {
            let opcode = 0xb9e1;
            put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 0, 0));
        }
        UnaryOp::PopcntReg => {
            let opcode = 0xb9e1;
            put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 8, 0));
        }
        UnaryOp::BSwap32 => {
            let opcode = 0xb91f;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::BSwap64 => {
            let opcode = 0xb90f;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Clz64 => {
            let opcode = 0xb968;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
        UnaryOp::Ctz64 => {
            let opcode = 0xb969;
            put(sink, &enc_rre(opcode, rd.to_reg(), rn));
        }
    }
}
&Inst::Extend {
    rd,
    rn,
    signed,
    from_bits,
    to_bits,
} => {
    // Register sign-/zero-extension.  1-bit sources map onto the 8-bit
    // sign-extend opcodes regardless of signedness: the value is 0 or 1,
    // so the result is identical either way.
    let opcode = match (signed, from_bits, to_bits) {
        (_, 1, 32) => 0xb926, (_, 1, 64) => 0xb906, (false, 8, 32) => 0xb994, (false, 8, 64) => 0xb984, (true, 8, 32) => 0xb926, (true, 8, 64) => 0xb906, (false, 16, 32) => 0xb995, (false, 16, 64) => 0xb985, (true, 16, 32) => 0xb927, (true, 16, 64) => 0xb907, (false, 32, 64) => 0xb916, (true, 32, 64) => 0xb914, _ => panic!(
            "Unsupported extend combination: signed = {signed}, from_bits = {from_bits}, to_bits = {to_bits}"
        ),
    };
    put(sink, &enc_rre(opcode, rd.to_reg(), rn));
}
&Inst::CmpRR { op, rn, rm } => {
    // Register-register compare, setting the condition code.  The legacy
    // 32-bit compares are 2-byte RR instructions; the rest are RRE.
    let (opcode, is_rre) = match op {
        CmpOp::CmpS32 => (0x19, false), CmpOp::CmpS64 => (0xb920, true), CmpOp::CmpS64Ext32 => (0xb930, true), CmpOp::CmpL32 => (0x15, false), CmpOp::CmpL64 => (0xb921, true), CmpOp::CmpL64Ext32 => (0xb931, true), _ => unreachable!(),
    };
    if is_rre {
        put(sink, &enc_rre(opcode, rn, rm));
    } else {
        put(sink, &enc_rr(opcode, rn, rm));
    }
}
&Inst::CmpRX { op, rn, ref mem } => {
    // Register-memory compare.  Three candidate encodings per op: RX
    // (12-bit disp), RXY (20-bit disp), and RIL (PC-relative); mem_emit
    // chooses among whichever are available for the finalized address.
    let mem = mem.clone();
    let (opcode_rx, opcode_rxy, opcode_ril) = match op {
        CmpOp::CmpS32 => (Some(0x59), Some(0xe359), Some(0xc6d)), CmpOp::CmpS32Ext16 => (Some(0x49), Some(0xe379), Some(0xc65)), CmpOp::CmpS64 => (None, Some(0xe320), Some(0xc68)), CmpOp::CmpS64Ext16 => (None, Some(0xe334), Some(0xc64)), CmpOp::CmpS64Ext32 => (None, Some(0xe330), Some(0xc6c)), CmpOp::CmpL32 => (Some(0x55), Some(0xe355), Some(0xc6f)), CmpOp::CmpL32Ext16 => (None, None, Some(0xc67)), CmpOp::CmpL64 => (None, Some(0xe321), Some(0xc6a)), CmpOp::CmpL64Ext16 => (None, None, Some(0xc66)), CmpOp::CmpL64Ext32 => (None, Some(0xe331), Some(0xc6e)), };
    mem_emit(
        rn, &mem, opcode_rx, opcode_rxy, opcode_ril, true, sink, emit_info, state,
    );
}
&Inst::CmpRSImm16 { op, rn, imm } => {
    // Compare against a 16-bit signed immediate (RI-a format).
    let opcode = match op {
        CmpOp::CmpS32 => 0xa7e, CmpOp::CmpS64 => 0xa7f, _ => unreachable!(),
    };
    put(sink, &enc_ri_a(opcode, rn, imm as u16));
}
&Inst::CmpRSImm32 { op, rn, imm } => {
    // Compare against a 32-bit signed immediate (RIL-a format).
    let opcode = match op {
        CmpOp::CmpS32 => 0xc2d, CmpOp::CmpS64 => 0xc2c, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rn, imm as u32));
}
&Inst::CmpRUImm32 { op, rn, imm } => {
    // Logical compare against a 32-bit unsigned immediate (RIL-a format).
    let opcode = match op {
        CmpOp::CmpL32 => 0xc2f, CmpOp::CmpL64 => 0xc2e, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rn, imm));
}
&Inst::CmpTrapRR {
    op,
    rn,
    rm,
    cond,
    trap_code,
} => {
    // Compare-and-trap: traps with `trap_code` if rn <op-compare> rm
    // satisfies `cond` (mask in the M3 field).
    let opcode = match op {
        CmpOp::CmpS32 => 0xb972, CmpOp::CmpS64 => 0xb960, CmpOp::CmpL32 => 0xb973, CmpOp::CmpL64 => 0xb961, _ => unreachable!(),
    };
    put_with_trap(
        sink,
        &enc_rrf_cde(opcode, rn, rm, cond.bits(), 0),
        trap_code,
    );
}
&Inst::CmpTrapRSImm16 {
    op,
    rn,
    imm,
    cond,
    trap_code,
} => {
    // Compare-and-trap against a 16-bit signed immediate (RIE-a format).
    let opcode = match op {
        CmpOp::CmpS32 => 0xec72, CmpOp::CmpS64 => 0xec70, _ => unreachable!(),
    };
    put_with_trap(
        sink,
        &enc_rie_a(opcode, rn, imm as u16, cond.bits()),
        trap_code,
    );
}
&Inst::CmpTrapRUImm16 {
    op,
    rn,
    imm,
    cond,
    trap_code,
} => {
    // Logical compare-and-trap against a 16-bit unsigned immediate.
    let opcode = match op {
        CmpOp::CmpL32 => 0xec73, CmpOp::CmpL64 => 0xec71, _ => unreachable!(),
    };
    put_with_trap(sink, &enc_rie_a(opcode, rn, imm, cond.bits()), trap_code);
}
&Inst::AtomicRmw {
    alu_op,
    rd,
    rn,
    ref mem,
} => {
    // Atomic load-and-<op>: rd receives the old memory value while
    // [mem] is updated with old <op> rn.  Only the 6-byte RSY forms
    // exist, hence the `None` short-form opcode.
    let mem = mem.clone();
    let opcode = match alu_op {
        ALUOp::Add32 => 0xebf8, ALUOp::Add64 => 0xebe8, ALUOp::AddLogical32 => 0xebfa, ALUOp::AddLogical64 => 0xebea, ALUOp::And32 => 0xebf4, ALUOp::And64 => 0xebe4, ALUOp::Orr32 => 0xebf6, ALUOp::Orr64 => 0xebe6, ALUOp::Xor32 => 0xebf7, ALUOp::Xor64 => 0xebe7, _ => unreachable!(),
    };
    let rd = rd.to_reg();
    mem_rs_emit(
        rd,
        rn,
        &mem,
        None,
        Some(opcode),
        true,
        sink,
        emit_info,
        state,
    );
}
&Inst::Loop { ref body, cond } => {
    // Pseudo-instruction: emit `body` between a loop-top label and a done
    // label.  Any CondBreak inside the body becomes a conditional branch
    // (opcode 0xc04, RIL-format relative branch) to the done label; the
    // loop closes with a conditional branch on `cond` back to the top.
    // Branch targets are emitted as 0 and patched via BranchRIL fixups.
    let loop_label = sink.get_label();
    let done_label = sink.get_label();
    sink.bind_label(loop_label, &mut state.ctrl_plane);
    // NOTE(review): `(&body).into_iter()` is equivalent to `body.iter()`.
    for inst in (&body).into_iter() {
        match &inst {
            &Inst::CondBreak { cond } => {
                let opcode = 0xc04;
                sink.use_label_at_offset(
                    sink.cur_offset(),
                    done_label,
                    LabelUse::BranchRIL,
                );
                put(sink, &enc_ril_c(opcode, cond.bits(), 0));
            }
            _ => inst.emit_with_alloc_consumer(sink, emit_info, state),
        };
    }
    let opcode = 0xc04;
    sink.use_label_at_offset(sink.cur_offset(), loop_label, LabelUse::BranchRIL);
    put(sink, &enc_ril_c(opcode, cond.bits(), 0));
    sink.bind_label(done_label, &mut state.ctrl_plane);
}
// CondBreak is only meaningful inside a Loop body (handled above).
&Inst::CondBreak { .. } => unreachable!(),
&Inst::AtomicCas32 {
    rd,
    ri,
    rn,
    ref mem,
}
| &Inst::AtomicCas64 {
    rd,
    ri,
    rn,
    ref mem,
} => {
    // Compare-and-swap: rd holds the expected value on input (hence the
    // `ri` == rd constraint) and the observed value on output; rn is the
    // replacement.  32-bit CAS has both RS and RSY forms; 64-bit only RSY.
    debug_assert_eq!(rd.to_reg(), ri);
    let mem = mem.clone();
    let (opcode_rs, opcode_rsy) = match self {
        &Inst::AtomicCas32 { .. } => (Some(0xba), Some(0xeb14)), &Inst::AtomicCas64 { .. } => (None, Some(0xeb30)), _ => unreachable!(),
    };
    let rd = rd.to_reg();
    mem_rs_emit(
        rd, rn, &mem, opcode_rs, opcode_rsy, true, sink, emit_info, state,
    );
}
&Inst::Fence => {
    // Memory barrier (2-byte E-format instruction 0x07e0).
    put(sink, &enc_e(0x07e0));
}
// Scalar loads, including zero-/sign-extending and byte-reversed
// variants.  All share one emission path; the instruction variant picks
// the opcode triple (RX short form, RXY long-displacement form,
// RIL PC-relative form; `None` where the ISA has no such encoding).
&Inst::Load32 { rd, ref mem }
| &Inst::Load32ZExt8 { rd, ref mem }
| &Inst::Load32SExt8 { rd, ref mem }
| &Inst::Load32ZExt16 { rd, ref mem }
| &Inst::Load32SExt16 { rd, ref mem }
| &Inst::Load64 { rd, ref mem }
| &Inst::Load64ZExt8 { rd, ref mem }
| &Inst::Load64SExt8 { rd, ref mem }
| &Inst::Load64ZExt16 { rd, ref mem }
| &Inst::Load64SExt16 { rd, ref mem }
| &Inst::Load64ZExt32 { rd, ref mem }
| &Inst::Load64SExt32 { rd, ref mem }
| &Inst::LoadRev16 { rd, ref mem }
| &Inst::LoadRev32 { rd, ref mem }
| &Inst::LoadRev64 { rd, ref mem } => {
    let mem = mem.clone();
    let (opcode_rx, opcode_rxy, opcode_ril) = match self {
        &Inst::Load32 { .. } => (Some(0x58), Some(0xe358), Some(0xc4d)), &Inst::Load32ZExt8 { .. } => (None, Some(0xe394), None), &Inst::Load32SExt8 { .. } => (None, Some(0xe376), None), &Inst::Load32ZExt16 { .. } => (None, Some(0xe395), Some(0xc42)), &Inst::Load32SExt16 { .. } => (Some(0x48), Some(0xe378), Some(0xc45)), &Inst::Load64 { .. } => (None, Some(0xe304), Some(0xc48)), &Inst::Load64ZExt8 { .. } => (None, Some(0xe390), None), &Inst::Load64SExt8 { .. } => (None, Some(0xe377), None), &Inst::Load64ZExt16 { .. } => (None, Some(0xe391), Some(0xc46)), &Inst::Load64SExt16 { .. } => (None, Some(0xe315), Some(0xc44)), &Inst::Load64ZExt32 { .. } => (None, Some(0xe316), Some(0xc4e)), &Inst::Load64SExt32 { .. } => (None, Some(0xe314), Some(0xc4c)), &Inst::LoadRev16 { .. } => (None, Some(0xe31f), None), &Inst::LoadRev32 { .. } => (None, Some(0xe31e), None), &Inst::LoadRev64 { .. } => (None, Some(0xe30f), None), _ => unreachable!(),
    };
    let rd = rd.to_reg();
    mem_emit(
        rd, &mem, opcode_rx, opcode_rxy, opcode_ril, true, sink, emit_info, state,
    );
}
// Scalar stores (plain and byte-reversed); same opcode-triple scheme as
// the loads above.
&Inst::Store8 { rd, ref mem }
| &Inst::Store16 { rd, ref mem }
| &Inst::Store32 { rd, ref mem }
| &Inst::Store64 { rd, ref mem }
| &Inst::StoreRev16 { rd, ref mem }
| &Inst::StoreRev32 { rd, ref mem }
| &Inst::StoreRev64 { rd, ref mem } => {
    let mem = mem.clone();
    let (opcode_rx, opcode_rxy, opcode_ril) = match self {
        &Inst::Store8 { .. } => (Some(0x42), Some(0xe372), None), &Inst::Store16 { .. } => (Some(0x40), Some(0xe370), Some(0xc47)), &Inst::Store32 { .. } => (Some(0x50), Some(0xe350), Some(0xc4f)), &Inst::Store64 { .. } => (None, Some(0xe324), Some(0xc4b)), &Inst::StoreRev16 { .. } => (None, Some(0xe33f), None), &Inst::StoreRev32 { .. } => (None, Some(0xe33e), None), &Inst::StoreRev64 { .. } => (None, Some(0xe32f), None), _ => unreachable!(),
    };
    mem_emit(
        rd, &mem, opcode_rx, opcode_rxy, opcode_ril, true, sink, emit_info, state,
    );
}
&Inst::StoreImm8 { imm, ref mem } => {
    // Store an 8-bit immediate to memory (SI / long-displacement SIY).
    let mem = mem.clone();
    let opcode_si = 0x92;
    let opcode_siy = 0xeb52;
    mem_imm8_emit(
        imm, &mem, opcode_si, opcode_siy, true, sink, emit_info, state,
    );
}
&Inst::StoreImm16 { imm, ref mem }
| &Inst::StoreImm32SExt16 { imm, ref mem }
| &Inst::StoreImm64SExt16 { imm, ref mem } => {
    // Store a 16-bit signed immediate, optionally sign-extended to a
    // 32- or 64-bit memory operand (SIL format).
    let mem = mem.clone();
    let opcode = match self {
        &Inst::StoreImm16 { .. } => 0xe544, &Inst::StoreImm32SExt16 { .. } => 0xe54c, &Inst::StoreImm64SExt16 { .. } => 0xe548, _ => unreachable!(),
    };
    mem_imm16_emit(imm, &mem, opcode, true, sink, emit_info, state);
}
&Inst::LoadMultiple64 { rt, rt2, ref mem } => {
    // Load a consecutive range of 64-bit registers rt..=rt2 from memory
    // (RSY form only).
    let mem = mem.clone();
    let opcode = 0xeb04;
    let rt = rt.to_reg();
    let rt2 = rt2.to_reg();
    mem_rs_emit(
        rt,
        rt2,
        &mem,
        None,
        Some(opcode),
        true,
        sink,
        emit_info,
        state,
    );
}
&Inst::StoreMultiple64 { rt, rt2, ref mem } => {
    // Store a consecutive range of 64-bit registers rt..=rt2 to memory
    // (RSY form only).
    let mem = mem.clone();
    let opcode = 0xeb24;
    mem_rs_emit(
        rt,
        rt2,
        &mem,
        None,
        Some(opcode),
        true,
        sink,
        emit_info,
        state,
    );
}
&Inst::LoadAddr { rd, ref mem } => {
    // Compute the effective address of `mem` into rd (no memory access);
    // RX/RXY/RIL (PC-relative) encodings are all available.
    // NOTE(review): the final flag passed to mem_emit is `false` here,
    // unlike the `true` used by the load/store arms — presumably it marks
    // whether the instruction actually accesses memory; confirm against
    // mem_emit's signature.
    let mem = mem.clone();
    let opcode_rx = Some(0x41);
    let opcode_rxy = Some(0xe371);
    let opcode_ril = Some(0xc00);
    let rd = rd.to_reg();
    mem_emit(
        rd, &mem, opcode_rx, opcode_rxy, opcode_ril, false, sink, emit_info, state,
    );
}
&Inst::Mov64 { rd, rm } => {
    // 64-bit register move (RRE).
    let opcode = 0xb904;
    put(sink, &enc_rre(opcode, rd.to_reg(), rm));
}
&Inst::MovPReg { rd, rm } => {
    // Move from a fixed physical register: lower to a plain Mov64.
    Inst::Mov64 { rd, rm: rm.into() }.emit(sink, emit_info, state);
}
&Inst::Mov32 { rd, rm } => {
    // 32-bit register move (2-byte RR).
    let opcode = 0x18;
    put(sink, &enc_rr(opcode, rd.to_reg(), rm));
}
&Inst::Mov32Imm { rd, imm } => {
    // Load a full 32-bit immediate into the low word (RIL-a).
    let opcode = 0xc09;
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm));
}
&Inst::Mov32SImm16 { rd, imm } => {
    // Load a sign-extended 16-bit immediate, 32-bit result (RI-a).
    let opcode = 0xa78;
    put(sink, &enc_ri_a(opcode, rd.to_reg(), imm as u16));
}
&Inst::Mov64SImm16 { rd, imm } => {
    // Load a sign-extended 16-bit immediate, 64-bit result (RI-a).
    let opcode = 0xa79;
    put(sink, &enc_ri_a(opcode, rd.to_reg(), imm as u16));
}
&Inst::Mov64SImm32 { rd, imm } => {
    // Load a sign-extended 32-bit immediate, 64-bit result (RIL-a).
    let opcode = 0xc01;
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm as u32));
}
&Inst::CMov32 { rd, cond, ri, rm } => {
    // 32-bit conditional move: rd = rm if `cond` holds, else rd keeps its
    // prior value — hence the `ri` == rd tie.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = 0xb9f2;
    put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rm, cond.bits(), 0));
}
&Inst::CMov64 { rd, cond, ri, rm } => {
    // 64-bit conditional move; same convention as CMov32.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = 0xb9e2;
    put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rm, cond.bits(), 0));
}
&Inst::CMov32SImm16 { rd, cond, ri, imm } => {
    // Conditional load of a 16-bit signed immediate (RIE-g format).
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = 0xec42;
    put(
        sink,
        &enc_rie_g(opcode, rd.to_reg(), imm as u16, cond.bits()),
    );
}
&Inst::CMov64SImm16 { rd, cond, ri, imm } => {
    // 64-bit variant of the conditional immediate load above.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = 0xec46;
    put(
        sink,
        &enc_rie_g(opcode, rd.to_reg(), imm as u16, cond.bits()),
    );
}
&Inst::Mov64UImm16Shifted { rd, imm } => {
    // Load a 16-bit immediate into one of the four 16-bit register
    // slices (others zeroed); `imm.shift` selects the slice via opcode.
    let opcode = match imm.shift {
        0 => 0xa5f, 1 => 0xa5e, 2 => 0xa5d, 3 => 0xa5c, _ => unreachable!(),
    };
    put(sink, &enc_ri_a(opcode, rd.to_reg(), imm.bits));
}
&Inst::Mov64UImm32Shifted { rd, imm } => {
    // Load a 32-bit immediate into the low (shift 0) or high (shift 1)
    // word, zeroing the other.
    let opcode = match imm.shift {
        0 => 0xc0f, 1 => 0xc0e, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm.bits));
}
&Inst::Insert64UImm16Shifted { rd, ri, imm } => {
    // Insert a 16-bit immediate into one 16-bit slice, preserving the
    // rest of the register (hence the `ri` == rd tie).
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match imm.shift {
        0 => 0xa53, 1 => 0xa52, 2 => 0xa51, 3 => 0xa50, _ => unreachable!(),
    };
    put(sink, &enc_ri_a(opcode, rd.to_reg(), imm.bits));
}
&Inst::Insert64UImm32Shifted { rd, ri, imm } => {
    // Insert a 32-bit immediate into the low or high word, preserving
    // the other half.
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = match imm.shift {
        0 => 0xc09, 1 => 0xc08, _ => unreachable!(),
    };
    put(sink, &enc_ril_a(opcode, rd.to_reg(), imm.bits));
}
&Inst::LoadAR { rd, ar } => {
    // Copy access register `ar` into a GPR (opcode 0xb24f); `gpr(ar)`
    // maps the AR number into the register-field encoding.
    let opcode = 0xb24f;
    put(sink, &enc_rre(opcode, rd.to_reg(), gpr(ar)));
}
&Inst::InsertAR { rd, ri, ar } => {
    // Same instruction as LoadAR, but modeled as an insert into the low
    // word of `rd` (hence the `ri` == rd tie at the IR level).
    debug_assert_eq!(rd.to_reg(), ri);
    let opcode = 0xb24f;
    put(sink, &enc_rre(opcode, rd.to_reg(), gpr(ar)));
}
&Inst::LoadSymbolReloc {
    rd,
    ref symbol_reloc,
} => {
    // Materialize a relocated 8-byte symbol value: BRAS branches 12 bytes
    // forward (over the 4-byte BRAS itself plus the 8-byte literal) while
    // leaving the literal's address in the scratch register; the literal
    // is emitted as zero with a relocation attached, then loaded into rd.
    let reg = writable_spilltmp_reg().to_reg();
    put(sink, &enc_ri_b(OPCODE_BRAS, reg, 12));
    let (reloc, name, offset) = match &**symbol_reloc {
        SymbolReloc::Absolute { name, offset } => (Reloc::Abs8, name, *offset),
        SymbolReloc::TlsGd { name } => (Reloc::S390xTlsGd64, name, 0),
    };
    sink.add_reloc(reloc, name, offset);
    sink.put8(0);
    let inst = Inst::Load64 {
        rd,
        mem: MemArg::reg(reg, MemFlags::trusted()),
    };
    inst.emit(sink, emit_info, state);
}
&Inst::FpuMove32 { rd, rn } => {
    // 32-bit FP move: use the short legacy FPR-to-FPR form when both
    // registers live in the FPR bank, otherwise the 6-byte vector VLR.
    if is_fpr(rd.to_reg()) && is_fpr(rn) {
        let opcode = 0x38;
        put(sink, &enc_rr(opcode, rd.to_reg(), rn));
    } else {
        put(sink, &enc_vrr_a(OPCODE_VLR, rd.to_reg(), rn, 0, 0, 0));
    }
}
&Inst::FpuMove64 { rd, rn } => {
    // 64-bit FP move; same FPR-vs-vector selection as FpuMove32.
    if is_fpr(rd.to_reg()) && is_fpr(rn) {
        put(sink, &enc_rr(OPCODE_LDR, rd.to_reg(), rn));
    } else {
        put(sink, &enc_vrr_a(OPCODE_VLR, rd.to_reg(), rn, 0, 0, 0));
    }
}
&Inst::FpuCMov32 { rd, cond, ri, rm } => {
    // Conditional FP move built from a branch-over: a conditional branch
    // on the inverted condition skips the following move.  The skip
    // distance is the branch (4 bytes) plus the move: 2 bytes for the RR
    // FPR form, 6 bytes for VLR.
    debug_assert_eq!(rd.to_reg(), ri);
    if is_fpr(rd.to_reg()) && is_fpr(rm) {
        put(sink, &enc_ri_c(OPCODE_BCR, cond.invert().bits(), 4 + 2));
        let opcode = 0x38;
        put(sink, &enc_rr(opcode, rd.to_reg(), rm));
    } else {
        put(sink, &enc_ri_c(OPCODE_BCR, cond.invert().bits(), 4 + 6));
        put(sink, &enc_vrr_a(OPCODE_VLR, rd.to_reg(), rm, 0, 0, 0));
    }
}
&Inst::FpuCMov64 { rd, cond, ri, rm } => {
    // 64-bit variant of the branch-over conditional FP move above.
    debug_assert_eq!(rd.to_reg(), ri);
    if is_fpr(rd.to_reg()) && is_fpr(rm) {
        put(sink, &enc_ri_c(OPCODE_BCR, cond.invert().bits(), 4 + 2));
        put(sink, &enc_rr(OPCODE_LDR, rd.to_reg(), rm));
    } else {
        put(sink, &enc_ri_c(OPCODE_BCR, cond.invert().bits(), 4 + 6));
        put(sink, &enc_vrr_a(OPCODE_VLR, rd.to_reg(), rm, 0, 0, 0));
    }
}
&Inst::FpuRR { fpu_op, rd, rn } => {
    // One-operand FP op.  The table gives the vector (VRR-a) opcode and
    // its element-size / single-element / operation modifiers, plus an
    // optional legacy scalar opcode; the legacy form is used only when it
    // exists, the op is scalar (m4 == 8), and both regs are FPRs.
    let (opcode, m3, m4, m5, opcode_fpr) = match fpu_op {
        FPUOp1::Abs32 => (0xe7cc, 2, 8, 2, Some(0xb300)), FPUOp1::Abs64 => (0xe7cc, 3, 8, 2, Some(0xb310)), FPUOp1::Abs128 => (0xe7cc, 4, 8, 2, None), FPUOp1::Abs32x4 => (0xe7cc, 2, 0, 2, None), FPUOp1::Abs64x2 => (0xe7cc, 3, 0, 2, None), FPUOp1::Neg32 => (0xe7cc, 2, 8, 0, Some(0xb303)), FPUOp1::Neg64 => (0xe7cc, 3, 8, 0, Some(0xb313)), FPUOp1::Neg128 => (0xe7cc, 4, 8, 0, None), FPUOp1::Neg32x4 => (0xe7cc, 2, 0, 0, None), FPUOp1::Neg64x2 => (0xe7cc, 3, 0, 0, None), FPUOp1::NegAbs32 => (0xe7cc, 2, 8, 1, Some(0xb301)), FPUOp1::NegAbs64 => (0xe7cc, 3, 8, 1, Some(0xb311)), FPUOp1::NegAbs128 => (0xe7cc, 4, 8, 1, None), FPUOp1::NegAbs32x4 => (0xe7cc, 2, 0, 1, None), FPUOp1::NegAbs64x2 => (0xe7cc, 3, 0, 1, None), FPUOp1::Sqrt32 => (0xe7ce, 2, 8, 0, Some(0xb314)), FPUOp1::Sqrt64 => (0xe7ce, 3, 8, 0, Some(0xb315)), FPUOp1::Sqrt128 => (0xe7ce, 4, 8, 0, None), FPUOp1::Sqrt32x4 => (0xe7ce, 2, 0, 0, None), FPUOp1::Sqrt64x2 => (0xe7ce, 3, 0, 0, None), FPUOp1::Cvt32To64 => (0xe7c4, 2, 8, 0, Some(0xb304)), FPUOp1::Cvt32x4To64x2 => (0xe7c4, 2, 0, 0, None), FPUOp1::Cvt64To128 => (0xe7c4, 3, 8, 0, None), };
    if m4 == 8 && opcode_fpr.is_some() && is_fpr(rd.to_reg()) && is_fpr(rn) {
        put(sink, &enc_rre(opcode_fpr.unwrap(), rd.to_reg(), rn));
    } else {
        put(sink, &enc_vrr_a(opcode, rd.to_reg(), rn, m3, m4, m5));
    }
}
&Inst::FpuRRR { fpu_op, rd, rn, rm } => {
    // Two-operand FP op (VRR-c) with optional legacy scalar fallback.
    // The legacy RRE forms are destructive (result overwrites the first
    // source), so they are only usable when rd == rn in addition to the
    // scalar (m5 == 8) and both-FPR conditions.
    let (opcode, m4, m5, m6, opcode_fpr) = match fpu_op {
        FPUOp2::Add32 => (0xe7e3, 2, 8, 0, Some(0xb30a)), FPUOp2::Add64 => (0xe7e3, 3, 8, 0, Some(0xb31a)), FPUOp2::Add128 => (0xe7e3, 4, 8, 0, None), FPUOp2::Add32x4 => (0xe7e3, 2, 0, 0, None), FPUOp2::Add64x2 => (0xe7e3, 3, 0, 0, None), FPUOp2::Sub32 => (0xe7e2, 2, 8, 0, Some(0xb30b)), FPUOp2::Sub64 => (0xe7e2, 3, 8, 0, Some(0xb31b)), FPUOp2::Sub128 => (0xe7e2, 4, 8, 0, None), FPUOp2::Sub32x4 => (0xe7e2, 2, 0, 0, None), FPUOp2::Sub64x2 => (0xe7e2, 3, 0, 0, None), FPUOp2::Mul32 => (0xe7e7, 2, 8, 0, Some(0xb317)), FPUOp2::Mul64 => (0xe7e7, 3, 8, 0, Some(0xb31c)), FPUOp2::Mul128 => (0xe7e7, 4, 8, 0, None), FPUOp2::Mul32x4 => (0xe7e7, 2, 0, 0, None), FPUOp2::Mul64x2 => (0xe7e7, 3, 0, 0, None), FPUOp2::Div32 => (0xe7e5, 2, 8, 0, Some(0xb30d)), FPUOp2::Div64 => (0xe7e5, 3, 8, 0, Some(0xb31d)), FPUOp2::Div128 => (0xe7e5, 4, 8, 0, None), FPUOp2::Div32x4 => (0xe7e5, 2, 0, 0, None), FPUOp2::Div64x2 => (0xe7e5, 3, 0, 0, None), FPUOp2::Max32 => (0xe7ef, 2, 8, 1, None), FPUOp2::Max64 => (0xe7ef, 3, 8, 1, None), FPUOp2::Max128 => (0xe7ef, 4, 8, 1, None), FPUOp2::Max32x4 => (0xe7ef, 2, 0, 1, None), FPUOp2::Max64x2 => (0xe7ef, 3, 0, 1, None), FPUOp2::Min32 => (0xe7ee, 2, 8, 1, None), FPUOp2::Min64 => (0xe7ee, 3, 8, 1, None), FPUOp2::Min128 => (0xe7ee, 4, 8, 1, None), FPUOp2::Min32x4 => (0xe7ee, 2, 0, 1, None), FPUOp2::Min64x2 => (0xe7ee, 3, 0, 1, None), FPUOp2::MaxPseudo32 => (0xe7ef, 2, 8, 3, None), FPUOp2::MaxPseudo64 => (0xe7ef, 3, 8, 3, None), FPUOp2::MaxPseudo128 => (0xe7ef, 4, 8, 3, None), FPUOp2::MaxPseudo32x4 => (0xe7ef, 2, 0, 3, None), FPUOp2::MaxPseudo64x2 => (0xe7ef, 3, 0, 3, None), FPUOp2::MinPseudo32 => (0xe7ee, 2, 8, 3, None), FPUOp2::MinPseudo64 => (0xe7ee, 3, 8, 3, None), FPUOp2::MinPseudo128 => (0xe7ee, 4, 8, 3, None), FPUOp2::MinPseudo32x4 => (0xe7ee, 2, 0, 3, None), FPUOp2::MinPseudo64x2 => (0xe7ee, 3, 0, 3, None), };
    if m5 == 8 && opcode_fpr.is_some() && rd.to_reg() == rn && is_fpr(rn) && is_fpr(rm)
    {
        put(sink, &enc_rre(opcode_fpr.unwrap(), rd.to_reg(), rm));
    } else {
        put(sink, &enc_vrr_c(opcode, rd.to_reg(), rn, rm, m4, m5, m6));
    }
}
&Inst::FpuRRRR {
    fpu_op,
    rd,
    rn,
    rm,
    ra,
} => {
    // Fused multiply-add/subtract (VRR-e) with a legacy RRD scalar
    // fallback.  The legacy form accumulates into its first operand, so
    // it requires rd == ra (plus scalar m5 == 8 and all-FPR operands);
    // note the rm/rn operand order in the enc_rrd call.
    let (opcode, m5, m6, opcode_fpr) = match fpu_op {
        FPUOp3::MAdd32 => (0xe78f, 8, 2, Some(0xb30e)), FPUOp3::MAdd64 => (0xe78f, 8, 3, Some(0xb31e)), FPUOp3::MAdd128 => (0xe78f, 8, 4, None), FPUOp3::MAdd32x4 => (0xe78f, 0, 2, None), FPUOp3::MAdd64x2 => (0xe78f, 0, 3, None), FPUOp3::MSub32 => (0xe78e, 8, 2, Some(0xb30f)), FPUOp3::MSub64 => (0xe78e, 8, 3, Some(0xb31f)), FPUOp3::MSub128 => (0xe78e, 8, 4, None), FPUOp3::MSub32x4 => (0xe78e, 0, 2, None), FPUOp3::MSub64x2 => (0xe78e, 0, 3, None), };
    if m5 == 8
        && opcode_fpr.is_some()
        && rd.to_reg() == ra
        && is_fpr(rn)
        && is_fpr(rm)
        && is_fpr(ra)
    {
        put(sink, &enc_rrd(opcode_fpr.unwrap(), rd.to_reg(), rm, rn));
    } else {
        put(sink, &enc_vrr_e(opcode, rd.to_reg(), rn, rm, ra, m5, m6));
    }
}
&Inst::FpuRound { op, mode, rd, rn } => {
    // Rounding / conversion ops that take an explicit rounding-mode
    // modifier.  First translate the IR rounding mode to the hardware
    // mask value, then select the vector or legacy scalar encoding as in
    // the other FPU arms.
    let mode = match mode {
        FpuRoundMode::Current => 0,
        FpuRoundMode::ToNearest => 1,
        FpuRoundMode::ShorterPrecision => 3,
        FpuRoundMode::ToNearestTiesToEven => 4,
        FpuRoundMode::ToZero => 5,
        FpuRoundMode::ToPosInfinity => 6,
        FpuRoundMode::ToNegInfinity => 7,
    };
    let (opcode, m3, m4, opcode_fpr) = match op {
        FpuRoundOp::Cvt64To32 => (0xe7c5, 3, 8, Some(0xb344)), FpuRoundOp::Cvt64x2To32x4 => (0xe7c5, 3, 0, None), FpuRoundOp::Cvt128To64 => (0xe7c5, 4, 8, None), FpuRoundOp::Round32 => (0xe7c7, 2, 8, Some(0xb357)), FpuRoundOp::Round64 => (0xe7c7, 3, 8, Some(0xb35f)), FpuRoundOp::Round128 => (0xe7c7, 4, 8, None), FpuRoundOp::Round32x4 => (0xe7c7, 2, 0, None), FpuRoundOp::Round64x2 => (0xe7c7, 3, 0, None), FpuRoundOp::ToSInt32 => (0xe7c2, 2, 8, None), FpuRoundOp::ToSInt64 => (0xe7c2, 3, 8, None), FpuRoundOp::ToUInt32 => (0xe7c0, 2, 8, None), FpuRoundOp::ToUInt64 => (0xe7c0, 3, 8, None), FpuRoundOp::ToSInt32x4 => (0xe7c2, 2, 0, None), FpuRoundOp::ToSInt64x2 => (0xe7c2, 3, 0, None), FpuRoundOp::ToUInt32x4 => (0xe7c0, 2, 0, None), FpuRoundOp::ToUInt64x2 => (0xe7c0, 3, 0, None), FpuRoundOp::FromSInt32 => (0xe7c3, 2, 8, None), FpuRoundOp::FromSInt64 => (0xe7c3, 3, 8, None), FpuRoundOp::FromUInt32 => (0xe7c1, 2, 8, None), FpuRoundOp::FromUInt64 => (0xe7c1, 3, 8, None), FpuRoundOp::FromSInt32x4 => (0xe7c3, 2, 0, None), FpuRoundOp::FromSInt64x2 => (0xe7c3, 3, 0, None), FpuRoundOp::FromUInt32x4 => (0xe7c1, 2, 0, None), FpuRoundOp::FromUInt64x2 => (0xe7c1, 3, 0, None), };
    if m4 == 8 && opcode_fpr.is_some() && is_fpr(rd.to_reg()) && is_fpr(rn) {
        put(
            sink,
            &enc_rrf_cde(opcode_fpr.unwrap(), rd.to_reg(), rn, mode, 0),
        );
    } else {
        put(sink, &enc_vrr_a(opcode, rd.to_reg(), rn, m3, m4, mode));
    }
}
&Inst::FpuConv128FromInt { op, mode, rd, rn } => {
    // Integer -> 128-bit FP conversion.  The 128-bit result lives in an
    // FP register pair (validated by debug_assert_valid_fp_regpair);
    // only the high register of the pair is encoded.
    let rd1 = rd.hi;
    let rd2 = rd.lo;
    debug_assert_valid_fp_regpair!(rd1.to_reg(), rd2.to_reg());
    // IR rounding mode -> hardware rounding-mode mask.
    let mode = match mode {
        FpuRoundMode::Current => 0,
        FpuRoundMode::ToNearest => 1,
        FpuRoundMode::ShorterPrecision => 3,
        FpuRoundMode::ToNearestTiesToEven => 4,
        FpuRoundMode::ToZero => 5,
        FpuRoundMode::ToPosInfinity => 6,
        FpuRoundMode::ToNegInfinity => 7,
    };
    let opcode = match op {
        FpuConv128Op::SInt32 => 0xb396, FpuConv128Op::SInt64 => 0xb3a6, FpuConv128Op::UInt32 => 0xb392, FpuConv128Op::UInt64 => 0xb3a2, };
    put(sink, &enc_rrf_cde(opcode, rd1.to_reg(), rn, mode, 0));
}
&Inst::FpuConv128ToInt { op, mode, rd, rn } => {
    // 128-bit FP -> integer conversion; the source is the FP register
    // pair, of which only the high register is encoded.
    let rn1 = rn.hi;
    let rn2 = rn.lo;
    debug_assert_valid_fp_regpair!(rn1, rn2);
    let mode = match mode {
        FpuRoundMode::Current => 0,
        FpuRoundMode::ToNearest => 1,
        FpuRoundMode::ShorterPrecision => 3,
        FpuRoundMode::ToNearestTiesToEven => 4,
        FpuRoundMode::ToZero => 5,
        FpuRoundMode::ToPosInfinity => 6,
        FpuRoundMode::ToNegInfinity => 7,
    };
    let opcode = match op {
        FpuConv128Op::SInt32 => 0xb39a, FpuConv128Op::SInt64 => 0xb3aa, FpuConv128Op::UInt32 => 0xb39e, FpuConv128Op::UInt64 => 0xb3ae, };
    put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn1, mode, 0));
}
&Inst::FpuCmp32 { rn, rm } => {
    // 32-bit FP compare (sets condition code): legacy RRE form when both
    // operands are FPRs, otherwise the vector form with m3 = 2 (f32).
    if is_fpr(rn) && is_fpr(rm) {
        let opcode = 0xb309;
        put(sink, &enc_rre(opcode, rn, rm));
    } else {
        let opcode = 0xe7cb;
        put(sink, &enc_vrr_a(opcode, rn, rm, 2, 0, 0));
    }
}
&Inst::FpuCmp64 { rn, rm } => {
    // 64-bit FP compare; m3 = 3 (f64) in the vector form.
    if is_fpr(rn) && is_fpr(rm) {
        let opcode = 0xb319;
        put(sink, &enc_rre(opcode, rn, rm));
    } else {
        let opcode = 0xe7cb;
        put(sink, &enc_vrr_a(opcode, rn, rm, 3, 0, 0));
    }
}
&Inst::FpuCmp128 { rn, rm } => {
    // 128-bit FP compare: vector form only, m3 = 4 (f128).
    let opcode = 0xe7cb;
    put(sink, &enc_vrr_a(opcode, rn, rm, 4, 0, 0));
}
&Inst::VecRRR { op, rd, rn, rm } => {
    // Two-operand vector op (VRR-c).  The table yields the opcode plus
    // the element-size modifier m4 (0 = byte … 4 = quadword; bitwise ops
    // ignore it and use 0).
    let (opcode, m4) = match op {
        VecBinaryOp::Add8x16 => (0xe7f3, 0), VecBinaryOp::Add16x8 => (0xe7f3, 1), VecBinaryOp::Add32x4 => (0xe7f3, 2), VecBinaryOp::Add64x2 => (0xe7f3, 3), VecBinaryOp::Add128 => (0xe7f3, 4), VecBinaryOp::Sub8x16 => (0xe7f7, 0), VecBinaryOp::Sub16x8 => (0xe7f7, 1), VecBinaryOp::Sub32x4 => (0xe7f7, 2), VecBinaryOp::Sub64x2 => (0xe7f7, 3), VecBinaryOp::Sub128 => (0xe7f7, 4), VecBinaryOp::Mul8x16 => (0xe7a2, 0), VecBinaryOp::Mul16x8 => (0xe7a2, 1), VecBinaryOp::Mul32x4 => (0xe7a2, 2), VecBinaryOp::Mul64x2 => (0xe7a2, 3), VecBinaryOp::Mul128 => (0xe7a2, 4), VecBinaryOp::UMulHi8x16 => (0xe7a1, 0), VecBinaryOp::UMulHi16x8 => (0xe7a1, 1), VecBinaryOp::UMulHi32x4 => (0xe7a1, 2), VecBinaryOp::UMulHi64x2 => (0xe7a1, 3), VecBinaryOp::UMulHi128 => (0xe7a1, 4), VecBinaryOp::SMulHi8x16 => (0xe7a3, 0), VecBinaryOp::SMulHi16x8 => (0xe7a3, 1), VecBinaryOp::SMulHi32x4 => (0xe7a3, 2), VecBinaryOp::SMulHi64x2 => (0xe7a3, 3), VecBinaryOp::SMulHi128 => (0xe7a3, 4), VecBinaryOp::UMulEven8x16 => (0xe7a4, 0), VecBinaryOp::UMulEven16x8 => (0xe7a4, 1), VecBinaryOp::UMulEven32x4 => (0xe7a4, 2), VecBinaryOp::UMulEven64x2 => (0xe7a4, 3), VecBinaryOp::SMulEven8x16 => (0xe7a6, 0), VecBinaryOp::SMulEven16x8 => (0xe7a6, 1), VecBinaryOp::SMulEven32x4 => (0xe7a6, 2), VecBinaryOp::SMulEven64x2 => (0xe7a6, 3), VecBinaryOp::UMulOdd8x16 => (0xe7a5, 0), VecBinaryOp::UMulOdd16x8 => (0xe7a5, 1), VecBinaryOp::UMulOdd32x4 => (0xe7a5, 2), VecBinaryOp::UMulOdd64x2 => (0xe7a5, 3), VecBinaryOp::SMulOdd8x16 => (0xe7a7, 0), VecBinaryOp::SMulOdd16x8 => (0xe7a7, 1), VecBinaryOp::SMulOdd32x4 => (0xe7a7, 2), VecBinaryOp::SMulOdd64x2 => (0xe7a7, 3), VecBinaryOp::UDiv32x4 => (0xe7b0, 2), VecBinaryOp::UDiv64x2 => (0xe7b0, 3), VecBinaryOp::UDiv128 => (0xe7b0, 4), VecBinaryOp::SDiv32x4 => (0xe7b2, 2), VecBinaryOp::SDiv64x2 => (0xe7b2, 3), VecBinaryOp::SDiv128 => (0xe7b2, 4), VecBinaryOp::URem32x4 => (0xe7b1, 2), VecBinaryOp::URem64x2 => (0xe7b1, 3), VecBinaryOp::URem128 => (0xe7b1, 4), VecBinaryOp::SRem32x4 => (0xe7b3, 2), 
        VecBinaryOp::SRem64x2 => (0xe7b3, 3), VecBinaryOp::SRem128 => (0xe7b3, 4), VecBinaryOp::UMax8x16 => (0xe7fd, 0), VecBinaryOp::UMax16x8 => (0xe7fd, 1), VecBinaryOp::UMax32x4 => (0xe7fd, 2), VecBinaryOp::UMax64x2 => (0xe7fd, 3), VecBinaryOp::UMax128 => (0xe7fd, 4), VecBinaryOp::SMax8x16 => (0xe7ff, 0), VecBinaryOp::SMax16x8 => (0xe7ff, 1), VecBinaryOp::SMax32x4 => (0xe7ff, 2), VecBinaryOp::SMax64x2 => (0xe7ff, 3), VecBinaryOp::SMax128 => (0xe7ff, 4), VecBinaryOp::UMin8x16 => (0xe7fc, 0), VecBinaryOp::UMin16x8 => (0xe7fc, 1), VecBinaryOp::UMin32x4 => (0xe7fc, 2), VecBinaryOp::UMin64x2 => (0xe7fc, 3), VecBinaryOp::UMin128 => (0xe7fc, 4), VecBinaryOp::SMin8x16 => (0xe7fe, 0), VecBinaryOp::SMin16x8 => (0xe7fe, 1), VecBinaryOp::SMin32x4 => (0xe7fe, 2), VecBinaryOp::SMin64x2 => (0xe7fe, 3), VecBinaryOp::SMin128 => (0xe7fe, 4), VecBinaryOp::UAvg8x16 => (0xe7f0, 0), VecBinaryOp::UAvg16x8 => (0xe7f0, 1), VecBinaryOp::UAvg32x4 => (0xe7f0, 2), VecBinaryOp::UAvg64x2 => (0xe7f0, 3), VecBinaryOp::UAvg128 => (0xe7f0, 4), VecBinaryOp::SAvg8x16 => (0xe7f2, 0), VecBinaryOp::SAvg16x8 => (0xe7f2, 1), VecBinaryOp::SAvg32x4 => (0xe7f2, 2), VecBinaryOp::SAvg64x2 => (0xe7f2, 3), VecBinaryOp::SAvg128 => (0xe7f2, 4), VecBinaryOp::And128 => (0xe768, 0), VecBinaryOp::Orr128 => (0xe76a, 0), VecBinaryOp::Xor128 => (0xe76d, 0), VecBinaryOp::NotAnd128 => (0xe76e, 0), VecBinaryOp::NotOrr128 => (0xe76b, 0), VecBinaryOp::NotXor128 => (0xe76c, 0), VecBinaryOp::AndNot128 => (0xe769, 0), VecBinaryOp::OrrNot128 => (0xe76f, 0), VecBinaryOp::BitPermute128 => (0xe785, 0), VecBinaryOp::LShLByByte128 => (0xe775, 0), VecBinaryOp::LShRByByte128 => (0xe77d, 0), VecBinaryOp::AShRByByte128 => (0xe77f, 0), VecBinaryOp::LShLByBit128 => (0xe774, 0), VecBinaryOp::LShRByBit128 => (0xe77c, 0), VecBinaryOp::AShRByBit128 => (0xe77e, 0), VecBinaryOp::Pack16x8 => (0xe794, 1), VecBinaryOp::Pack32x4 => (0xe794, 2), VecBinaryOp::Pack64x2 => (0xe794, 3), VecBinaryOp::PackUSat16x8 => (0xe795, 1), VecBinaryOp::PackUSat32x4 => 
        (0xe795, 2), VecBinaryOp::PackUSat64x2 => (0xe795, 3), VecBinaryOp::PackSSat16x8 => (0xe797, 1), VecBinaryOp::PackSSat32x4 => (0xe797, 2), VecBinaryOp::PackSSat64x2 => (0xe797, 3), VecBinaryOp::MergeLow8x16 => (0xe760, 0), VecBinaryOp::MergeLow16x8 => (0xe760, 1), VecBinaryOp::MergeLow32x4 => (0xe760, 2), VecBinaryOp::MergeLow64x2 => (0xe760, 3), VecBinaryOp::MergeHigh8x16 => (0xe761, 0), VecBinaryOp::MergeHigh16x8 => (0xe761, 1), VecBinaryOp::MergeHigh32x4 => (0xe761, 2), VecBinaryOp::MergeHigh64x2 => (0xe761, 3), };
    let enc = &enc_vrr_c(opcode, rd.to_reg(), rn, rm, m4, 0, 0);
    // Vector divide/remainder can trap on a zero divisor; those ops get
    // a trap record at this offset, everything else is emitted plainly.
    let may_trap = match op {
        VecBinaryOp::UDiv32x4
        | VecBinaryOp::UDiv64x2
        | VecBinaryOp::UDiv128
        | VecBinaryOp::SDiv32x4
        | VecBinaryOp::SDiv64x2
        | VecBinaryOp::SDiv128
        | VecBinaryOp::URem32x4
        | VecBinaryOp::URem64x2
        | VecBinaryOp::URem128
        | VecBinaryOp::SRem32x4
        | VecBinaryOp::SRem64x2
        | VecBinaryOp::SRem128 => true,
        _ => false,
    };
    if may_trap {
        put_with_trap(sink, enc, TrapCode::INTEGER_DIVISION_BY_ZERO);
    } else {
        put(sink, enc);
    }
}
&Inst::VecRR { op, rd, rn } => {
    // One-operand vector op (VRR-a); m3 is the element-size modifier.
    let (opcode, m3) = match op {
        VecUnaryOp::Abs8x16 => (0xe7df, 0), VecUnaryOp::Abs16x8 => (0xe7df, 1), VecUnaryOp::Abs32x4 => (0xe7df, 2), VecUnaryOp::Abs64x2 => (0xe7df, 3), VecUnaryOp::Abs128 => (0xe7df, 4), VecUnaryOp::Neg8x16 => (0xe7de, 0), VecUnaryOp::Neg16x8 => (0xe7de, 1), VecUnaryOp::Neg32x4 => (0xe7de, 2), VecUnaryOp::Neg64x2 => (0xe7de, 3), VecUnaryOp::Neg128 => (0xe7de, 4), VecUnaryOp::Popcnt8x16 => (0xe750, 0), VecUnaryOp::Popcnt16x8 => (0xe750, 1), VecUnaryOp::Popcnt32x4 => (0xe750, 2), VecUnaryOp::Popcnt64x2 => (0xe750, 3), VecUnaryOp::Clz8x16 => (0xe753, 0), VecUnaryOp::Clz16x8 => (0xe753, 1), VecUnaryOp::Clz32x4 => (0xe753, 2), VecUnaryOp::Clz64x2 => (0xe753, 3), VecUnaryOp::Clz128 => (0xe753, 4), VecUnaryOp::Ctz8x16 => (0xe752, 0), VecUnaryOp::Ctz16x8 => (0xe752, 1), VecUnaryOp::Ctz32x4 => (0xe752, 2), VecUnaryOp::Ctz64x2 => (0xe752, 3), VecUnaryOp::Ctz128 => (0xe752, 4), VecUnaryOp::UnpackULow8x16 => (0xe7d4, 0), VecUnaryOp::UnpackULow16x8 => (0xe7d4, 1), VecUnaryOp::UnpackULow32x4 => (0xe7d4, 2), VecUnaryOp::UnpackULow64x2 => (0xe7d4, 3), VecUnaryOp::UnpackUHigh8x16 => (0xe7d5, 0), VecUnaryOp::UnpackUHigh16x8 => (0xe7d5, 1), VecUnaryOp::UnpackUHigh32x4 => (0xe7d5, 2), VecUnaryOp::UnpackUHigh64x2 => (0xe7d5, 3), VecUnaryOp::UnpackSLow8x16 => (0xe7d6, 0), VecUnaryOp::UnpackSLow16x8 => (0xe7d6, 1), VecUnaryOp::UnpackSLow32x4 => (0xe7d6, 2), VecUnaryOp::UnpackSLow64x2 => (0xe7d6, 3), VecUnaryOp::UnpackSHigh8x16 => (0xe7d7, 0), VecUnaryOp::UnpackSHigh16x8 => (0xe7d7, 1), VecUnaryOp::UnpackSHigh32x4 => (0xe7d7, 2), VecUnaryOp::UnpackSHigh64x2 => (0xe7d7, 3), };
    put(sink, &enc_vrr_a(opcode, rd.to_reg(), rn, m3, 0, 0));
}
&Inst::VecShiftRR {
shift_op,
rd,
rn,
shift_imm,
shift_reg,
} => {
// Element-wise rotate / shift (VRS-a format). m4 is the element-size
// field (0/1/2/3 for 8/16/32/64-bit lanes); the shift-amount operand
// is encoded from shift_reg and shift_imm in the VRS base-register /
// displacement fields.
let (opcode, m4) = match shift_op {
VecShiftOp::RotL8x16 => (0xe733, 0), VecShiftOp::RotL16x8 => (0xe733, 1), VecShiftOp::RotL32x4 => (0xe733, 2), VecShiftOp::RotL64x2 => (0xe733, 3), VecShiftOp::LShL8x16 => (0xe730, 0), VecShiftOp::LShL16x8 => (0xe730, 1), VecShiftOp::LShL32x4 => (0xe730, 2), VecShiftOp::LShL64x2 => (0xe730, 3), VecShiftOp::LShR8x16 => (0xe738, 0), VecShiftOp::LShR16x8 => (0xe738, 1), VecShiftOp::LShR32x4 => (0xe738, 2), VecShiftOp::LShR64x2 => (0xe738, 3), VecShiftOp::AShR8x16 => (0xe73a, 0), VecShiftOp::AShR16x8 => (0xe73a, 1), VecShiftOp::AShR32x4 => (0xe73a, 2), VecShiftOp::AShR64x2 => (0xe73a, 3), };
put(
sink,
&enc_vrs_a(opcode, rd.to_reg(), shift_reg, shift_imm.into(), rn, m4),
);
}
&Inst::VecSelect { rd, rn, rm, ra } => {
    // Vector select: a single VRR-e-format instruction, opcode 0xe78d.
    let enc = enc_vrr_e(0xe78d, rd.to_reg(), rn, rm, ra, 0, 0);
    put(sink, &enc);
}
&Inst::VecPermute { rd, rn, rm, ra } => {
    // Vector permute: a single VRR-e-format instruction, opcode 0xe78c.
    let enc = enc_vrr_e(0xe78c, rd.to_reg(), rn, rm, ra, 0, 0);
    put(sink, &enc);
}
&Inst::VecBlend { rd, rn, rm, ra } => {
    // Vector blend: a single VRR-d-format instruction, opcode 0xe789.
    let enc = enc_vrr_d(0xe789, rd.to_reg(), rn, rm, ra, 0, 0);
    put(sink, &enc);
}
&Inst::VecEvaluate {
    imm,
    rd,
    rn,
    rm,
    ra,
} => {
    // Vector evaluate: a single VRI-k-format instruction (opcode 0xe788);
    // `imm` is passed straight through as the instruction's immediate field.
    let enc = enc_vri_k(0xe788, imm, rd.to_reg(), rn, rm, ra);
    put(sink, &enc);
}
&Inst::VecPermuteDWImm {
    rd,
    rn,
    rm,
    idx1,
    idx2,
} => {
    // Doubleword permute with immediate selectors (VRR-c, opcode 0xe784).
    // The m4 field packs the two selectors: idx1 (mod 2) lands in bit 2,
    // idx2 (mod 2) in bit 0.
    let m4 = ((idx1 & 1) << 2) | (idx2 & 1);
    put(sink, &enc_vrr_c(0xe784, rd.to_reg(), rn, rm, m4, 0, 0));
}
&Inst::VecIntCmp { op, rd, rn, rm } | &Inst::VecIntCmpS { op, rd, rn, rm } => {
// Integer vector compares (VRR-b format): equal / signed-higher /
// unsigned-higher at each lane width. m4 is the element-size field
// (0/1/2/3/4 for 8/16/32/64/128-bit elements).
let (opcode, m4) = match op {
VecIntCmpOp::CmpEq8x16 => (0xe7f8, 0), VecIntCmpOp::CmpEq16x8 => (0xe7f8, 1), VecIntCmpOp::CmpEq32x4 => (0xe7f8, 2), VecIntCmpOp::CmpEq64x2 => (0xe7f8, 3), VecIntCmpOp::CmpEq128 => (0xe7f8, 4), VecIntCmpOp::SCmpHi8x16 => (0xe7fb, 0), VecIntCmpOp::SCmpHi16x8 => (0xe7fb, 1), VecIntCmpOp::SCmpHi32x4 => (0xe7fb, 2), VecIntCmpOp::SCmpHi64x2 => (0xe7fb, 3), VecIntCmpOp::SCmpHi128 => (0xe7fb, 4), VecIntCmpOp::UCmpHi8x16 => (0xe7f9, 0), VecIntCmpOp::UCmpHi16x8 => (0xe7f9, 1), VecIntCmpOp::UCmpHi32x4 => (0xe7f9, 2), VecIntCmpOp::UCmpHi64x2 => (0xe7f9, 3), VecIntCmpOp::UCmpHi128 => (0xe7f9, 4), };
// The ...S variant is encoded identically except that m5 is set to 1.
let m5 = match self {
&Inst::VecIntCmp { .. } => 0,
&Inst::VecIntCmpS { .. } => 1,
_ => unreachable!(),
};
put(sink, &enc_vrr_b(opcode, rd.to_reg(), rn, rm, m4, m5));
}
&Inst::VecFloatCmp { op, rd, rn, rm } | &Inst::VecFloatCmpS { op, rd, rn, rm } => {
    // Floating-point vector compares (VRR-c format). m4 is the element
    // size: 2 for 32-bit lanes, 3 for 64-bit lanes.
    let (opcode, m4) = match op {
        VecFloatCmpOp::CmpEq32x4 => (0xe7e8, 2),
        VecFloatCmpOp::CmpEq64x2 => (0xe7e8, 3),
        VecFloatCmpOp::CmpHi32x4 => (0xe7eb, 2),
        VecFloatCmpOp::CmpHi64x2 => (0xe7eb, 3),
        VecFloatCmpOp::CmpHiEq32x4 => (0xe7ea, 2),
        VecFloatCmpOp::CmpHiEq64x2 => (0xe7ea, 3),
    };
    // The ...S variant is encoded identically except that m6 is set to 1.
    let m6 = if matches!(self, &Inst::VecFloatCmpS { .. }) {
        1
    } else {
        0
    };
    put(sink, &enc_vrr_c(opcode, rd.to_reg(), rn, rm, m4, 0, m6));
}
&Inst::VecIntEltCmp { op, rn, rm } => {
    // Whole-element integer compare (VRR-a format); m3 = 4 selects the
    // 128-bit element size.
    let (opcode, m3) = match op {
        VecIntEltCmpOp::SCmp128 => (0xe7db, 4),
        VecIntEltCmpOp::UCmp128 => (0xe7d9, 4),
    };
    put(sink, &enc_vrr_a(opcode, rn, rm, m3, 0, 0));
}
&Inst::VecInt128SCmpHi { tmp, rn, rm } | &Inst::VecInt128UCmpHi { tmp, rn, rm } => {
// Synthetic 128-bit "higher" comparison, emitted as two steps.
// Step 1: element-compare the doublewords (m3 = 3) of rm and rn
// (note the operand order), signed or unsigned per the variant.
let (opcode, m3) = match self {
&Inst::VecInt128SCmpHi { .. } => (0xe7db, 3), &Inst::VecInt128UCmpHi { .. } => (0xe7d9, 3), _ => unreachable!(),
};
put(sink, &enc_vrr_a(opcode, rm, rn, m3, 0, 0));
// Step 2: if the high doublewords already decided the result
// (CC != 0; mask 7 covers CC 1-3), branch over the following
// compare. The skip distance 4 + 6 is the branch's own length plus
// the 6-byte vector compare.
put(sink, &enc_ri_c(OPCODE_BCR, 7, 4 + 6));
// Otherwise the high halves are equal: compare the low doublewords
// unsigned (VecIntCmpS form) into tmp.
let inst = Inst::VecIntCmpS {
op: VecIntCmpOp::UCmpHi64x2,
rd: tmp,
rn,
rm,
};
inst.emit(sink, emit_info, state);
}
&Inst::VecLoad { rd, ref mem }
| &Inst::VecLoadRev { rd, ref mem }
| &Inst::VecLoadByte16Rev { rd, ref mem }
| &Inst::VecLoadByte32Rev { rd, ref mem }
| &Inst::VecLoadByte64Rev { rd, ref mem }
| &Inst::VecLoadElt16Rev { rd, ref mem }
| &Inst::VecLoadElt32Rev { rd, ref mem }
| &Inst::VecLoadElt64Rev { rd, ref mem } => {
let mem = mem.clone();
// VRX-format 128-bit loads: 0xe706 is the plain load; the 0xe606 /
// 0xe607 forms are the byte-reversed / element-reversed variants,
// with m3 giving the element size (1/2/3 for 16/32/64-bit elements,
// 4 for the whole 128 bits).
let (opcode, m3) = match self {
&Inst::VecLoad { .. } => (0xe706, 0), &Inst::VecLoadRev { .. } => (0xe606, 4), &Inst::VecLoadByte16Rev { .. } => (0xe606, 1), &Inst::VecLoadByte32Rev { .. } => (0xe606, 2), &Inst::VecLoadByte64Rev { .. } => (0xe606, 3), &Inst::VecLoadElt16Rev { .. } => (0xe607, 1), &Inst::VecLoadElt32Rev { .. } => (0xe607, 2), &Inst::VecLoadElt64Rev { .. } => (0xe607, 3), _ => unreachable!(),
};
mem_vrx_emit(rd.to_reg(), &mem, opcode, m3, true, sink, emit_info, state);
}
&Inst::VecStore { rd, ref mem }
| &Inst::VecStoreRev { rd, ref mem }
| &Inst::VecStoreByte16Rev { rd, ref mem }
| &Inst::VecStoreByte32Rev { rd, ref mem }
| &Inst::VecStoreByte64Rev { rd, ref mem }
| &Inst::VecStoreElt16Rev { rd, ref mem }
| &Inst::VecStoreElt32Rev { rd, ref mem }
| &Inst::VecStoreElt64Rev { rd, ref mem } => {
let mem = mem.clone();
// VRX-format 128-bit stores, mirroring the VecLoad family: 0xe70e is
// the plain store; the 0xe60e / 0xe60f forms are byte-reversed /
// element-reversed, with m3 giving the element size (1/2/3 for
// 16/32/64-bit elements, 4 for the whole 128 bits).
let (opcode, m3) = match self {
&Inst::VecStore { .. } => (0xe70e, 0), &Inst::VecStoreRev { .. } => (0xe60e, 4), &Inst::VecStoreByte16Rev { .. } => (0xe60e, 1), &Inst::VecStoreByte32Rev { .. } => (0xe60e, 2), &Inst::VecStoreByte64Rev { .. } => (0xe60e, 3), &Inst::VecStoreElt16Rev { .. } => (0xe60f, 1), &Inst::VecStoreElt32Rev { .. } => (0xe60f, 2), &Inst::VecStoreElt64Rev { .. } => (0xe60f, 3), _ => unreachable!(),
};
mem_vrx_emit(rd, &mem, opcode, m3, true, sink, emit_info, state);
}
&Inst::VecLoadReplicate { size, rd, ref mem }
| &Inst::VecLoadReplicateRev { size, rd, ref mem } => {
let mem = mem.clone();
// Load one element and replicate it across the vector; m3 encodes
// the element size. There is no Rev entry for size 8 (reversing a
// single byte is a no-op), so that combination is expected never to
// be constructed and would hit the unreachable!() arm.
let (opcode, m3) = match (self, size) {
(&Inst::VecLoadReplicate { .. }, 8) => (0xe705, 0), (&Inst::VecLoadReplicate { .. }, 16) => (0xe705, 1), (&Inst::VecLoadReplicate { .. }, 32) => (0xe705, 2), (&Inst::VecLoadReplicate { .. }, 64) => (0xe705, 3), (&Inst::VecLoadReplicateRev { .. }, 16) => (0xe605, 1), (&Inst::VecLoadReplicateRev { .. }, 32) => (0xe605, 2), (&Inst::VecLoadReplicateRev { .. }, 64) => (0xe605, 3), _ => unreachable!(),
};
mem_vrx_emit(rd.to_reg(), &mem, opcode, m3, true, sink, emit_info, state);
}
&Inst::VecMov { rd, rn } => {
    // Plain vector register-to-register copy (VLR).
    let enc = enc_vrr_a(OPCODE_VLR, rd.to_reg(), rn, 0, 0, 0);
    put(sink, &enc);
}
&Inst::VecCMov { rd, cond, ri, rm } => {
// Conditional vector move: rd must alias the input ri. Branch on the
// inverted condition over the VLR, so the copy happens only when
// `cond` holds. Skip distance 4 + 6 = branch length + VLR length.
debug_assert_eq!(rd.to_reg(), ri);
put(sink, &enc_ri_c(OPCODE_BCR, cond.invert().bits(), 4 + 6));
put(sink, &enc_vrr_a(OPCODE_VLR, rd.to_reg(), rm, 0, 0, 0));
}
&Inst::MovToVec128 { rd, rn, rm } => {
    // Single VRR-f-format instruction (opcode 0xe762) assembling the
    // 128-bit destination from the two source registers.
    let enc = enc_vrr_f(0xe762, rd.to_reg(), rn, rm);
    put(sink, &enc);
}
&Inst::VecImmByteMask { rd, mask } => {
    // Materialize an immediate byte mask (VRI-a format, opcode 0xe744).
    put(sink, &enc_vri_a(0xe744, rd.to_reg(), mask, 0));
}
&Inst::VecImmBitMask {
    size,
    rd,
    start_bit,
    end_bit,
} => {
    // Immediate bit-range mask (VRI-b format, opcode 0xe746); m4
    // encodes the element size.
    let m4 = match size {
        8 => 0,
        16 => 1,
        32 => 2,
        64 => 3,
        _ => unreachable!(),
    };
    put(
        sink,
        &enc_vri_b(0xe746, rd.to_reg(), start_bit, end_bit, m4),
    );
}
&Inst::VecImmReplicate { size, rd, imm } => {
    // Replicate an immediate into every lane (VRI-a format, opcode
    // 0xe745); m3 encodes the element size.
    let m3 = match size {
        8 => 0,
        16 => 1,
        32 => 2,
        64 => 3,
        _ => unreachable!(),
    };
    put(sink, &enc_vri_a(0xe745, rd.to_reg(), imm as u16, m3));
}
&Inst::VecLoadLane {
size,
rd,
ri,
ref mem,
lane_imm,
}
| &Inst::VecLoadLaneRev {
size,
rd,
ri,
ref mem,
lane_imm,
} => {
// Load a single lane from memory; rd is a read-modify-write operand
// and must alias the input ri.
debug_assert_eq!(rd.to_reg(), ri);
let mem = mem.clone();
// VRX-format element loads selected by element size; the 0xe60x
// opcodes are the byte-reversed variants (no 8-bit Rev form).
let opcode_vrx = match (self, size) {
(&Inst::VecLoadLane { .. }, 8) => 0xe700, (&Inst::VecLoadLane { .. }, 16) => 0xe701, (&Inst::VecLoadLane { .. }, 32) => 0xe703, (&Inst::VecLoadLane { .. }, 64) => 0xe702, (&Inst::VecLoadLaneRev { .. }, 16) => 0xe601, (&Inst::VecLoadLaneRev { .. }, 32) => 0xe603, (&Inst::VecLoadLaneRev { .. }, 64) => 0xe602, _ => unreachable!(),
};
let rd = rd.to_reg();
mem_vrx_emit(rd, &mem, opcode_vrx, lane_imm, true, sink, emit_info, state);
}
&Inst::VecLoadLaneUndef {
size,
rd,
ref mem,
lane_imm,
}
| &Inst::VecLoadLaneRevUndef {
size,
rd,
ref mem,
lane_imm,
} => {
let mem = mem.clone();
// Like VecLoadLane, but the remaining lanes are left undefined,
// which permits cheaper encodings: a 32/64-bit element loaded into
// lane 0 of an FPR can use the scalar FP load (RX/RXY opcodes)
// instead of the vector element load.
let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) {
(&Inst::VecLoadLaneUndef { .. }, 8) => (0xe700, None, None), (&Inst::VecLoadLaneUndef { .. }, 16) => (0xe701, None, None), (&Inst::VecLoadLaneUndef { .. }, 32) => (0xe703, Some(0x78), Some(0xed64)), (&Inst::VecLoadLaneUndef { .. }, 64) => (0xe702, Some(0x68), Some(0xed65)), (&Inst::VecLoadLaneRevUndef { .. }, 16) => (0xe601, None, None), (&Inst::VecLoadLaneRevUndef { .. }, 32) => (0xe603, None, None), (&Inst::VecLoadLaneRevUndef { .. }, 64) => (0xe602, None, None), _ => unreachable!(),
};
let rd = rd.to_reg();
if lane_imm == 0 && is_fpr(rd) && opcode_rx.is_some() {
mem_emit(
rd, &mem, opcode_rx, opcode_rxy, None, true, sink, emit_info, state,
);
} else {
mem_vrx_emit(rd, &mem, opcode_vrx, lane_imm, true, sink, emit_info, state);
}
}
&Inst::VecStoreLane {
size,
rd,
ref mem,
lane_imm,
}
| &Inst::VecStoreLaneRev {
size,
rd,
ref mem,
lane_imm,
} => {
let mem = mem.clone();
// Store a single lane to memory. Mirroring VecLoadLaneUndef, a
// 32/64-bit store of lane 0 from an FPR can use the scalar FP store
// (RX/RXY opcodes) instead of the vector element store.
let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) {
(&Inst::VecStoreLane { .. }, 8) => (0xe708, None, None), (&Inst::VecStoreLane { .. }, 16) => (0xe709, None, None), (&Inst::VecStoreLane { .. }, 32) => (0xe70b, Some(0x70), Some(0xed66)), (&Inst::VecStoreLane { .. }, 64) => (0xe70a, Some(0x60), Some(0xed67)), (&Inst::VecStoreLaneRev { .. }, 16) => (0xe609, None, None), (&Inst::VecStoreLaneRev { .. }, 32) => (0xe60b, None, None), (&Inst::VecStoreLaneRev { .. }, 64) => (0xe60a, None, None), _ => unreachable!(),
};
if lane_imm == 0 && is_fpr(rd) && opcode_rx.is_some() {
mem_emit(
rd, &mem, opcode_rx, opcode_rxy, None, true, sink, emit_info, state,
);
} else {
mem_vrx_emit(rd, &mem, opcode_vrx, lane_imm, true, sink, emit_info, state);
}
}
&Inst::VecInsertLane {
    size,
    rd,
    ri,
    rn,
    lane_imm,
    lane_reg,
} => {
    // Read-modify-write lane insert: the destination must alias the
    // input register.
    debug_assert_eq!(rd.to_reg(), ri);
    // VRS-b format, opcode 0xe722; m4 encodes the element size.
    let m4 = match size {
        8 => 0,
        16 => 1,
        32 => 2,
        64 => 3,
        _ => unreachable!(),
    };
    put(
        sink,
        &enc_vrs_b(0xe722, rd.to_reg(), lane_reg, lane_imm.into(), rn, m4),
    );
}
&Inst::VecInsertLaneUndef {
size,
rd,
rn,
lane_imm,
lane_reg,
} => {
// Insert a register value into one lane, other lanes undefined. For
// a 64-bit insert into lane 0 of an FPR with no dynamic lane
// register the shorter RRE form (0xb3c1) is available; otherwise use
// the generic VRS-b element insert (0xe722, m4 = element size).
let (opcode_vrs, m4, opcode_rre) = match size {
8 => (0xe722, 0, None), 16 => (0xe722, 1, None), 32 => (0xe722, 2, None), 64 => (0xe722, 3, Some(0xb3c1)), _ => unreachable!(),
};
if opcode_rre.is_some()
&& lane_imm == 0
&& lane_reg == zero_reg()
&& is_fpr(rd.to_reg())
{
put(sink, &enc_rre(opcode_rre.unwrap(), rd.to_reg(), rn));
} else {
put(
sink,
&enc_vrs_b(opcode_vrs, rd.to_reg(), lane_reg, lane_imm.into(), rn, m4),
);
}
}
&Inst::VecExtractLane {
size,
rd,
rn,
lane_imm,
lane_reg,
} => {
// Extract one lane into rd. A 64-bit extract of lane 0 from an FPR
// with no dynamic lane register can use the shorter RRE form
// (0xb3cd); otherwise use the generic VRS-c element extract (0xe721,
// m4 = element size).
let (opcode_vrs, m4, opcode_rre) = match size {
8 => (0xe721, 0, None), 16 => (0xe721, 1, None), 32 => (0xe721, 2, None), 64 => (0xe721, 3, Some(0xb3cd)), _ => unreachable!(),
};
if opcode_rre.is_some() && lane_imm == 0 && lane_reg == zero_reg() && is_fpr(rn) {
put(sink, &enc_rre(opcode_rre.unwrap(), rd.to_reg(), rn));
} else {
put(
sink,
&enc_vrs_c(opcode_vrs, rd.to_reg(), lane_reg, lane_imm.into(), rn, m4),
);
}
}
&Inst::VecInsertLaneImm {
    size,
    rd,
    ri,
    imm,
    lane_imm,
} => {
    // Read-modify-write immediate insert: the destination must alias
    // the input register.
    debug_assert_eq!(rd.to_reg(), ri);
    // VRI-a format; the element size is selected by the opcode itself.
    let opcode = match size {
        8 => 0xe740,
        16 => 0xe741,
        32 => 0xe743,
        64 => 0xe742,
        _ => unreachable!(),
    };
    put(sink, &enc_vri_a(opcode, rd.to_reg(), imm as u16, lane_imm));
}
&Inst::VecInsertLaneImmUndef {
    size,
    rd,
    imm,
    lane_imm,
} => {
    // Immediate lane insert with the remaining lanes left undefined, so
    // no input register is required. VRI-a format; the element size is
    // selected by the opcode itself.
    let opcode = match size {
        8 => 0xe740,
        16 => 0xe741,
        32 => 0xe743,
        64 => 0xe742,
        _ => unreachable!(),
    };
    put(sink, &enc_vri_a(opcode, rd.to_reg(), imm as u16, lane_imm));
}
&Inst::VecReplicateLane {
    size,
    rd,
    rn,
    lane_imm,
} => {
    // Broadcast one lane of rn across all lanes of rd (VRI-c format,
    // opcode 0xe74d); m4 encodes the element size.
    let m4 = match size {
        8 => 0,
        16 => 1,
        32 => 2,
        64 => 3,
        _ => unreachable!(),
    };
    put(sink, &enc_vri_c(0xe74d, rd.to_reg(), lane_imm.into(), rn, m4));
}
&Inst::VecEltRev { lane_count, rd, rn } => {
// Reverse the order of the vector's lanes via a doubleword swap
// followed by progressively finer rotations:
//   - swap the two doublewords (VecPermuteDWImm, idx1 = 1, idx2 = 0);
//   - lane_count >= 4: rotate each doubleword by 32 bits;
//   - lane_count >= 8: rotate each word by 16 bits;
//   - lane_count >= 16: rotate each halfword by 8 bits.
assert!(lane_count >= 2 && lane_count <= 16);
let inst = Inst::VecPermuteDWImm {
rd,
rn,
rm: rn,
idx1: 1,
idx2: 0,
};
inst.emit(sink, emit_info, state);
if lane_count >= 4 {
let inst = Inst::VecShiftRR {
shift_op: VecShiftOp::RotL64x2,
rd,
rn: rd.to_reg(),
shift_imm: 32,
shift_reg: zero_reg(),
};
inst.emit(sink, emit_info, state);
}
if lane_count >= 8 {
let inst = Inst::VecShiftRR {
shift_op: VecShiftOp::RotL32x4,
rd,
rn: rd.to_reg(),
shift_imm: 16,
shift_reg: zero_reg(),
};
inst.emit(sink, emit_info, state);
}
if lane_count >= 16 {
let inst = Inst::VecShiftRR {
shift_op: VecShiftOp::RotL16x8,
rd,
rn: rd.to_reg(),
shift_imm: 8,
shift_reg: zero_reg(),
};
inst.emit(sink, emit_info, state);
}
}
&Inst::AllocateArgs { size } => {
// Grow the stack by `size` bytes for outgoing arguments. Prefer the
// short add-immediate form when the (negated) size fits in 16 bits;
// otherwise fall back to a 32-bit unsigned logical subtract.
let inst = if let Ok(size) = i16::try_from(size) {
Inst::AluRSImm16 {
alu_op: ALUOp::Add64,
rd: writable_stack_reg(),
ri: stack_reg(),
imm: -size,
}
} else {
Inst::AluRUImm32 {
alu_op: ALUOp::SubLogical64,
rd: writable_stack_reg(),
ri: stack_reg(),
imm: size,
}
};
inst.emit(sink, emit_info, state);
// Argument space must not already be outstanding; record the
// adjustment so the Call emission can subtract it back and assert
// the offset returns to zero.
assert_eq!(state.nominal_sp_offset, 0);
state.nominal_sp_offset += size;
}
&Inst::Call { link, ref info } => {
// Remember where the call sequence starts so a patchable call site
// can record its total length below.
let start = sink.cur_offset();
let enc: &[u8] = match &info.dest {
CallInstDest::Direct { name } => {
// RIL-b call: its 32-bit PC-relative immediate starts 2 bytes
// into the instruction, hence the relocation at cur_offset + 2
// (with addend 2).
let offset = sink.cur_offset() + 2;
sink.add_reloc_at_offset(offset, Reloc::S390xPLTRel32Dbl, name, 2);
let opcode = 0xc05; &enc_ril_b(opcode, link.to_reg(), 0)
}
CallInstDest::Indirect { reg } => {
// RR-format call through a register.
let opcode = 0x0d; &enc_rr(opcode, link.to_reg(), *reg)
}
};
// Stack maps are keyed to the return address, i.e. the offset just
// past the call instruction.
if let Some(s) = state.take_stack_map() {
let offset = sink.cur_offset() + enc.len() as u32;
sink.push_user_stack_map(state, offset, s);
}
put(sink, enc);
// Record either an exception-aware try-call site or a plain one.
if let Some(try_call) = info.try_call_info.as_ref() {
sink.add_try_call_site(
Some(state.frame_layout.sp_to_fp()),
try_call.exception_handlers(&state.frame_layout),
);
} else {
sink.add_call_site();
}
// Undo the argument-area adjustment tracked by AllocateArgs.
state.nominal_sp_offset -= info.callee_pop_size;
assert_eq!(state.nominal_sp_offset, 0);
if info.patchable {
sink.add_patchable_call_site(sink.cur_offset() - start);
} else {
// Temporarily expose the outgoing-argument-area size while the
// return-value loads are emitted.
state.outgoing_sp_offset = info.callee_pop_size;
for inst in S390xMachineDeps::gen_retval_loads(info) {
inst.emit(sink, emit_info, state);
}
state.outgoing_sp_offset = 0;
}
// A try-call ends with an explicit jump to its normal-return
// continuation block.
if let Some(try_call) = info.try_call_info.as_ref() {
let jmp = Inst::Jump {
dest: try_call.continuation,
};
jmp.emit(sink, emit_info, state);
}
}
&Inst::ReturnCall { ref info } => {
// Tail call: tear down our own frame first, then jump (rather than
// call) to the destination. gen_tail_epilogue may hand back a
// temporary register holding an indirect destination; if so it
// replaces the original register.
let (epilogue_insts, temp_dest) = S390xMachineDeps::gen_tail_epilogue(
state.frame_layout(),
info.callee_pop_size,
&info.dest,
);
for inst in epilogue_insts {
inst.emit(sink, emit_info, state);
}
let enc: &[u8] = match &info.dest {
CallInstDest::Direct { name } => {
// Unconditional PC-relative jump (RIL-c, mask 15); the reloc
// covers its 32-bit immediate at cur_offset + 2.
let offset = sink.cur_offset() + 2;
sink.add_reloc_at_offset(offset, Reloc::S390xPLTRel32Dbl, name, 2);
let opcode = 0xc04; &enc_ril_c(opcode, 15, 0)
}
CallInstDest::Indirect { reg } => {
// Unconditional branch via register (mask 15).
let opcode = 0x07; &enc_rr(opcode, gpr(15), temp_dest.unwrap_or(*reg))
}
};
put(sink, enc);
sink.add_call_site();
}
&Inst::ElfTlsGetOffset { ref symbol, .. } => {
// Call to the ElfTlsGetOffset libcall with r14 as the link register.
// In addition to the PLT relocation over the call's immediate, a
// S390xTlsGdCall relocation tags the call with the TLS-GD symbol for
// the linker.
let opcode = 0xc05;
let dest = ExternalName::LibCall(LibCall::ElfTlsGetOffset);
let offset = sink.cur_offset() + 2;
sink.add_reloc_at_offset(offset, Reloc::S390xPLTRel32Dbl, &dest, 2);
match &**symbol {
SymbolReloc::TlsGd { name } => sink.add_reloc(Reloc::S390xTlsGdCall, name, 0),
_ => unreachable!(),
}
put(sink, &enc_ril_b(opcode, gpr(14), 0));
sink.add_call_site();
}
// Pseudo-instructions describing argument/return-value registers;
// they emit no machine code.
&Inst::Args { .. } => {}
&Inst::Rets { .. } => {}
&Inst::Ret { link } => {
    // Return: unconditional branch (mask 15) to the address in the
    // link register; RR-format opcode 0x07.
    let enc = enc_rr(0x07, gpr(15), link);
    put(sink, &enc);
}
&Inst::Jump { dest } => {
// Unconditional branch to a label: register the fixup for the
// 32-bit PC-relative field, describe the [off, off + 6) region as an
// unconditional branch so the buffer's branch optimization can
// handle it, then emit a RIL-c branch with mask 15 and a zero offset
// to be patched later.
let off = sink.cur_offset();
sink.use_label_at_offset(off, dest, LabelUse::BranchRIL);
sink.add_uncond_branch(off, off + 6, dest);
let opcode = 0xc04; put(sink, &enc_ril_c(opcode, 15, 0));
}
&Inst::IndirectBr { rn, .. } => {
    // Unconditional branch (mask 15) to the address in rn; RR-format
    // opcode 0x07.
    let enc = enc_rr(0x07, gpr(15), rn);
    put(sink, &enc);
}
&Inst::CondBr {
taken,
not_taken,
cond,
} => {
let opcode = 0xc04;
// Two-way branch: a conditional branch to `taken` followed by an
// unconditional branch to `not_taken`, both 6-byte RIL-c with
// zero offsets to be patched. The inverted encoding is recorded so
// the buffer can rewrite the pair during branch optimization.
let cond_off = sink.cur_offset();
sink.use_label_at_offset(cond_off, taken, LabelUse::BranchRIL);
let inverted = &enc_ril_c(opcode, cond.invert().bits(), 0);
sink.add_cond_branch(cond_off, cond_off + 6, taken, inverted);
put(sink, &enc_ril_c(opcode, cond.bits(), 0));
let uncond_off = sink.cur_offset();
sink.use_label_at_offset(uncond_off, not_taken, LabelUse::BranchRIL);
sink.add_uncond_branch(uncond_off, uncond_off + 6, not_taken);
put(sink, &enc_ril_c(opcode, 15, 0));
}
// Zero-length nop: emits nothing.
&Inst::Nop0 => {}
&Inst::Nop2 => {
// Two-byte nop encoding.
put(sink, &enc_e(0x0707));
}
&Inst::Debugtrap => {
// Debugger breakpoint instruction.
put(sink, &enc_e(0x0001));
}
&Inst::Trap { trap_code } => {
// The all-zero encoding is used as the trap instruction; record the
// trap code at this offset so the fault can be mapped back to its
// cause.
put_with_trap(sink, &enc_e(0x0000), trap_code);
}
&Inst::TrapIf { cond, trap_code } => {
// Encoded as a single conditional branch whose target lies inside
// its own 6-byte encoding: bytes 2-3 are zero (verified below),
// and a zero halfword is exactly the trap encoding (cf. Inst::Trap).
// If `cond` holds, the branch lands on those zero bytes and traps;
// otherwise execution continues past the instruction. The encoding
// is emitted in two parts so the trap record is attached to the
// embedded trap bytes.
let opcode = 0xc04; let enc = &enc_ril_c(opcode, cond.bits(), 2);
debug_assert!(enc.len() == 6 && enc[2] == 0 && enc[3] == 0);
put_with_trap(sink, &enc[0..4], trap_code);
put(sink, &enc[4..6]);
}
&Inst::JTSequence {
ridx,
default,
default_cond,
ref targets,
} => {
let table_label = sink.get_label();
// Out-of-bounds path: branch to the default target when
// default_cond holds.
let opcode = 0xc04; sink.use_label_at_offset(sink.cur_offset(), default, LabelUse::BranchRIL);
put(sink, &enc_ril_c(opcode, default_cond.bits(), 0));
// Load the address of the inline jump table...
let rtmp = writable_spilltmp_reg();
let inst = Inst::LoadAddr {
rd: rtmp,
mem: MemArg::Label {
target: table_label,
},
};
inst.emit(sink, emit_info, state);
// ...add the 32-bit table entry selected by ridx (Add64Ext32
// extends the loaded 32-bit entry to 64 bits; the table access is
// trusted, so it carries no trap metadata)...
let inst = Inst::AluRX {
alu_op: ALUOp::Add64Ext32,
rd: rtmp,
ri: rtmp.to_reg(),
mem: MemArg::reg_plus_reg(rtmp.to_reg(), ridx, MemFlags::trusted()),
};
inst.emit(sink, emit_info, state);
// ...and branch to the computed address.
let inst = Inst::IndirectBr {
rn: rtmp.to_reg(),
targets: vec![],
};
inst.emit(sink, emit_info, state);
// Emit the jump table itself: one 32-bit word per target. Each
// entry is seeded with its own offset into the table (byte-swapped
// so it is stored big-endian) and resolved against its target by
// the PCRel32 label fixup.
sink.bind_label(table_label, &mut state.ctrl_plane);
let jt_off = sink.cur_offset();
for &target in targets.iter() {
let word_off = sink.cur_offset();
let off_into_table = word_off - jt_off;
sink.use_label_at_offset(word_off, target, LabelUse::PCRel32);
sink.put4(off_into_table.swap_bytes());
}
}
Inst::StackProbeLoop {
probe_count,
guard_size,
} => {
// Touch each guard page while growing the stack. The loop body:
// SP -= guard_size, then store a zero byte at the new SP to probe
// the page.
let loop_start = sink.get_label();
sink.bind_label(loop_start, state.ctrl_plane_mut());
let inst = Inst::AluRSImm16 {
alu_op: ALUOp::Add64,
rd: writable_stack_reg(),
ri: stack_reg(),
imm: -guard_size,
};
inst.emit(sink, emit_info, state);
let inst = Inst::StoreImm8 {
imm: 0,
mem: MemArg::reg(stack_reg(), MemFlags::trusted()),
};
inst.emit(sink, emit_info, state);
// Branch-on-count (opcode 0xa76): decrement probe_count and loop
// back while it is nonzero.
let opcode = 0xa76; sink.use_label_at_offset(sink.cur_offset(), loop_start, LabelUse::BranchRI);
put(sink, &enc_ri_b(opcode, probe_count.to_reg(), 0));
}
&Inst::Unwind { ref inst } => {
// Pseudo-instruction: forward unwind metadata to the buffer; no
// machine code is emitted.
sink.add_unwind(inst.clone());
}
// Emits no machine code; exists only for its operand effects.
&Inst::DummyUse { .. } => {}
&Inst::LabelAddress { dst, label } => {
// Materialize the address of `label` into dst via LoadAddr.
let inst = Inst::LoadAddr {
rd: dst,
mem: MemArg::Label { target: label },
};
inst.emit(sink, emit_info, state);
}
// Marker pseudo-instruction; emits no machine code.
&Inst::SequencePoint { .. } => {
}
}
state.clear_post_insn();
}
}