use ::cpu::op::Op;
use ::cpu::types::{Clock, CpuError, CpuState, Memory, MemoryAccess};
#[cfg(feature = "rv32fd")]
use ::softfloat::{self as sf, Sf32, Sf64};
#[cfg(feature = "rv32fd")]
use std::num::FpCategory;
// Per-instruction outcome: Ok(()) to continue, Err on a trap/fault.
type CpuExit = Result<(), CpuError>;
// Direction of a CSR access: Read fills the borrowed destination,
// Write carries the new value.
enum CsrAccess<'a> {
Read(&'a mut u32),
Write(u32),
}
// Interpreter tying together architectural state, a memory bus and a clock.
pub struct Interp<'s, 'm, 'c, M: 'm + Memory, C: 'c + Clock> {
// Architectural registers (x, f, pc, fcsr, reservation, ...).
pub state: &'s mut CpuState,
// Memory bus used for fetch, load and store.
pub mem: &'m mut M,
// Cycle/time/instret source and execution-quota check.
pub clock: &'c mut C,
// Byte size of the instruction currently executing (2 for compressed,
// 4 otherwise); used to compute the link address and next pc.
instsz: u32,
}
impl<'s, 'm, 'c, M: 'm + Memory, C: 'c + Clock> Interp<'s, 'm, 'c, M, C> {
pub fn new(state: &'s mut CpuState, mem: &'m mut M, clock: &'c mut C) -> Self {
    // instsz starts at 4; the fetch stage rewrites it per instruction
    // when the rv32c feature decodes a 2-byte compressed opcode.
    Self { instsz: 4, state, mem, clock }
}
/// Execute instructions until one faults or the clock quota runs out;
/// returns the terminating error and, when known, the offending op.
pub fn run(&mut self) -> (CpuError, Option<Op>) {
    loop {
        match self.step() {
            Ok(_) => continue,
            Err(exit) => return exit,
        }
    }
}
// Fetch, decode and execute a single instruction.
// Returns the executed Op on success, or the error plus the op (when decode
// succeeded) on failure.
pub fn step(&mut self) -> Result<Op, (CpuError, Option<Op>)> {
// Enforce the execution quota before doing any work.
if !self.clock.check_quota() {
return Err((CpuError::QuotaExceeded, None));
}
// Fetch + decode. With rv32c, fetch 16 bits first: if the two low bits
// are 11 the instruction is a full 32-bit one and the upper parcel is
// fetched separately (so a 32-bit op may straddle a 2-byte boundary).
// instsz records the encoded length for pc/link arithmetic.
let op = match {
#[cfg(feature = "rv32c")]
{
let mut instr_lo: u16 = 0;
if !self.mem.access(self.state.pc, MemoryAccess::Exec(&mut instr_lo)) {
return Err((CpuError::IllegalFetch, None));
}
if (instr_lo & 3) == 3 {
let mut instr_hi: u16 = 0;
if !self.mem.access(self.state.pc + 2, MemoryAccess::Exec(&mut instr_hi)) {
return Err((CpuError::IllegalFetch, None));
}
self.instsz = 4;
Op::parse((instr_hi as u32) << 16 | (instr_lo as u32))
} else {
self.instsz = 2;
Op::parse_c(instr_lo)
}
}
#[cfg(not(feature = "rv32c"))]
{
let mut instr: u32 = 0;
if !self.mem.access(self.state.pc, MemoryAccess::Exec(&mut instr)) {
return Err((CpuError::IllegalFetch, None));
}
Op::parse(instr)
}
} {
Some(op) => op,
None => return Err((CpuError::IllegalInstruction, None)),
};
// Dispatch to the per-instruction handler.
let res = match op {
Op::Lui { rd, u_imm } => self.lui(rd, u_imm),
Op::Auipc { rd, u_imm } => self.auipc(rd, u_imm),
Op::Jal { rd, j_imm } => self.jal(rd, j_imm),
Op::Jalr { rd, rs1, i_imm } => self.jalr(rd, rs1, i_imm),
Op::Beq { rs1, rs2, b_imm } => self.beq(rs1, rs2, b_imm),
Op::Bne { rs1, rs2, b_imm } => self.bne(rs1, rs2, b_imm),
Op::Blt { rs1, rs2, b_imm } => self.blt(rs1, rs2, b_imm),
Op::Bge { rs1, rs2, b_imm } => self.bge(rs1, rs2, b_imm),
Op::Bltu { rs1, rs2, b_imm } => self.bltu(rs1, rs2, b_imm),
Op::Bgeu { rs1, rs2, b_imm } => self.bgeu(rs1, rs2, b_imm),
Op::Lb { rd, rs1, i_imm } => self.lb(rd, rs1, i_imm),
Op::Lh { rd, rs1, i_imm } => self.lh(rd, rs1, i_imm),
Op::Lw { rd, rs1, i_imm } => self.lw(rd, rs1, i_imm),
Op::Lbu { rd, rs1, i_imm } => self.lbu(rd, rs1, i_imm),
Op::Lhu { rd, rs1, i_imm } => self.lhu(rd, rs1, i_imm),
Op::Sb { rs1, rs2, s_imm } => self.sb(rs1, rs2, s_imm),
Op::Sh { rs1, rs2, s_imm } => self.sh(rs1, rs2, s_imm),
Op::Sw { rs1, rs2, s_imm } => self.sw(rs1, rs2, s_imm),
Op::Addi { rd, rs1, i_imm } => self.addi(rd, rs1, i_imm),
Op::Slti { rd, rs1, i_imm } => self.slti(rd, rs1, i_imm),
Op::Sltiu { rd, rs1, i_imm } => self.sltiu(rd, rs1, i_imm),
Op::Xori { rd, rs1, i_imm } => self.xori(rd, rs1, i_imm),
Op::Ori { rd, rs1, i_imm } => self.ori(rd, rs1, i_imm),
Op::Andi { rd, rs1, i_imm } => self.andi(rd, rs1, i_imm),
Op::Slli { rd, rs1, shamt } => self.slli(rd, rs1, shamt),
Op::Srli { rd, rs1, shamt } => self.srli(rd, rs1, shamt),
Op::Srai { rd, rs1, shamt } => self.srai(rd, rs1, shamt),
Op::Add { rd, rs1, rs2 } => self.add(rd, rs1, rs2),
Op::Sll { rd, rs1, rs2 } => self.sll(rd, rs1, rs2),
Op::Slt { rd, rs1, rs2 } => self.slt(rd, rs1, rs2),
Op::Sltu { rd, rs1, rs2 } => self.sltu(rd, rs1, rs2),
Op::Xor { rd, rs1, rs2 } => self.xor(rd, rs1, rs2),
Op::Srl { rd, rs1, rs2 } => self.srl(rd, rs1, rs2),
Op::Or { rd, rs1, rs2 } => self.or(rd, rs1, rs2),
Op::And { rd, rs1, rs2 } => self.and(rd, rs1, rs2),
Op::Sub { rd, rs1, rs2 } => self.sub(rd, rs1, rs2),
Op::Sra { rd, rs1, rs2 } => self.sra(rd, rs1, rs2),
Op::Fence { pred, succ } => self.fence(pred, succ),
Op::FenceI => self.fence_i(),
Op::Ecall => self.ecall(),
Op::Ebreak => self.ebreak(),
Op::Csrrw { rd, rs1, csr } => self.csrrw(rd, rs1, csr),
Op::Csrrs { rd, rs1, csr } => self.csrrs(rd, rs1, csr),
Op::Csrrc { rd, rs1, csr } => self.csrrc(rd, rs1, csr),
Op::Csrrwi { rd, zimm, csr } => self.csrrwi(rd, zimm, csr),
Op::Csrrsi { rd, zimm, csr } => self.csrrsi(rd, zimm, csr),
Op::Csrrci { rd, zimm, csr } => self.csrrci(rd, zimm, csr),
Op::Mul { rd, rs1, rs2 } => self.mul(rd, rs1, rs2),
Op::Mulh { rd, rs1, rs2 } => self.mulh(rd, rs1, rs2),
Op::Mulhsu { rd, rs1, rs2 } => self.mulhsu(rd, rs1, rs2),
Op::Mulhu { rd, rs1, rs2 } => self.mulhu(rd, rs1, rs2),
Op::Div { rd, rs1, rs2 } => self.div(rd, rs1, rs2),
Op::Divu { rd, rs1, rs2 } => self.divu(rd, rs1, rs2),
Op::Rem { rd, rs1, rs2 } => self.rem(rd, rs1, rs2),
Op::Remu { rd, rs1, rs2 } => self.remu(rd, rs1, rs2),
Op::LrW { rd, rs1, aq, rl } => self.lr_w(rd, rs1, aq, rl),
Op::ScW { rd, rs1, rs2, aq, rl } => self.sc_w(rd, rs1, rs2, aq, rl),
Op::AmoswapW { rd, rs1, rs2, aq, rl } => self.amoswap_w(rd, rs1, rs2, aq, rl),
Op::AmoaddW { rd, rs1, rs2, aq, rl } => self.amoadd_w(rd, rs1, rs2, aq, rl),
Op::AmoxorW { rd, rs1, rs2, aq, rl } => self.amoxor_w(rd, rs1, rs2, aq, rl),
Op::AmoandW { rd, rs1, rs2, aq, rl } => self.amoand_w(rd, rs1, rs2, aq, rl),
Op::AmoorW { rd, rs1, rs2, aq, rl } => self.amoor_w(rd, rs1, rs2, aq, rl),
Op::AmominW { rd, rs1, rs2, aq, rl } => self.amomin_w(rd, rs1, rs2, aq, rl),
Op::AmomaxW { rd, rs1, rs2, aq, rl } => self.amomax_w(rd, rs1, rs2, aq, rl),
Op::AmominuW { rd, rs1, rs2, aq, rl } => self.amominu_w(rd, rs1, rs2, aq, rl),
Op::AmomaxuW { rd, rs1, rs2, aq, rl } => self.amomaxu_w(rd, rs1, rs2, aq, rl),
Op::Flw { rd, rs1, i_imm } => self.flw(rd, rs1, i_imm),
Op::Fsw { rs1, rs2, s_imm } => self.fsw(rs1, rs2, s_imm),
Op::FmaddS { rd, rs1, rs2, rs3, rm } => self.fmadd_s(rd, rs1, rs2, rs3, rm),
Op::FmsubS { rd, rs1, rs2, rs3, rm } => self.fmsub_s(rd, rs1, rs2, rs3, rm),
Op::FnmsubS { rd, rs1, rs2, rs3, rm } => self.fnmsub_s(rd, rs1, rs2, rs3, rm),
Op::FnmaddS { rd, rs1, rs2, rs3, rm } => self.fnmadd_s(rd, rs1, rs2, rs3, rm),
Op::FaddS { rd, rs1, rs2, rm } => self.fadd_s(rd, rs1, rs2, rm),
Op::FsubS { rd, rs1, rs2, rm } => self.fsub_s(rd, rs1, rs2, rm),
Op::FmulS { rd, rs1, rs2, rm } => self.fmul_s(rd, rs1, rs2, rm),
Op::FdivS { rd, rs1, rs2, rm } => self.fdiv_s(rd, rs1, rs2, rm),
Op::FsqrtS { rd, rs1, rm } => self.fsqrt_s(rd, rs1, rm),
Op::FsgnjS { rd, rs1, rs2 } => self.fsgnj_s(rd, rs1, rs2),
Op::FsgnjnS { rd, rs1, rs2 } => self.fsgnjn_s(rd, rs1, rs2),
Op::FsgnjxS { rd, rs1, rs2 } => self.fsgnjx_s(rd, rs1, rs2),
Op::FminS { rd, rs1, rs2 } => self.fmin_s(rd, rs1, rs2),
Op::FmaxS { rd, rs1, rs2 } => self.fmax_s(rd, rs1, rs2),
Op::FcvtWS { rd, rs1, rm } => self.fcvt_w_s(rd, rs1, rm),
Op::FcvtWuS { rd, rs1, rm } => self.fcvt_wu_s(rd, rs1, rm),
Op::FmvXW { rd, rs1 } => self.fmv_x_w(rd, rs1),
Op::FeqS { rd, rs1, rs2 } => self.feq_s(rd, rs1, rs2),
Op::FltS { rd, rs1, rs2 } => self.flt_s(rd, rs1, rs2),
Op::FleS { rd, rs1, rs2 } => self.fle_s(rd, rs1, rs2),
Op::FclassS { rd, rs1 } => self.fclass_s(rd, rs1),
Op::FcvtSW { rd, rs1, rm } => self.fcvt_s_w(rd, rs1, rm),
Op::FcvtSWu { rd, rs1, rm } => self.fcvt_s_wu(rd, rs1, rm),
Op::FmvWX { rd, rs1 } => self.fmv_w_x(rd, rs1),
Op::Fld { rd, rs1, i_imm } => self.fld(rd, rs1, i_imm),
Op::Fsd { rs1, rs2, s_imm } => self.fsd(rs1, rs2, s_imm),
Op::FmaddD { rd, rs1, rs2, rs3, rm } => self.fmadd_d(rd, rs1, rs2, rs3, rm),
Op::FmsubD { rd, rs1, rs2, rs3, rm } => self.fmsub_d(rd, rs1, rs2, rs3, rm),
Op::FnmsubD { rd, rs1, rs2, rs3, rm } => self.fnmsub_d(rd, rs1, rs2, rs3, rm),
Op::FnmaddD { rd, rs1, rs2, rs3, rm } => self.fnmadd_d(rd, rs1, rs2, rs3, rm),
Op::FaddD { rd, rs1, rs2, rm } => self.fadd_d(rd, rs1, rs2, rm),
Op::FsubD { rd, rs1, rs2, rm } => self.fsub_d(rd, rs1, rs2, rm),
Op::FmulD { rd, rs1, rs2, rm } => self.fmul_d(rd, rs1, rs2, rm),
Op::FdivD { rd, rs1, rs2, rm } => self.fdiv_d(rd, rs1, rs2, rm),
Op::FsqrtD { rd, rs1, rm } => self.fsqrt_d(rd, rs1, rm),
Op::FsgnjD { rd, rs1, rs2 } => self.fsgnj_d(rd, rs1, rs2),
Op::FsgnjnD { rd, rs1, rs2 } => self.fsgnjn_d(rd, rs1, rs2),
Op::FsgnjxD { rd, rs1, rs2 } => self.fsgnjx_d(rd, rs1, rs2),
Op::FminD { rd, rs1, rs2 } => self.fmin_d(rd, rs1, rs2),
Op::FmaxD { rd, rs1, rs2 } => self.fmax_d(rd, rs1, rs2),
Op::FcvtWD { rd, rs1, rm } => self.fcvt_w_d(rd, rs1, rm),
Op::FcvtWuD { rd, rs1, rm } => self.fcvt_wu_d(rd, rs1, rm),
Op::FeqD { rd, rs1, rs2 } => self.feq_d(rd, rs1, rs2),
Op::FltD { rd, rs1, rs2 } => self.flt_d(rd, rs1, rs2),
Op::FleD { rd, rs1, rs2 } => self.fle_d(rd, rs1, rs2),
Op::FclassD { rd, rs1 } => self.fclass_d(rd, rs1),
Op::FcvtDW { rd, rs1, rm } => self.fcvt_d_w(rd, rs1, rm),
Op::FcvtDWu { rd, rs1, rm } => self.fcvt_d_wu(rd, rs1, rm),
Op::FcvtSD { rd, rs1, rm } => self.fcvt_s_d(rd, rs1, rm),
Op::FcvtDS { rd, rs1, rm } => self.fcvt_d_s(rd, rs1, rm),
};
// Advance cycle/instret accounting even when the op faulted.
self.clock.progress(&op);
// Attach the decoded op to any error for diagnostics.
match res {
Ok(_) => Ok(op),
Err(err) => Err((err, Some(op))),
}
}
/// Perform one CSR read or write. Returns false for unimplemented CSRs,
/// which the csr* handlers turn into an illegal-instruction fault.
fn access_csr(&mut self, id: u32, access: CsrAccess) -> bool {
    match id {
        // fflags: accrued exception flags, fcsr bits 4:0.
        0x001 => {
            match access {
                CsrAccess::Read(dest) => *dest = self.state.fcsr & 0x1f,
                CsrAccess::Write(value) => {
                    self.state.fcsr = (self.state.fcsr & 0xffff_ffe0) | (value & 0x1f);
                },
            }
            true
        },
        // frm: dynamic rounding mode, fcsr bits 7:5.
        0x002 => {
            match access {
                CsrAccess::Read(dest) => *dest = (self.state.fcsr & 0xe0) >> 5,
                CsrAccess::Write(value) => {
                    self.state.fcsr = (self.state.fcsr & 0xffff_ff1f) | ((value & 0x7) << 5);
                },
            }
            true
        },
        // fcsr: flags and rounding mode together (low 8 bits).
        0x003 => {
            match access {
                CsrAccess::Read(dest) => *dest = self.state.fcsr & 0xff,
                CsrAccess::Write(value) => {
                    self.state.fcsr = (self.state.fcsr & 0xffff_ff00) | (value & 0xff);
                },
            }
            true
        },
        // cycle/time/instret counters and their *h upper halves.
        // Writes are accepted and silently ignored, as before.
        0xC00 | 0xC80 | 0xC01 | 0xC81 | 0xC02 | 0xC82 => {
            if let CsrAccess::Read(dest) = access {
                // Low 7 bits select the counter; bit 7 selects the half.
                let counter = match id & 0x7f {
                    0x00 => self.clock.read_cycle(),
                    0x01 => self.clock.read_time(),
                    _ => self.clock.read_instret(),
                };
                *dest = if id & 0x80 != 0 {
                    (counter >> 32) as u32
                } else {
                    counter as u32
                };
            }
            true
        },
        _ => false,
    }
}
// LUI: load the decoded upper immediate into rd.
fn lui(&mut self, rd: usize, u_imm: i32) -> CpuExit {
    write_rd!(self, rd, { u_imm as u32 });
    end_op!(self)
}
// AUIPC: rd <- pc + upper immediate (wrapping).
fn auipc(&mut self, rd: usize, u_imm: i32) -> CpuExit {
    write_rd!(self, rd, { self.state.pc.wrapping_add(u_imm as u32) });
    end_op!(self)
}
// JAL: link the address of the following instruction, then jump pc-relative.
fn jal(&mut self, rd: usize, j_imm: i32) -> CpuExit {
    let link = self.state.pc.wrapping_add(self.instsz);
    write_rd!(self, rd, { link });
    end_jump_op!(self, { self.state.pc.wrapping_add(j_imm as u32) })
}
// JALR: indirect jump with link.
// rs1 is read before write_rd! because rd may alias rs1.
fn jalr(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let dst_base = self.state.x[rs1];
    write_rd!(self, rd, {
        self.state.pc.wrapping_add(self.instsz)
    });
    end_jump_op!(self, {
        // The spec requires the least-significant bit of the computed
        // target to be cleared; previously it was left as-is.
        dst_base.wrapping_add(i_imm as u32) & !1
    })
}
// BEQ: branch when rs1 == rs2.
fn beq(&mut self, rs1: usize, rs2: usize, b_imm: i32) -> CpuExit {
    if self.state.x[rs1] != self.state.x[rs2] {
        end_op!(self)
    } else {
        end_branch_op!(self, b_imm)
    }
}
// BNE: branch when rs1 != rs2.
fn bne(&mut self, rs1: usize, rs2: usize, b_imm: i32) -> CpuExit {
    if self.state.x[rs1] == self.state.x[rs2] {
        end_op!(self)
    } else {
        end_branch_op!(self, b_imm)
    }
}
// BLT: branch when rs1 < rs2, signed.
fn blt(&mut self, rs1: usize, rs2: usize, b_imm: i32) -> CpuExit {
    if (self.state.x[rs1] as i32) >= (self.state.x[rs2] as i32) {
        end_op!(self)
    } else {
        end_branch_op!(self, b_imm)
    }
}
// BGE: branch when rs1 >= rs2, signed.
fn bge(&mut self, rs1: usize, rs2: usize, b_imm: i32) -> CpuExit {
    if (self.state.x[rs1] as i32) < (self.state.x[rs2] as i32) {
        end_op!(self)
    } else {
        end_branch_op!(self, b_imm)
    }
}
// BLTU: branch when rs1 < rs2, unsigned.
fn bltu(&mut self, rs1: usize, rs2: usize, b_imm: i32) -> CpuExit {
    if self.state.x[rs1] >= self.state.x[rs2] {
        end_op!(self)
    } else {
        end_branch_op!(self, b_imm)
    }
}
// BGEU: branch when rs1 >= rs2, unsigned.
fn bgeu(&mut self, rs1: usize, rs2: usize, b_imm: i32) -> CpuExit {
    if self.state.x[rs1] < self.state.x[rs2] {
        end_op!(self)
    } else {
        end_branch_op!(self, b_imm)
    }
}
// LB: load a byte and sign-extend it into rd.
fn lb(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut loaded: i8 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut loaded)) {
        end_op!(self, IllegalAccess)
    } else {
        write_rd!(self, rd, { loaded as u32 });
        end_op!(self)
    }
}
// LH: load a halfword and sign-extend it into rd.
fn lh(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut loaded: i16 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut loaded)) {
        end_op!(self, IllegalAccess)
    } else {
        write_rd!(self, rd, { loaded as u32 });
        end_op!(self)
    }
}
// LW: load a full word into rd.
fn lw(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut loaded: u32 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut loaded)) {
        end_op!(self, IllegalAccess)
    } else {
        write_rd!(self, rd, { loaded });
        end_op!(self)
    }
}
// LBU: load a byte and zero-extend it into rd.
fn lbu(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut loaded: u8 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut loaded)) {
        end_op!(self, IllegalAccess)
    } else {
        write_rd!(self, rd, { loaded as u32 });
        end_op!(self)
    }
}
// LHU: load a halfword and zero-extend it into rd.
fn lhu(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut loaded: u16 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut loaded)) {
        end_op!(self, IllegalAccess)
    } else {
        write_rd!(self, rd, { loaded as u32 });
        end_op!(self)
    }
}
// SB: store the low byte of rs2.
fn sb(&mut self, rs1: usize, rs2: usize, s_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(s_imm as u32);
    match self.mem.access(addr, MemoryAccess::Store(self.state.x[rs2] as u8)) {
        true => end_op!(self),
        false => end_op!(self, IllegalAccess),
    }
}
// SH: store the low halfword of rs2.
fn sh(&mut self, rs1: usize, rs2: usize, s_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(s_imm as u32);
    match self.mem.access(addr, MemoryAccess::Store(self.state.x[rs2] as u16)) {
        true => end_op!(self),
        false => end_op!(self, IllegalAccess),
    }
}
// SW: store the full word in rs2.
fn sw(&mut self, rs1: usize, rs2: usize, s_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(s_imm as u32);
    match self.mem.access(addr, MemoryAccess::Store(self.state.x[rs2])) {
        true => end_op!(self),
        false => end_op!(self, IllegalAccess),
    }
}
// ADDI: rd <- rs1 + sext(imm), wrapping.
fn addi(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_add(i_imm as u32) });
    end_op!(self)
}
// SLTI: rd <- 1 if rs1 < imm (signed), else 0.
fn slti(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    write_rd!(self, rd, { ((self.state.x[rs1] as i32) < i_imm) as u32 });
    end_op!(self)
}
// SLTIU: rd <- 1 if rs1 < imm (unsigned compare of the sign-extended imm), else 0.
fn sltiu(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    write_rd!(self, rd, { (self.state.x[rs1] < i_imm as u32) as u32 });
    end_op!(self)
}
// XORI: bitwise exclusive-or with the sign-extended immediate.
fn xori(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1] ^ i_imm as u32 });
    end_op!(self)
}
// ORI: bitwise or with the sign-extended immediate.
fn ori(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1] | i_imm as u32 });
    end_op!(self)
}
// ANDI: bitwise and with the sign-extended immediate.
fn andi(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1] & i_imm as u32 });
    end_op!(self)
}
// SLLI: logical left shift by a constant (wrapping_shl masks shamt mod 32).
fn slli(&mut self, rd: usize, rs1: usize, shamt: u32) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_shl(shamt) });
    end_op!(self)
}
// SRLI: logical right shift by a constant.
fn srli(&mut self, rd: usize, rs1: usize, shamt: u32) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_shr(shamt) });
    end_op!(self)
}
// SRAI: arithmetic (sign-extending) right shift by a constant.
fn srai(&mut self, rd: usize, rs1: usize, shamt: u32) -> CpuExit {
    write_rd!(self, rd, { (self.state.x[rs1] as i32).wrapping_shr(shamt) as u32 });
    end_op!(self)
}
// ADD: rd <- rs1 + rs2, wrapping.
fn add(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_add(self.state.x[rs2]) });
    end_op!(self)
}
// SLL: logical left shift by rs2 (wrapping_shl keeps the low 5 bits).
fn sll(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_shl(self.state.x[rs2]) });
    end_op!(self)
}
// SLT: rd <- 1 if rs1 < rs2 (signed), else 0.
fn slt(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { ((self.state.x[rs1] as i32) < (self.state.x[rs2] as i32)) as u32 });
    end_op!(self)
}
// SLTU: rd <- 1 if rs1 < rs2 (unsigned), else 0.
fn sltu(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { (self.state.x[rs1] < self.state.x[rs2]) as u32 });
    end_op!(self)
}
// XOR: bitwise exclusive-or.
fn xor(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1] ^ self.state.x[rs2] });
    end_op!(self)
}
// SRL: logical right shift by rs2.
fn srl(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_shr(self.state.x[rs2]) });
    end_op!(self)
}
// OR: bitwise or.
fn or(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1] | self.state.x[rs2] });
    end_op!(self)
}
// AND: bitwise and.
fn and(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1] & self.state.x[rs2] });
    end_op!(self)
}
// SUB: rd <- rs1 - rs2, wrapping.
fn sub(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_sub(self.state.x[rs2]) });
    end_op!(self)
}
// SRA: arithmetic (sign-extending) right shift by rs2.
fn sra(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { (self.state.x[rs1] as i32).wrapping_shr(self.state.x[rs2]) as u32 });
    end_op!(self)
}
// FENCE: memory ordering is trivially satisfied by this single-hart,
// in-order interpreter, so it is a no-op.
fn fence(&mut self, _pred: u32, _succ: u32) -> CpuExit {
end_op!(self)
}
// FENCE.I: no instruction cache to flush here, so a no-op.
fn fence_i(&mut self) -> CpuExit {
end_op!(self)
}
// ECALL: surfaced to the embedder as a CpuError::Ecall exit.
fn ecall(&mut self) -> CpuExit {
end_op!(self, Ecall)
}
// EBREAK: surfaced to the embedder as a CpuError::Ebreak exit.
fn ebreak(&mut self) -> CpuExit {
end_op!(self, Ebreak)
}
// CSRRW: atomically swap a CSR with rs1; the old CSR value goes to rd.
// rs1 is read first because rd may alias rs1.
fn csrrw(&mut self, rd: usize, rs1: usize, csr: u32) -> CpuExit {
let new = self.state.x[rs1];
write_rd!(self, rd, {
let mut old: u32 = 0;
// An unknown CSR bails out of the op with IllegalInstruction.
if !self.access_csr(csr, CsrAccess::Read(&mut old)) {
end_op!(self, IllegalInstruction);
}
old
});
if !self.access_csr(csr, CsrAccess::Write(new)) {
end_op!(self, IllegalInstruction);
}
end_op!(self)
}
// CSRRS: rd <- CSR; set the CSR bits selected by rs1.
// Per spec, rs1 == x0 suppresses the write entirely.
fn csrrs(&mut self, rd: usize, rs1: usize, csr: u32) -> CpuExit {
let mask = self.state.x[rs1];
let mut old: u32 = 0;
if !self.access_csr(csr, CsrAccess::Read(&mut old)) {
end_op!(self, IllegalInstruction);
}
write_rd!(self, rd, { old });
if rs1 != 0 && !self.access_csr(csr, CsrAccess::Write(old | mask)) {
end_op!(self, IllegalInstruction);
}
end_op!(self)
}
// CSRRC: rd <- CSR; clear the CSR bits selected by rs1.
// Per spec, rs1 == x0 suppresses the write entirely.
fn csrrc(&mut self, rd: usize, rs1: usize, csr: u32) -> CpuExit {
let mask = self.state.x[rs1];
let mut old: u32 = 0;
if !self.access_csr(csr, CsrAccess::Read(&mut old)) {
end_op!(self, IllegalInstruction);
}
write_rd!(self, rd, { old });
if rs1 != 0 && !self.access_csr(csr, CsrAccess::Write(old & !mask)) {
end_op!(self, IllegalInstruction);
}
end_op!(self)
}
// CSRRWI: like CSRRW but the new value is the 5-bit zimm immediate.
fn csrrwi(&mut self, rd: usize, zimm: u32, csr: u32) -> CpuExit {
write_rd!(self, rd, {
let mut old: u32 = 0;
// An unknown CSR bails out of the op with IllegalInstruction.
if !self.access_csr(csr, CsrAccess::Read(&mut old)) {
end_op!(self, IllegalInstruction);
}
old
});
if !self.access_csr(csr, CsrAccess::Write(zimm)) {
end_op!(self, IllegalInstruction);
}
end_op!(self)
}
// CSRRSI: rd <- CSR; set the CSR bits selected by the zimm immediate.
// Per the Zicsr spec, a zero immediate must suppress the write (mirrors
// the rs1 != 0 guard in csrrs); previously the write always happened.
fn csrrsi(&mut self, rd: usize, zimm: u32, csr: u32) -> CpuExit {
    let mut old: u32 = 0;
    if !self.access_csr(csr, CsrAccess::Read(&mut old)) {
        end_op!(self, IllegalInstruction);
    }
    write_rd!(self, rd, { old });
    if zimm != 0 && !self.access_csr(csr, CsrAccess::Write(old | zimm)) {
        end_op!(self, IllegalInstruction);
    }
    end_op!(self)
}
// CSRRCI: rd <- CSR; clear the CSR bits selected by the zimm immediate.
// Per the Zicsr spec, a zero immediate must suppress the write (mirrors
// the rs1 != 0 guard in csrrc); previously the write always happened.
fn csrrci(&mut self, rd: usize, zimm: u32, csr: u32) -> CpuExit {
    let mut old: u32 = 0;
    if !self.access_csr(csr, CsrAccess::Read(&mut old)) {
        end_op!(self, IllegalInstruction);
    }
    write_rd!(self, rd, { old });
    if zimm != 0 && !self.access_csr(csr, CsrAccess::Write(old & !zimm)) {
        end_op!(self, IllegalInstruction);
    }
    end_op!(self)
}
// MUL: low 32 bits of the product.
fn mul(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, { self.state.x[rs1].wrapping_mul(self.state.x[rs2]) });
    end_op!(self)
}
// MULH: upper 32 bits of the signed x signed 64-bit product.
fn mulh(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let lhs = (self.state.x[rs1] as i32) as i64;
        let rhs = (self.state.x[rs2] as i32) as i64;
        (lhs.wrapping_mul(rhs) >> 32) as u32
    });
    end_op!(self)
}
// MULHSU: upper 32 bits of the signed(rs1) x unsigned(rs2) product.
fn mulhsu(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let lhs = (self.state.x[rs1] as i32) as i64;
        // u32 -> i64 zero-extends, giving the unsigned operand.
        let rhs = self.state.x[rs2] as i64;
        (lhs.wrapping_mul(rhs) >> 32) as u32
    });
    end_op!(self)
}
// MULHU: upper 32 bits of the unsigned x unsigned 64-bit product.
fn mulhu(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let lhs = self.state.x[rs1] as u64;
        let rhs = self.state.x[rs2] as u64;
        (lhs.wrapping_mul(rhs) >> 32) as u32
    });
    end_op!(self)
}
// DIV: signed division. Divide-by-zero yields -1; the MIN / -1 overflow
// wraps to MIN via wrapping_div, both as the M extension requires.
fn div(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let divisor = self.state.x[rs2] as i32;
        match divisor {
            0 => 0xffff_ffff,
            _ => (self.state.x[rs1] as i32).wrapping_div(divisor) as u32,
        }
    });
    end_op!(self)
}
// DIVU: unsigned division; divide-by-zero yields all-ones.
fn divu(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let divisor = self.state.x[rs2];
        match divisor {
            0 => 0xffff_ffff,
            _ => self.state.x[rs1].wrapping_div(divisor),
        }
    });
    end_op!(self)
}
// REM: signed remainder; remainder-by-zero yields the dividend.
fn rem(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let divisor = self.state.x[rs2] as i32;
        match divisor {
            0 => self.state.x[rs1],
            _ => (self.state.x[rs1] as i32).wrapping_rem(divisor) as u32,
        }
    });
    end_op!(self)
}
// REMU: unsigned remainder; remainder-by-zero yields the dividend.
fn remu(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    write_rd!(self, rd, {
        let divisor = self.state.x[rs2];
        match divisor {
            0 => self.state.x[rs1],
            _ => self.state.x[rs1].wrapping_rem(divisor),
        }
    });
    end_op!(self)
}
// LR.W: load a word and register a reservation on its address.
fn lr_w(&mut self, rd: usize, rs1: usize, _aq: bool, _rl: bool) -> CpuExit {
    let addr = self.state.x[rs1];
    let mut loaded: u32 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut loaded)) {
        end_op!(self, IllegalAccess)
    } else {
        self.state.reservation = Some(addr);
        write_rd!(self, rd, { loaded });
        end_op!(self)
    }
}
// SC.W: store rs2 if the reservation from a prior LR.W still covers the
// address; rd <- 0 on success, 1 on failure.
// Per the A extension, executing SC invalidates any held reservation
// regardless of outcome; previously a failed SC left it in place.
fn sc_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
    let addr = self.state.x[rs1];
    let reserved = self.state.reservation == Some(addr);
    self.state.reservation = None;
    if reserved {
        let value = self.state.x[rs2];
        if self.mem.access(addr, MemoryAccess::Store(value)) {
            write_rd!(self, rd, { 0 });
            end_op!(self)
        } else {
            end_op!(self, IllegalAccess)
        }
    } else {
        write_rd!(self, rd, { 1 });
        end_op!(self)
    }
}
// AMOSWAP.W: atomic swap of memory at x[rs1] with x[rs2].
// NOTE(review): amo! presumably loads the old memory word into x[rd] before
// evaluating the block, whose result is stored back — confirm against amo!.
fn amoswap_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rs2]
})
}
// AMOADD.W: atomic add; x[rd] here is the just-loaded old memory value
// (presumably placed there by amo! — see note on amoswap_w).
fn amoadd_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rd].wrapping_add(self.state.x[rs2])
})
}
// AMOXOR.W: atomic xor of the loaded old value with x[rs2].
fn amoxor_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rd] ^ self.state.x[rs2]
})
}
// AMOAND.W: atomic and of the loaded old value with x[rs2].
fn amoand_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rd] & self.state.x[rs2]
})
}
// AMOOR.W: atomic or of the loaded old value with x[rs2].
fn amoor_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rd] | self.state.x[rs2]
})
}
// AMOMIN.W: atomic signed minimum of the loaded old value and x[rs2].
fn amomin_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
(self.state.x[rd] as i32).min(self.state.x[rs2] as i32) as u32
})
}
// AMOMAX.W: atomic signed maximum of the loaded old value and x[rs2].
fn amomax_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
(self.state.x[rd] as i32).max(self.state.x[rs2] as i32) as u32
})
}
// AMOMINU.W: atomic unsigned minimum of the loaded old value and x[rs2].
fn amominu_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rd].min(self.state.x[rs2])
})
}
// AMOMAXU.W: atomic unsigned maximum of the loaded old value and x[rs2].
fn amomaxu_w(&mut self, rd: usize, rs1: usize, rs2: usize, _aq: bool, _rl: bool) -> CpuExit {
amo!(self, rd, rs1, {
self.state.x[rd].max(self.state.x[rs2])
})
}
// FLW: load a single and widen (NaN-box) it into the 64-bit f register.
fn flw(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut bits: u32 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut bits)) {
        end_op!(self, IllegalAccess)
    } else {
        self.state.f[rd] = Sf64::from(Sf32(bits));
        end_op!(self)
    }
}
// FSW: store the single-precision bits extracted from f[rs2].
fn fsw(&mut self, rs1: usize, rs2: usize, s_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(s_imm as u32);
    let bits = Sf32::from(self.state.f[rs2]).0;
    match self.mem.access(addr, MemoryAccess::Store(bits)) {
        true => end_op!(self),
        false => end_op!(self, IllegalAccess),
    }
}
// FMADD.S: rd <- (rs1 * rs2) + rs3, single precision, rounded per rm.
fn fmadd_s(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_mulAdd(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2]),
Sf32::from(self.state.f[rs3])
))
} })
}
// FMSUB.S: rd <- (rs1 * rs2) - rs3.
fn fmsub_s(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_mulAdd(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2]),
Sf32::from(self.state.f[rs3]).negate()
))
} })
}
// FNMSUB.S: rd <- -(rs1 * rs2) + rs3.
fn fnmsub_s(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_mulAdd(
Sf32::from(self.state.f[rs1]).negate(),
Sf32::from(self.state.f[rs2]),
Sf32::from(self.state.f[rs3])
))
} })
}
// FNMADD.S: rd <- -(rs1 * rs2) - rs3.
fn fnmadd_s(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_mulAdd(
Sf32::from(self.state.f[rs1]).negate(),
Sf32::from(self.state.f[rs2]),
Sf32::from(self.state.f[rs3]).negate()
))
} })
}
// FADD.S: single-precision addition via softfloat, rounded per rm.
fn fadd_s(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_add(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
))
} })
}
// FSUB.S: single-precision subtraction.
fn fsub_s(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_sub(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
))
} })
}
// FMUL.S: single-precision multiplication.
fn fmul_s(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_mul(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
))
} })
}
// FDIV.S: single-precision division.
fn fdiv_s(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_div(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
))
} })
}
// FSQRT.S: single-precision square root.
fn fsqrt_s(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
Sf64::from(sf::f32_sqrt(
Sf32::from(self.state.f[rs1])
))
} })
}
// FSGNJ.S: rd gets rs1's magnitude with rs2's sign bit.
fn fsgnj_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    let magnitude = Sf32::from(self.state.f[rs1]).0 & 0x7fff_ffff;
    let sign = Sf32::from(self.state.f[rs2]).0 & 0x8000_0000;
    self.state.f[rd] = Sf64::from(Sf32(magnitude | sign));
    end_op!(self)
}
// FSGNJN.S: rd gets rs1's magnitude with rs2's inverted sign bit.
fn fsgnjn_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    let magnitude = Sf32::from(self.state.f[rs1]).0 & 0x7fff_ffff;
    let sign = !Sf32::from(self.state.f[rs2]).0 & 0x8000_0000;
    self.state.f[rd] = Sf64::from(Sf32(magnitude | sign));
    end_op!(self)
}
// FSGNJX.S: rd's sign is the xor of rs1's and rs2's signs.
fn fsgnjx_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    let value = Sf32::from(self.state.f[rs1]).0;
    let sign = Sf32::from(self.state.f[rs2]).0 & 0x8000_0000;
    self.state.f[rd] = Sf64::from(Sf32(value ^ sign));
    end_op!(self)
}
// FMIN.S: IEEE-style minimum with RISC-V NaN/zero handling:
// signaling NaN raises invalid; one NaN yields the other operand;
// two NaNs yield the canonical NaN; min(-0, +0) is -0.
fn fmin_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
sf_calc!(self, rd, { unsafe {
let a = f32::from(self.state.f[rs1]);
let b = f32::from(self.state.f[rs2]);
if sf::f32_is_signaling_nan(Sf32::from(a)) || sf::f32_is_signaling_nan(Sf32::from(b)) {
sf::raise_flags(sf::FLAG_INVALID);
}
Sf64::from(match (a.classify(), b.classify()) {
(FpCategory::Nan, FpCategory::Nan) => f32::from(Sf32::NAN),
(FpCategory::Nan, _) => b,
(_, FpCategory::Nan) => a,
// Signed-zero tie-break: prefer the negative zero.
(FpCategory::Zero, FpCategory::Zero) => {
if a.is_sign_negative() { a } else { b }
},
_ => f32::min(a, b),
})
} })
}
// FMAX.S: mirror of fmin_s; max(-0, +0) is +0.
fn fmax_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
sf_calc!(self, rd, { unsafe {
let a = f32::from(self.state.f[rs1]);
let b = f32::from(self.state.f[rs2]);
if sf::f32_is_signaling_nan(Sf32::from(a)) || sf::f32_is_signaling_nan(Sf32::from(b)) {
sf::raise_flags(sf::FLAG_INVALID);
}
Sf64::from(match (a.classify(), b.classify()) {
(FpCategory::Nan, FpCategory::Nan) => f32::from(Sf32::NAN),
(FpCategory::Nan, _) => b,
(_, FpCategory::Nan) => a,
// Signed-zero tie-break: prefer the positive zero.
(FpCategory::Zero, FpCategory::Zero) => {
if a.is_sign_positive() { a } else { b }
},
_ => f32::max(a, b),
})
} })
}
// FCVT.W.S: convert single to signed 32-bit integer, rounded per rm.
// NOTE(review): the trailing `true` is presumably softfloat's exact-flag
// argument (raise inexact) — confirm against the binding.
fn fcvt_w_s(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
sf_wrap!(self, rm, {
write_rd!(self, rd, { unsafe {
sf::f32_to_i32(
Sf32::from(self.state.f[rs1]),
sf::get_rounding_mode(),
true
) as u32
} });
});
end_op!(self)
}
// FCVT.WU.S: convert single to unsigned 32-bit integer, rounded per rm.
fn fcvt_wu_s(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
sf_wrap!(self, rm, {
write_rd!(self, rd, { unsafe {
sf::f32_to_u32(
Sf32::from(self.state.f[rs1]),
sf::get_rounding_mode(),
true
)
} });
});
end_op!(self)
}
fn fmv_x_w(&mut self, rd: usize, rs1: usize) -> CpuExit {
self.state.x[rd] = Sf32::from(self.state.f[rs1]).0;
end_op!(self)
}
// FEQ.S: rd <- 1 if rs1 == rs2 else 0 (softfloat quiet comparison).
fn feq_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
sf_wrap!(self, {
write_rd!(self, rd, { unsafe {
let res = sf::f32_eq(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
);
if res { 1 } else { 0 }
} });
});
end_op!(self)
}
// FLT.S: rd <- 1 if rs1 < rs2 else 0.
fn flt_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
sf_wrap!(self, {
write_rd!(self, rd, { unsafe {
let res = sf::f32_lt(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
);
if res { 1 } else { 0 }
} });
});
end_op!(self)
}
// FLE.S: rd <- 1 if rs1 <= rs2 else 0.
fn fle_s(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
sf_wrap!(self, {
write_rd!(self, rd, { unsafe {
let res = sf::f32_le(
Sf32::from(self.state.f[rs1]),
Sf32::from(self.state.f[rs2])
);
if res { 1 } else { 0 }
} });
});
end_op!(self)
}
// FCLASS.S: classify f[rs1] into the 10-bit one-hot mask defined by the
// F extension (bit 0 = -inf ... bit 9 = quiet NaN).
fn fclass_s(&mut self, rd: usize, rs1: usize) -> CpuExit {
let v = f32::from(self.state.f[rs1]);
write_rd!(self, rd, { match v.classify() {
FpCategory::Nan => {
// bit 8 = signaling NaN, bit 9 = quiet NaN.
if unsafe { sf::f32_is_signaling_nan(Sf32::from(v)) } { 0b01_0000_0000 } else { 0b10_0000_0000 }
},
FpCategory::Infinite => {
// bit 7 = +inf, bit 0 = -inf.
if v.is_sign_positive() { 0b00_1000_0000 } else { 0b00_0000_0001 }
},
FpCategory::Zero => {
// bit 4 = +0, bit 3 = -0.
if v.is_sign_positive() { 0b00_0001_0000 } else { 0b00_0000_1000 }
},
FpCategory::Subnormal => {
// bit 5 = +subnormal, bit 2 = -subnormal.
if v.is_sign_positive() { 0b00_0010_0000 } else { 0b00_0000_0100 }
},
FpCategory::Normal => {
// bit 6 = +normal, bit 1 = -normal.
if v.is_sign_positive() { 0b00_0100_0000 } else { 0b00_0000_0010 }
},
} });
end_op!(self)
}
// FCVT.S.W: convert signed i32 to single, NaN-boxed into f[rd].
// sf_calc! is the tail expression here, matching every sibling handler;
// the stray trailing `;` that set it apart has been dropped.
fn fcvt_s_w(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    sf_calc!(self, rm, rd, { unsafe {
        Sf64::from(sf::i32_to_f32(self.state.x[rs1] as i32))
    } })
}
// FCVT.S.WU: convert unsigned u32 to single, NaN-boxed into f[rd].
// Trailing `;` dropped so sf_calc! is the tail expression, as elsewhere.
fn fcvt_s_wu(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    sf_calc!(self, rm, rd, { unsafe {
        Sf64::from(sf::u32_to_f32(self.state.x[rs1]))
    } })
}
// FMV.W.X: move the raw bits of x[rs1] into f[rd], NaN-boxed to 64 bits.
fn fmv_w_x(&mut self, rd: usize, rs1: usize) -> CpuExit {
self.state.f[rd] = Sf64::from(Sf32(self.state.x[rs1]));
end_op!(self)
}
// FLD: load a raw 64-bit double into f[rd].
fn fld(&mut self, rd: usize, rs1: usize, i_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(i_imm as u32);
    let mut bits: u64 = 0;
    if !self.mem.access(addr, MemoryAccess::Load(&mut bits)) {
        end_op!(self, IllegalAccess)
    } else {
        self.state.f[rd] = Sf64(bits);
        end_op!(self)
    }
}
// FSD: store the raw 64 bits of f[rs2].
fn fsd(&mut self, rs1: usize, rs2: usize, s_imm: i32) -> CpuExit {
    let addr = self.state.x[rs1].wrapping_add(s_imm as u32);
    match self.mem.access(addr, MemoryAccess::Store(self.state.f[rs2].0)) {
        true => end_op!(self),
        false => end_op!(self, IllegalAccess),
    }
}
// FMADD.D: rd <- (rs1 * rs2) + rs3, double precision, rounded per rm.
fn fmadd_d(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
sf::f64_mulAdd(
self.state.f[rs1],
self.state.f[rs2],
self.state.f[rs3]
)
} })
}
// FMSUB.D: rd <- (rs1 * rs2) - rs3.
fn fmsub_d(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
sf::f64_mulAdd(
self.state.f[rs1],
self.state.f[rs2],
self.state.f[rs3].negate()
)
} })
}
// FNMSUB.D: rd <- -(rs1 * rs2) + rs3.
fn fnmsub_d(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
sf::f64_mulAdd(
self.state.f[rs1].negate(),
self.state.f[rs2],
self.state.f[rs3]
)
} })
}
// FNMADD.D: rd <- -(rs1 * rs2) - rs3.
fn fnmadd_d(&mut self, rd: usize, rs1: usize, rs2: usize, rs3: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
sf::f64_mulAdd(
self.state.f[rs1].negate(),
self.state.f[rs2],
self.state.f[rs3].negate()
)
} })
}
// FADD.D: double-precision addition via softfloat, rounded per rm.
fn fadd_d(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
sf::f64_add(
self.state.f[rs1],
self.state.f[rs2]
)
} })
}
fn fsub_d(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
    // FSUB.D: f[rd] = f[rs1] - f[rs2].
    let (a, b) = (self.state.f[rs1], self.state.f[rs2]);
    sf_calc!(self, rm, rd, { unsafe { sf::f64_sub(a, b) } })
}
fn fmul_d(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
    // FMUL.D: f[rd] = f[rs1] * f[rs2].
    let (a, b) = (self.state.f[rs1], self.state.f[rs2]);
    sf_calc!(self, rm, rd, { unsafe { sf::f64_mul(a, b) } })
}
fn fdiv_d(&mut self, rd: usize, rs1: usize, rs2: usize, rm: u32) -> CpuExit {
    // FDIV.D: f[rd] = f[rs1] / f[rs2].
    let (a, b) = (self.state.f[rs1], self.state.f[rs2]);
    sf_calc!(self, rm, rd, { unsafe { sf::f64_div(a, b) } })
}
fn fsqrt_d(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    // FSQRT.D: f[rd] = sqrt(f[rs1]).
    let a = self.state.f[rs1];
    sf_calc!(self, rm, rd, { unsafe { sf::f64_sqrt(a) } })
}
fn fsgnj_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FSGNJ.D: f[rd] takes the magnitude of f[rs1] and the sign bit of f[rs2].
    const SIGN: u64 = 1 << 63;
    let magnitude = self.state.f[rs1].0 & !SIGN;
    let sign = self.state.f[rs2].0 & SIGN;
    self.state.f[rd] = Sf64(magnitude | sign);
    end_op!(self)
}
fn fsgnjn_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FSGNJN.D: f[rd] takes the magnitude of f[rs1] and the INVERTED sign bit of f[rs2].
    const SIGN: u64 = 1 << 63;
    let magnitude = self.state.f[rs1].0 & !SIGN;
    let sign = !self.state.f[rs2].0 & SIGN;
    self.state.f[rd] = Sf64(magnitude | sign);
    end_op!(self)
}
fn fsgnjx_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FSGNJX.D: f[rd] = f[rs1] with its sign bit XORed with the sign bit of f[rs2].
    const SIGN: u64 = 1 << 63;
    let bits = self.state.f[rs1].0 ^ (self.state.f[rs2].0 & SIGN);
    self.state.f[rd] = Sf64(bits);
    end_op!(self)
}
fn fmin_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FMIN.D: f[rd] = smaller of f[rs1], f[rs2] with minNum-style semantics:
    // a lone NaN operand is ignored, two NaNs yield the canonical NaN,
    // min(-0.0, +0.0) is -0.0, and a signaling-NaN input raises the invalid flag.
    sf_calc!(self, rd, { unsafe {
        let a = f64::from(self.state.f[rs1]);
        let b = f64::from(self.state.f[rs2]);
        if sf::f64_is_signaling_nan(Sf64::from(a)) || sf::f64_is_signaling_nan(Sf64::from(b)) {
            sf::raise_flags(sf::FLAG_INVALID);
        }
        let (ca, cb) = (a.classify(), b.classify());
        let picked = if ca == FpCategory::Nan && cb == FpCategory::Nan {
            f64::from(Sf64::NAN)
        } else if ca == FpCategory::Nan {
            b
        } else if cb == FpCategory::Nan {
            a
        } else if ca == FpCategory::Zero && cb == FpCategory::Zero {
            // Distinguish -0.0 from +0.0: prefer the negative zero.
            if a.is_sign_negative() { a } else { b }
        } else {
            f64::min(a, b)
        };
        Sf64::from(picked)
    } })
}
fn fmax_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FMAX.D: f[rd] = larger of f[rs1], f[rs2] with maxNum-style semantics:
    // a lone NaN operand is ignored, two NaNs yield the canonical NaN,
    // max(-0.0, +0.0) is +0.0, and a signaling-NaN input raises the invalid flag.
    sf_calc!(self, rd, { unsafe {
        let a = f64::from(self.state.f[rs1]);
        let b = f64::from(self.state.f[rs2]);
        if sf::f64_is_signaling_nan(Sf64::from(a)) || sf::f64_is_signaling_nan(Sf64::from(b)) {
            sf::raise_flags(sf::FLAG_INVALID);
        }
        let (ca, cb) = (a.classify(), b.classify());
        let picked = if ca == FpCategory::Nan && cb == FpCategory::Nan {
            f64::from(Sf64::NAN)
        } else if ca == FpCategory::Nan {
            b
        } else if cb == FpCategory::Nan {
            a
        } else if ca == FpCategory::Zero && cb == FpCategory::Zero {
            // Distinguish -0.0 from +0.0: prefer the positive zero.
            if a.is_sign_positive() { a } else { b }
        } else {
            f64::max(a, b)
        };
        Sf64::from(picked)
    } })
}
fn fcvt_w_d(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    // FCVT.W.D: convert f[rs1] to a signed 32-bit integer in x[rd],
    // rounding per rm (applied by sf_wrap!). The conversion stays inside
    // write_rd! so evaluation order matches the other integer writes.
    sf_wrap!(self, rm, {
        write_rd!(self, rd, {
            let v = self.state.f[rs1];
            unsafe { sf::f64_to_i32(v, sf::get_rounding_mode(), true) as u32 }
        });
    });
    end_op!(self)
}
fn fcvt_wu_d(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    // FCVT.WU.D: convert f[rs1] to an unsigned 32-bit integer in x[rd],
    // rounding per rm (applied by sf_wrap!).
    sf_wrap!(self, rm, {
        write_rd!(self, rd, {
            let v = self.state.f[rs1];
            unsafe { sf::f64_to_u32(v, sf::get_rounding_mode(), true) }
        });
    });
    end_op!(self)
}
fn feq_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FEQ.D: x[rd] = 1 if f[rs1] == f[rs2] per softfloat comparison, else 0.
    sf_wrap!(self, {
        write_rd!(self, rd, {
            unsafe { sf::f64_eq(self.state.f[rs1], self.state.f[rs2]) as u32 }
        });
    });
    end_op!(self)
}
fn flt_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FLT.D: x[rd] = 1 if f[rs1] < f[rs2] per softfloat comparison, else 0.
    sf_wrap!(self, {
        write_rd!(self, rd, {
            unsafe { sf::f64_lt(self.state.f[rs1], self.state.f[rs2]) as u32 }
        });
    });
    end_op!(self)
}
fn fle_d(&mut self, rd: usize, rs1: usize, rs2: usize) -> CpuExit {
    // FLE.D: x[rd] = 1 if f[rs1] <= f[rs2] per softfloat comparison, else 0.
    sf_wrap!(self, {
        write_rd!(self, rd, {
            unsafe { sf::f64_le(self.state.f[rs1], self.state.f[rs2]) as u32 }
        });
    });
    end_op!(self)
}
fn fclass_d(&mut self, rd: usize, rs1: usize) -> CpuExit {
    // FCLASS.D: write a one-hot classification mask of f[rs1] into x[rd].
    // Bit layout (LSB first): -inf, -normal, -subnormal, -0, +0,
    // +subnormal, +normal, +inf, signaling NaN, quiet NaN.
    let v = f64::from(self.state.f[rs1]);
    let neg = v.is_sign_negative();
    write_rd!(self, rd, { match v.classify() {
        FpCategory::Nan => {
            if unsafe { sf::f64_is_signaling_nan(Sf64::from(v)) } { 1 << 8 } else { 1 << 9 }
        },
        FpCategory::Infinite => if neg { 1 << 0 } else { 1 << 7 },
        FpCategory::Zero => if neg { 1 << 3 } else { 1 << 4 },
        FpCategory::Subnormal => if neg { 1 << 2 } else { 1 << 5 },
        FpCategory::Normal => if neg { 1 << 1 } else { 1 << 6 },
    } });
    end_op!(self)
}
fn fcvt_d_w(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    // FCVT.D.W: convert the signed 32-bit integer in x[rs1] to double
    // precision in f[rd]; rm selects the rounding mode.
    let src = self.state.x[rs1] as i32;
    sf_calc!(self, rm, rd, { unsafe { sf::i32_to_f64(src) } });
}
fn fcvt_d_wu(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    // FCVT.D.WU: convert the unsigned 32-bit integer in x[rs1] to double
    // precision in f[rd]; rm selects the rounding mode.
    let src = self.state.x[rs1];
    sf_calc!(self, rm, rd, { unsafe { sf::u32_to_f64(src) } });
}
fn fcvt_s_d(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
    // FCVT.S.D: narrow the double in f[rs1] to single precision, NaN-boxed
    // into f[rd]. NaN inputs are replaced by the canonical f32 NaN instead of
    // letting sf::f64_to_f32 propagate the payload (RISC-V mandates canonical
    // NaN results).
    // Fix: per the RISC-V spec, FCVT must raise the invalid (NV) flag on a
    // signaling-NaN input; the canonical-NaN shortcut previously bypassed
    // f64_to_f32 and therefore never set it.
    sf_calc!(self, rm, rd, { unsafe {
        let v = self.state.f[rs1];
        if f64::from(v).is_nan() {
            if sf::f64_is_signaling_nan(v) {
                sf::raise_flags(sf::FLAG_INVALID);
            }
            Sf64::from(Sf32::NAN)
        } else {
            Sf64::from(sf::f64_to_f32(v))
        }
    } });
}
// FCVT.D.S: widen the single-precision value in f[rs1] to double precision in
// f[rd]; rm selects the rounding mode (widening is exact for non-NaN inputs).
// NaN inputs are replaced by the canonical f64 NaN instead of letting
// sf::f32_to_f64 propagate the payload (RISC-V mandates canonical NaN results).
// NOTE(review): the RISC-V spec requires FCVT to raise the invalid flag on a
// signaling-NaN input, but this shortcut bypasses f32_to_f64 without raising
// it — confirm whether an f32 signaling-NaN check should be added here.
fn fcvt_d_s(&mut self, rd: usize, rs1: usize, rm: u32) -> CpuExit {
sf_calc!(self, rm, rd, { unsafe {
let v = Sf32::from(self.state.f[rs1]);
if f32::from(v).is_nan() {
Sf64::NAN
} else {
sf::f32_to_f64(v)
}
} });
}
}