use super::opcodes::*;
use crate::core::emitter::*;
use crate::core::operand::*;
pub trait EmitterExplicit: Emitter {
// --- ADD family -------------------------------------------------------------
// Generated emitter wrappers. Every method in this trait follows one pattern:
// convert each argument with `OperandCast::as_operand`, then forward the
// `Opcode` discriminant (cast to i64) plus the operand slice to
// `Emitter::emit_n`. Only the opcode variant and the operand count vary.
// NOTE(review): operand roles (rd/rs1/rs2/imm ordering) are not visible from
// this file — confirm against the opcode definitions in `super::opcodes`.
fn add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ADD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ADDUW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn addi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ADDI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn addiw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ADDIW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn addw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ADDW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
// --- AES scalar-crypto family -----------------------------------------------
// Same generated pattern: operands are forwarded unchanged to `emit_n` with
// the matching opcode. The `aes32*` variants take four operands while the
// `aes64*` variants take three (or two for `aes64im`).
// NOTE(review): the extra fourth operand on `aes32*` is presumably a
// byte-select immediate — confirm against the opcode table.
fn aes32dsi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::AES32DSI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn aes32dsmi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::AES32DSMI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn aes32esi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::AES32ESI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn aes32esmi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::AES32ESMI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn aes64ds(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AES64DS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn aes64dsm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AES64DSM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn aes64es(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AES64ES as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn aes64esm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AES64ESM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
// `aes64im` is the only two-operand member of this family.
fn aes64im(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::AES64IM as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn aes64ks1i(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AES64KS1I as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn aes64ks2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AES64KS2 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
// --- Atomic memory operation (AMO) family ------------------------------------
// All 41 methods here are identical five-operand wrappers; each family
// (add/and/cas/max/maxu/min/minu/or/swap/xor) has byte/doubleword/halfword/
// word variants (`amocas` additionally has a quadword variant).
// NOTE(review): op3/op4 are presumably the acquire/release ordering flags
// that AMO encodings carry in addition to rd/rs1/rs2 — confirm against the
// opcode definitions in `super::opcodes`.
fn amoadd_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOADDB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoadd_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOADDD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoadd_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOADDH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoadd_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOADDW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoand_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOANDB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoand_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOANDD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoand_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOANDH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoand_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOANDW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
// `amocas_*` (compare-and-swap) is the only family with a `_q` variant.
fn amocas_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOCASB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amocas_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOCASD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amocas_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOCASH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amocas_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOCASQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amocas_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOCASW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomax_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomax_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomax_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomax_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomaxu_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXUB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomaxu_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXUD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomaxu_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXUH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomaxu_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMAXUW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomin_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomin_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMIND as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomin_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amomin_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amominu_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINUB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amominu_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINUD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amominu_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINUH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amominu_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOMINUW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoor_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOORB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoor_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOORD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoor_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOORH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoor_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOORW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoswap_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOSWAPB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoswap_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOSWAPD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoswap_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOSWAPH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoswap_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOSWAPW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoxor_b(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOXORB as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoxor_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOXORD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoxor_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOXORH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn amoxor_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::AMOXORW as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
// --- Logic / branch / bit-manipulation family --------------------------------
// Three-operand methods are register/register or register/immediate forms;
// two-operand methods are the pseudo-instruction comparisons against zero
// (beqz, bgez, ...) plus auipc/brev8. The `*_rv32` variants carry a distinct
// opcode for the 32-bit shift-amount encoding — presumably so immediates are
// range-checked differently; confirm in `super::opcodes`.
fn and(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::AND as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn andi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ANDI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn andn(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::ANDN as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn auipc(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::AUIPC as i64, &[op0.as_operand(), op1.as_operand()])
}
fn bclr(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BCLR as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bclri(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BCLRI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bclri_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BCLRIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn beq(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BEQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn beqz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BEQZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn bext(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BEXT as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bexti(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BEXTI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bexti_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BEXTIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bge(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BGE as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bgeu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BGEU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bgez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BGEZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn bgt(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BGT as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bgtu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BGTU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bgtz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BGTZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn binv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BINV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn binvi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BINVI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn binvi_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BINVIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn ble(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BLE as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bleu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BLEU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn blez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BLEZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn blt(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BLT as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bltu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BLTU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bltz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BLTZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn bne(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BNE as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bnez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BNEZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn brev8(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::BREV8 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn bset(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BSET as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bseti(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BSETI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn bseti_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::BSETIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
// --- Compressed (c_*) instruction family --------------------------------------
// Compressed forms take fewer explicit operands because the encoding implies
// registers (e.g. `c_addi16sp` takes only an immediate; `c_ebreak` and the
// `c_mop_*` / `c_ntl_*` hint instructions take none). Same generated
// emit_n-forwarding pattern as the rest of the trait.
fn c_add(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CADD as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_addi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CADDI as i64, &[op0.as_operand(), op1.as_operand()])
}
// Single operand: the stack-pointer destination is implicit in the encoding.
fn c_addi16sp(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CADDI16SP as i64, &[op0.as_operand()])
}
fn c_addi4spn(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CADDI4SPN as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn c_addiw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CADDIW as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_addw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CADDW as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_and(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CAND as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_andi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CANDI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_beqz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CBEQZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_bnez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CBNEZ as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_ebreak(&mut self) {
self.emit_n(Opcode::CEBREAK as i64, &[])
}
fn c_fld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CFLD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_fldsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CFLDSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_flw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CFLW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_flwsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CFLWSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_fsd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CFSD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_fsdsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CFSDSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_fsw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CFSW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_fswsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CFSWSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_j(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CJ as i64, &[op0.as_operand()])
}
fn c_jal(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CJAL as i64, &[op0.as_operand()])
}
fn c_jalr(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CJALR as i64, &[op0.as_operand()])
}
fn c_jr(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CJR as i64, &[op0.as_operand()])
}
fn c_lbu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLBU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_ld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_ldsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CLDSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_lh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_lhu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLHU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_li(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CLI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_lui(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CLUI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_lw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_lwsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CLWSP as i64, &[op0.as_operand(), op1.as_operand()])
}
// Fixed may-be-op hints: each c_mop_N has its own opcode; c_mop_n takes the
// index as an operand instead.
fn c_mop_1(&mut self) {
self.emit_n(Opcode::CMOP1 as i64, &[])
}
fn c_mop_11(&mut self) {
self.emit_n(Opcode::CMOP11 as i64, &[])
}
fn c_mop_13(&mut self) {
self.emit_n(Opcode::CMOP13 as i64, &[])
}
fn c_mop_15(&mut self) {
self.emit_n(Opcode::CMOP15 as i64, &[])
}
fn c_mop_3(&mut self) {
self.emit_n(Opcode::CMOP3 as i64, &[])
}
fn c_mop_5(&mut self) {
self.emit_n(Opcode::CMOP5 as i64, &[])
}
fn c_mop_7(&mut self) {
self.emit_n(Opcode::CMOP7 as i64, &[])
}
fn c_mop_9(&mut self) {
self.emit_n(Opcode::CMOP9 as i64, &[])
}
fn c_mop_n(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CMOPN as i64, &[op0.as_operand()])
}
fn c_mul(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CMUL as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_mv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CMV as i64, &[op0.as_operand(), op1.as_operand()])
}
// NOTE(review): c_nop takes an operand here (presumably an immediate hint
// value) — confirm against the opcode table.
fn c_nop(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CNOP as i64, &[op0.as_operand()])
}
fn c_not(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CNOT as i64, &[op0.as_operand()])
}
fn c_ntl_all(&mut self) {
self.emit_n(Opcode::CNTLALL as i64, &[])
}
fn c_ntl_p1(&mut self) {
self.emit_n(Opcode::CNTLP1 as i64, &[])
}
fn c_ntl_pall(&mut self) {
self.emit_n(Opcode::CNTLPALL as i64, &[])
}
fn c_ntl_s1(&mut self) {
self.emit_n(Opcode::CNTLS1 as i64, &[])
}
fn c_or(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::COR as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_sb(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSB as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_sd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_sdsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSDSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_sext_b(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CSEXTB as i64, &[op0.as_operand()])
}
fn c_sext_h(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CSEXTH as i64, &[op0.as_operand()])
}
fn c_sext_w(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CSEXTW as i64, &[op0.as_operand()])
}
fn c_sh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_slli(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSLLI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_slli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CSLLIRV32 as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn c_srai(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRAI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_srai_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CSRAIRV32 as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn c_srli(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRLI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_srli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CSRLIRV32 as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn c_sub(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSUB as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_subw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSUBW as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_sw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn c_swsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSWSP as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_xor(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CXOR as i64, &[op0.as_operand(), op1.as_operand()])
}
fn c_zext_b(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CZEXTB as i64, &[op0.as_operand()])
}
fn c_zext_h(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CZEXTH as i64, &[op0.as_operand()])
}
fn c_zext_w(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CZEXTW as i64, &[op0.as_operand()])
}
fn cbo_clean(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CBOCLEAN as i64, &[op0.as_operand()])
}
/// Emits a `cbo.flush` instruction with operand `op0`.
fn cbo_flush(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CBOFLUSH as i64, &[op0.as_operand()])
}
/// Emits a `cbo.inval` instruction with operand `op0`.
fn cbo_inval(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CBOINVAL as i64, &[op0.as_operand()])
}
/// Emits a `cbo.zero` instruction with operand `op0`.
fn cbo_zero(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CBOZERO as i64, &[op0.as_operand()])
}
/// Emits a `clmul` instruction with operands `(op0, op1, op2)`.
fn clmul(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLMUL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `clmulh` instruction with operands `(op0, op1, op2)`.
fn clmulh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLMULH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `clmulr` instruction with operands `(op0, op1, op2)`.
fn clmulr(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CLMULR as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `clz` instruction with operands `(op0, op1)`.
fn clz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CLZ as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `clzw` instruction with operands `(op0, op1)`.
fn clzw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CLZW as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `cm.jalt` instruction with operand `op0`.
fn cm_jalt(&mut self, op0: impl OperandCast) {
self.emit_n(Opcode::CMJALT as i64, &[op0.as_operand()])
}
/// Emits a `cm.mva01s` instruction with operands `(op0, op1)`.
fn cm_mva01s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CMMVA01S as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits a `cm.mvsa01` instruction with operands `(op0, op1)`.
fn cm_mvsa01(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CMMVSA01 as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits a `cm.pop` instruction with operands `(op0, op1)`.
fn cm_pop(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CMPOP as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `cm.popret` instruction with operands `(op0, op1)`.
fn cm_popret(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CMPOPRET as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits a `cm.popretz` instruction with operands `(op0, op1)`.
fn cm_popretz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::CMPOPRETZ as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits a `cm.push` instruction with operands `(op0, op1)`.
fn cm_push(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CMPUSH as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `cpop` instruction with operands `(op0, op1)`.
fn cpop(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CPOP as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `cpopw` instruction with operands `(op0, op1)`.
fn cpopw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CPOPW as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrc` pseudo-instruction with operands `(op0, op1)`.
fn csrc(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRC as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrci` pseudo-instruction with operands `(op0, op1)`.
fn csrci(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRCI as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrr` pseudo-instruction with operands `(op0, op1)`.
fn csrr(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRR as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrrc` instruction with operands `(op0, op1, op2)`.
fn csrrc(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSRRC as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `csrrci` instruction with operands `(op0, op1, op2)`.
fn csrrci(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSRRCI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `csrrs` instruction with operands `(op0, op1, op2)`.
fn csrrs(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSRRS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `csrrsi` instruction with operands `(op0, op1, op2)`.
fn csrrsi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSRRSI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `csrrw` instruction with operands `(op0, op1, op2)`.
fn csrrw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSRRW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `csrrwi` instruction with operands `(op0, op1, op2)`.
fn csrrwi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CSRRWI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `csrs` pseudo-instruction with operands `(op0, op1)`.
fn csrs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRS as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrsi` pseudo-instruction with operands `(op0, op1)`.
fn csrsi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRSI as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrw` pseudo-instruction with operands `(op0, op1)`.
fn csrw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRW as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `csrwi` pseudo-instruction with operands `(op0, op1)`.
fn csrwi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CSRWI as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `ctz` instruction with operands `(op0, op1)`.
fn ctz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CTZ as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `ctzw` instruction with operands `(op0, op1)`.
fn ctzw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::CTZW as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits a `czero.eqz` instruction with operands `(op0, op1, op2)`.
fn czero_eqz(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CZEROEQZ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `czero.nez` instruction with operands `(op0, op1, op2)`.
fn czero_nez(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::CZERONEZ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `div` instruction with operands `(op0, op1, op2)`.
fn div(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::DIV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `divu` instruction with operands `(op0, op1, op2)`.
fn divu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::DIVU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `divuw` instruction with operands `(op0, op1, op2)`.
fn divuw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::DIVUW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `divw` instruction with operands `(op0, op1, op2)`.
fn divw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::DIVW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `dret` instruction (no operands).
fn dret(&mut self) {
self.emit_n(Opcode::DRET as i64, &[])
}
/// Emits an `ebreak` instruction (no operands).
fn ebreak(&mut self) {
self.emit_n(Opcode::EBREAK as i64, &[])
}
/// Emits an `ecall` instruction (no operands).
fn ecall(&mut self) {
self.emit_n(Opcode::ECALL as i64, &[])
}
/// Emits an `fabs.d` pseudo-instruction with operands `(op0, op1, op2)`.
fn fabs_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FABSD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fabs.h` pseudo-instruction with operands `(op0, op1, op2)`.
fn fabs_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FABSH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fabs.q` pseudo-instruction with operands `(op0, op1, op2)`.
fn fabs_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FABSQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fabs.s` pseudo-instruction with operands `(op0, op1, op2)`.
fn fabs_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FABSS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fadd.d` instruction with operands `(op0..op3)`.
/// NOTE(review): op3 is presumably the rounding-mode operand — confirm against the opcode table.
fn fadd_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FADDD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fadd.h` instruction with operands `(op0..op3)`.
fn fadd_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FADDH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fadd.q` instruction with operands `(op0..op3)`.
fn fadd_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FADDQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fadd.s` instruction with operands `(op0..op3)`.
fn fadd_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FADDS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fclass.d` instruction with operands `(op0, op1)`.
fn fclass_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::FCLASSD as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits an `fclass.h` instruction with operands `(op0, op1)`.
fn fclass_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::FCLASSH as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits an `fclass.q` instruction with operands `(op0, op1)`.
fn fclass_q(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::FCLASSQ as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits an `fclass.s` instruction with operands `(op0, op1)`.
fn fclass_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::FCLASSS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits an `fcvt.d.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.d.l` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.d.lu` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDLU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.d.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.d.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.d.w` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.d.wu` instruction with operands `(op0, op1, op2)`.
fn fcvt_d_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTDWU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.l` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.lu` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHLU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.w` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.h.wu` instruction with operands `(op0, op1, op2)`.
fn fcvt_h_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTHWU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.l.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_l_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.l.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_l_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.l.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_l_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.l.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_l_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.lu.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_lu_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLUD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.lu.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_lu_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLUH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.lu.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_lu_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLUQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.lu.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_lu_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTLUS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.l` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.lu` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQLU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.w` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.q.wu` instruction with operands `(op0, op1, op2)`.
fn fcvt_q_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTQWU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.l` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.lu` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSLU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.w` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.s.wu` instruction with operands `(op0, op1, op2)`.
fn fcvt_s_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTSWU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.w.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_w_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.w.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_w_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.w.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_w_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.w.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_w_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.wu.d` instruction with operands `(op0, op1, op2)`.
fn fcvt_wu_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWUD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.wu.h` instruction with operands `(op0, op1, op2)`.
fn fcvt_wu_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWUH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.wu.q` instruction with operands `(op0, op1, op2)`.
fn fcvt_wu_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWUQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvt.wu.s` instruction with operands `(op0, op1, op2)`.
fn fcvt_wu_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FCVTWUS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fcvtmod.w.d` instruction with operands `(op0, op1)`.
/// NOTE(review): only two operands here (no explicit rounding-mode slot) — matches the
/// generator's table; confirm the encoding fixes the rounding mode internally.
fn fcvtmod_w_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::FCVTMODWD as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits an `fdiv.d` instruction with operands `(op0..op3)`.
fn fdiv_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FDIVD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fdiv.h` instruction with operands `(op0..op3)`.
fn fdiv_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FDIVH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fdiv.q` instruction with operands `(op0..op3)`.
fn fdiv_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FDIVQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fdiv.s` instruction with operands `(op0..op3)`.
fn fdiv_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FDIVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits a `fence` instruction with operands `(op0..op4)`.
/// NOTE(review): the meaning of the five operand slots is defined by the opcode table,
/// not visible here — verify against the generator's operand layout.
fn fence(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FENCE as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits a `fence.i` instruction with operands `(op0, op1, op2)`.
fn fence_i(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FENCEI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits a `fence.tso` instruction with operands `(op0, op1)`.
fn fence_tso(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::FENCETSO as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emits an `feq.d` instruction with operands `(op0, op1, op2)`.
fn feq_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FEQD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `feq.h` instruction with operands `(op0, op1, op2)`.
fn feq_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FEQH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `feq.q` instruction with operands `(op0, op1, op2)`.
fn feq_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FEQQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `feq.s` instruction with operands `(op0, op1, op2)`.
fn feq_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FEQS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fld` instruction with operands `(op0, op1, op2)`.
fn fld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fle.d` instruction with operands `(op0, op1, op2)`.
fn fle_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLED as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fle.h` instruction with operands `(op0, op1, op2)`.
fn fle_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLEH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fle.q` instruction with operands `(op0, op1, op2)`.
fn fle_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLEQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fle.s` instruction with operands `(op0, op1, op2)`.
fn fle_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLES as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fleq.d` instruction with operands `(op0, op1, op2)`.
fn fleq_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLEQD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fleq.h` instruction with operands `(op0, op1, op2)`.
fn fleq_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLEQH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fleq.q` instruction with operands `(op0, op1, op2)`.
fn fleq_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLEQQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fleq.s` instruction with operands `(op0, op1, op2)`.
fn fleq_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLEQS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `flh` instruction with operands `(op0, op1, op2)`.
fn flh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fli.d` instruction with operands `(op0, op1)`.
fn fli_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FLID as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fli.h` instruction with operands `(op0, op1)`.
fn fli_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FLIH as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fli.q` instruction with operands `(op0, op1)`.
fn fli_q(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FLIQ as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fli.s` instruction with operands `(op0, op1)`.
fn fli_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FLIS as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `flq` instruction with operands `(op0, op1, op2)`.
fn flq(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `flt.d` instruction with operands `(op0, op1, op2)`.
fn flt_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `flt.h` instruction with operands `(op0, op1, op2)`.
fn flt_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `flt.q` instruction with operands `(op0, op1, op2)`.
fn flt_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `flt.s` instruction with operands `(op0, op1, op2)`.
fn flt_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fltq.d` instruction with operands `(op0, op1, op2)`.
fn fltq_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTQD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fltq.h` instruction with operands `(op0, op1, op2)`.
fn fltq_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTQH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fltq.q` instruction with operands `(op0, op1, op2)`.
fn fltq_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTQQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fltq.s` instruction with operands `(op0, op1, op2)`.
fn fltq_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLTQS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `flw` instruction with operands `(op0, op1, op2)`.
fn flw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FLW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmadd.d` instruction with operands `(op0..op4)`.
fn fmadd_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMADDD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmadd.h` instruction with operands `(op0..op4)`.
fn fmadd_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMADDH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmadd.q` instruction with operands `(op0..op4)`.
fn fmadd_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMADDQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmadd.s` instruction with operands `(op0..op4)`.
fn fmadd_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMADDS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmax.d` instruction with operands `(op0, op1, op2)`.
fn fmax_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmax.h` instruction with operands `(op0, op1, op2)`.
fn fmax_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmax.q` instruction with operands `(op0, op1, op2)`.
fn fmax_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmax.s` instruction with operands `(op0, op1, op2)`.
fn fmax_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmaxm.d` instruction with operands `(op0, op1, op2)`.
fn fmaxm_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXMD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmaxm.h` instruction with operands `(op0, op1, op2)`.
fn fmaxm_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXMH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmaxm.q` instruction with operands `(op0, op1, op2)`.
fn fmaxm_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXMQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmaxm.s` instruction with operands `(op0, op1, op2)`.
fn fmaxm_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMAXMS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmin.d` instruction with operands `(op0, op1, op2)`.
fn fmin_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMIND as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmin.h` instruction with operands `(op0, op1, op2)`.
fn fmin_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmin.q` instruction with operands `(op0, op1, op2)`.
fn fmin_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmin.s` instruction with operands `(op0, op1, op2)`.
fn fmin_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fminm.d` instruction with operands `(op0, op1, op2)`.
fn fminm_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINMD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fminm.h` instruction with operands `(op0, op1, op2)`.
fn fminm_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINMH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fminm.q` instruction with operands `(op0, op1, op2)`.
fn fminm_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINMQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fminm.s` instruction with operands `(op0, op1, op2)`.
fn fminm_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMINMS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmsub.d` instruction with operands `(op0..op4)`.
fn fmsub_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMSUBD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmsub.h` instruction with operands `(op0..op4)`.
fn fmsub_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMSUBH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmsub.q` instruction with operands `(op0..op4)`.
fn fmsub_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMSUBQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmsub.s` instruction with operands `(op0..op4)`.
fn fmsub_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FMSUBS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fmul.d` instruction with operands `(op0..op3)`.
fn fmul_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FMULD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fmul.h` instruction with operands `(op0..op3)`.
fn fmul_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FMULH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fmul.q` instruction with operands `(op0..op3)`.
fn fmul_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FMULQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fmul.s` instruction with operands `(op0..op3)`.
fn fmul_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::FMULS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emits an `fmv.d` pseudo-instruction with operands `(op0, op1, op2)`.
fn fmv_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMVD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmv.d.x` instruction with operands `(op0, op1)`.
fn fmv_d_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVDX as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.h` pseudo-instruction with operands `(op0, op1, op2)`.
fn fmv_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMVH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmv.h.x` instruction with operands `(op0, op1)`.
fn fmv_h_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVHX as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.q` pseudo-instruction with operands `(op0, op1, op2)`.
fn fmv_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMVQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmv.s` pseudo-instruction with operands `(op0, op1, op2)`.
fn fmv_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMVS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmv.s.x` instruction with operands `(op0, op1)`.
fn fmv_s_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVSX as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.w.x` instruction with operands `(op0, op1)`.
fn fmv_w_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVWX as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.x.d` instruction with operands `(op0, op1)`.
fn fmv_x_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVXD as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.x.h` instruction with operands `(op0, op1)`.
fn fmv_x_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVXH as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.x.s` instruction with operands `(op0, op1)`.
fn fmv_x_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVXS as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmv.x.w` instruction with operands `(op0, op1)`.
fn fmv_x_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVXW as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmvh.x.d` instruction with operands `(op0, op1)`.
fn fmvh_x_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVHXD as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmvh.x.q` instruction with operands `(op0, op1)`.
fn fmvh_x_q(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::FMVHXQ as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emits an `fmvp.d.x` instruction with operands `(op0, op1, op2)`.
fn fmvp_d_x(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMVPDX as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fmvp.q.x` instruction with operands `(op0, op1, op2)`.
fn fmvp_q_x(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FMVPQX as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fneg.d` pseudo-instruction with operands `(op0, op1, op2)`.
fn fneg_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FNEGD as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fneg.h` pseudo-instruction with operands `(op0, op1, op2)`.
fn fneg_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FNEGH as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fneg.q` pseudo-instruction with operands `(op0, op1, op2)`.
fn fneg_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FNEGQ as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fneg.s` pseudo-instruction with operands `(op0, op1, op2)`.
fn fneg_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::FNEGS as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emits an `fnmadd.d` instruction with operands `(op0..op4)`.
fn fnmadd_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMADDD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmadd.h` instruction with operands `(op0..op4)`.
fn fnmadd_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMADDH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmadd.q` instruction with operands `(op0..op4)`.
fn fnmadd_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMADDQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmadd.s` instruction with operands `(op0..op4)`.
fn fnmadd_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMADDS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmsub.d` instruction with operands `(op0..op4)`.
fn fnmsub_d(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMSUBD as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmsub.h` instruction with operands `(op0..op4)`.
fn fnmsub_h(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMSUBH as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmsub.q` instruction with operands `(op0..op4)`.
fn fnmsub_q(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMSUBQ as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
/// Emits an `fnmsub.s` instruction with operands `(op0..op4)`.
fn fnmsub_s(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::FNMSUBS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
// FP CSR read pseudo-ops, round-to-integer ops, and FP store/flag-write emitters.
fn frcsr(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::FRCSR as i64, &args)
}
fn frflags(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::FRFLAGS as i64, &args)
}
fn fround_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDD as i64, &args)
}
fn fround_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDH as i64, &args)
}
fn fround_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDQ as i64, &args)
}
fn fround_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDS as i64, &args)
}
fn froundnx_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDNXD as i64, &args)
}
fn froundnx_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDNXH as i64, &args)
}
fn froundnx_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDNXQ as i64, &args)
}
fn froundnx_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FROUNDNXS as i64, &args)
}
fn frrm(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::FRRM as i64, &args)
}
fn fscsr(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::FSCSR as i64, &args)
}
fn fsd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSD as i64, &args)
}
fn fsflags(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::FSFLAGS as i64, &args)
}
fn fsflagsi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::FSFLAGSI as i64, &args)
}
// FP sign-injection emitters (fsgnj / fsgnjn / fsgnjx across D/H/Q/S widths).
fn fsgnj_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJD as i64, &args)
}
fn fsgnj_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJH as i64, &args)
}
fn fsgnj_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJQ as i64, &args)
}
fn fsgnj_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJS as i64, &args)
}
fn fsgnjn_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJND as i64, &args)
}
fn fsgnjn_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJNH as i64, &args)
}
fn fsgnjn_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJNQ as i64, &args)
}
fn fsgnjn_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJNS as i64, &args)
}
fn fsgnjx_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJXD as i64, &args)
}
fn fsgnjx_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJXH as i64, &args)
}
fn fsgnjx_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJXQ as i64, &args)
}
fn fsgnjx_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSGNJXS as i64, &args)
}
// FP half/quad stores, square roots, and rounding-mode write pseudo-ops.
fn fsh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSH as i64, &args)
}
fn fsq(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSQ as i64, &args)
}
fn fsqrt_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSQRTD as i64, &args)
}
fn fsqrt_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSQRTH as i64, &args)
}
fn fsqrt_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSQRTQ as i64, &args)
}
fn fsqrt_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSQRTS as i64, &args)
}
fn fsrm(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::FSRM as i64, &args)
}
fn fsrmi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::FSRMI as i64, &args)
}
// FP subtract emitters (four operand slots, as generated).
fn fsub_d(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::FSUBD as i64, &args)
}
fn fsub_h(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::FSUBH as i64, &args)
}
fn fsub_q(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::FSUBQ as i64, &args)
}
fn fsub_s(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::FSUBS as i64, &args)
}
// FP word store plus hypervisor fence/invalidate emitters.
fn fsw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::FSW as i64, &args)
}
fn hfence_gvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::HFENCEGVMA as i64, &args)
}
fn hfence_vvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::HFENCEVVMA as i64, &args)
}
fn hinval_gvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::HINVALGVMA as i64, &args)
}
fn hinval_vvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::HINVALVVMA as i64, &args)
}
fn hlv_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVB as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlv_bu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVBU as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlv_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVD as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlv_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVH as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlv_hu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVHU as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlv_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVW as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlv_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVWU as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlvx_hu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVXHU as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hlvx_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HLVXWU as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hsv_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HSVB as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hsv_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HSVD as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hsv_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HSVH as i64, &[op0.as_operand(), op1.as_operand()])
}
fn hsv_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::HSVW as i64, &[op0.as_operand(), op1.as_operand()])
}
// Unconditional jump emitters, including the *_pseudo single-operand forms.
fn j(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::J as i64, &args)
}
fn jal(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::JAL as i64, &args)
}
fn jal_pseudo(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::JALPSEUDO as i64, &args)
}
fn jalr(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::JALR as i64, &args)
}
fn jalr_pseudo(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::JALRPSEUDO as i64, &args)
}
fn jr(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::JR as i64, &args)
}
// Integer load emitters and load-reserved (lr.*) with the extra ordering slots.
fn lb(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LB as i64, &args)
}
fn lbu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LBU as i64, &args)
}
fn ld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LD as i64, &args)
}
fn lh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LH as i64, &args)
}
fn lhu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LHU as i64, &args)
}
fn lr_d(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::LRD as i64, &args)
}
fn lr_w(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::LRW as i64, &args)
}
// Upper-immediate load, word loads, and Zbb min/max emitters.
fn lui(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::LUI as i64, &args)
}
fn lw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LW as i64, &args)
}
fn lwu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::LWU as i64, &args)
}
fn max(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MAX as i64, &args)
}
fn maxu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MAXU as i64, &args)
}
fn min(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MIN as i64, &args)
}
fn minu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MINU as i64, &args)
}
fn mop_r_0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR0 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR1 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_10(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR10 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_11(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR11 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_12(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR12 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_13(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR13 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_14(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR14 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_15(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR15 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_16(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR16 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_17(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR17 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_18(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR18 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_19(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR19 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_2(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR2 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_20(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR20 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_21(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR21 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_22(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR22 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_23(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR23 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_24(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR24 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_25(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR25 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_26(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR26 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_27(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR27 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_28(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR28 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_29(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR29 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_3(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR3 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_30(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR30 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_31(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR31 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_4(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR4 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_5(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR5 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_6(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR6 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_7(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR7 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_8(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR8 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_9(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::MOPR9 as i64, &[op0.as_operand(), op1.as_operand()])
}
fn mop_r_n(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::MOPRN as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
// May-be-operation register-register (mop.rr.*) emitters.
fn mop_rr_0(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR0 as i64, &args)
}
fn mop_rr_1(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR1 as i64, &args)
}
fn mop_rr_2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR2 as i64, &args)
}
fn mop_rr_3(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR3 as i64, &args)
}
fn mop_rr_4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR4 as i64, &args)
}
fn mop_rr_5(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR5 as i64, &args)
}
fn mop_rr_6(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR6 as i64, &args)
}
fn mop_rr_7(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MOPRR7 as i64, &args)
}
fn mop_rr_n(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
    op4: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
        op4.as_operand(),
    ];
    self.emit_n(Opcode::MOPRRN as i64, &args)
}
// Machine-mode return, multiply family, register-move pseudo-ops,
// and zero-operand hint instructions (nop, ntl.*).
fn mret(&mut self) {
    self.emit_n(Opcode::MRET as i64, &[])
}
fn mul(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MUL as i64, &args)
}
fn mulh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MULH as i64, &args)
}
fn mulhsu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MULHSU as i64, &args)
}
fn mulhu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MULHU as i64, &args)
}
fn mulw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::MULW as i64, &args)
}
fn mv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::MV as i64, &args)
}
fn neg(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::NEG as i64, &args)
}
fn nop(&mut self) {
    self.emit_n(Opcode::NOP as i64, &[])
}
fn ntl_all(&mut self) {
    self.emit_n(Opcode::NTLALL as i64, &[])
}
fn ntl_p1(&mut self) {
    self.emit_n(Opcode::NTLP1 as i64, &[])
}
fn ntl_pall(&mut self) {
    self.emit_n(Opcode::NTLPALL as i64, &[])
}
fn ntl_s1(&mut self) {
    self.emit_n(Opcode::NTLS1 as i64, &[])
}
// Bitwise OR family, Zbkb pack family, and the pause hint.
fn or(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::OR as i64, &args)
}
fn orc_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::ORCB as i64, &args)
}
fn ori(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::ORI as i64, &args)
}
fn orn(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::ORN as i64, &args)
}
fn pack(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::PACK as i64, &args)
}
fn packh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::PACKH as i64, &args)
}
fn packw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::PACKW as i64, &args)
}
fn pause(&mut self) {
    self.emit_n(Opcode::PAUSE as i64, &[])
}
// Cache-management prefetch hints and counter-read pseudo-instructions.
fn prefetch_i(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::PREFETCHI as i64, &args)
}
fn prefetch_r(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::PREFETCHR as i64, &args)
}
fn prefetch_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::PREFETCHW as i64, &args)
}
fn rdcycle(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::RDCYCLE as i64, &args)
}
fn rdcycleh(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::RDCYCLEH as i64, &args)
}
fn rdinstret(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::RDINSTRET as i64, &args)
}
fn rdinstreth(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::RDINSTRETH as i64, &args)
}
fn rdtime(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::RDTIME as i64, &args)
}
fn rdtimeh(&mut self, op0: impl OperandCast) {
    let args = [op0.as_operand()];
    self.emit_n(Opcode::RDTIMEH as i64, &args)
}
// Remainder family and the return pseudo-instruction.
fn rem(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::REM as i64, &args)
}
fn remu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::REMU as i64, &args)
}
fn remuw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::REMUW as i64, &args)
}
fn remw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::REMW as i64, &args)
}
fn ret(&mut self) {
    self.emit_n(Opcode::RET as i64, &[])
}
// Byte-reverse and rotate emitters, including the RV32-specific variants.
fn rev8(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::REV8 as i64, &args)
}
fn rev8_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::REV8RV32 as i64, &args)
}
fn rol(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::ROL as i64, &args)
}
fn rolw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::ROLW as i64, &args)
}
fn ror(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::ROR as i64, &args)
}
fn rori(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::RORI as i64, &args)
}
fn rori_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::RORIRV32 as i64, &args)
}
fn roriw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::RORIW as i64, &args)
}
fn rorw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::RORW as i64, &args)
}
// Byte store, legacy sbreak, and store-conditional (sc.*) emitters.
fn sb(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SB as i64, &args)
}
fn sbreak(&mut self) {
    self.emit_n(Opcode::SBREAK as i64, &[])
}
fn sc_d(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
    op4: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
        op4.as_operand(),
    ];
    self.emit_n(Opcode::SCD as i64, &args)
}
fn sc_w(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
    op4: impl OperandCast,
) {
    let args = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
        op4.as_operand(),
    ];
    self.emit_n(Opcode::SCW as i64, &args)
}
// Legacy scall, doubleword/halfword stores, set/sign-extend pseudo-ops,
// and supervisor fence instructions.
fn scall(&mut self) {
    self.emit_n(Opcode::SCALL as i64, &[])
}
fn sd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SD as i64, &args)
}
fn seqz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SEQZ as i64, &args)
}
fn sext_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SEXTB as i64, &args)
}
fn sext_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SEXTH as i64, &args)
}
fn sext_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SEXTW as i64, &args)
}
fn sfence_inval_ir(&mut self) {
    self.emit_n(Opcode::SFENCEINVALIR as i64, &[])
}
fn sfence_vma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SFENCEVMA as i64, &args)
}
fn sfence_w_inval(&mut self) {
    self.emit_n(Opcode::SFENCEWINVAL as i64, &[])
}
fn sgtz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SGTZ as i64, &args)
}
fn sh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH as i64, &args)
}
// Zba shift-and-add emitters (sh1add..sh3add and their .uw variants).
fn sh1add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH1ADD as i64, &args)
}
fn sh1add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH1ADDUW as i64, &args)
}
fn sh2add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH2ADD as i64, &args)
}
fn sh2add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH2ADDUW as i64, &args)
}
fn sh3add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH3ADD as i64, &args)
}
fn sh3add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SH3ADDUW as i64, &args)
}
// Scalar crypto SHA-256 / SHA-512 sigma and sum emitters (Zknh).
fn sha256sig0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA256SIG0 as i64, &args)
}
fn sha256sig1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA256SIG1 as i64, &args)
}
fn sha256sum0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA256SUM0 as i64, &args)
}
fn sha256sum1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA256SUM1 as i64, &args)
}
fn sha512sig0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA512SIG0 as i64, &args)
}
fn sha512sig0h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SHA512SIG0H as i64, &args)
}
fn sha512sig0l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SHA512SIG0L as i64, &args)
}
fn sha512sig1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA512SIG1 as i64, &args)
}
fn sha512sig1h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SHA512SIG1H as i64, &args)
}
fn sha512sig1l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SHA512SIG1L as i64, &args)
}
fn sha512sum0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA512SUM0 as i64, &args)
}
fn sha512sum0r(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SHA512SUM0R as i64, &args)
}
fn sha512sum1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::SHA512SUM1 as i64, &args)
}
fn sha512sum1r(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let args = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::SHA512SUM1R as i64, &args)
}
fn sinval_vma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::SINVALVMA as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit an `sll` instruction (`Opcode::SLL`) with the given operands.
fn sll(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `slli` instruction (`Opcode::SLLI`) with the given operands.
fn slli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLLI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit the RV32 form of `slli` (`Opcode::SLLIRV32`) with the given operands.
fn slli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLLIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `slli_uw` instruction (`Opcode::SLLIUW`) with the given operands.
fn slli_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLLIUW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `slliw` instruction (`Opcode::SLLIW`) with the given operands.
fn slliw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLLIW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sllw` instruction (`Opcode::SLLW`) with the given operands.
fn sllw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLLW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `slt` instruction (`Opcode::SLT`) with the given operands.
fn slt(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLT as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `slti` instruction (`Opcode::SLTI`) with the given operands.
fn slti(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLTI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sltiu` instruction (`Opcode::SLTIU`) with the given operands.
fn sltiu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLTIU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sltu` instruction (`Opcode::SLTU`) with the given operands.
fn sltu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SLTU as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sltz` instruction (`Opcode::SLTZ`) with the given operands.
fn sltz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::SLTZ as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit an `sm3p0` instruction (`Opcode::SM3P0`) with the given operands.
fn sm3p0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::SM3P0 as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit an `sm3p1` instruction (`Opcode::SM3P1`) with the given operands.
fn sm3p1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::SM3P1 as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit an `sm4ed` instruction (`Opcode::SM4ED`) with the given operands.
fn sm4ed(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::SM4ED as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit an `sm4ks` instruction (`Opcode::SM4KS`) with the given operands.
fn sm4ks(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::SM4KS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit an `snez` instruction (`Opcode::SNEZ`) with the given operands.
fn snez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::SNEZ as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit an `sra` instruction (`Opcode::SRA`) with the given operands.
fn sra(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRA as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `srai` instruction (`Opcode::SRAI`) with the given operands.
fn srai(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRAI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit the RV32 form of `srai` (`Opcode::SRAIRV32`) with the given operands.
fn srai_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRAIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sraiw` instruction (`Opcode::SRAIW`) with the given operands.
fn sraiw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRAIW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sraw` instruction (`Opcode::SRAW`) with the given operands.
fn sraw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRAW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sret` instruction (`Opcode::SRET`); it takes no operands.
fn sret(&mut self) {
self.emit_n(Opcode::SRET as i64, &[])
}
/// Emit an `srl` instruction (`Opcode::SRL`) with the given operands.
fn srl(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRL as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `srli` instruction (`Opcode::SRLI`) with the given operands.
fn srli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRLI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit the RV32 form of `srli` (`Opcode::SRLIRV32`) with the given operands.
fn srli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRLIRV32 as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `srliw` instruction (`Opcode::SRLIW`) with the given operands.
fn srliw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRLIW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `srlw` instruction (`Opcode::SRLW`) with the given operands.
fn srlw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SRLW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `sub` instruction (`Opcode::SUB`) with the given operands.
fn sub(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SUB as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `subw` instruction (`Opcode::SUBW`) with the given operands.
fn subw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SUBW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `sw` instruction (`Opcode::SW`) with the given operands.
fn sw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::SW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit an `unzip` instruction (`Opcode::UNZIP`) with the given operands.
fn unzip(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::UNZIP as i64, &[op0.as_operand(), op1.as_operand()])
}
// Vector instruction emitters begin here. The 4-operand variants forward all
// four operands to `emit_n`; operand roles (and whether the last operand is a
// mask) are defined by the opcode table, not visible here.
/// Emit a `vaadd_vv` instruction (`Opcode::VAADDVV`) with the given operands.
fn vaadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VAADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vaadd_vx` instruction (`Opcode::VAADDVX`) with the given operands.
fn vaadd_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VAADDVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vaaddu_vv` instruction (`Opcode::VAADDUVV`) with the given operands.
fn vaaddu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VAADDUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vaaddu_vx` instruction (`Opcode::VAADDUVX`) with the given operands.
fn vaaddu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VAADDUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vadc_vim` instruction (`Opcode::VADCVIM`) with the given operands.
fn vadc_vim(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VADCVIM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vadc_vvm` instruction (`Opcode::VADCVVM`) with the given operands.
fn vadc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VADCVVM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vadc_vxm` instruction (`Opcode::VADCVXM`) with the given operands.
fn vadc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VADCVXM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vadd_vi` instruction (`Opcode::VADDVI`) with the given operands.
fn vadd_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VADDVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vadd_vv` instruction (`Opcode::VADDVV`) with the given operands.
fn vadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vadd_vx` instruction (`Opcode::VADDVX`) with the given operands.
fn vadd_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VADDVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vaesdf_vs` instruction (`Opcode::VAESDFVS`) with the given operands.
fn vaesdf_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESDFVS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesdf_vv` instruction (`Opcode::VAESDFVV`) with the given operands.
fn vaesdf_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESDFVV as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesdm_vs` instruction (`Opcode::VAESDMVS`) with the given operands.
fn vaesdm_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESDMVS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesdm_vv` instruction (`Opcode::VAESDMVV`) with the given operands.
fn vaesdm_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESDMVV as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesef_vs` instruction (`Opcode::VAESEFVS`) with the given operands.
fn vaesef_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESEFVS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesef_vv` instruction (`Opcode::VAESEFVV`) with the given operands.
fn vaesef_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESEFVV as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesem_vs` instruction (`Opcode::VAESEMVS`) with the given operands.
fn vaesem_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESEMVS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaesem_vv` instruction (`Opcode::VAESEMVV`) with the given operands.
fn vaesem_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESEMVV as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vaeskf1_vi` instruction (`Opcode::VAESKF1VI`) with the given operands.
fn vaeskf1_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VAESKF1VI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vaeskf2_vi` instruction (`Opcode::VAESKF2VI`) with the given operands.
fn vaeskf2_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VAESKF2VI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vaesz_vs` instruction (`Opcode::VAESZVS`) with the given operands.
fn vaesz_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VAESZVS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
/// Emit a `vand_vi` instruction (`Opcode::VANDVI`) with the given operands.
fn vand_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VANDVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vand_vv` instruction (`Opcode::VANDVV`) with the given operands.
fn vand_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VANDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vand_vx` instruction (`Opcode::VANDVX`) with the given operands.
fn vand_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VANDVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vandn_vv` instruction (`Opcode::VANDNVV`) with the given operands.
fn vandn_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VANDNVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vandn_vx` instruction (`Opcode::VANDNVX`) with the given operands.
fn vandn_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VANDNVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vasub_vv` instruction (`Opcode::VASUBVV`) with the given operands.
fn vasub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VASUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vasub_vx` instruction (`Opcode::VASUBVX`) with the given operands.
fn vasub_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VASUBVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vasubu_vv` instruction (`Opcode::VASUBUVV`) with the given operands.
fn vasubu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VASUBUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vasubu_vx` instruction (`Opcode::VASUBUVX`) with the given operands.
fn vasubu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VASUBUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vbrev8_v` instruction (`Opcode::VBREV8V`) with the given operands.
fn vbrev8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VBREV8V as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vbrev_v` instruction (`Opcode::VBREVV`) with the given operands.
fn vbrev_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VBREVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vclmul_vv` instruction (`Opcode::VCLMULVV`) with the given operands.
fn vclmul_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VCLMULVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vclmul_vx` instruction (`Opcode::VCLMULVX`) with the given operands.
fn vclmul_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VCLMULVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vclmulh_vv` instruction (`Opcode::VCLMULHVV`) with the given operands.
fn vclmulh_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VCLMULHVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vclmulh_vx` instruction (`Opcode::VCLMULHVX`) with the given operands.
fn vclmulh_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VCLMULHVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vclz_v` instruction (`Opcode::VCLZV`) with the given operands.
fn vclz_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VCLZV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vcompress_vm` instruction (`Opcode::VCOMPRESSVM`) with the given operands.
fn vcompress_vm(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VCOMPRESSVM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vcpop_m` instruction (`Opcode::VCPOPM`) with the given operands.
fn vcpop_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VCPOPM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vcpop_v` instruction (`Opcode::VCPOPV`) with the given operands.
fn vcpop_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VCPOPV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vctz_v` instruction (`Opcode::VCTZV`) with the given operands.
fn vctz_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VCTZV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vdiv_vv` instruction (`Opcode::VDIVVV`) with the given operands.
fn vdiv_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VDIVVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vdiv_vx` instruction (`Opcode::VDIVVX`) with the given operands.
fn vdiv_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VDIVVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vdivu_vv` instruction (`Opcode::VDIVUVV`) with the given operands.
fn vdivu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VDIVUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vdivu_vx` instruction (`Opcode::VDIVUVX`) with the given operands.
fn vdivu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VDIVUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfadd_vf` instruction (`Opcode::VFADDVF`) with the given operands.
fn vfadd_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFADDVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfadd_vv` instruction (`Opcode::VFADDVV`) with the given operands.
fn vfadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfclass_v` instruction (`Opcode::VFCLASSV`) with the given operands.
fn vfclass_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFCLASSV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfcvt_f_x_v` instruction (`Opcode::VFCVTFXV`) with the given operands.
fn vfcvt_f_x_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFCVTFXV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfcvt_f_xu_v` instruction (`Opcode::VFCVTFXUV`) with the given operands.
fn vfcvt_f_xu_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFCVTFXUV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfcvt_rtz_x_f_v` instruction (`Opcode::VFCVTRTZXFV`) with the given operands.
fn vfcvt_rtz_x_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFCVTRTZXFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfcvt_rtz_xu_f_v` instruction (`Opcode::VFCVTRTZXUFV`) with the given operands.
fn vfcvt_rtz_xu_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFCVTRTZXUFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfcvt_x_f_v` instruction (`Opcode::VFCVTXFV`) with the given operands.
fn vfcvt_x_f_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFCVTXFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfcvt_xu_f_v` instruction (`Opcode::VFCVTXUFV`) with the given operands.
fn vfcvt_xu_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFCVTXUFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfdiv_vf` instruction (`Opcode::VFDIVVF`) with the given operands.
fn vfdiv_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFDIVVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfdiv_vv` instruction (`Opcode::VFDIVVV`) with the given operands.
fn vfdiv_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFDIVVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfirst_m` instruction (`Opcode::VFIRSTM`) with the given operands.
fn vfirst_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFIRSTM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfmacc_vf` instruction (`Opcode::VFMACCVF`) with the given operands.
fn vfmacc_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMACCVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmacc_vv` instruction (`Opcode::VFMACCVV`) with the given operands.
fn vfmacc_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMACCVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmadd_vf` instruction (`Opcode::VFMADDVF`) with the given operands.
fn vfmadd_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMADDVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmadd_vv` instruction (`Opcode::VFMADDVV`) with the given operands.
fn vfmadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmax_vf` instruction (`Opcode::VFMAXVF`) with the given operands.
fn vfmax_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMAXVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmax_vv` instruction (`Opcode::VFMAXVV`) with the given operands.
fn vfmax_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMAXVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmerge_vfm` instruction (`Opcode::VFMERGEVFM`) with the given operands.
fn vfmerge_vfm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFMERGEVFM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfmin_vf` instruction (`Opcode::VFMINVF`) with the given operands.
fn vfmin_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMINVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmin_vv` instruction (`Opcode::VFMINVV`) with the given operands.
fn vfmin_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMINVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmsac_vf` instruction (`Opcode::VFMSACVF`) with the given operands.
fn vfmsac_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMSACVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmsac_vv` instruction (`Opcode::VFMSACVV`) with the given operands.
fn vfmsac_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMSACVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmsub_vf` instruction (`Opcode::VFMSUBVF`) with the given operands.
fn vfmsub_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMSUBVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmsub_vv` instruction (`Opcode::VFMSUBVV`) with the given operands.
fn vfmsub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMSUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmul_vf` instruction (`Opcode::VFMULVF`) with the given operands.
fn vfmul_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMULVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmul_vv` instruction (`Opcode::VFMULVV`) with the given operands.
fn vfmul_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFMULVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfmv_f_s` instruction (`Opcode::VFMVFS`) with the given operands.
fn vfmv_f_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VFMVFS as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit a `vfmv_s_f` instruction (`Opcode::VFMVSF`) with the given operands.
fn vfmv_s_f(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VFMVSF as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit a `vfmv_v_f` instruction (`Opcode::VFMVVF`) with the given operands.
fn vfmv_v_f(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VFMVVF as i64, &[op0.as_operand(), op1.as_operand()])
}
/// Emit a `vfncvt_f_f_w` instruction (`Opcode::VFNCVTFFW`) with the given operands.
fn vfncvt_f_f_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTFFW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_f_x_w` instruction (`Opcode::VFNCVTFXW`) with the given operands.
fn vfncvt_f_x_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTFXW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_f_xu_w` instruction (`Opcode::VFNCVTFXUW`) with the given operands.
fn vfncvt_f_xu_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTFXUW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_rod_f_f_w` instruction (`Opcode::VFNCVTRODFFW`) with the given operands.
fn vfncvt_rod_f_f_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTRODFFW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_rtz_x_f_w` instruction (`Opcode::VFNCVTRTZXFW`) with the given operands.
fn vfncvt_rtz_x_f_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTRTZXFW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_rtz_xu_f_w` instruction (`Opcode::VFNCVTRTZXUFW`) with the given operands.
fn vfncvt_rtz_xu_f_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTRTZXUFW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_x_f_w` instruction (`Opcode::VFNCVTXFW`) with the given operands.
fn vfncvt_x_f_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTXFW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfncvt_xu_f_w` instruction (`Opcode::VFNCVTXUFW`) with the given operands.
fn vfncvt_xu_f_w(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFNCVTXUFW as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfnmacc_vf` instruction (`Opcode::VFNMACCVF`) with the given operands.
fn vfnmacc_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMACCVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmacc_vv` instruction (`Opcode::VFNMACCVV`) with the given operands.
fn vfnmacc_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMACCVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmadd_vf` instruction (`Opcode::VFNMADDVF`) with the given operands.
fn vfnmadd_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMADDVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmadd_vv` instruction (`Opcode::VFNMADDVV`) with the given operands.
fn vfnmadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmsac_vf` instruction (`Opcode::VFNMSACVF`) with the given operands.
fn vfnmsac_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMSACVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmsac_vv` instruction (`Opcode::VFNMSACVV`) with the given operands.
fn vfnmsac_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMSACVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmsub_vf` instruction (`Opcode::VFNMSUBVF`) with the given operands.
fn vfnmsub_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMSUBVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfnmsub_vv` instruction (`Opcode::VFNMSUBVV`) with the given operands.
fn vfnmsub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFNMSUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfrdiv_vf` instruction (`Opcode::VFRDIVVF`) with the given operands.
fn vfrdiv_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFRDIVVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfrec7_v` instruction (`Opcode::VFREC7V`) with the given operands.
fn vfrec7_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFREC7V as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfredmax_vs` instruction (`Opcode::VFREDMAXVS`) with the given operands.
fn vfredmax_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFREDMAXVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfredmin_vs` instruction (`Opcode::VFREDMINVS`) with the given operands.
fn vfredmin_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFREDMINVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfredosum_vs` instruction (`Opcode::VFREDOSUMVS`) with the given operands.
fn vfredosum_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFREDOSUMVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfredsum_vs` instruction (`Opcode::VFREDSUMVS`) with the given operands.
fn vfredsum_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFREDSUMVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfredusum_vs` instruction (`Opcode::VFREDUSUMVS`) with the given operands.
fn vfredusum_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFREDUSUMVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfrsqrt7_v` instruction (`Opcode::VFRSQRT7V`) with the given operands.
fn vfrsqrt7_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFRSQRT7V as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfrsub_vf` instruction (`Opcode::VFRSUBVF`) with the given operands.
fn vfrsub_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFRSUBVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsgnj_vf` instruction (`Opcode::VFSGNJVF`) with the given operands.
fn vfsgnj_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSGNJVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsgnj_vv` instruction (`Opcode::VFSGNJVV`) with the given operands.
fn vfsgnj_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSGNJVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsgnjn_vf` instruction (`Opcode::VFSGNJNVF`) with the given operands.
fn vfsgnjn_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSGNJNVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsgnjn_vv` instruction (`Opcode::VFSGNJNVV`) with the given operands.
fn vfsgnjn_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSGNJNVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsgnjx_vf` instruction (`Opcode::VFSGNJXVF`) with the given operands.
fn vfsgnjx_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSGNJXVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsgnjx_vv` instruction (`Opcode::VFSGNJXVV`) with the given operands.
fn vfsgnjx_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSGNJXVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfslide1down_vf` instruction (`Opcode::VFSLIDE1DOWNVF`) with the given operands.
fn vfslide1down_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSLIDE1DOWNVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfslide1up_vf` instruction (`Opcode::VFSLIDE1UPVF`) with the given operands.
fn vfslide1up_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSLIDE1UPVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsqrt_v` instruction (`Opcode::VFSQRTV`) with the given operands.
fn vfsqrt_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VFSQRTV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit a `vfsub_vf` instruction (`Opcode::VFSUBVF`) with the given operands.
fn vfsub_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSUBVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfsub_vv` instruction (`Opcode::VFSUBVV`) with the given operands.
fn vfsub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFSUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfwadd_vf` instruction (`Opcode::VFWADDVF`) with the given operands.
fn vfwadd_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWADDVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfwadd_vv` instruction (`Opcode::VFWADDVV`) with the given operands.
fn vfwadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfwadd_wf` instruction (`Opcode::VFWADDWF`) with the given operands.
fn vfwadd_wf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWADDWF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vfwadd_wv` instruction (`Opcode::VFWADDWV`) with the given operands.
fn vfwadd_wv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWADDWV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwcvt_f_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTFFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwcvt_f_x_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTFXV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwcvt_f_xu_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTFXUV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwcvt_rtz_x_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTRTZXFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwcvt_rtz_xu_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTRTZXUFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwcvt_x_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTXFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwcvt_xu_f_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
) {
self.emit_n(
Opcode::VFWCVTXUFV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vfwmacc_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWMACCVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwmacc_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWMACCVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwmsac_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWMSACVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwmsac_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWMSACVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwmul_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWMULVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwmul_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWMULVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwnmacc_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWNMACCVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwnmacc_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWNMACCVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwnmsac_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWNMSACVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwnmsac_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWNMSACVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwredosum_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWREDOSUMVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwredsum_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWREDSUMVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwredusum_vs(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWREDUSUMVS as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwsub_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWSUBVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwsub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWSUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwsub_wf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWSUBWF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vfwsub_wv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VFWSUBWV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vghsh_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VGHSHVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vgmul_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VGMULVV as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vid_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VIDV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn viota_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VIOTAM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vl1r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VL1RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vl1re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL1RE16V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl1re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL1RE32V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl1re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL1RE64V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl1re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL1RE8V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl2r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VL2RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vl2re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL2RE16V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl2re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL2RE32V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl2re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL2RE64V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl2re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL2RE8V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl4r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VL4RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vl4re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL4RE16V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl4re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL4RE32V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl4re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL4RE64V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl4re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL4RE8V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl8r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VL8RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vl8re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL8RE16V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl8re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL8RE32V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl8re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL8RE64V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vl8re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VL8RE8V as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vle16_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE16V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle16ff_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE16FFV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle1_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VLE1V as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vle32_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE32V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle32ff_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE32FFV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle64_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE64V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle64ff_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE64FFV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle8_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE8V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vle8ff_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VLE8FFV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vlm_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VLMV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vloxei16_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLOXEI16V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vloxei32_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLOXEI32V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vloxei64_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLOXEI64V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vloxei8_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLOXEI8V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vlse16_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLSE16V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vlse32_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLSE32V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vlse64_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLSE64V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vlse8_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLSE8V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vluxei16_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLUXEI16V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vluxei32_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLUXEI32V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vluxei64_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLUXEI64V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vluxei8_v(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
op4: impl OperandCast,
) {
self.emit_n(
Opcode::VLUXEI8V as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
op4.as_operand(),
],
)
}
fn vmacc_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMACCVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmacc_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMACCVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmadc_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMADCVI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmadc_vim(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMADCVIM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmadc_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMADCVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmadc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMADCVVM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmadc_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMADCVX as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmadc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMADCVXM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmadd_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMADDVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmadd_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMADDVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmand_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMANDMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmandn_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMANDNMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmandnot_mm(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMANDNOTMM as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmax_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMAXVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmax_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMAXVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmaxu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMAXUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmaxu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMAXUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmerge_vim(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMERGEVIM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmerge_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMERGEVVM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmerge_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMERGEVXM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmfeq_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFEQVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfeq_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFEQVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfge_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFGEVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfgt_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFGTVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfle_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFLEVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfle_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFLEVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmflt_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFLTVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmflt_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFLTVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfne_vf(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFNEVF as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmfne_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMFNEVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmin_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMINVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmin_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMINVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vminu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMINUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vminu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMINUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmnand_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMNANDMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmnor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMNORMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMORMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmorn_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMORNMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmornot_mm(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMORNOTMM as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsbc_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSBCVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmsbc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSBCVVM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmsbc_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSBCVX as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmsbc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSBCVXM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmsbf_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSBFM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmseq_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSEQVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmseq_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSEQVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmseq_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSEQVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsgt_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSGTVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsgt_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSGTVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsgtu_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSGTUVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsgtu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSGTUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsif_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSIFM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmsle_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLEVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsle_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLEVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsle_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLEVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsleu_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLEUVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsleu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLEUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsleu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLEUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmslt_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLTVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmslt_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLTVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsltu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLTUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsltu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSLTUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsne_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSNEVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsne_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSNEVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsne_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMSNEVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmsof_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMSOFM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmul_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmul_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmulh_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULHVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmulh_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULHVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmulhsu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULHSUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmulhsu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULHSUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmulhu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULHUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmulhu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VMULHUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vmv1r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMV1RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv2r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMV2RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv4r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMV4RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv8r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMV8RV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv_s_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMVSX as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv_v_i(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMVVI as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv_v_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMVVV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv_v_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMVVX as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmv_x_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VMVXS as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vmxnor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMXNORMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vmxor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VMXORMM as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
/// Emit `vnclip.wi` (opcode `VNCLIPWI`).
fn vnclip_wi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNCLIPWI as i64, &ops)
}
/// Emit `vnclip.wv` (opcode `VNCLIPWV`).
fn vnclip_wv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNCLIPWV as i64, &ops)
}
/// Emit `vnclip.wx` (opcode `VNCLIPWX`).
fn vnclip_wx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNCLIPWX as i64, &ops)
}
/// Emit `vnclipu.wi` (opcode `VNCLIPUWI`).
fn vnclipu_wi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNCLIPUWI as i64, &ops)
}
/// Emit `vnclipu.wv` (opcode `VNCLIPUWV`).
fn vnclipu_wv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNCLIPUWV as i64, &ops)
}
/// Emit `vnclipu.wx` (opcode `VNCLIPUWX`).
fn vnclipu_wx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNCLIPUWX as i64, &ops)
}
/// Emit `vnmsac.vv` (opcode `VNMSACVV`).
fn vnmsac_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNMSACVV as i64, &ops)
}
/// Emit `vnmsac.vx` (opcode `VNMSACVX`).
fn vnmsac_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNMSACVX as i64, &ops)
}
/// Emit `vnmsub.vv` (opcode `VNMSUBVV`).
fn vnmsub_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNMSUBVV as i64, &ops)
}
/// Emit `vnmsub.vx` (opcode `VNMSUBVX`).
fn vnmsub_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNMSUBVX as i64, &ops)
}
/// Emit `vnsra.wi` (opcode `VNSRAWI`).
fn vnsra_wi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNSRAWI as i64, &ops)
}
/// Emit `vnsra.wv` (opcode `VNSRAWV`).
fn vnsra_wv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNSRAWV as i64, &ops)
}
/// Emit `vnsra.wx` (opcode `VNSRAWX`).
fn vnsra_wx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNSRAWX as i64, &ops)
}
/// Emit `vnsrl.wi` (opcode `VNSRLWI`).
fn vnsrl_wi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNSRLWI as i64, &ops)
}
/// Emit `vnsrl.wv` (opcode `VNSRLWV`).
fn vnsrl_wv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNSRLWV as i64, &ops)
}
/// Emit `vnsrl.wx` (opcode `VNSRLWX`).
fn vnsrl_wx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VNSRLWX as i64, &ops)
}
/// Emit `vor.vi` (opcode `VORVI`).
fn vor_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VORVI as i64, &ops)
}
/// Emit `vor.vv` (opcode `VORVV`).
fn vor_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VORVV as i64, &ops)
}
/// Emit `vor.vx` (opcode `VORVX`).
fn vor_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VORVX as i64, &ops)
}
/// Emit `vpopc.m` (opcode `VPOPCM`).
fn vpopc_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VPOPCM as i64, &ops)
}
/// Emit `vredand.vs` (opcode `VREDANDVS`).
fn vredand_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDANDVS as i64, &ops)
}
/// Emit `vredmax.vs` (opcode `VREDMAXVS`).
fn vredmax_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDMAXVS as i64, &ops)
}
/// Emit `vredmaxu.vs` (opcode `VREDMAXUVS`).
fn vredmaxu_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDMAXUVS as i64, &ops)
}
/// Emit `vredmin.vs` (opcode `VREDMINVS`).
fn vredmin_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDMINVS as i64, &ops)
}
/// Emit `vredminu.vs` (opcode `VREDMINUVS`).
fn vredminu_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDMINUVS as i64, &ops)
}
/// Emit `vredor.vs` (opcode `VREDORVS`).
fn vredor_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDORVS as i64, &ops)
}
/// Emit `vredsum.vs` (opcode `VREDSUMVS`).
fn vredsum_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDSUMVS as i64, &ops)
}
/// Emit `vredxor.vs` (opcode `VREDXORVS`).
fn vredxor_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREDXORVS as i64, &ops)
}
/// Emit `vrem.vv` (opcode `VREMVV`).
fn vrem_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREMVV as i64, &ops)
}
/// Emit `vrem.vx` (opcode `VREMVX`).
fn vrem_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREMVX as i64, &ops)
}
/// Emit `vremu.vv` (opcode `VREMUVV`).
fn vremu_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREMUVV as i64, &ops)
}
/// Emit `vremu.vx` (opcode `VREMUVX`).
fn vremu_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VREMUVX as i64, &ops)
}
/// Emit `vrev8.v` (opcode `VREV8V`).
fn vrev8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VREV8V as i64, &ops)
}
/// Emit `vrgather.vi` (opcode `VRGATHERVI`).
fn vrgather_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRGATHERVI as i64, &ops)
}
/// Emit `vrgather.vv` (opcode `VRGATHERVV`).
fn vrgather_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRGATHERVV as i64, &ops)
}
/// Emit `vrgather.vx` (opcode `VRGATHERVX`).
fn vrgather_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRGATHERVX as i64, &ops)
}
/// Emit `vrgatherei16.vv` (opcode `VRGATHEREI16VV`).
fn vrgatherei16_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRGATHEREI16VV as i64, &ops)
}
/// Emit `vrol.vv` (opcode `VROLVV`).
fn vrol_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VROLVV as i64, &ops)
}
/// Emit `vrol.vx` (opcode `VROLVX`).
fn vrol_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VROLVX as i64, &ops)
}
/// Emit `vror.vi` (opcode `VRORVI`).
fn vror_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRORVI as i64, &ops)
}
/// Emit `vror.vv` (opcode `VRORVV`).
fn vror_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRORVV as i64, &ops)
}
/// Emit `vror.vx` (opcode `VRORVX`).
fn vror_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRORVX as i64, &ops)
}
/// Emit `vrsub.vi` (opcode `VRSUBVI`).
fn vrsub_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRSUBVI as i64, &ops)
}
/// Emit `vrsub.vx` (opcode `VRSUBVX`).
fn vrsub_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VRSUBVX as i64, &ops)
}
/// Emit `vs1r.v` (opcode `VS1RV`).
fn vs1r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::VS1RV as i64, &ops)
}
/// Emit `vs2r.v` (opcode `VS2RV`).
fn vs2r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::VS2RV as i64, &ops)
}
/// Emit `vs4r.v` (opcode `VS4RV`).
fn vs4r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::VS4RV as i64, &ops)
}
/// Emit `vs8r.v` (opcode `VS8RV`).
fn vs8r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::VS8RV as i64, &ops)
}
/// Emit `vsadd.vi` (opcode `VSADDVI`).
fn vsadd_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSADDVI as i64, &ops)
}
/// Emit `vsadd.vv` (opcode `VSADDVV`).
fn vsadd_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSADDVV as i64, &ops)
}
/// Emit `vsadd.vx` (opcode `VSADDVX`).
fn vsadd_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSADDVX as i64, &ops)
}
/// Emit `vsaddu.vi` (opcode `VSADDUVI`).
fn vsaddu_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSADDUVI as i64, &ops)
}
/// Emit `vsaddu.vv` (opcode `VSADDUVV`).
fn vsaddu_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSADDUVV as i64, &ops)
}
/// Emit `vsaddu.vx` (opcode `VSADDUVX`).
fn vsaddu_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSADDUVX as i64, &ops)
}
/// Emit `vsbc.vvm` (opcode `VSBCVVM`).
fn vsbc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSBCVVM as i64, &ops)
}
/// Emit `vsbc.vxm` (opcode `VSBCVXM`).
fn vsbc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSBCVXM as i64, &ops)
}
/// Emit `vse16.v` (opcode `VSE16V`).
fn vse16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSE16V as i64, &ops)
}
/// Emit `vse1.v` (opcode `VSE1V`).
fn vse1_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::VSE1V as i64, &ops)
}
/// Emit `vse32.v` (opcode `VSE32V`).
fn vse32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSE32V as i64, &ops)
}
/// Emit `vse64.v` (opcode `VSE64V`).
fn vse64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSE64V as i64, &ops)
}
/// Emit `vse8.v` (opcode `VSE8V`).
fn vse8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSE8V as i64, &ops)
}
/// Emit `vsetivli` (opcode `VSETIVLI`).
fn vsetivli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSETIVLI as i64, &ops)
}
/// Emit `vsetvl` (opcode `VSETVL`).
fn vsetvl(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSETVL as i64, &ops)
}
/// Emit `vsetvli` (opcode `VSETVLI`).
fn vsetvli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSETVLI as i64, &ops)
}
/// Emit `vsext.vf2` (opcode `VSEXTVF2`).
fn vsext_vf2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSEXTVF2 as i64, &ops)
}
/// Emit `vsext.vf4` (opcode `VSEXTVF4`).
fn vsext_vf4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSEXTVF4 as i64, &ops)
}
/// Emit `vsext.vf8` (opcode `VSEXTVF8`).
fn vsext_vf8(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VSEXTVF8 as i64, &ops)
}
fn vsha2ch_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VSHA2CHVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vsha2cl_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VSHA2CLVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vsha2ms_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VSHA2MSVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vslide1down_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLIDE1DOWNVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vslide1up_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLIDE1UPVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vslidedown_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLIDEDOWNVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vslidedown_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLIDEDOWNVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vslideup_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLIDEUPVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vslideup_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLIDEUPVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsll_vi(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLLVI as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsll_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLLVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsll_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSLLVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsm3c_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VSM3CVI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vsm3me_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VSM3MEVV as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vsm4k_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
self.emit_n(
Opcode::VSM4KVI as i64,
&[op0.as_operand(), op1.as_operand(), op2.as_operand()],
)
}
fn vsm4r_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VSM4RVS as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vsm4r_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(
Opcode::VSM4RVV as i64,
&[op0.as_operand(), op1.as_operand()],
)
}
fn vsm_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
self.emit_n(Opcode::VSMV as i64, &[op0.as_operand(), op1.as_operand()])
}
fn vsmul_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSMULVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsmul_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSMULVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit `vsoxei16.v` (opcode `VSOXEI16V`).
fn vsoxei16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSOXEI16V as i64, &ops)
}
/// Emit `vsoxei32.v` (opcode `VSOXEI32V`).
fn vsoxei32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSOXEI32V as i64, &ops)
}
/// Emit `vsoxei64.v` (opcode `VSOXEI64V`).
fn vsoxei64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSOXEI64V as i64, &ops)
}
/// Emit `vsoxei8.v` (opcode `VSOXEI8V`).
fn vsoxei8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSOXEI8V as i64, &ops)
}
/// Emit `vsra.vi` (opcode `VSRAVI`).
fn vsra_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSRAVI as i64, &ops)
}
/// Emit `vsra.vv` (opcode `VSRAVV`).
fn vsra_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSRAVV as i64, &ops)
}
/// Emit `vsra.vx` (opcode `VSRAVX`).
fn vsra_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSRAVX as i64, &ops)
}
/// Emit `vsrl.vi` (opcode `VSRLVI`).
fn vsrl_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSRLVI as i64, &ops)
}
/// Emit `vsrl.vv` (opcode `VSRLVV`).
fn vsrl_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSRLVV as i64, &ops)
}
/// Emit `vsrl.vx` (opcode `VSRLVX`).
fn vsrl_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSRLVX as i64, &ops)
}
/// Emit `vsse16.v` (opcode `VSSE16V`).
fn vsse16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSSE16V as i64, &ops)
}
/// Emit `vsse32.v` (opcode `VSSE32V`).
fn vsse32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSSE32V as i64, &ops)
}
/// Emit `vsse64.v` (opcode `VSSE64V`).
fn vsse64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSSE64V as i64, &ops)
}
/// Emit `vsse8.v` (opcode `VSSE8V`).
fn vsse8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSSE8V as i64, &ops)
}
/// Emit `vssra.vi` (opcode `VSSRAVI`).
fn vssra_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSSRAVI as i64, &ops)
}
/// Emit `vssra.vv` (opcode `VSSRAVV`).
fn vssra_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSSRAVV as i64, &ops)
}
/// Emit `vssra.vx` (opcode `VSSRAVX`).
fn vssra_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSSRAVX as i64, &ops)
}
/// Emit `vssrl.vi` (opcode `VSSRLVI`).
fn vssrl_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSSRLVI as i64, &ops)
}
/// Emit `vssrl.vv` (opcode `VSSRLVV`).
fn vssrl_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSSRLVV as i64, &ops)
}
/// Emit `vssrl.vx` (opcode `VSSRLVX`).
fn vssrl_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VSSRLVX as i64, &ops)
}
fn vssub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSSUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vssub_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSSUBVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vssubu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSSUBUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vssubu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSSUBUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsub_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSUBVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vsub_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VSUBVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit `vsuxei16.v` (opcode `VSUXEI16V`).
fn vsuxei16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSUXEI16V as i64, &ops)
}
/// Emit `vsuxei32.v` (opcode `VSUXEI32V`).
fn vsuxei32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSUXEI32V as i64, &ops)
}
/// Emit `vsuxei64.v` (opcode `VSUXEI64V`).
fn vsuxei64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSUXEI64V as i64, &ops)
}
/// Emit `vsuxei8.v` (opcode `VSUXEI8V`).
fn vsuxei8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast, op4: impl OperandCast) {
    let ops = [
        op0.as_operand(), op1.as_operand(), op2.as_operand(),
        op3.as_operand(), op4.as_operand(),
    ];
    self.emit_n(Opcode::VSUXEI8V as i64, &ops)
}
/// Emit `vwadd.vv` (opcode `VWADDVV`).
fn vwadd_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDVV as i64, &ops)
}
/// Emit `vwadd.vx` (opcode `VWADDVX`).
fn vwadd_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDVX as i64, &ops)
}
/// Emit `vwadd.wv` (opcode `VWADDWV`).
fn vwadd_wv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDWV as i64, &ops)
}
/// Emit `vwadd.wx` (opcode `VWADDWX`).
fn vwadd_wx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDWX as i64, &ops)
}
/// Emit `vwaddu.vv` (opcode `VWADDUVV`).
fn vwaddu_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDUVV as i64, &ops)
}
/// Emit `vwaddu.vx` (opcode `VWADDUVX`).
fn vwaddu_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDUVX as i64, &ops)
}
/// Emit `vwaddu.wv` (opcode `VWADDUWV`).
fn vwaddu_wv(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDUWV as i64, &ops)
}
/// Emit `vwaddu.wx` (opcode `VWADDUWX`).
fn vwaddu_wx(&mut self, op0: impl OperandCast, op1: impl OperandCast,
    op2: impl OperandCast, op3: impl OperandCast) {
    let ops = [op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand()];
    self.emit_n(Opcode::VWADDUWX as i64, &ops)
}
fn vwmacc_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VWMACCVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vwmacc_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VWMACCVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vwmaccsu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VWMACCSUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vwmaccsu_vx(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VWMACCSUVX as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
fn vwmaccu_vv(
&mut self,
op0: impl OperandCast,
op1: impl OperandCast,
op2: impl OperandCast,
op3: impl OperandCast,
) {
self.emit_n(
Opcode::VWMACCUVV as i64,
&[
op0.as_operand(),
op1.as_operand(),
op2.as_operand(),
op3.as_operand(),
],
)
}
/// Emit a `vwmaccu.vx` instruction (`Opcode::VWMACCUVX`) with the given operands.
fn vwmaccu_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMACCUVX as i64, &operands)
}
/// Emit a `vwmaccus.vx` instruction (`Opcode::VWMACCUSVX`) with the given operands.
fn vwmaccus_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMACCUSVX as i64, &operands)
}
/// Emit a `vwmul.vv` instruction (`Opcode::VWMULVV`) with the given operands.
fn vwmul_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMULVV as i64, &operands)
}
/// Emit a `vwmul.vx` instruction (`Opcode::VWMULVX`) with the given operands.
fn vwmul_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMULVX as i64, &operands)
}
/// Emit a `vwmulsu.vv` instruction (`Opcode::VWMULSUVV`) with the given operands.
fn vwmulsu_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMULSUVV as i64, &operands)
}
/// Emit a `vwmulsu.vx` instruction (`Opcode::VWMULSUVX`) with the given operands.
fn vwmulsu_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMULSUVX as i64, &operands)
}
/// Emit a `vwmulu.vv` instruction (`Opcode::VWMULUVV`) with the given operands.
fn vwmulu_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMULUVV as i64, &operands)
}
/// Emit a `vwmulu.vx` instruction (`Opcode::VWMULUVX`) with the given operands.
fn vwmulu_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWMULUVX as i64, &operands)
}
/// Emit a `vwredsum.vs` instruction (`Opcode::VWREDSUMVS`) with the given operands.
fn vwredsum_vs(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWREDSUMVS as i64, &operands)
}
/// Emit a `vwredsumu.vs` instruction (`Opcode::VWREDSUMUVS`) with the given operands.
fn vwredsumu_vs(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWREDSUMUVS as i64, &operands)
}
/// Emit a `vwsll.vi` instruction (`Opcode::VWSLLVI`) with the given operands.
fn vwsll_vi(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSLLVI as i64, &operands)
}
/// Emit a `vwsll.vv` instruction (`Opcode::VWSLLVV`) with the given operands.
fn vwsll_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSLLVV as i64, &operands)
}
/// Emit a `vwsll.vx` instruction (`Opcode::VWSLLVX`) with the given operands.
fn vwsll_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSLLVX as i64, &operands)
}
/// Emit a `vwsub.vv` instruction (`Opcode::VWSUBVV`) with the given operands.
fn vwsub_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBVV as i64, &operands)
}
/// Emit a `vwsub.vx` instruction (`Opcode::VWSUBVX`) with the given operands.
fn vwsub_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBVX as i64, &operands)
}
/// Emit a `vwsub.wv` instruction (`Opcode::VWSUBWV`) with the given operands.
fn vwsub_wv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBWV as i64, &operands)
}
/// Emit a `vwsub.wx` instruction (`Opcode::VWSUBWX`) with the given operands.
fn vwsub_wx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBWX as i64, &operands)
}
/// Emit a `vwsubu.vv` instruction (`Opcode::VWSUBUVV`) with the given operands.
fn vwsubu_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBUVV as i64, &operands)
}
/// Emit a `vwsubu.vx` instruction (`Opcode::VWSUBUVX`) with the given operands.
fn vwsubu_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBUVX as i64, &operands)
}
/// Emit a `vwsubu.wv` instruction (`Opcode::VWSUBUWV`) with the given operands.
fn vwsubu_wv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBUWV as i64, &operands)
}
/// Emit a `vwsubu.wx` instruction (`Opcode::VWSUBUWX`) with the given operands.
fn vwsubu_wx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VWSUBUWX as i64, &operands)
}
/// Emit a `vxor.vi` instruction (`Opcode::VXORVI`) with the given operands.
fn vxor_vi(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VXORVI as i64, &operands)
}
/// Emit a `vxor.vv` instruction (`Opcode::VXORVV`) with the given operands.
fn vxor_vv(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VXORVV as i64, &operands)
}
/// Emit a `vxor.vx` instruction (`Opcode::VXORVX`) with the given operands.
fn vxor_vx(
    &mut self,
    op0: impl OperandCast,
    op1: impl OperandCast,
    op2: impl OperandCast,
    op3: impl OperandCast,
) {
    let operands = [
        op0.as_operand(),
        op1.as_operand(),
        op2.as_operand(),
        op3.as_operand(),
    ];
    self.emit_n(Opcode::VXORVX as i64, &operands)
}
/// Emit a `vzext.vf2` instruction (`Opcode::VZEXTVF2`) with the given operands.
fn vzext_vf2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VZEXTVF2 as i64, &operands)
}
/// Emit a `vzext.vf4` instruction (`Opcode::VZEXTVF4`) with the given operands.
fn vzext_vf4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VZEXTVF4 as i64, &operands)
}
/// Emit a `vzext.vf8` instruction (`Opcode::VZEXTVF8`) with the given operands.
fn vzext_vf8(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::VZEXTVF8 as i64, &operands)
}
/// Emit a `wfi` instruction (`Opcode::WFI`); takes no operands.
fn wfi(&mut self) {
    let opcode = Opcode::WFI as i64;
    self.emit_n(opcode, &[])
}
/// Emit a `wrs.nto` instruction (`Opcode::WRSNTO`); takes no operands.
fn wrs_nto(&mut self) {
    let opcode = Opcode::WRSNTO as i64;
    self.emit_n(opcode, &[])
}
/// Emit a `wrs.sto` instruction (`Opcode::WRSSTO`); takes no operands.
fn wrs_sto(&mut self) {
    let opcode = Opcode::WRSSTO as i64;
    self.emit_n(opcode, &[])
}
/// Emit an `xnor` instruction (`Opcode::XNOR`) with the given operands.
fn xnor(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::XNOR as i64, &operands)
}
/// Emit an `xor` instruction (`Opcode::XOR`) with the given operands.
fn xor(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::XOR as i64, &operands)
}
/// Emit an `xori` instruction (`Opcode::XORI`) with the given operands.
fn xori(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::XORI as i64, &operands)
}
/// Emit an `xperm4` instruction (`Opcode::XPERM4`) with the given operands.
fn xperm4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::XPERM4 as i64, &operands)
}
/// Emit an `xperm8` instruction (`Opcode::XPERM8`) with the given operands.
fn xperm8(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand(), op2.as_operand()];
    self.emit_n(Opcode::XPERM8 as i64, &operands)
}
/// Emit a `zext.b` instruction (`Opcode::ZEXTB`) with the given operands.
fn zext_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::ZEXTB as i64, &operands)
}
/// Emit a `zext.h` instruction (`Opcode::ZEXTH`) with the given operands.
fn zext_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::ZEXTH as i64, &operands)
}
/// Emit the RV32 variant of `zext.h` (`Opcode::ZEXTHRV32`) with the given operands.
/// NOTE(review): assumed to be the RV32 encoding based on the opcode name — confirm
/// against the opcode table.
fn zext_h_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::ZEXTHRV32 as i64, &operands)
}
/// Emit a `zext.w` instruction (`Opcode::ZEXTW`) with the given operands.
fn zext_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::ZEXTW as i64, &operands)
}
/// Emit a `zip` instruction (`Opcode::ZIP`) with the given operands.
fn zip(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
    let operands = [op0.as_operand(), op1.as_operand()];
    self.emit_n(Opcode::ZIP as i64, &operands)
}
}