use crate::x86::assembler::*;
use crate::x86::operands::*;
use super::super::opcodes::*;
use crate::core::emitter::*;
use crate::core::operand::*;
/// A dummy operand that represents no register; it fills the unused operand slots in `emit` calls.
const NOREG: Operand = Operand::new();
/// `AADD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
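///
/// `AADD` is part of Intel's RAO-INT extension: it atomically adds a
/// general-purpose register into the memory operand. A minimal usage sketch
/// (the `eax`/`rax` register constants and the `ptr` memory helper are
/// illustrative placeholders, not confirmed names from this crate):
///
/// ```ignore
/// asm.aadd(ptr(rdi), eax); // Mem, Gpd -> AADD32MR
/// asm.aadd(ptr(rdi), rax); // Mem, Gpq -> AADD64MR
/// ```
///
/// `aand`, `aor`, and `axor` below take the same operand shapes.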
pub trait AaddEmitter<A, B> {
fn aadd(&mut self, op0: A, op1: B);
}
impl<'a> AaddEmitter<Mem, Gpd> for Assembler<'a> {
fn aadd(&mut self, op0: Mem, op1: Gpd) {
self.emit(AADD32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AaddEmitter<Mem, Gpq> for Assembler<'a> {
fn aadd(&mut self, op0: Mem, op1: Gpq) {
self.emit(AADD64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `AAND`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
pub trait AandEmitter<A, B> {
fn aand(&mut self, op0: A, op1: B);
}
impl<'a> AandEmitter<Mem, Gpd> for Assembler<'a> {
fn aand(&mut self, op0: Mem, op1: Gpd) {
self.emit(AAND32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AandEmitter<Mem, Gpq> for Assembler<'a> {
fn aand(&mut self, op0: Mem, op1: Gpq) {
self.emit(AAND64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `ADC` (ADC).
/// Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADC.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
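///
/// A minimal sketch of the typical use, propagating the carry through a
/// multi-word addition (register names are illustrative placeholders):
///
/// ```ignore
/// // 128-bit addition: (rdx:rax) += (rcx:rbx).
/// asm.add(rax, rbx); // low halves; sets CF on overflow -> ADD64RR
/// asm.adc(rdx, rcx); // high halves; consumes CF        -> ADC64RR
/// ```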
pub trait AdcEmitter<A, B> {
fn adc(&mut self, op0: A, op1: B);
}
impl<'a> AdcEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn adc(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(ADC8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Mem, GpbLo> for Assembler<'a> {
fn adc(&mut self, op0: Mem, op1: GpbLo) {
self.emit(ADC8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpw, Gpw> for Assembler<'a> {
fn adc(&mut self, op0: Gpw, op1: Gpw) {
self.emit(ADC16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Mem, Gpw> for Assembler<'a> {
fn adc(&mut self, op0: Mem, op1: Gpw) {
self.emit(ADC16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpd, Gpd> for Assembler<'a> {
fn adc(&mut self, op0: Gpd, op1: Gpd) {
self.emit(ADC32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Mem, Gpd> for Assembler<'a> {
fn adc(&mut self, op0: Mem, op1: Gpd) {
self.emit(ADC32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpq, Gpq> for Assembler<'a> {
fn adc(&mut self, op0: Gpq, op1: Gpq) {
self.emit(ADC64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Mem, Gpq> for Assembler<'a> {
fn adc(&mut self, op0: Mem, op1: Gpq) {
self.emit(ADC64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<GpbLo, Mem> for Assembler<'a> {
fn adc(&mut self, op0: GpbLo, op1: Mem) {
self.emit(ADC8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpw, Mem> for Assembler<'a> {
fn adc(&mut self, op0: Gpw, op1: Mem) {
self.emit(ADC16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpd, Mem> for Assembler<'a> {
fn adc(&mut self, op0: Gpd, op1: Mem) {
self.emit(ADC32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpq, Mem> for Assembler<'a> {
fn adc(&mut self, op0: Gpq, op1: Mem) {
self.emit(ADC64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<GpbLo, Imm> for Assembler<'a> {
fn adc(&mut self, op0: GpbLo, op1: Imm) {
self.emit(ADC8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpw, Imm> for Assembler<'a> {
fn adc(&mut self, op0: Gpw, op1: Imm) {
self.emit(ADC16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpd, Imm> for Assembler<'a> {
fn adc(&mut self, op0: Gpd, op1: Imm) {
self.emit(ADC32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Gpq, Imm> for Assembler<'a> {
fn adc(&mut self, op0: Gpq, op1: Imm) {
self.emit(ADC64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AdcEmitter<Mem, Imm> for Assembler<'a> {
fn adc(&mut self, op0: Mem, op1: Imm) {
self.emit(ADC8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `ADD` (ADD).
/// Adds the destination operand (first operand) and the source operand (second operand) and then stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
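///
/// A minimal usage sketch; the register names, the `ptr` memory helper, and
/// the `Imm::from` constructor are illustrative placeholders, not confirmed
/// names from this crate:
///
/// ```ignore
/// asm.add(eax, ecx);          // Gpd, Gpd -> ADD32RR
/// asm.add(rax, Imm::from(8)); // Gpq, Imm -> ADD64RI
/// asm.add(ptr(rsp), rdi);     // Mem, Gpq -> ADD64MR
/// ```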
pub trait AddEmitter<A, B> {
fn add(&mut self, op0: A, op1: B);
}
impl<'a> AddEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn add(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(ADD8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Mem, GpbLo> for Assembler<'a> {
fn add(&mut self, op0: Mem, op1: GpbLo) {
self.emit(ADD8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpw, Gpw> for Assembler<'a> {
fn add(&mut self, op0: Gpw, op1: Gpw) {
self.emit(ADD16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Mem, Gpw> for Assembler<'a> {
fn add(&mut self, op0: Mem, op1: Gpw) {
self.emit(ADD16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpd, Gpd> for Assembler<'a> {
fn add(&mut self, op0: Gpd, op1: Gpd) {
self.emit(ADD32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Mem, Gpd> for Assembler<'a> {
fn add(&mut self, op0: Mem, op1: Gpd) {
self.emit(ADD32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpq, Gpq> for Assembler<'a> {
fn add(&mut self, op0: Gpq, op1: Gpq) {
self.emit(ADD64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Mem, Gpq> for Assembler<'a> {
fn add(&mut self, op0: Mem, op1: Gpq) {
self.emit(ADD64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<GpbLo, Mem> for Assembler<'a> {
fn add(&mut self, op0: GpbLo, op1: Mem) {
self.emit(ADD8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpw, Mem> for Assembler<'a> {
fn add(&mut self, op0: Gpw, op1: Mem) {
self.emit(ADD16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpd, Mem> for Assembler<'a> {
fn add(&mut self, op0: Gpd, op1: Mem) {
self.emit(ADD32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpq, Mem> for Assembler<'a> {
fn add(&mut self, op0: Gpq, op1: Mem) {
self.emit(ADD64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<GpbLo, Imm> for Assembler<'a> {
fn add(&mut self, op0: GpbLo, op1: Imm) {
self.emit(ADD8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpw, Imm> for Assembler<'a> {
fn add(&mut self, op0: Gpw, op1: Imm) {
self.emit(ADD16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpd, Imm> for Assembler<'a> {
fn add(&mut self, op0: Gpd, op1: Imm) {
self.emit(ADD32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Gpq, Imm> for Assembler<'a> {
fn add(&mut self, op0: Gpq, op1: Imm) {
self.emit(ADD64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AddEmitter<Mem, Imm> for Assembler<'a> {
fn add(&mut self, op0: Mem, op1: Imm) {
self.emit(ADD8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `AND` (AND).
/// Performs a bitwise AND operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is set to 1 if both corresponding bits of the first and second operands are 1; otherwise, it is set to 0.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AND.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
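///
/// A minimal sketch of a common use, masking a value down to its low byte
/// (placeholder names as elsewhere in these examples):
///
/// ```ignore
/// asm.and(eax, Imm::from(0xFF)); // Gpd, Imm -> AND32RI
/// asm.and(rcx, rdx);             // Gpq, Gpq -> AND64RR
/// ```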
pub trait AndEmitter<A, B> {
fn and(&mut self, op0: A, op1: B);
}
impl<'a> AndEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn and(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(AND8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Mem, GpbLo> for Assembler<'a> {
fn and(&mut self, op0: Mem, op1: GpbLo) {
self.emit(AND8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpw, Gpw> for Assembler<'a> {
fn and(&mut self, op0: Gpw, op1: Gpw) {
self.emit(AND16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Mem, Gpw> for Assembler<'a> {
fn and(&mut self, op0: Mem, op1: Gpw) {
self.emit(AND16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpd, Gpd> for Assembler<'a> {
fn and(&mut self, op0: Gpd, op1: Gpd) {
self.emit(AND32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Mem, Gpd> for Assembler<'a> {
fn and(&mut self, op0: Mem, op1: Gpd) {
self.emit(AND32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpq, Gpq> for Assembler<'a> {
fn and(&mut self, op0: Gpq, op1: Gpq) {
self.emit(AND64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Mem, Gpq> for Assembler<'a> {
fn and(&mut self, op0: Mem, op1: Gpq) {
self.emit(AND64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<GpbLo, Mem> for Assembler<'a> {
fn and(&mut self, op0: GpbLo, op1: Mem) {
self.emit(AND8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpw, Mem> for Assembler<'a> {
fn and(&mut self, op0: Gpw, op1: Mem) {
self.emit(AND16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpd, Mem> for Assembler<'a> {
fn and(&mut self, op0: Gpd, op1: Mem) {
self.emit(AND32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpq, Mem> for Assembler<'a> {
fn and(&mut self, op0: Gpq, op1: Mem) {
self.emit(AND64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<GpbLo, Imm> for Assembler<'a> {
fn and(&mut self, op0: GpbLo, op1: Imm) {
self.emit(AND8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpw, Imm> for Assembler<'a> {
fn and(&mut self, op0: Gpw, op1: Imm) {
self.emit(AND16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpd, Imm> for Assembler<'a> {
fn and(&mut self, op0: Gpd, op1: Imm) {
self.emit(AND32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Gpq, Imm> for Assembler<'a> {
fn and(&mut self, op0: Gpq, op1: Imm) {
self.emit(AND64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AndEmitter<Mem, Imm> for Assembler<'a> {
fn and(&mut self, op0: Mem, op1: Imm) {
self.emit(AND8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `AOR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
pub trait AorEmitter<A, B> {
fn aor(&mut self, op0: A, op1: B);
}
impl<'a> AorEmitter<Mem, Gpd> for Assembler<'a> {
fn aor(&mut self, op0: Mem, op1: Gpd) {
self.emit(AOR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AorEmitter<Mem, Gpq> for Assembler<'a> {
fn aor(&mut self, op0: Mem, op1: Gpq) {
self.emit(AOR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `AXOR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
pub trait AxorEmitter<A, B> {
fn axor(&mut self, op0: A, op1: B);
}
impl<'a> AxorEmitter<Mem, Gpd> for Assembler<'a> {
fn axor(&mut self, op0: Mem, op1: Gpd) {
self.emit(AXOR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> AxorEmitter<Mem, Gpq> for Assembler<'a> {
fn axor(&mut self, op0: Mem, op1: Gpq) {
self.emit(AXOR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `BSF` (BSF).
/// Searches the source operand (second operand) for the least significant set bit (1 bit). If a least significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content of the source operand is 0, the content of the destination operand is undefined.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSF.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
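///
/// A minimal usage sketch (placeholder register names); `bsr` below mirrors
/// these operand shapes for the most significant set bit:
///
/// ```ignore
/// // Index of the lowest set bit of rcx; ZF is set (and the result is
/// // undefined) when rcx is 0, so guard or test ZF afterwards.
/// asm.bsf(rax, rcx); // Gpq, Gpq -> BSF64RR
/// ```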
pub trait BsfEmitter<A, B> {
fn bsf(&mut self, op0: A, op1: B);
}
impl<'a> BsfEmitter<Gpw, Gpw> for Assembler<'a> {
fn bsf(&mut self, op0: Gpw, op1: Gpw) {
self.emit(BSF16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsfEmitter<Gpw, Mem> for Assembler<'a> {
fn bsf(&mut self, op0: Gpw, op1: Mem) {
self.emit(BSF16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsfEmitter<Gpd, Gpd> for Assembler<'a> {
fn bsf(&mut self, op0: Gpd, op1: Gpd) {
self.emit(BSF32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsfEmitter<Gpd, Mem> for Assembler<'a> {
fn bsf(&mut self, op0: Gpd, op1: Mem) {
self.emit(BSF32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsfEmitter<Gpq, Gpq> for Assembler<'a> {
fn bsf(&mut self, op0: Gpq, op1: Gpq) {
self.emit(BSF64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsfEmitter<Gpq, Mem> for Assembler<'a> {
fn bsf(&mut self, op0: Gpq, op1: Mem) {
self.emit(BSF64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `BSR` (BSR).
/// Searches the source operand (second operand) for the most significant set bit (1 bit). If a most significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content of the source operand is 0, the content of the destination operand is undefined.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait BsrEmitter<A, B> {
fn bsr(&mut self, op0: A, op1: B);
}
impl<'a> BsrEmitter<Gpw, Gpw> for Assembler<'a> {
fn bsr(&mut self, op0: Gpw, op1: Gpw) {
self.emit(BSR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsrEmitter<Gpw, Mem> for Assembler<'a> {
fn bsr(&mut self, op0: Gpw, op1: Mem) {
self.emit(BSR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsrEmitter<Gpd, Gpd> for Assembler<'a> {
fn bsr(&mut self, op0: Gpd, op1: Gpd) {
self.emit(BSR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsrEmitter<Gpd, Mem> for Assembler<'a> {
fn bsr(&mut self, op0: Gpd, op1: Mem) {
self.emit(BSR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsrEmitter<Gpq, Gpq> for Assembler<'a> {
fn bsr(&mut self, op0: Gpq, op1: Gpq) {
self.emit(BSR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BsrEmitter<Gpq, Mem> for Assembler<'a> {
fn bsr(&mut self, op0: Gpq, op1: Mem) {
self.emit(BSR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `BT` (BT).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset (specified by the second operand) and stores the value of the bit in the CF flag. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BT.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
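///
/// BT only writes CF, so it is normally followed by a carry-based branch or
/// a SETC-style instruction. A minimal sketch (placeholder names):
///
/// ```ignore
/// asm.bt(rax, Imm::from(5)); // CF = bit 5 of rax                -> BT64RI
/// asm.bt(ptr(rdi), ecx);     // CF = bit ecx of the bits at [rdi] -> BT32MR
/// ```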
pub trait BtEmitter<A, B> {
fn bt(&mut self, op0: A, op1: B);
}
impl<'a> BtEmitter<Gpw, Gpw> for Assembler<'a> {
fn bt(&mut self, op0: Gpw, op1: Gpw) {
self.emit(BT16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Mem, Gpw> for Assembler<'a> {
fn bt(&mut self, op0: Mem, op1: Gpw) {
self.emit(BT16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Gpd, Gpd> for Assembler<'a> {
fn bt(&mut self, op0: Gpd, op1: Gpd) {
self.emit(BT32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Mem, Gpd> for Assembler<'a> {
fn bt(&mut self, op0: Mem, op1: Gpd) {
self.emit(BT32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Gpq, Gpq> for Assembler<'a> {
fn bt(&mut self, op0: Gpq, op1: Gpq) {
self.emit(BT64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Mem, Gpq> for Assembler<'a> {
fn bt(&mut self, op0: Mem, op1: Gpq) {
self.emit(BT64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Gpw, Imm> for Assembler<'a> {
fn bt(&mut self, op0: Gpw, op1: Imm) {
self.emit(BT16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Mem, Imm> for Assembler<'a> {
fn bt(&mut self, op0: Mem, op1: Imm) {
self.emit(BT16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Gpd, Imm> for Assembler<'a> {
fn bt(&mut self, op0: Gpd, op1: Imm) {
self.emit(BT32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtEmitter<Gpq, Imm> for Assembler<'a> {
fn bt(&mut self, op0: Gpq, op1: Imm) {
self.emit(BT64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `BTC` (BTC).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTC.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
pub trait BtcEmitter<A, B> {
fn btc(&mut self, op0: A, op1: B);
}
impl<'a> BtcEmitter<Gpw, Imm> for Assembler<'a> {
fn btc(&mut self, op0: Gpw, op1: Imm) {
self.emit(BTC16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Mem, Imm> for Assembler<'a> {
fn btc(&mut self, op0: Mem, op1: Imm) {
self.emit(BTC16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Gpd, Imm> for Assembler<'a> {
fn btc(&mut self, op0: Gpd, op1: Imm) {
self.emit(BTC32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Gpq, Imm> for Assembler<'a> {
fn btc(&mut self, op0: Gpq, op1: Imm) {
self.emit(BTC64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Gpw, Gpw> for Assembler<'a> {
fn btc(&mut self, op0: Gpw, op1: Gpw) {
self.emit(BTC16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Mem, Gpw> for Assembler<'a> {
fn btc(&mut self, op0: Mem, op1: Gpw) {
self.emit(BTC16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Gpd, Gpd> for Assembler<'a> {
fn btc(&mut self, op0: Gpd, op1: Gpd) {
self.emit(BTC32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Mem, Gpd> for Assembler<'a> {
fn btc(&mut self, op0: Mem, op1: Gpd) {
self.emit(BTC32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Gpq, Gpq> for Assembler<'a> {
fn btc(&mut self, op0: Gpq, op1: Gpq) {
self.emit(BTC64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtcEmitter<Mem, Gpq> for Assembler<'a> {
fn btc(&mut self, op0: Mem, op1: Gpq) {
self.emit(BTC64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `BTR` (BTR).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and clears the selected bit in the bit string to 0. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
pub trait BtrEmitter<A, B> {
fn btr(&mut self, op0: A, op1: B);
}
impl<'a> BtrEmitter<Gpw, Gpw> for Assembler<'a> {
fn btr(&mut self, op0: Gpw, op1: Gpw) {
self.emit(BTR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Mem, Gpw> for Assembler<'a> {
fn btr(&mut self, op0: Mem, op1: Gpw) {
self.emit(BTR16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Gpd, Gpd> for Assembler<'a> {
fn btr(&mut self, op0: Gpd, op1: Gpd) {
self.emit(BTR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Mem, Gpd> for Assembler<'a> {
fn btr(&mut self, op0: Mem, op1: Gpd) {
self.emit(BTR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Gpq, Gpq> for Assembler<'a> {
fn btr(&mut self, op0: Gpq, op1: Gpq) {
self.emit(BTR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Mem, Gpq> for Assembler<'a> {
fn btr(&mut self, op0: Mem, op1: Gpq) {
self.emit(BTR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Gpw, Imm> for Assembler<'a> {
fn btr(&mut self, op0: Gpw, op1: Imm) {
self.emit(BTR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Mem, Imm> for Assembler<'a> {
fn btr(&mut self, op0: Mem, op1: Imm) {
self.emit(BTR16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Gpd, Imm> for Assembler<'a> {
fn btr(&mut self, op0: Gpd, op1: Imm) {
self.emit(BTR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtrEmitter<Gpq, Imm> for Assembler<'a> {
fn btr(&mut self, op0: Gpq, op1: Imm) {
self.emit(BTR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `BTS` (BTS).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and sets the selected bit in the bit string to 1. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTS.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
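///
/// `btc`, `btr`, and `bts` share BT's operand shapes and additionally
/// complement, clear, or set the selected bit after capturing it in CF.
/// A minimal sketch (placeholder names):
///
/// ```ignore
/// asm.bts(rax, Imm::from(0)); // set bit 0, old bit in CF   -> BTS64RI
/// asm.btr(rax, Imm::from(0)); // clear bit 0, old bit in CF -> BTR64RI
/// ```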
pub trait BtsEmitter<A, B> {
fn bts(&mut self, op0: A, op1: B);
}
impl<'a> BtsEmitter<Gpw, Gpw> for Assembler<'a> {
fn bts(&mut self, op0: Gpw, op1: Gpw) {
self.emit(BTS16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Mem, Gpw> for Assembler<'a> {
fn bts(&mut self, op0: Mem, op1: Gpw) {
self.emit(BTS16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Gpd, Gpd> for Assembler<'a> {
fn bts(&mut self, op0: Gpd, op1: Gpd) {
self.emit(BTS32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Mem, Gpd> for Assembler<'a> {
fn bts(&mut self, op0: Mem, op1: Gpd) {
self.emit(BTS32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Gpq, Gpq> for Assembler<'a> {
fn bts(&mut self, op0: Gpq, op1: Gpq) {
self.emit(BTS64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Mem, Gpq> for Assembler<'a> {
fn bts(&mut self, op0: Mem, op1: Gpq) {
self.emit(BTS64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Gpw, Imm> for Assembler<'a> {
fn bts(&mut self, op0: Gpw, op1: Imm) {
self.emit(BTS16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Mem, Imm> for Assembler<'a> {
fn bts(&mut self, op0: Mem, op1: Imm) {
self.emit(BTS16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Gpd, Imm> for Assembler<'a> {
fn bts(&mut self, op0: Gpd, op1: Imm) {
self.emit(BTS32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> BtsEmitter<Gpq, Imm> for Assembler<'a> {
fn bts(&mut self, op0: Gpq, op1: Imm) {
self.emit(BTS64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `CALL` (CALL).
/// Saves procedure linking information on the stack and branches to the called procedure specified using the target operand. The target operand specifies the address of the first instruction in the called procedure. The operand can be an immediate value, a general-purpose register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CALL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Imm |
/// | 3 | Label |
/// | 4 | Mem |
/// | 5 | Sym |
/// +---+----------+
/// ```
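///
/// A minimal usage sketch; register names and the `ptr` helper are
/// illustrative, and `Label`/`Sym` construction depends on this crate's API
/// and is not shown:
///
/// ```ignore
/// asm.call(rax);      // indirect through a register -> CALLR
/// asm.call(ptr(rbx)); // indirect through memory     -> CALLM
/// asm.call(label);    // direct, resolved by the relocation machinery
/// ```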
pub trait CallEmitter<A> {
fn call(&mut self, op0: A);
}
impl<'a> CallEmitter<Imm> for Assembler<'a> {
fn call(&mut self, op0: Imm) {
self.emit(CALL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> CallEmitter<Sym> for Assembler<'a> {
fn call(&mut self, op0: Sym) {
self.emit(CALL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> CallEmitter<Label> for Assembler<'a> {
fn call(&mut self, op0: Label) {
self.emit(CALL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> CallEmitter<Gpq> for Assembler<'a> {
fn call(&mut self, op0: Gpq) {
self.emit(CALLR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> CallEmitter<Mem> for Assembler<'a> {
fn call(&mut self, op0: Mem) {
self.emit(CALLM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `CALLF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait CallfEmitter<A> {
fn callf(&mut self, op0: A);
}
impl<'a> CallfEmitter<Mem> for Assembler<'a> {
fn callf(&mut self, op0: Mem) {
self.emit(CALLF16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `CBW`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CbwEmitter {
fn cbw(&mut self);
}
impl<'a> CbwEmitter for Assembler<'a> {
fn cbw(&mut self) {
self.emit(CBW, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CDQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CdqEmitter {
fn cdq(&mut self);
}
impl<'a> CdqEmitter for Assembler<'a> {
fn cdq(&mut self) {
self.emit(CDQ, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CDQE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
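///
/// `cbw`, `cdq`, and `cdqe` take no operands: CBW sign-extends AL into AX,
/// CDQ sign-extends EAX into EDX:EAX, and CDQE sign-extends EAX into RAX.
/// A typical use is widening a dividend before a signed divide:
///
/// ```ignore
/// asm.cdq();     // EDX:EAX = sign-extended EAX
/// asm.idiv(ecx); // signed divide of EDX:EAX by ecx (placeholder register)
/// ```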
pub trait CdqeEmitter {
fn cdqe(&mut self);
}
impl<'a> CdqeEmitter for Assembler<'a> {
fn cdqe(&mut self) {
self.emit(CDQE, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CLC` (CLC).
/// Clears the CF flag in the EFLAGS register. Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait ClcEmitter {
fn clc(&mut self);
}
impl<'a> ClcEmitter for Assembler<'a> {
fn clc(&mut self) {
self.emit(CLC, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CLD` (CLD).
/// Clears the DF flag in the EFLAGS register. When the DF flag is set to 0, string operations increment the index registers (ESI and/or EDI). Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CldEmitter {
fn cld(&mut self);
}
impl<'a> CldEmitter for Assembler<'a> {
fn cld(&mut self) {
self.emit(CLD, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CLFLUSH` (CLFLUSH).
/// Invalidates from every level of the cache hierarchy in the cache coherence domain the cache line that contains the linear address specified with the memory operand. If that cache line contains modified data at any level of the cache hierarchy, that data is written back to memory. The source operand is a byte memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLFLUSH.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait ClflushEmitter<A> {
fn clflush(&mut self, op0: A);
}
impl<'a> ClflushEmitter<Mem> for Assembler<'a> {
fn clflush(&mut self, op0: Mem) {
self.emit(CLFLUSHM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `CLI` (CLI).
/// In most cases, CLI clears the IF flag in the EFLAGS register and no other flags are affected. Clearing the IF flag causes the processor to ignore maskable external interrupts. The IF flag and the CLI and STI instruction have no effect on the generation of exceptions and NMI interrupts.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLI.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CliEmitter {
fn cli(&mut self);
}
impl<'a> CliEmitter for Assembler<'a> {
fn cli(&mut self) {
self.emit(CLI, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CLTS` (CLTS).
/// Clears the task-switched (TS) flag in the CR0 register. This instruction is intended for use in operating-system procedures. It is a privileged instruction that can only be executed at a CPL of 0. It is allowed to be executed in real-address mode to allow initialization for protected mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLTS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CltsEmitter {
fn clts(&mut self);
}
impl<'a> CltsEmitter for Assembler<'a> {
fn clts(&mut self) {
self.emit(CLTS, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CMC` (CMC).
/// Complements the CF flag in the EFLAGS register. CMC operation is the same in non-64-bit modes and 64-bit mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CmcEmitter {
fn cmc(&mut self);
}
impl<'a> CmcEmitter for Assembler<'a> {
fn cmc(&mut self) {
self.emit(CMC, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CMP` (CMP).
/// Compares the first source operand with the second source operand and sets the status flags in the EFLAGS register according to the results. The comparison is performed by subtracting the second operand from the first operand and then setting the status flags in the same manner as the SUB instruction. When an immediate value is used as an operand, it is sign-extended to the length of the first operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMP.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
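///
/// CMP only updates EFLAGS, so it is normally followed by a conditional
/// jump, move, or set. A minimal sketch (placeholder names):
///
/// ```ignore
/// asm.cmp(rax, rbx);          // Gpq, Gpq -> CMP64RR
/// asm.cmp(ecx, Imm::from(0)); // Gpd, Imm -> CMP32RI
/// ```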
pub trait CmpEmitter<A, B> {
fn cmp(&mut self, op0: A, op1: B);
}
impl<'a> CmpEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn cmp(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(CMP8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Mem, GpbLo> for Assembler<'a> {
fn cmp(&mut self, op0: Mem, op1: GpbLo) {
self.emit(CMP8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpw, Gpw> for Assembler<'a> {
fn cmp(&mut self, op0: Gpw, op1: Gpw) {
self.emit(CMP16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Mem, Gpw> for Assembler<'a> {
fn cmp(&mut self, op0: Mem, op1: Gpw) {
self.emit(CMP16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpd, Gpd> for Assembler<'a> {
fn cmp(&mut self, op0: Gpd, op1: Gpd) {
self.emit(CMP32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Mem, Gpd> for Assembler<'a> {
fn cmp(&mut self, op0: Mem, op1: Gpd) {
self.emit(CMP32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpq, Gpq> for Assembler<'a> {
fn cmp(&mut self, op0: Gpq, op1: Gpq) {
self.emit(CMP64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Mem, Gpq> for Assembler<'a> {
fn cmp(&mut self, op0: Mem, op1: Gpq) {
self.emit(CMP64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<GpbLo, Mem> for Assembler<'a> {
fn cmp(&mut self, op0: GpbLo, op1: Mem) {
self.emit(CMP8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpw, Mem> for Assembler<'a> {
fn cmp(&mut self, op0: Gpw, op1: Mem) {
self.emit(CMP16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpd, Mem> for Assembler<'a> {
fn cmp(&mut self, op0: Gpd, op1: Mem) {
self.emit(CMP32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpq, Mem> for Assembler<'a> {
fn cmp(&mut self, op0: Gpq, op1: Mem) {
self.emit(CMP64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<GpbLo, Imm> for Assembler<'a> {
fn cmp(&mut self, op0: GpbLo, op1: Imm) {
self.emit(CMP8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpw, Imm> for Assembler<'a> {
fn cmp(&mut self, op0: Gpw, op1: Imm) {
self.emit(CMP16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpd, Imm> for Assembler<'a> {
fn cmp(&mut self, op0: Gpd, op1: Imm) {
self.emit(CMP32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Gpq, Imm> for Assembler<'a> {
fn cmp(&mut self, op0: Gpq, op1: Imm) {
self.emit(CMP64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> CmpEmitter<Mem, Imm> for Assembler<'a> {
fn cmp(&mut self, op0: Mem, op1: Imm) {
self.emit(CMP8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `CMPS` (CMPS).
/// Compares the byte, word, doubleword, or quadword specified with the first source operand with the byte, word, doubleword, or quadword specified with the second source operand and sets the status flags in the EFLAGS register according to the results.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMPS%3ACMPSB%3ACMPSW%3ACMPSD%3ACMPSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CmpsEmitter {
fn cmps(&mut self);
}
impl<'a> CmpsEmitter for Assembler<'a> {
fn cmps(&mut self) {
self.emit(CMPS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CQO`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CqoEmitter {
fn cqo(&mut self);
}
impl<'a> CqoEmitter for Assembler<'a> {
fn cqo(&mut self) {
self.emit(CQO, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CWD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CwdEmitter {
fn cwd(&mut self);
}
impl<'a> CwdEmitter for Assembler<'a> {
fn cwd(&mut self) {
self.emit(CWD, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `CWDE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CwdeEmitter {
fn cwde(&mut self);
}
impl<'a> CwdeEmitter for Assembler<'a> {
fn cwde(&mut self) {
self.emit(CWDE, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `C_EX`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CExEmitter {
fn c_ex(&mut self);
}
impl<'a> CExEmitter for Assembler<'a> {
fn c_ex(&mut self) {
self.emit(C_EX16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `C_SEP`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait CSepEmitter {
fn c_sep(&mut self);
}
impl<'a> CSepEmitter for Assembler<'a> {
fn c_sep(&mut self) {
self.emit(C_SEP16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `DEC` (DEC).
/// Subtracts 1 from the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (To perform a decrement operation that updates the CF flag, use a SUB instruction with an immediate operand of 1.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DEC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
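///
/// A minimal usage sketch (placeholder names):
///
/// ```ignore
/// asm.dec(rcx);      // Gpq -> DEC64R
/// asm.dec(ptr(rdi)); // Mem -> DEC8M
/// ```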
pub trait DecEmitter<A> {
fn dec(&mut self, op0: A);
}
impl<'a> DecEmitter<GpbLo> for Assembler<'a> {
fn dec(&mut self, op0: GpbLo) {
self.emit(DEC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DecEmitter<Mem> for Assembler<'a> {
fn dec(&mut self, op0: Mem) {
self.emit(DEC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DecEmitter<Gpw> for Assembler<'a> {
fn dec(&mut self, op0: Gpw) {
self.emit(DEC16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DecEmitter<Gpd> for Assembler<'a> {
fn dec(&mut self, op0: Gpd) {
self.emit(DEC32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DecEmitter<Gpq> for Assembler<'a> {
fn dec(&mut self, op0: Gpq) {
self.emit(DEC64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `DIV` (DIV).
/// Divides (unsigned) the value in the AX, DX:AX, EDX:EAX, or RDX:RAX registers (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, EDX:EAX, or RDX:RAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor). Division using a 64-bit operand is available only in 64-bit mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DIV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
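///
/// The dividend and results live in implicit registers, so only the divisor
/// is passed. A minimal sketch (placeholder names):
///
/// ```ignore
/// // Unsigned 64-bit divide: RDX:RAX / rcx, quotient in RAX, remainder in
/// // RDX. Zero RDX first or the quotient may overflow and fault (#DE).
/// asm.div(rcx); // Gpq -> DIV64R
/// ```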
pub trait DivEmitter<A> {
fn div(&mut self, op0: A);
}
impl<'a> DivEmitter<GpbLo> for Assembler<'a> {
fn div(&mut self, op0: GpbLo) {
self.emit(DIV8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DivEmitter<Mem> for Assembler<'a> {
fn div(&mut self, op0: Mem) {
self.emit(DIV8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DivEmitter<Gpw> for Assembler<'a> {
fn div(&mut self, op0: Gpw) {
self.emit(DIV16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DivEmitter<Gpd> for Assembler<'a> {
fn div(&mut self, op0: Gpd) {
self.emit(DIV32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> DivEmitter<Gpq> for Assembler<'a> {
fn div(&mut self, op0: Gpq) {
self.emit(DIV64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `ENTER` (ENTER).
/// Creates a stack frame (comprising space for dynamic storage and storage for 1-32 frame pointers) for a procedure. The first operand (imm16) specifies the size of the dynamic storage in the stack frame (that is, the number of bytes dynamically allocated on the stack for the procedure). The second operand (imm8) gives the lexical nesting level (0 to 31) of the procedure. The nesting level (imm8 mod 32) and the OperandSize attribute determine the size in bytes of the storage space for frame pointers.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ENTER.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
pub trait EnterEmitter<A> {
fn enter(&mut self, op0: A);
}
impl<'a> EnterEmitter<Imm> for Assembler<'a> {
fn enter(&mut self, op0: Imm) {
self.emit(ENTER16I, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `FWAIT` (FWAIT).
/// Causes the processor to check for and handle pending, unmasked, floating-point exceptions before proceeding. (FWAIT is an alternate mnemonic for WAIT.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/WAIT%3AFWAIT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait FwaitEmitter {
fn fwait(&mut self);
}
impl<'a> FwaitEmitter for Assembler<'a> {
fn fwait(&mut self) {
self.emit(FWAIT, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `HLT` (HLT).
/// Stops instruction execution and places the processor in a HALT state. An enabled interrupt (including NMI and SMI), a debug exception, the BINIT# signal, the INIT# signal, or the RESET# signal will resume execution. If an interrupt (including NMI) is used to resume execution after a HLT instruction, the saved instruction pointer (CS:EIP) points to the instruction following the HLT instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/HLT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait HltEmitter {
fn hlt(&mut self);
}
impl<'a> HltEmitter for Assembler<'a> {
fn hlt(&mut self) {
self.emit(HLT, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `IDIV` (IDIV).
/// Divides the (signed) value in the AX, DX:AX, or EDX:EAX (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, or EDX:EAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IDIV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
pub trait IdivEmitter<A> {
fn idiv(&mut self, op0: A);
}
impl<'a> IdivEmitter<GpbLo> for Assembler<'a> {
fn idiv(&mut self, op0: GpbLo) {
self.emit(IDIV8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IdivEmitter<Mem> for Assembler<'a> {
fn idiv(&mut self, op0: Mem) {
self.emit(IDIV8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IdivEmitter<Gpw> for Assembler<'a> {
fn idiv(&mut self, op0: Gpw) {
self.emit(IDIV16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IdivEmitter<Gpd> for Assembler<'a> {
fn idiv(&mut self, op0: Gpd) {
self.emit(IDIV32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IdivEmitter<Gpq> for Assembler<'a> {
fn idiv(&mut self, op0: Gpq) {
self.emit(IDIV64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `IMUL` (IMUL).
/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
pub trait ImulEmitter_1<A> {
fn imul_1(&mut self, op0: A);
}
impl<'a> ImulEmitter_1<GpbLo> for Assembler<'a> {
fn imul_1(&mut self, op0: GpbLo) {
self.emit(IMUL8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_1<Mem> for Assembler<'a> {
fn imul_1(&mut self, op0: Mem) {
self.emit(IMUL8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_1<Gpw> for Assembler<'a> {
fn imul_1(&mut self, op0: Gpw) {
self.emit(IMUL16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_1<Gpd> for Assembler<'a> {
fn imul_1(&mut self, op0: Gpd) {
self.emit(IMUL32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_1<Gpq> for Assembler<'a> {
fn imul_1(&mut self, op0: Gpq) {
self.emit(IMUL64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `IMUL` (IMUL).
/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait ImulEmitter_2<A, B> {
fn imul_2(&mut self, op0: A, op1: B);
}
impl<'a> ImulEmitter_2<Gpw, Gpw> for Assembler<'a> {
fn imul_2(&mut self, op0: Gpw, op1: Gpw) {
self.emit(IMUL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_2<Gpw, Mem> for Assembler<'a> {
fn imul_2(&mut self, op0: Gpw, op1: Mem) {
self.emit(IMUL16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_2<Gpd, Gpd> for Assembler<'a> {
fn imul_2(&mut self, op0: Gpd, op1: Gpd) {
self.emit(IMUL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_2<Gpd, Mem> for Assembler<'a> {
fn imul_2(&mut self, op0: Gpd, op1: Mem) {
self.emit(IMUL32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_2<Gpq, Gpq> for Assembler<'a> {
fn imul_2(&mut self, op0: Gpq, op1: Gpq) {
self.emit(IMUL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ImulEmitter_2<Gpq, Mem> for Assembler<'a> {
fn imul_2(&mut self, op0: Gpq, op1: Mem) {
self.emit(IMUL64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `IMUL` (IMUL).
/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Gpd, Gpd, Imm |
/// | 2 | Gpd, Mem, Imm |
/// | 3 | Gpq, Gpq, Imm |
/// | 4 | Gpq, Mem, Imm |
/// | 5 | Gpw, Gpw, Imm |
/// | 6 | Gpw, Mem, Imm |
/// +---+---------------+
/// ```
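///
/// The three IMUL traits in this file map to the three instruction forms:
/// `imul_1` multiplies into the implicit accumulator pair, `imul_2` is
/// `dst *= src`, and `imul_3` is `dst = src * imm`. A minimal sketch
/// (placeholder names):
///
/// ```ignore
/// asm.imul_1(rcx);                     // RDX:RAX = RAX * rcx -> IMUL64R
/// asm.imul_2(rax, rbx);                // rax *= rbx          -> IMUL64RR
/// asm.imul_3(rax, rbx, Imm::from(10)); // rax = rbx * 10      -> IMUL64RRI
/// ```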
pub trait ImulEmitter_3<A, B, C> {
fn imul_3(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> ImulEmitter_3<Gpw, Gpw, Imm> for Assembler<'a> {
fn imul_3(&mut self, op0: Gpw, op1: Gpw, op2: Imm) {
self.emit(IMUL16RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ImulEmitter_3<Gpw, Mem, Imm> for Assembler<'a> {
fn imul_3(&mut self, op0: Gpw, op1: Mem, op2: Imm) {
self.emit(IMUL16RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ImulEmitter_3<Gpd, Gpd, Imm> for Assembler<'a> {
fn imul_3(&mut self, op0: Gpd, op1: Gpd, op2: Imm) {
self.emit(IMUL32RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ImulEmitter_3<Gpd, Mem, Imm> for Assembler<'a> {
fn imul_3(&mut self, op0: Gpd, op1: Mem, op2: Imm) {
self.emit(IMUL32RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ImulEmitter_3<Gpq, Gpq, Imm> for Assembler<'a> {
fn imul_3(&mut self, op0: Gpq, op1: Gpq, op2: Imm) {
self.emit(IMUL64RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ImulEmitter_3<Gpq, Mem, Imm> for Assembler<'a> {
fn imul_3(&mut self, op0: Gpq, op1: Mem, op2: Imm) {
self.emit(IMUL64RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `IN` (IN).
/// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait InEmitter {
fn r#in(&mut self);
}
impl<'a> InEmitter for Assembler<'a> {
fn r#in(&mut self) {
self.emit(IN8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `IN` (IN).
/// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands |
/// +---+------------+
/// | 1 | GpbLo, Imm |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Imm |
/// | 4 | Gpw, Imm |
/// +---+------------+
/// ```
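///
/// The destination must be the accumulator matching the port width. A
/// minimal sketch (`al`/`eax` and `Imm::from` are placeholders):
///
/// ```ignore
/// asm.in_2(al, Imm::from(0x60));  // read a byte from port 0x60  -> IN8RI
/// asm.in_2(eax, Imm::from(0x40)); // read a dword from port 0x40 -> IN32RI
/// ```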
pub trait InEmitter_2<A, B> {
fn in_2(&mut self, op0: A, op1: B);
}
impl<'a> InEmitter_2<GpbLo, Imm> for Assembler<'a> {
fn in_2(&mut self, op0: GpbLo, op1: Imm) {
self.emit(IN8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> InEmitter_2<Gpw, Imm> for Assembler<'a> {
fn in_2(&mut self, op0: Gpw, op1: Imm) {
self.emit(IN16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> InEmitter_2<Gpd, Imm> for Assembler<'a> {
fn in_2(&mut self, op0: Gpd, op1: Imm) {
self.emit(IN32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> InEmitter_2<Gpq, Imm> for Assembler<'a> {
fn in_2(&mut self, op0: Gpq, op1: Imm) {
self.emit(IN64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `INC` (INC).
/// Adds 1 to the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (Use an ADD instruction with an immediate operand of 1 to perform an increment operation that updates the CF flag.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
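///
/// A minimal usage sketch (placeholder names); like DEC, INC leaves CF
/// untouched:
///
/// ```ignore
/// asm.inc(rax);      // Gpq -> INC64R
/// asm.inc(ptr(rsi)); // Mem -> INC8M
/// ```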
pub trait IncEmitter<A> {
fn inc(&mut self, op0: A);
}
impl<'a> IncEmitter<GpbLo> for Assembler<'a> {
fn inc(&mut self, op0: GpbLo) {
self.emit(INC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IncEmitter<Mem> for Assembler<'a> {
fn inc(&mut self, op0: Mem) {
self.emit(INC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IncEmitter<Gpw> for Assembler<'a> {
fn inc(&mut self, op0: Gpw) {
self.emit(INC16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IncEmitter<Gpd> for Assembler<'a> {
fn inc(&mut self, op0: Gpd) {
self.emit(INC32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> IncEmitter<Gpq> for Assembler<'a> {
fn inc(&mut self, op0: Gpq) {
self.emit(INC64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `INS` (INS).
/// Copies the data from the I/O port specified with the source operand (second operand) to the destination operand (first operand). The source operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The destination operand is a memory location, the address of which is read from either the ES:DI, ES:EDI or the RDI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The ES segment cannot be overridden with a segment override prefix.) The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INS%3AINSB%3AINSW%3AINSD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait InsEmitter {
fn ins(&mut self);
}
impl<'a> InsEmitter for Assembler<'a> {
fn ins(&mut self) {
self.emit(INS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `INT` (INT).
/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned immediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
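///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs); the `Imm::from`
/// conversion is an assumed name:
///
/// ```ignore
/// // int 0x80 -- raise interrupt vector 0x80
/// asm.int(Imm::from(0x80));
/// ```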
pub trait IntEmitter<A> {
fn int(&mut self, op0: A);
}
impl<'a> IntEmitter<Imm> for Assembler<'a> {
fn int(&mut self, op0: Imm) {
self.emit(INTI, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `INT1` (INT1).
/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned immediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait Int1Emitter {
fn int1(&mut self);
}
impl<'a> Int1Emitter for Assembler<'a> {
fn int1(&mut self) {
self.emit(INT1, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `INT3` (INT3).
/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned immediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait Int3Emitter {
fn int3(&mut self);
}
impl<'a> Int3Emitter for Assembler<'a> {
fn int3(&mut self) {
self.emit(INT3, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `IRET` (IRET).
/// Returns program control from an exception or interrupt handler to a program or procedure that was interrupted by an exception, an external interrupt, or a software-generated interrupt. These instructions are also used to perform a return from a nested task. (A nested task is created when a CALL instruction is used to initiate a task switch or when an interrupt or exception causes a task switch to an interrupt or exception handler.) See the section titled “Task Linking” in Chapter 8 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 3A.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IRET%3AIRETD%3AIRETQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait IretEmitter {
fn iret(&mut self);
}
impl<'a> IretEmitter for Assembler<'a> {
fn iret(&mut self) {
self.emit(IRET16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `JA` (JA).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
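///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs), assuming the
/// assembler exposes `new_label`/`bind_label` under these names; the same
/// pattern applies to every other `J*Emitter` below:
///
/// ```ignore
/// let target = asm.new_label();
/// asm.ja(target); // ja target -- taken when CF = 0 and ZF = 0
/// /* ... */
/// asm.bind_label(target);
/// ```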
pub trait JaEmitter<A> {
fn ja(&mut self, op0: A);
}
impl<'a> JaEmitter<Imm> for Assembler<'a> {
fn ja(&mut self, op0: Imm) {
self.emit(JA, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JaEmitter<Sym> for Assembler<'a> {
fn ja(&mut self, op0: Sym) {
self.emit(JA, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JaEmitter<Label> for Assembler<'a> {
fn ja(&mut self, op0: Label) {
self.emit(JA, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JBE` (JBE).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JbeEmitter<A> {
fn jbe(&mut self, op0: A);
}
impl<'a> JbeEmitter<Imm> for Assembler<'a> {
fn jbe(&mut self, op0: Imm) {
self.emit(JBE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JbeEmitter<Sym> for Assembler<'a> {
fn jbe(&mut self, op0: Sym) {
self.emit(JBE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JbeEmitter<Label> for Assembler<'a> {
fn jbe(&mut self, op0: Label) {
self.emit(JBE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JC` (JC).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JcEmitter<A> {
fn jc(&mut self, op0: A);
}
impl<'a> JcEmitter<Imm> for Assembler<'a> {
fn jc(&mut self, op0: Imm) {
self.emit(JC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JcEmitter<Sym> for Assembler<'a> {
fn jc(&mut self, op0: Sym) {
self.emit(JC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JcEmitter<Label> for Assembler<'a> {
fn jc(&mut self, op0: Label) {
self.emit(JC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JCXZ` (JCXZ).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction. (JCXZ itself does not test the status flags; it jumps when the CX register is 0.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JcxzEmitter<A> {
fn jcxz(&mut self, op0: A);
}
impl<'a> JcxzEmitter<Imm> for Assembler<'a> {
fn jcxz(&mut self, op0: Imm) {
self.emit(JCXZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JcxzEmitter<Sym> for Assembler<'a> {
fn jcxz(&mut self, op0: Sym) {
self.emit(JCXZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JcxzEmitter<Label> for Assembler<'a> {
fn jcxz(&mut self, op0: Label) {
self.emit(JCXZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JG` (JG).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JgEmitter<A> {
fn jg(&mut self, op0: A);
}
impl<'a> JgEmitter<Imm> for Assembler<'a> {
fn jg(&mut self, op0: Imm) {
self.emit(JG, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JgEmitter<Sym> for Assembler<'a> {
fn jg(&mut self, op0: Sym) {
self.emit(JG, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JgEmitter<Label> for Assembler<'a> {
fn jg(&mut self, op0: Label) {
self.emit(JG, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JGE` (JGE).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JgeEmitter<A> {
fn jge(&mut self, op0: A);
}
impl<'a> JgeEmitter<Imm> for Assembler<'a> {
fn jge(&mut self, op0: Imm) {
self.emit(JGE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JgeEmitter<Sym> for Assembler<'a> {
fn jge(&mut self, op0: Sym) {
self.emit(JGE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JgeEmitter<Label> for Assembler<'a> {
fn jge(&mut self, op0: Label) {
self.emit(JGE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JL` (JL).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JlEmitter<A> {
fn jl(&mut self, op0: A);
}
impl<'a> JlEmitter<Imm> for Assembler<'a> {
fn jl(&mut self, op0: Imm) {
self.emit(JL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JlEmitter<Sym> for Assembler<'a> {
fn jl(&mut self, op0: Sym) {
self.emit(JL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JlEmitter<Label> for Assembler<'a> {
fn jl(&mut self, op0: Label) {
self.emit(JL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JLE` (JLE).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JleEmitter<A> {
fn jle(&mut self, op0: A);
}
impl<'a> JleEmitter<Imm> for Assembler<'a> {
fn jle(&mut self, op0: Imm) {
self.emit(JLE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JleEmitter<Sym> for Assembler<'a> {
fn jle(&mut self, op0: Sym) {
self.emit(JLE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JleEmitter<Label> for Assembler<'a> {
fn jle(&mut self, op0: Label) {
self.emit(JLE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JMP` (JMP).
/// Transfers program control to a different point in the instruction stream without recording return information. The destination (target) operand specifies the address of the instruction being jumped to. This operand can be an immediate value, a general-purpose register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/JMP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Imm |
/// | 3 | Label |
/// | 4 | Mem |
/// | 5 | Sym |
/// +---+----------+
/// ```
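///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs), assuming a `Gpq`
/// register constant `rax` under that name:
///
/// ```ignore
/// // jmp rax -- indirect jump through a register
/// asm.jmp(rax);
/// ```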
pub trait JmpEmitter<A> {
fn jmp(&mut self, op0: A);
}
impl<'a> JmpEmitter<Imm> for Assembler<'a> {
fn jmp(&mut self, op0: Imm) {
self.emit(JMP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JmpEmitter<Sym> for Assembler<'a> {
fn jmp(&mut self, op0: Sym) {
self.emit(JMP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JmpEmitter<Label> for Assembler<'a> {
fn jmp(&mut self, op0: Label) {
self.emit(JMP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JmpEmitter<Gpq> for Assembler<'a> {
fn jmp(&mut self, op0: Gpq) {
self.emit(JMPR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JmpEmitter<Mem> for Assembler<'a> {
fn jmp(&mut self, op0: Mem) {
self.emit(JMPM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JMPF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait JmpfEmitter<A> {
fn jmpf(&mut self, op0: A);
}
impl<'a> JmpfEmitter<Mem> for Assembler<'a> {
fn jmpf(&mut self, op0: Mem) {
self.emit(JMPF16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JNC` (JNC).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JncEmitter<A> {
fn jnc(&mut self, op0: A);
}
impl<'a> JncEmitter<Imm> for Assembler<'a> {
fn jnc(&mut self, op0: Imm) {
self.emit(JNC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JncEmitter<Sym> for Assembler<'a> {
fn jnc(&mut self, op0: Sym) {
self.emit(JNC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JncEmitter<Label> for Assembler<'a> {
fn jnc(&mut self, op0: Label) {
self.emit(JNC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JNO` (JNO).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JnoEmitter<A> {
fn jno(&mut self, op0: A);
}
impl<'a> JnoEmitter<Imm> for Assembler<'a> {
fn jno(&mut self, op0: Imm) {
self.emit(JNO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnoEmitter<Sym> for Assembler<'a> {
fn jno(&mut self, op0: Sym) {
self.emit(JNO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnoEmitter<Label> for Assembler<'a> {
fn jno(&mut self, op0: Label) {
self.emit(JNO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JNP` (JNP).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JnpEmitter<A> {
fn jnp(&mut self, op0: A);
}
impl<'a> JnpEmitter<Imm> for Assembler<'a> {
fn jnp(&mut self, op0: Imm) {
self.emit(JNP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnpEmitter<Sym> for Assembler<'a> {
fn jnp(&mut self, op0: Sym) {
self.emit(JNP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnpEmitter<Label> for Assembler<'a> {
fn jnp(&mut self, op0: Label) {
self.emit(JNP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JNS` (JNS).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JnsEmitter<A> {
fn jns(&mut self, op0: A);
}
impl<'a> JnsEmitter<Imm> for Assembler<'a> {
fn jns(&mut self, op0: Imm) {
self.emit(JNS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnsEmitter<Sym> for Assembler<'a> {
fn jns(&mut self, op0: Sym) {
self.emit(JNS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnsEmitter<Label> for Assembler<'a> {
fn jns(&mut self, op0: Label) {
self.emit(JNS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JNZ` (JNZ).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JnzEmitter<A> {
fn jnz(&mut self, op0: A);
}
impl<'a> JnzEmitter<Imm> for Assembler<'a> {
fn jnz(&mut self, op0: Imm) {
self.emit(JNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnzEmitter<Sym> for Assembler<'a> {
fn jnz(&mut self, op0: Sym) {
self.emit(JNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JnzEmitter<Label> for Assembler<'a> {
fn jnz(&mut self, op0: Label) {
self.emit(JNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JO` (JO).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JoEmitter<A> {
fn jo(&mut self, op0: A);
}
impl<'a> JoEmitter<Imm> for Assembler<'a> {
fn jo(&mut self, op0: Imm) {
self.emit(JO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JoEmitter<Sym> for Assembler<'a> {
fn jo(&mut self, op0: Sym) {
self.emit(JO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JoEmitter<Label> for Assembler<'a> {
fn jo(&mut self, op0: Label) {
self.emit(JO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JP` (JP).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JpEmitter<A> {
fn jp(&mut self, op0: A);
}
impl<'a> JpEmitter<Imm> for Assembler<'a> {
fn jp(&mut self, op0: Imm) {
self.emit(JP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JpEmitter<Sym> for Assembler<'a> {
fn jp(&mut self, op0: Sym) {
self.emit(JP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JpEmitter<Label> for Assembler<'a> {
fn jp(&mut self, op0: Label) {
self.emit(JP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JS` (JS).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JsEmitter<A> {
fn js(&mut self, op0: A);
}
impl<'a> JsEmitter<Imm> for Assembler<'a> {
fn js(&mut self, op0: Imm) {
self.emit(JS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JsEmitter<Sym> for Assembler<'a> {
fn js(&mut self, op0: Sym) {
self.emit(JS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JsEmitter<Label> for Assembler<'a> {
fn js(&mut self, op0: Label) {
self.emit(JS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JZ` (JZ).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JzEmitter<A> {
fn jz(&mut self, op0: A);
}
impl<'a> JzEmitter<Imm> for Assembler<'a> {
fn jz(&mut self, op0: Imm) {
self.emit(JZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JzEmitter<Sym> for Assembler<'a> {
fn jz(&mut self, op0: Sym) {
self.emit(JZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JzEmitter<Label> for Assembler<'a> {
fn jz(&mut self, op0: Label) {
self.emit(JZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `JCC` (Jcc).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait JccEmitter<A> {
fn jcc(&mut self, op0: A);
}
impl<'a> JccEmitter<Imm> for Assembler<'a> {
fn jcc(&mut self, op0: Imm) {
self.emit(JCC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JccEmitter<Sym> for Assembler<'a> {
fn jcc(&mut self, op0: Sym) {
self.emit(JCC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> JccEmitter<Label> for Assembler<'a> {
fn jcc(&mut self, op0: Label) {
self.emit(JCC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LAHF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait LahfEmitter {
fn lahf(&mut self);
}
impl<'a> LahfEmitter for Assembler<'a> {
fn lahf(&mut self) {
self.emit(LAHF, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `LAR` (LAR).
/// Loads the access rights from the segment descriptor specified by the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the flag register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. If the source operand is a memory address, only 16 bits of data are accessed. The destination operand is a general-purpose register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LAR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpw |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpw |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
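///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs), assuming `Gpd`/`Gpw`
/// register constants `eax` and `cx` under these names:
///
/// ```ignore
/// // lar eax, cx -- load the access rights of the selector in CX
/// asm.lar(eax, cx);
/// ```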
pub trait LarEmitter<A, B> {
fn lar(&mut self, op0: A, op1: B);
}
impl<'a> LarEmitter<Gpw, Gpw> for Assembler<'a> {
fn lar(&mut self, op0: Gpw, op1: Gpw) {
self.emit(LAR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LarEmitter<Gpw, Mem> for Assembler<'a> {
fn lar(&mut self, op0: Gpw, op1: Mem) {
self.emit(LAR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LarEmitter<Gpd, Gpw> for Assembler<'a> {
fn lar(&mut self, op0: Gpd, op1: Gpw) {
self.emit(LAR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LarEmitter<Gpd, Mem> for Assembler<'a> {
fn lar(&mut self, op0: Gpd, op1: Mem) {
self.emit(LAR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LarEmitter<Gpq, Gpw> for Assembler<'a> {
fn lar(&mut self, op0: Gpq, op1: Gpw) {
self.emit(LAR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LarEmitter<Gpq, Mem> for Assembler<'a> {
fn lar(&mut self, op0: Gpq, op1: Mem) {
self.emit(LAR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `LDTILECFG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait LdtilecfgEmitter<A> {
fn ldtilecfg(&mut self, op0: A);
}
impl<'a> LdtilecfgEmitter<Mem> for Assembler<'a> {
fn ldtilecfg(&mut self, op0: Mem) {
self.emit(LDTILECFGM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LEA` (LEA).
/// Computes the effective address of the second operand (the source operand) and stores it in the first operand (destination operand). The source operand is a memory address (offset part) specified with one of the processor's addressing modes; the destination operand is a general-purpose register. The address-size and operand-size attributes affect the action performed by this instruction, as described in the Intel manual. The operand-size attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the attribute of the code segment.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEA.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
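///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs); the `Mem`
/// constructor and register constants are assumed names:
///
/// ```ignore
/// // lea rax, [rbx + 8]
/// asm.lea(rax, Mem::base_offset(rbx, 8));
/// ```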
pub trait LeaEmitter<A, B> {
fn lea(&mut self, op0: A, op1: B);
}
impl<'a> LeaEmitter<Gpw, Mem> for Assembler<'a> {
fn lea(&mut self, op0: Gpw, op1: Mem) {
self.emit(LEA16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LeaEmitter<Gpd, Mem> for Assembler<'a> {
fn lea(&mut self, op0: Gpd, op1: Mem) {
self.emit(LEA32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LeaEmitter<Gpq, Mem> for Assembler<'a> {
fn lea(&mut self, op0: Gpq, op1: Mem) {
self.emit(LEA64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `LEAVE` (LEAVE).
/// Releases the stack frame set up by an earlier ENTER instruction. The LEAVE instruction copies the frame pointer (in the EBP register) into the stack pointer register (ESP), which releases the stack space allocated to the stack frame. The old frame pointer (the frame pointer for the calling procedure that was saved by the ENTER instruction) is then popped from the stack into the EBP register, restoring the calling procedure’s stack frame.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEAVE.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait LeaveEmitter {
fn leave(&mut self);
}
impl<'a> LeaveEmitter for Assembler<'a> {
fn leave(&mut self) {
self.emit(LEAVE16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `LFS` (LFS).
/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait LfsEmitter<A, B> {
fn lfs(&mut self, op0: A, op1: B);
}
impl<'a> LfsEmitter<Gpw, Mem> for Assembler<'a> {
fn lfs(&mut self, op0: Gpw, op1: Mem) {
self.emit(LFS16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LfsEmitter<Gpd, Mem> for Assembler<'a> {
fn lfs(&mut self, op0: Gpd, op1: Mem) {
self.emit(LFS32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LfsEmitter<Gpq, Mem> for Assembler<'a> {
fn lfs(&mut self, op0: Gpq, op1: Mem) {
self.emit(LFS64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `LGDT` (LGDT).
/// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If the operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait LgdtEmitter<A> {
fn lgdt(&mut self, op0: A);
}
impl<'a> LgdtEmitter<Mem> for Assembler<'a> {
fn lgdt(&mut self, op0: Mem) {
self.emit(LGDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LGS` (LGS).
/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait LgsEmitter<A, B> {
fn lgs(&mut self, op0: A, op1: B);
}
impl<'a> LgsEmitter<Gpw, Mem> for Assembler<'a> {
fn lgs(&mut self, op0: Gpw, op1: Mem) {
self.emit(LGS16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LgsEmitter<Gpd, Mem> for Assembler<'a> {
fn lgs(&mut self, op0: Gpd, op1: Mem) {
self.emit(LGS32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LgsEmitter<Gpq, Mem> for Assembler<'a> {
fn lgs(&mut self, op0: Gpq, op1: Mem) {
self.emit(LGS64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `LIDT` (LIDT).
/// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If the operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait LidtEmitter<A> {
fn lidt(&mut self, op0: A);
}
impl<'a> LidtEmitter<Mem> for Assembler<'a> {
fn lidt(&mut self, op0: Mem) {
self.emit(LIDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LLDT` (LLDT).
/// Loads the source operand into the segment selector field of the local descriptor table register (LDTR). The source operand (a general-purpose register or a memory location) contains a segment selector that points to a local descriptor table (LDT). After the segment selector is loaded in the LDTR, the processor uses the segment selector to locate the segment descriptor for the LDT in the global descriptor table (GDT). It then loads the segment limit and base address for the LDT from the segment descriptor into the LDTR. The segment registers DS, ES, SS, FS, GS, and CS are not affected by this instruction, nor is the LDTR field in the task state segment (TSS) for the current task.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LLDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait LldtEmitter<A> {
fn lldt(&mut self, op0: A);
}
impl<'a> LldtEmitter<Gpd> for Assembler<'a> {
fn lldt(&mut self, op0: Gpd) {
self.emit(LLDTR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LldtEmitter<Mem> for Assembler<'a> {
fn lldt(&mut self, op0: Mem) {
self.emit(LLDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LMSW` (LMSW).
/// Loads the source operand into the machine status word, bits 0 through 15 of register CR0. The source operand can be a 16-bit general-purpose register or a memory location. Only the low-order 4 bits of the source operand (which contains the PE, MP, EM, and TS flags) are loaded into CR0. The PG, CD, NW, AM, WP, NE, and ET flags of CR0 are not affected. The operand-size attribute has no effect on this instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LMSW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait LmswEmitter<A> {
fn lmsw(&mut self, op0: A);
}
impl<'a> LmswEmitter<Gpd> for Assembler<'a> {
fn lmsw(&mut self, op0: Gpd) {
self.emit(LMSWR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LmswEmitter<Mem> for Assembler<'a> {
fn lmsw(&mut self, op0: Mem) {
self.emit(LMSWM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LODS` (LODS).
/// Loads a byte, word, or doubleword from the source operand into the AL, AX, or EAX register, respectively. The source operand is a memory location, the address of which is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The DS segment may be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LODS%3ALODSB%3ALODSW%3ALODSD%3ALODSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait LodsEmitter {
fn lods(&mut self);
}
impl<'a> LodsEmitter for Assembler<'a> {
fn lods(&mut self) {
self.emit(LODS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `LOOP` (LOOP).
/// Performs a loop operation using the RCX, ECX or CX register as a counter (depending on whether the address size is 64 bits, 32 bits, or 16 bits). Note that the LOOP instruction ignores REX.W, but the 64-bit address size can be overridden using a 67H prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LOOP%3ALOOPcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
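///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs); `new_label` and
/// `bind_label` are assumed names, and the raw-identifier call syntax is
/// required because `loop` is a Rust keyword:
///
/// ```ignore
/// let top = asm.new_label();
/// asm.bind_label(top);
/// /* loop body */
/// asm.r#loop(top); // loop top -- decrement RCX, jump while non-zero
/// ```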
pub trait LoopEmitter<A> {
fn r#loop(&mut self, op0: A);
}
impl<'a> LoopEmitter<Imm> for Assembler<'a> {
fn r#loop(&mut self, op0: Imm) {
self.emit(LOOP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LoopEmitter<Sym> for Assembler<'a> {
fn r#loop(&mut self, op0: Sym) {
self.emit(LOOP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LoopEmitter<Label> for Assembler<'a> {
fn r#loop(&mut self, op0: Label) {
self.emit(LOOP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LOOPNZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait LoopnzEmitter<A> {
fn loopnz(&mut self, op0: A);
}
impl<'a> LoopnzEmitter<Imm> for Assembler<'a> {
fn loopnz(&mut self, op0: Imm) {
self.emit(LOOPNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LoopnzEmitter<Sym> for Assembler<'a> {
fn loopnz(&mut self, op0: Sym) {
self.emit(LOOPNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LoopnzEmitter<Label> for Assembler<'a> {
fn loopnz(&mut self, op0: Label) {
self.emit(LOOPNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LOOPZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
pub trait LoopzEmitter<A> {
fn loopz(&mut self, op0: A);
}
impl<'a> LoopzEmitter<Imm> for Assembler<'a> {
fn loopz(&mut self, op0: Imm) {
self.emit(LOOPZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LoopzEmitter<Sym> for Assembler<'a> {
fn loopz(&mut self, op0: Sym) {
self.emit(LOOPZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LoopzEmitter<Label> for Assembler<'a> {
fn loopz(&mut self, op0: Label) {
self.emit(LOOPZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `LSL` (LSL).
/// Loads the unscrambled segment limit from the segment descriptor specified with the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the EFLAGS register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. The destination operand is a general-purpose register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LSL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpw |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpw |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait LslEmitter<A, B> {
fn lsl(&mut self, op0: A, op1: B);
}
impl<'a> LslEmitter<Gpw, Gpw> for Assembler<'a> {
fn lsl(&mut self, op0: Gpw, op1: Gpw) {
self.emit(LSL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LslEmitter<Gpw, Mem> for Assembler<'a> {
fn lsl(&mut self, op0: Gpw, op1: Mem) {
self.emit(LSL16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LslEmitter<Gpd, Gpw> for Assembler<'a> {
fn lsl(&mut self, op0: Gpd, op1: Gpw) {
self.emit(LSL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LslEmitter<Gpd, Mem> for Assembler<'a> {
fn lsl(&mut self, op0: Gpd, op1: Mem) {
self.emit(LSL32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LslEmitter<Gpq, Gpw> for Assembler<'a> {
fn lsl(&mut self, op0: Gpq, op1: Gpw) {
self.emit(LSL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LslEmitter<Gpq, Mem> for Assembler<'a> {
fn lsl(&mut self, op0: Gpq, op1: Mem) {
self.emit(LSL64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `LSS` (LSS).
/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait LssEmitter<A, B> {
fn lss(&mut self, op0: A, op1: B);
}
impl<'a> LssEmitter<Gpw, Mem> for Assembler<'a> {
fn lss(&mut self, op0: Gpw, op1: Mem) {
self.emit(LSS16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LssEmitter<Gpd, Mem> for Assembler<'a> {
fn lss(&mut self, op0: Gpd, op1: Mem) {
self.emit(LSS32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> LssEmitter<Gpq, Mem> for Assembler<'a> {
fn lss(&mut self, op0: Gpq, op1: Mem) {
self.emit(LSS64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `LTR` (LTR).
/// Loads the source operand into the segment selector field of the task register. The source operand (a general-purpose register or a memory location) contains a segment selector that points to a task state segment (TSS). After the segment selector is loaded in the task register, the processor uses the segment selector to locate the segment descriptor for the TSS in the global descriptor table (GDT). It then loads the segment limit and base address for the TSS from the segment descriptor into the task register. The task pointed to by the task register is marked busy, but a switch to the task does not occur.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LTR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait LtrEmitter<A> {
fn ltr(&mut self, op0: A);
}
impl<'a> LtrEmitter<Gpd> for Assembler<'a> {
fn ltr(&mut self, op0: Gpd) {
self.emit(LTRR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> LtrEmitter<Mem> for Assembler<'a> {
fn ltr(&mut self, op0: Mem) {
self.emit(LTRM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `MOV` (MOV).
/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination operand can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+------------------------+
/// | # | Operands |
/// +----+------------------------+
/// | 1 | AbsoluteAddress, GpbLo |
/// | 2 | AbsoluteAddress, Gpd |
/// | 3 | AbsoluteAddress, Gpq |
/// | 4 | AbsoluteAddress, Gpw |
/// | 5 | GpbLo, AbsoluteAddress |
/// | 6 | GpbLo, GpbLo |
/// | 7 | GpbLo, Imm |
/// | 8 | GpbLo, Mem |
/// | 9 | Gpd, AbsoluteAddress |
/// | 10 | Gpd, Gpd |
/// | 11 | Gpd, Imm |
/// | 12 | Gpd, Mem |
/// | 13 | Gpq, AbsoluteAddress |
/// | 14 | Gpq, Gpq |
/// | 15 | Gpq, Imm |
/// | 16 | Gpq, Mem |
/// | 17 | Gpw, AbsoluteAddress |
/// | 18 | Gpw, Gpw |
/// | 19 | Gpw, Imm |
/// | 20 | Gpw, Mem |
/// | 21 | Mem, GpbLo |
/// | 22 | Mem, Gpd |
/// | 23 | Mem, Gpq |
/// | 24 | Mem, Gpw |
/// | 25 | Mem, Imm |
/// +----+------------------------+
/// ```
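///
/// # Example
///
/// A minimal usage sketch (not from this crate's docs); the register
/// constants, `Imm::from` conversion, and `Mem` constructor are assumed
/// names:
///
/// ```ignore
/// asm.mov(rax, Imm::from(42));            // mov rax, 42
/// asm.mov(Mem::base_offset(rsp, 8), rax); // mov [rsp + 8], rax
/// ```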
pub trait MovEmitter<A, B> {
fn mov(&mut self, op0: A, op1: B);
}
impl<'a> MovEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn mov(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(MOV8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Mem, GpbLo> for Assembler<'a> {
fn mov(&mut self, op0: Mem, op1: GpbLo) {
self.emit(MOV8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpw, Gpw> for Assembler<'a> {
fn mov(&mut self, op0: Gpw, op1: Gpw) {
self.emit(MOV16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Mem, Gpw> for Assembler<'a> {
fn mov(&mut self, op0: Mem, op1: Gpw) {
self.emit(MOV16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpd, Gpd> for Assembler<'a> {
fn mov(&mut self, op0: Gpd, op1: Gpd) {
self.emit(MOV32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Mem, Gpd> for Assembler<'a> {
fn mov(&mut self, op0: Mem, op1: Gpd) {
self.emit(MOV32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpq, Gpq> for Assembler<'a> {
fn mov(&mut self, op0: Gpq, op1: Gpq) {
self.emit(MOV64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Mem, Gpq> for Assembler<'a> {
fn mov(&mut self, op0: Mem, op1: Gpq) {
self.emit(MOV64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<GpbLo, Mem> for Assembler<'a> {
fn mov(&mut self, op0: GpbLo, op1: Mem) {
self.emit(MOV8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpw, Mem> for Assembler<'a> {
fn mov(&mut self, op0: Gpw, op1: Mem) {
self.emit(MOV16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpd, Mem> for Assembler<'a> {
fn mov(&mut self, op0: Gpd, op1: Mem) {
self.emit(MOV32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpq, Mem> for Assembler<'a> {
fn mov(&mut self, op0: Gpq, op1: Mem) {
self.emit(MOV64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<GpbLo, AbsoluteAddress> for Assembler<'a> {
fn mov(&mut self, op0: GpbLo, op1: AbsoluteAddress) {
self.emit(MOV8RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpw, AbsoluteAddress> for Assembler<'a> {
fn mov(&mut self, op0: Gpw, op1: AbsoluteAddress) {
self.emit(MOV16RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpd, AbsoluteAddress> for Assembler<'a> {
fn mov(&mut self, op0: Gpd, op1: AbsoluteAddress) {
self.emit(MOV32RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpq, AbsoluteAddress> for Assembler<'a> {
fn mov(&mut self, op0: Gpq, op1: AbsoluteAddress) {
self.emit(MOV64RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<AbsoluteAddress, GpbLo> for Assembler<'a> {
fn mov(&mut self, op0: AbsoluteAddress, op1: GpbLo) {
self.emit(MOV8AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<AbsoluteAddress, Gpw> for Assembler<'a> {
fn mov(&mut self, op0: AbsoluteAddress, op1: Gpw) {
self.emit(MOV16AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<AbsoluteAddress, Gpd> for Assembler<'a> {
fn mov(&mut self, op0: AbsoluteAddress, op1: Gpd) {
self.emit(MOV32AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<AbsoluteAddress, Gpq> for Assembler<'a> {
fn mov(&mut self, op0: AbsoluteAddress, op1: Gpq) {
self.emit(MOV64AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<GpbLo, Imm> for Assembler<'a> {
fn mov(&mut self, op0: GpbLo, op1: Imm) {
self.emit(MOV8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpw, Imm> for Assembler<'a> {
fn mov(&mut self, op0: Gpw, op1: Imm) {
self.emit(MOV16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpd, Imm> for Assembler<'a> {
fn mov(&mut self, op0: Gpd, op1: Imm) {
self.emit(MOV32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Gpq, Imm> for Assembler<'a> {
fn mov(&mut self, op0: Gpq, op1: Imm) {
self.emit(MOV64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovEmitter<Mem, Imm> for Assembler<'a> {
fn mov(&mut self, op0: Mem, op1: Imm) {
self.emit(MOV8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOVS` (MOVS).
/// Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified with the first operand (destination operand). Both the source and destination operands are located in memory. The address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be overridden with a segment override prefix, but the ES segment cannot be overridden.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVS%3AMOVSB%3AMOVSW%3AMOVSD%3AMOVSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
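///
/// A minimal usage sketch (illustrative, not a doctest; it assumes an
/// `Assembler` value `asm` has already been constructed):
///
/// ```ignore
/// // `asm` is an assumed, ready-to-use Assembler.
/// asm.movs(); // emits the byte form (MOVSB)
/// ```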
pub trait MovsEmitter {
fn movs(&mut self);
}
impl<'a> MovsEmitter for Assembler<'a> {
fn movs(&mut self) {
self.emit(MOVS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `MOVSX` (MOVSX).
/// Copies the contents of the source operand (register or memory location) to the destination operand (register) and sign extends the value to 16 or 32 bits (see Figure 7-6 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The size of the converted value depends on the operand-size attribute.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVSX%3AMOVSXD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+------------+
/// | # | Operands |
/// +----+------------+
/// | 1 | Gpd, GpbLo |
/// | 2 | Gpd, Gpd |
/// | 3 | Gpd, Gpw |
/// | 4 | Gpd, Mem |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Gpd |
/// | 7 | Gpq, Gpw |
/// | 8 | Gpq, Mem |
/// | 9 | Gpw, GpbLo |
/// | 10 | Gpw, Gpd |
/// | 11 | Gpw, Gpw |
/// | 12 | Gpw, Mem |
/// +----+------------+
/// ```
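///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constants `eax`/`cl` are assumptions and may be spelled
/// differently in this crate):
///
/// ```ignore
/// // Hypothetical register constants: sign-extend CL into EAX (MOVSX r32, r8).
/// asm.movsx(eax, cl);
/// ```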
pub trait MovsxEmitter<A, B> {
fn movsx(&mut self, op0: A, op1: B);
}
impl<'a> MovsxEmitter<Gpw, Gpd> for Assembler<'a> {
fn movsx(&mut self, op0: Gpw, op1: Gpd) {
self.emit(MOVSXR16R32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpw, Mem> for Assembler<'a> {
fn movsx(&mut self, op0: Gpw, op1: Mem) {
self.emit(MOVSXR16M32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpd, Gpd> for Assembler<'a> {
fn movsx(&mut self, op0: Gpd, op1: Gpd) {
self.emit(MOVSXR32R32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpd, Mem> for Assembler<'a> {
fn movsx(&mut self, op0: Gpd, op1: Mem) {
self.emit(MOVSXR32M32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpq, Gpd> for Assembler<'a> {
fn movsx(&mut self, op0: Gpq, op1: Gpd) {
self.emit(MOVSXR64R32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpq, Mem> for Assembler<'a> {
fn movsx(&mut self, op0: Gpq, op1: Mem) {
self.emit(MOVSXR64M32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpw, GpbLo> for Assembler<'a> {
fn movsx(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(MOVSXR16R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpd, GpbLo> for Assembler<'a> {
fn movsx(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(MOVSXR32R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpq, GpbLo> for Assembler<'a> {
fn movsx(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(MOVSXR64R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpw, Gpw> for Assembler<'a> {
fn movsx(&mut self, op0: Gpw, op1: Gpw) {
self.emit(MOVSXR16R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpd, Gpw> for Assembler<'a> {
fn movsx(&mut self, op0: Gpd, op1: Gpw) {
self.emit(MOVSXR32R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovsxEmitter<Gpq, Gpw> for Assembler<'a> {
fn movsx(&mut self, op0: Gpq, op1: Gpw) {
self.emit(MOVSXR64R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOVZX` (MOVZX).
/// Copies the contents of the source operand (register or memory location) to the destination operand (register) and zero extends the value. The size of the converted value depends on the operand-size attribute.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVZX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands |
/// +---+------------+
/// | 1 | Gpd, GpbLo |
/// | 2 | Gpd, Gpw |
/// | 3 | Gpd, Mem |
/// | 4 | Gpq, GpbLo |
/// | 5 | Gpq, Gpw |
/// | 6 | Gpq, Mem |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Gpw |
/// | 9 | Gpw, Mem |
/// +---+------------+
/// ```
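///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constants `rax`/`cl` are assumptions):
///
/// ```ignore
/// // Hypothetical register constants: zero-extend CL into RAX (MOVZX r64, r8).
/// asm.movzx(rax, cl);
/// ```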
pub trait MovzxEmitter<A, B> {
fn movzx(&mut self, op0: A, op1: B);
}
impl<'a> MovzxEmitter<Gpw, GpbLo> for Assembler<'a> {
fn movzx(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(MOVZXR16R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpw, Mem> for Assembler<'a> {
fn movzx(&mut self, op0: Gpw, op1: Mem) {
self.emit(MOVZXR16M8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpd, GpbLo> for Assembler<'a> {
fn movzx(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(MOVZXR32R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpd, Mem> for Assembler<'a> {
fn movzx(&mut self, op0: Gpd, op1: Mem) {
self.emit(MOVZXR32M8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpq, GpbLo> for Assembler<'a> {
fn movzx(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(MOVZXR64R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpq, Mem> for Assembler<'a> {
fn movzx(&mut self, op0: Gpq, op1: Mem) {
self.emit(MOVZXR64M8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpw, Gpw> for Assembler<'a> {
fn movzx(&mut self, op0: Gpw, op1: Gpw) {
self.emit(MOVZXR16R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpd, Gpw> for Assembler<'a> {
fn movzx(&mut self, op0: Gpd, op1: Gpw) {
self.emit(MOVZXR32R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovzxEmitter<Gpq, Gpw> for Assembler<'a> {
fn movzx(&mut self, op0: Gpq, op1: Gpw) {
self.emit(MOVZXR64R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOV_CR2G`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | Gpq, CReg |
/// +---+-----------+
/// ```
pub trait MovCr2gEmitter<A, B> {
fn mov_cr2g(&mut self, op0: A, op1: B);
}
impl<'a> MovCr2gEmitter<Gpq, CReg> for Assembler<'a> {
fn mov_cr2g(&mut self, op0: Gpq, op1: CReg) {
self.emit(MOV_CR2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOV_DR2G`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | Gpq, DReg |
/// +---+-----------+
/// ```
pub trait MovDr2gEmitter<A, B> {
fn mov_dr2g(&mut self, op0: A, op1: B);
}
impl<'a> MovDr2gEmitter<Gpq, DReg> for Assembler<'a> {
fn mov_dr2g(&mut self, op0: Gpq, op1: DReg) {
self.emit(MOV_DR2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOV_G2CR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | CReg, Gpq |
/// +---+-----------+
/// ```
pub trait MovG2crEmitter<A, B> {
fn mov_g2cr(&mut self, op0: A, op1: B);
}
impl<'a> MovG2crEmitter<CReg, Gpq> for Assembler<'a> {
fn mov_g2cr(&mut self, op0: CReg, op1: Gpq) {
self.emit(MOV_G2CRRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOV_G2DR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | DReg, Gpq |
/// +---+-----------+
/// ```
pub trait MovG2drEmitter<A, B> {
fn mov_g2dr(&mut self, op0: A, op1: B);
}
impl<'a> MovG2drEmitter<DReg, Gpq> for Assembler<'a> {
fn mov_g2dr(&mut self, op0: DReg, op1: Gpq) {
self.emit(MOV_G2DRRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOV_G2S` (MOV).
/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination operand can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | SReg, Gpd |
/// | 2 | SReg, Mem |
/// +---+-----------+
/// ```
pub trait MovG2sEmitter<A, B> {
fn mov_g2s(&mut self, op0: A, op1: B);
}
impl<'a> MovG2sEmitter<SReg, Gpd> for Assembler<'a> {
fn mov_g2s(&mut self, op0: SReg, op1: Gpd) {
self.emit(MOV_G2SRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovG2sEmitter<SReg, Mem> for Assembler<'a> {
fn mov_g2s(&mut self, op0: SReg, op1: Mem) {
self.emit(MOV_G2SRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MOV_S2G` (MOV).
/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination operand can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | Gpd, SReg |
/// | 2 | Mem, SReg |
/// +---+-----------+
/// ```
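///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// operand constants `eax`/`ds` are assumptions about this crate's API):
///
/// ```ignore
/// // Hypothetical constants: read the DS segment selector into EAX.
/// asm.mov_s2g(eax, ds);
/// ```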
pub trait MovS2gEmitter<A, B> {
fn mov_s2g(&mut self, op0: A, op1: B);
}
impl<'a> MovS2gEmitter<Gpd, SReg> for Assembler<'a> {
fn mov_s2g(&mut self, op0: Gpd, op1: SReg) {
self.emit(MOV_S2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> MovS2gEmitter<Mem, SReg> for Assembler<'a> {
fn mov_s2g(&mut self, op0: Mem, op1: SReg) {
self.emit(MOV_S2GMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `MUL` (MUL).
/// Performs an unsigned multiplication of the first operand (destination operand) and the second operand (source operand) and stores the result in the destination operand. The destination operand is an implied operand located in register AL, AX or EAX (depending on the size of the operand); the source operand is located in a general-purpose register or a memory location. The action of this instruction and the location of the result depend on the opcode and the operand size, as shown in Table 4-9 of the Intel manual.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
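///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constant `rcx` are assumptions):
///
/// ```ignore
/// // Hypothetical register constant: RDX:RAX = RAX * RCX (unsigned).
/// asm.mul(rcx);
/// ```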
pub trait MulEmitter<A> {
fn mul(&mut self, op0: A);
}
impl<'a> MulEmitter<GpbLo> for Assembler<'a> {
fn mul(&mut self, op0: GpbLo) {
self.emit(MUL8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> MulEmitter<Mem> for Assembler<'a> {
fn mul(&mut self, op0: Mem) {
self.emit(MUL8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> MulEmitter<Gpw> for Assembler<'a> {
fn mul(&mut self, op0: Gpw) {
self.emit(MUL16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> MulEmitter<Gpd> for Assembler<'a> {
fn mul(&mut self, op0: Gpd) {
self.emit(MUL32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> MulEmitter<Gpq> for Assembler<'a> {
fn mul(&mut self, op0: Gpq) {
self.emit(MUL64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `NEG` (NEG).
/// Replaces the value of the operand (the destination operand) with its two's complement. (This operation is equivalent to subtracting the operand from 0.) The destination operand is located in a general-purpose register or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NEG.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
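///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constant `eax` are assumptions):
///
/// ```ignore
/// // Hypothetical register constant: EAX = 0 - EAX.
/// asm.neg(eax);
/// ```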
pub trait NegEmitter<A> {
fn neg(&mut self, op0: A);
}
impl<'a> NegEmitter<GpbLo> for Assembler<'a> {
fn neg(&mut self, op0: GpbLo) {
self.emit(NEG8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NegEmitter<Mem> for Assembler<'a> {
fn neg(&mut self, op0: Mem) {
self.emit(NEG8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NegEmitter<Gpw> for Assembler<'a> {
fn neg(&mut self, op0: Gpw) {
self.emit(NEG16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NegEmitter<Gpd> for Assembler<'a> {
fn neg(&mut self, op0: Gpd) {
self.emit(NEG32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NegEmitter<Gpq> for Assembler<'a> {
fn neg(&mut self, op0: Gpq) {
self.emit(NEG64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `NOP` (NOP).
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait NopEmitter {
fn nop(&mut self);
}
impl<'a> NopEmitter for Assembler<'a> {
fn nop(&mut self) {
self.emit(NOP, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `NOP` (NOP).
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Gpq |
/// | 3 | Gpw |
/// | 4 | Mem |
/// +---+----------+
/// ```
pub trait NopEmitter_1<A> {
fn nop_1(&mut self, op0: A);
}
impl<'a> NopEmitter_1<Gpw> for Assembler<'a> {
fn nop_1(&mut self, op0: Gpw) {
self.emit(NOP16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NopEmitter_1<Mem> for Assembler<'a> {
fn nop_1(&mut self, op0: Mem) {
self.emit(NOP16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NopEmitter_1<Gpd> for Assembler<'a> {
fn nop_1(&mut self, op0: Gpd) {
self.emit(NOP32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NopEmitter_1<Gpq> for Assembler<'a> {
fn nop_1(&mut self, op0: Gpq) {
self.emit(NOP64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `NOT` (NOT).
/// Performs a bitwise NOT operation (each 1 is set to 0, and each 0 is set to 1) on the destination operand and stores the result in the destination operand location. The destination operand can be a register or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
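///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constant `rax` are assumptions):
///
/// ```ignore
/// // Hypothetical register constant: RAX = !RAX (one's complement).
/// asm.not(rax);
/// ```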
pub trait NotEmitter<A> {
fn not(&mut self, op0: A);
}
impl<'a> NotEmitter<GpbLo> for Assembler<'a> {
fn not(&mut self, op0: GpbLo) {
self.emit(NOT8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NotEmitter<Mem> for Assembler<'a> {
fn not(&mut self, op0: Mem) {
self.emit(NOT8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NotEmitter<Gpw> for Assembler<'a> {
fn not(&mut self, op0: Gpw) {
self.emit(NOT16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NotEmitter<Gpd> for Assembler<'a> {
fn not(&mut self, op0: Gpd) {
self.emit(NOT32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> NotEmitter<Gpq> for Assembler<'a> {
fn not(&mut self, op0: Gpq) {
self.emit(NOT64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `OR` (OR).
/// Performs a bitwise inclusive OR operation between the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result of the OR instruction is set to 0 if both corresponding bits of the first and second operands are 0; otherwise, each bit is set to 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
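///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constants `eax`/`ebx` are assumptions):
///
/// ```ignore
/// // Hypothetical register constants: EAX |= EBX.
/// asm.or(eax, ebx);
/// ```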
pub trait OrEmitter<A, B> {
fn or(&mut self, op0: A, op1: B);
}
impl<'a> OrEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn or(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(OR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Mem, GpbLo> for Assembler<'a> {
fn or(&mut self, op0: Mem, op1: GpbLo) {
self.emit(OR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpw, Gpw> for Assembler<'a> {
fn or(&mut self, op0: Gpw, op1: Gpw) {
self.emit(OR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Mem, Gpw> for Assembler<'a> {
fn or(&mut self, op0: Mem, op1: Gpw) {
self.emit(OR16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpd, Gpd> for Assembler<'a> {
fn or(&mut self, op0: Gpd, op1: Gpd) {
self.emit(OR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Mem, Gpd> for Assembler<'a> {
fn or(&mut self, op0: Mem, op1: Gpd) {
self.emit(OR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpq, Gpq> for Assembler<'a> {
fn or(&mut self, op0: Gpq, op1: Gpq) {
self.emit(OR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Mem, Gpq> for Assembler<'a> {
fn or(&mut self, op0: Mem, op1: Gpq) {
self.emit(OR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<GpbLo, Mem> for Assembler<'a> {
fn or(&mut self, op0: GpbLo, op1: Mem) {
self.emit(OR8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpw, Mem> for Assembler<'a> {
fn or(&mut self, op0: Gpw, op1: Mem) {
self.emit(OR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpd, Mem> for Assembler<'a> {
fn or(&mut self, op0: Gpd, op1: Mem) {
self.emit(OR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpq, Mem> for Assembler<'a> {
fn or(&mut self, op0: Gpq, op1: Mem) {
self.emit(OR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<GpbLo, Imm> for Assembler<'a> {
fn or(&mut self, op0: GpbLo, op1: Imm) {
self.emit(OR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpw, Imm> for Assembler<'a> {
fn or(&mut self, op0: Gpw, op1: Imm) {
self.emit(OR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpd, Imm> for Assembler<'a> {
fn or(&mut self, op0: Gpd, op1: Imm) {
self.emit(OR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Gpq, Imm> for Assembler<'a> {
fn or(&mut self, op0: Gpq, op1: Imm) {
self.emit(OR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OrEmitter<Mem, Imm> for Assembler<'a> {
fn or(&mut self, op0: Mem, op1: Imm) {
self.emit(OR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `OUT` (OUT).
/// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as the destination operand allows I/O ports from 0 to 65,535 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait OutEmitter {
fn r#out(&mut self);
}
impl<'a> OutEmitter for Assembler<'a> {
fn r#out(&mut self) {
self.emit(OUT8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `OUT` (OUT).
/// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as the destination operand allows I/O ports from 0 to 65,535 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands |
/// +---+------------+
/// | 1 | GpbLo, Imm |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Imm |
/// | 4 | Gpw, Imm |
/// +---+------------+
/// ```
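///
/// A minimal usage sketch (illustrative, not a doctest; `asm`, the register
/// constant `al`, and the `Imm` constructor shown are all assumptions about
/// this crate's API):
///
/// ```ignore
/// // Hypothetical constants: write AL to I/O port 0x60.
/// asm.r#out_2(al, Imm::from(0x60));
/// ```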
pub trait OutEmitter_2<A, B> {
fn r#out_2(&mut self, op0: A, op1: B);
}
impl<'a> OutEmitter_2<GpbLo, Imm> for Assembler<'a> {
fn r#out_2(&mut self, op0: GpbLo, op1: Imm) {
self.emit(OUT8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OutEmitter_2<Gpw, Imm> for Assembler<'a> {
fn r#out_2(&mut self, op0: Gpw, op1: Imm) {
self.emit(OUT16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OutEmitter_2<Gpd, Imm> for Assembler<'a> {
fn r#out_2(&mut self, op0: Gpd, op1: Imm) {
self.emit(OUT32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> OutEmitter_2<Gpq, Imm> for Assembler<'a> {
fn r#out_2(&mut self, op0: Gpq, op1: Imm) {
self.emit(OUT64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `OUTS` (OUTS).
/// Copies data from the source operand (second operand) to the I/O port specified with the destination operand (first operand). The source operand is a memory location, the address of which is read from either the DS:SI, DS:ESI or the RSI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The DS segment may be overridden with a segment override prefix.) The destination operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUTS%3AOUTSB%3AOUTSW%3AOUTSD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait OutsEmitter {
fn outs(&mut self);
}
impl<'a> OutsEmitter for Assembler<'a> {
fn outs(&mut self) {
self.emit(OUTS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `PAUSE` (NOP).
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait PauseEmitter {
fn pause(&mut self);
}
impl<'a> PauseEmitter for Assembler<'a> {
fn pause(&mut self) {
self.emit(PAUSE, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `POP` (POP).
/// Loads the value from the top of the stack to the location specified with the destination operand (or explicit opcode) and then increments the stack pointer. The destination operand can be a general-purpose register, memory location, or segment register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Gpw |
/// | 3 | Mem |
/// +---+----------+
/// ```
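///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constant `rax` are assumptions):
///
/// ```ignore
/// // Hypothetical register constant: load RAX from the top of the stack.
/// asm.pop(rax);
/// ```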
pub trait PopEmitter<A> {
fn pop(&mut self, op0: A);
}
impl<'a> PopEmitter<Gpw> for Assembler<'a> {
fn pop(&mut self, op0: Gpw) {
self.emit(POP16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> PopEmitter<Gpq> for Assembler<'a> {
fn pop(&mut self, op0: Gpq) {
self.emit(POPR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> PopEmitter<Mem> for Assembler<'a> {
fn pop(&mut self, op0: Mem) {
self.emit(POP16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `POPF` (POPF).
/// Pops a doubleword (POPFD) from the top of the stack (if the current operand-size attribute is 32) and stores the value in the EFLAGS register, or pops a word from the top of the stack (if the operand-size attribute is 16) and stores it in the lower 16 bits of the EFLAGS register (that is, the FLAGS register). These instructions reverse the operation of the PUSHF/PUSHFD/PUSHFQ instructions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POPF%3APOPFD%3APOPFQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait PopfEmitter {
fn popf(&mut self);
}
impl<'a> PopfEmitter for Assembler<'a> {
fn popf(&mut self) {
self.emit(POPF16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `POP_SEG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | SReg |
/// +---+----------+
/// ```
pub trait PopSegEmitter<A> {
fn pop_seg(&mut self, op0: A);
}
impl<'a> PopSegEmitter<SReg> for Assembler<'a> {
fn pop_seg(&mut self, op0: SReg) {
self.emit(POP_SEG16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `PUSH` (PUSH).
/// Decrements the stack pointer and then stores the source operand on the top of the stack. Address and operand sizes are determined and used as described in the Intel manual.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSH.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Gpw |
/// | 3 | Imm |
/// | 4 | Mem |
/// +---+----------+
/// ```
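///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constant `rax` are assumptions):
///
/// ```ignore
/// // Hypothetical register constant: store RAX on the top of the stack.
/// asm.push(rax);
/// ```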
pub trait PushEmitter<A> {
fn push(&mut self, op0: A);
}
impl<'a> PushEmitter<Gpw> for Assembler<'a> {
fn push(&mut self, op0: Gpw) {
self.emit(PUSH16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> PushEmitter<Gpq> for Assembler<'a> {
fn push(&mut self, op0: Gpq) {
self.emit(PUSHR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> PushEmitter<Imm> for Assembler<'a> {
fn push(&mut self, op0: Imm) {
self.emit(PUSH16I, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> PushEmitter<Mem> for Assembler<'a> {
fn push(&mut self, op0: Mem) {
self.emit(PUSH16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `PUSHF` (PUSHF).
/// Decrements the stack pointer by 4 (if the current operand-size attribute is 32) and pushes the entire contents of the EFLAGS register onto the stack, or decrements the stack pointer by 2 (if the operand-size attribute is 16) and pushes the lower 16 bits of the EFLAGS register (that is, the FLAGS register) onto the stack. These instructions reverse the operation of the POPF/POPFD instructions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSHF%3APUSHFD%3APUSHFQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait PushfEmitter {
fn pushf(&mut self);
}
impl<'a> PushfEmitter for Assembler<'a> {
fn pushf(&mut self) {
self.emit(PUSHF16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `PUSH_SEG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | SReg |
/// +---+----------+
/// ```
pub trait PushSegEmitter<A> {
fn push_seg(&mut self, op0: A);
}
impl<'a> PushSegEmitter<SReg> for Assembler<'a> {
fn push_seg(&mut self, op0: SReg) {
self.emit(PUSH_SEG16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `RCL` (RCL).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
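///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constants `eax`/`cl` are assumptions):
///
/// ```ignore
/// // Hypothetical register constants: rotate EAX left through CF by CL bits.
/// asm.rcl(eax, cl);
/// ```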
pub trait RclEmitter<A, B> {
fn rcl(&mut self, op0: A, op1: B);
}
impl<'a> RclEmitter<GpbLo, Imm> for Assembler<'a> {
fn rcl(&mut self, op0: GpbLo, op1: Imm) {
self.emit(RCL8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Mem, Imm> for Assembler<'a> {
fn rcl(&mut self, op0: Mem, op1: Imm) {
self.emit(RCL8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Gpw, Imm> for Assembler<'a> {
fn rcl(&mut self, op0: Gpw, op1: Imm) {
self.emit(RCL16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Gpd, Imm> for Assembler<'a> {
fn rcl(&mut self, op0: Gpd, op1: Imm) {
self.emit(RCL32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Gpq, Imm> for Assembler<'a> {
fn rcl(&mut self, op0: Gpq, op1: Imm) {
self.emit(RCL64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn rcl(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(RCL8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Mem, GpbLo> for Assembler<'a> {
fn rcl(&mut self, op0: Mem, op1: GpbLo) {
self.emit(RCL8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Gpw, GpbLo> for Assembler<'a> {
fn rcl(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(RCL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Gpd, GpbLo> for Assembler<'a> {
fn rcl(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(RCL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RclEmitter<Gpq, GpbLo> for Assembler<'a> {
fn rcl(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(RCL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `RCR` (RCR).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
pub trait RcrEmitter<A, B> {
fn rcr(&mut self, op0: A, op1: B);
}
impl<'a> RcrEmitter<GpbLo, Imm> for Assembler<'a> {
fn rcr(&mut self, op0: GpbLo, op1: Imm) {
self.emit(RCR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Mem, Imm> for Assembler<'a> {
fn rcr(&mut self, op0: Mem, op1: Imm) {
self.emit(RCR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Gpw, Imm> for Assembler<'a> {
fn rcr(&mut self, op0: Gpw, op1: Imm) {
self.emit(RCR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Gpd, Imm> for Assembler<'a> {
fn rcr(&mut self, op0: Gpd, op1: Imm) {
self.emit(RCR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Gpq, Imm> for Assembler<'a> {
fn rcr(&mut self, op0: Gpq, op1: Imm) {
self.emit(RCR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn rcr(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(RCR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Mem, GpbLo> for Assembler<'a> {
fn rcr(&mut self, op0: Mem, op1: GpbLo) {
self.emit(RCR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Gpw, GpbLo> for Assembler<'a> {
fn rcr(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(RCR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Gpd, GpbLo> for Assembler<'a> {
fn rcr(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(RCR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RcrEmitter<Gpq, GpbLo> for Assembler<'a> {
fn rcr(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(RCR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `RET` (RET).
/// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait RetEmitter {
fn ret(&mut self);
}
impl<'a> RetEmitter for Assembler<'a> {
fn ret(&mut self) {
self.emit(RET, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `RET` (RET).
/// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
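///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the `Imm`
/// constructor shown are assumptions about this crate's API):
///
/// ```ignore
/// // Hypothetical Imm constructor: pop the return address, then release
/// // 16 bytes of stack arguments (RET 16).
/// asm.ret_1(Imm::from(16));
/// ```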
pub trait RetEmitter_1<A> {
fn ret_1(&mut self, op0: A);
}
impl<'a> RetEmitter_1<Imm> for Assembler<'a> {
fn ret_1(&mut self, op0: Imm) {
self.emit(RETI, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `RETF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait RetfEmitter {
fn retf(&mut self);
}
impl<'a> RetfEmitter for Assembler<'a> {
fn retf(&mut self) {
self.emit(RETF16, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `RETF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
pub trait RetfEmitter_1<A> {
fn retf_1(&mut self, op0: A);
}
impl<'a> RetfEmitter_1<Imm> for Assembler<'a> {
fn retf_1(&mut self, op0: Imm) {
self.emit(RETF16I, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `ROL` (ROL).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
pub trait RolEmitter<A, B> {
fn rol(&mut self, op0: A, op1: B);
}
impl<'a> RolEmitter<GpbLo, Imm> for Assembler<'a> {
fn rol(&mut self, op0: GpbLo, op1: Imm) {
self.emit(ROL8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Mem, Imm> for Assembler<'a> {
fn rol(&mut self, op0: Mem, op1: Imm) {
self.emit(ROL8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Gpw, Imm> for Assembler<'a> {
fn rol(&mut self, op0: Gpw, op1: Imm) {
self.emit(ROL16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Gpd, Imm> for Assembler<'a> {
fn rol(&mut self, op0: Gpd, op1: Imm) {
self.emit(ROL32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Gpq, Imm> for Assembler<'a> {
fn rol(&mut self, op0: Gpq, op1: Imm) {
self.emit(ROL64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn rol(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(ROL8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Mem, GpbLo> for Assembler<'a> {
fn rol(&mut self, op0: Mem, op1: GpbLo) {
self.emit(ROL8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Gpw, GpbLo> for Assembler<'a> {
fn rol(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(ROL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Gpd, GpbLo> for Assembler<'a> {
fn rol(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(ROL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RolEmitter<Gpq, GpbLo> for Assembler<'a> {
fn rol(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(ROL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `ROR` (ROR).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
pub trait RorEmitter<A, B> {
fn ror(&mut self, op0: A, op1: B);
}
impl<'a> RorEmitter<GpbLo, Imm> for Assembler<'a> {
fn ror(&mut self, op0: GpbLo, op1: Imm) {
self.emit(ROR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Mem, Imm> for Assembler<'a> {
fn ror(&mut self, op0: Mem, op1: Imm) {
self.emit(ROR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Gpw, Imm> for Assembler<'a> {
fn ror(&mut self, op0: Gpw, op1: Imm) {
self.emit(ROR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Gpd, Imm> for Assembler<'a> {
fn ror(&mut self, op0: Gpd, op1: Imm) {
self.emit(ROR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Gpq, Imm> for Assembler<'a> {
fn ror(&mut self, op0: Gpq, op1: Imm) {
self.emit(ROR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn ror(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(ROR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Mem, GpbLo> for Assembler<'a> {
fn ror(&mut self, op0: Mem, op1: GpbLo) {
self.emit(ROR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Gpw, GpbLo> for Assembler<'a> {
fn ror(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(ROR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Gpd, GpbLo> for Assembler<'a> {
fn ror(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(ROR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> RorEmitter<Gpq, GpbLo> for Assembler<'a> {
fn ror(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(ROR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `SAHF` (SAHF).
/// Loads the SF, ZF, AF, PF, and CF flags of the EFLAGS register with values from the corresponding bits in the AH register (bits 7, 6, 4, 2, and 0, respectively). Bits 1, 3, and 5 of register AH are ignored; the corresponding reserved bits (1, 3, and 5) in the EFLAGS register remain as shown in the “Operation” section of the Intel manual.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAHF.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait SahfEmitter {
fn sahf(&mut self);
}
impl<'a> SahfEmitter for Assembler<'a> {
fn sahf(&mut self) {
self.emit(SAHF, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `SAR` (SAR).
/// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
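///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constants `eax`/`cl` are assumptions):
///
/// ```ignore
/// // Hypothetical register constants: arithmetic (sign-preserving) right
/// // shift of EAX by CL bits.
/// asm.sar(eax, cl);
/// ```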
pub trait SarEmitter<A, B> {
fn sar(&mut self, op0: A, op1: B);
}
impl<'a> SarEmitter<GpbLo, Imm> for Assembler<'a> {
fn sar(&mut self, op0: GpbLo, op1: Imm) {
self.emit(SAR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Mem, Imm> for Assembler<'a> {
fn sar(&mut self, op0: Mem, op1: Imm) {
self.emit(SAR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Gpw, Imm> for Assembler<'a> {
fn sar(&mut self, op0: Gpw, op1: Imm) {
self.emit(SAR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Gpd, Imm> for Assembler<'a> {
fn sar(&mut self, op0: Gpd, op1: Imm) {
self.emit(SAR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Gpq, Imm> for Assembler<'a> {
fn sar(&mut self, op0: Gpq, op1: Imm) {
self.emit(SAR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn sar(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(SAR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Mem, GpbLo> for Assembler<'a> {
fn sar(&mut self, op0: Mem, op1: GpbLo) {
self.emit(SAR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Gpw, GpbLo> for Assembler<'a> {
fn sar(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(SAR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Gpd, GpbLo> for Assembler<'a> {
fn sar(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(SAR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SarEmitter<Gpq, GpbLo> for Assembler<'a> {
fn sar(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(SAR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `SBB` (SBB).
/// Adds the source operand (second operand) and the carry (CF) flag, and subtracts the result from the destination operand (first operand). The result of the subtraction is stored in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SBB.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
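///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constants `rax`/`rbx` are assumptions):
///
/// ```ignore
/// // Hypothetical register constants: RAX = RAX - (RBX + CF).
/// asm.sbb(rax, rbx);
/// ```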
pub trait SbbEmitter<A, B> {
fn sbb(&mut self, op0: A, op1: B);
}
impl<'a> SbbEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn sbb(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(SBB8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Mem, GpbLo> for Assembler<'a> {
fn sbb(&mut self, op0: Mem, op1: GpbLo) {
self.emit(SBB8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpw, Gpw> for Assembler<'a> {
fn sbb(&mut self, op0: Gpw, op1: Gpw) {
self.emit(SBB16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Mem, Gpw> for Assembler<'a> {
fn sbb(&mut self, op0: Mem, op1: Gpw) {
self.emit(SBB16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpd, Gpd> for Assembler<'a> {
fn sbb(&mut self, op0: Gpd, op1: Gpd) {
self.emit(SBB32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Mem, Gpd> for Assembler<'a> {
fn sbb(&mut self, op0: Mem, op1: Gpd) {
self.emit(SBB32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpq, Gpq> for Assembler<'a> {
fn sbb(&mut self, op0: Gpq, op1: Gpq) {
self.emit(SBB64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Mem, Gpq> for Assembler<'a> {
fn sbb(&mut self, op0: Mem, op1: Gpq) {
self.emit(SBB64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<GpbLo, Mem> for Assembler<'a> {
fn sbb(&mut self, op0: GpbLo, op1: Mem) {
self.emit(SBB8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpw, Mem> for Assembler<'a> {
fn sbb(&mut self, op0: Gpw, op1: Mem) {
self.emit(SBB16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpd, Mem> for Assembler<'a> {
fn sbb(&mut self, op0: Gpd, op1: Mem) {
self.emit(SBB32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpq, Mem> for Assembler<'a> {
fn sbb(&mut self, op0: Gpq, op1: Mem) {
self.emit(SBB64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<GpbLo, Imm> for Assembler<'a> {
fn sbb(&mut self, op0: GpbLo, op1: Imm) {
self.emit(SBB8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpw, Imm> for Assembler<'a> {
fn sbb(&mut self, op0: Gpw, op1: Imm) {
self.emit(SBB16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpd, Imm> for Assembler<'a> {
fn sbb(&mut self, op0: Gpd, op1: Imm) {
self.emit(SBB32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Gpq, Imm> for Assembler<'a> {
fn sbb(&mut self, op0: Gpq, op1: Imm) {
self.emit(SBB64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SbbEmitter<Mem, Imm> for Assembler<'a> {
fn sbb(&mut self, op0: Mem, op1: Imm) {
self.emit(SBB8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `SCAS` (SCAS).
/// In non-64-bit modes and in default 64-bit mode: this instruction compares a byte, word, doubleword, or quadword specified using a memory operand with the value in AL, AX, EAX, or RAX. It then sets status flags in EFLAGS recording the results. The memory operand address is read from the ES:(E)DI register (depending on the address-size attribute of the instruction and the current operational mode). Note that ES cannot be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SCAS%3ASCASB%3ASCASW%3ASCASD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait ScasEmitter {
fn scas(&mut self);
}
impl<'a> ScasEmitter for Assembler<'a> {
fn scas(&mut self) {
self.emit(SCAS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `SETA` (SETA).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
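///
/// A minimal usage sketch (illustrative, not a doctest; `asm` and the
/// register constant `al` are assumptions):
///
/// ```ignore
/// // Hypothetical register constant: AL = 1 if CF == 0 && ZF == 0
/// // (unsigned "above"), else AL = 0.
/// asm.seta(al);
/// ```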
pub trait SetaEmitter<A> {
fn seta(&mut self, op0: A);
}
impl<'a> SetaEmitter<GpbLo> for Assembler<'a> {
fn seta(&mut self, op0: GpbLo) {
self.emit(SETA8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetaEmitter<Mem> for Assembler<'a> {
fn seta(&mut self, op0: Mem) {
self.emit(SETA8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETBE` (SETBE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetbeEmitter<A> {
fn setbe(&mut self, op0: A);
}
impl<'a> SetbeEmitter<GpbLo> for Assembler<'a> {
fn setbe(&mut self, op0: GpbLo) {
self.emit(SETBE8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetbeEmitter<Mem> for Assembler<'a> {
fn setbe(&mut self, op0: Mem) {
self.emit(SETBE8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETC` (SETC).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetcEmitter<A> {
fn setc(&mut self, op0: A);
}
impl<'a> SetcEmitter<GpbLo> for Assembler<'a> {
fn setc(&mut self, op0: GpbLo) {
self.emit(SETC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetcEmitter<Mem> for Assembler<'a> {
fn setc(&mut self, op0: Mem) {
self.emit(SETC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETG` (SETG).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetgEmitter<A> {
fn setg(&mut self, op0: A);
}
impl<'a> SetgEmitter<GpbLo> for Assembler<'a> {
fn setg(&mut self, op0: GpbLo) {
self.emit(SETG8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetgEmitter<Mem> for Assembler<'a> {
fn setg(&mut self, op0: Mem) {
self.emit(SETG8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETGE` (SETGE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetgeEmitter<A> {
fn setge(&mut self, op0: A);
}
impl<'a> SetgeEmitter<GpbLo> for Assembler<'a> {
fn setge(&mut self, op0: GpbLo) {
self.emit(SETGE8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetgeEmitter<Mem> for Assembler<'a> {
fn setge(&mut self, op0: Mem) {
self.emit(SETGE8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETL` (SETL).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetlEmitter<A> {
fn setl(&mut self, op0: A);
}
impl<'a> SetlEmitter<GpbLo> for Assembler<'a> {
fn setl(&mut self, op0: GpbLo) {
self.emit(SETL8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetlEmitter<Mem> for Assembler<'a> {
fn setl(&mut self, op0: Mem) {
self.emit(SETL8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETLE` (SETLE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetleEmitter<A> {
fn setle(&mut self, op0: A);
}
impl<'a> SetleEmitter<GpbLo> for Assembler<'a> {
fn setle(&mut self, op0: GpbLo) {
self.emit(SETLE8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetleEmitter<Mem> for Assembler<'a> {
fn setle(&mut self, op0: Mem) {
self.emit(SETLE8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETNC` (SETNC).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetncEmitter<A> {
fn setnc(&mut self, op0: A);
}
impl<'a> SetncEmitter<GpbLo> for Assembler<'a> {
fn setnc(&mut self, op0: GpbLo) {
self.emit(SETNC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetncEmitter<Mem> for Assembler<'a> {
fn setnc(&mut self, op0: Mem) {
self.emit(SETNC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETNO` (SETNO).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetnoEmitter<A> {
fn setno(&mut self, op0: A);
}
impl<'a> SetnoEmitter<GpbLo> for Assembler<'a> {
fn setno(&mut self, op0: GpbLo) {
self.emit(SETNO8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetnoEmitter<Mem> for Assembler<'a> {
fn setno(&mut self, op0: Mem) {
self.emit(SETNO8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETNP` (SETNP).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetnpEmitter<A> {
fn setnp(&mut self, op0: A);
}
impl<'a> SetnpEmitter<GpbLo> for Assembler<'a> {
fn setnp(&mut self, op0: GpbLo) {
self.emit(SETNP8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetnpEmitter<Mem> for Assembler<'a> {
fn setnp(&mut self, op0: Mem) {
self.emit(SETNP8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETNS` (SETNS).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetnsEmitter<A> {
fn setns(&mut self, op0: A);
}
impl<'a> SetnsEmitter<GpbLo> for Assembler<'a> {
fn setns(&mut self, op0: GpbLo) {
self.emit(SETNS8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetnsEmitter<Mem> for Assembler<'a> {
fn setns(&mut self, op0: Mem) {
self.emit(SETNS8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETNZ` (SETNZ).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetnzEmitter<A> {
fn setnz(&mut self, op0: A);
}
impl<'a> SetnzEmitter<GpbLo> for Assembler<'a> {
fn setnz(&mut self, op0: GpbLo) {
self.emit(SETNZ8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetnzEmitter<Mem> for Assembler<'a> {
fn setnz(&mut self, op0: Mem) {
self.emit(SETNZ8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETO` (SETO).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetoEmitter<A> {
fn seto(&mut self, op0: A);
}
impl<'a> SetoEmitter<GpbLo> for Assembler<'a> {
fn seto(&mut self, op0: GpbLo) {
self.emit(SETO8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetoEmitter<Mem> for Assembler<'a> {
fn seto(&mut self, op0: Mem) {
self.emit(SETO8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETP` (SETP).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetpEmitter<A> {
fn setp(&mut self, op0: A);
}
impl<'a> SetpEmitter<GpbLo> for Assembler<'a> {
fn setp(&mut self, op0: GpbLo) {
self.emit(SETP8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetpEmitter<Mem> for Assembler<'a> {
fn setp(&mut self, op0: Mem) {
self.emit(SETP8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETS` (SETS).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetsEmitter<A> {
fn sets(&mut self, op0: A);
}
impl<'a> SetsEmitter<GpbLo> for Assembler<'a> {
fn sets(&mut self, op0: GpbLo) {
self.emit(SETS8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetsEmitter<Mem> for Assembler<'a> {
fn sets(&mut self, op0: Mem) {
self.emit(SETS8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETZ` (SETZ).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetzEmitter<A> {
fn setz(&mut self, op0: A);
}
impl<'a> SetzEmitter<GpbLo> for Assembler<'a> {
fn setz(&mut self, op0: GpbLo) {
self.emit(SETZ8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetzEmitter<Mem> for Assembler<'a> {
fn setz(&mut self, op0: Mem) {
self.emit(SETZ8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SETCC` (SETcc).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SetccEmitter<A> {
fn setcc(&mut self, op0: A);
}
impl<'a> SetccEmitter<GpbLo> for Assembler<'a> {
fn setcc(&mut self, op0: GpbLo) {
self.emit(SETCC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SetccEmitter<Mem> for Assembler<'a> {
fn setcc(&mut self, op0: Mem) {
self.emit(SETCC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SGDT`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait SgdtEmitter<A> {
fn sgdt(&mut self, op0: A);
}
impl<'a> SgdtEmitter<Mem> for Assembler<'a> {
fn sgdt(&mut self, op0: Mem) {
self.emit(SGDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SHL` (SHL).
/// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
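///
/// A minimal usage sketch; `rax: Gpq`, `cl: GpbLo`, and the `imm`
/// constructor are illustrative assumptions, not names defined here.
///
/// ```text
/// asm.shl(rax, imm(3)); // rax <<= 3
/// asm.shl(rax, cl);     // rax <<= cl (a variable count must live in CL)
/// ```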
pub trait ShlEmitter<A, B> {
fn shl(&mut self, op0: A, op1: B);
}
impl<'a> ShlEmitter<GpbLo, Imm> for Assembler<'a> {
fn shl(&mut self, op0: GpbLo, op1: Imm) {
self.emit(SHL8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Mem, Imm> for Assembler<'a> {
fn shl(&mut self, op0: Mem, op1: Imm) {
self.emit(SHL8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Gpw, Imm> for Assembler<'a> {
fn shl(&mut self, op0: Gpw, op1: Imm) {
self.emit(SHL16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Gpd, Imm> for Assembler<'a> {
fn shl(&mut self, op0: Gpd, op1: Imm) {
self.emit(SHL32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Gpq, Imm> for Assembler<'a> {
fn shl(&mut self, op0: Gpq, op1: Imm) {
self.emit(SHL64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn shl(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(SHL8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Mem, GpbLo> for Assembler<'a> {
fn shl(&mut self, op0: Mem, op1: GpbLo) {
self.emit(SHL8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Gpw, GpbLo> for Assembler<'a> {
fn shl(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(SHL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Gpd, GpbLo> for Assembler<'a> {
fn shl(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(SHL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShlEmitter<Gpq, GpbLo> for Assembler<'a> {
fn shl(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(SHL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `SHLD` (SHLD).
/// The SHLD instruction is used for multi-precision shifts of 64 bits or more.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHLD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+-----------------+
/// | # | Operands |
/// +----+-----------------+
/// | 1 | Gpd, Gpd, GpbLo |
/// | 2 | Gpd, Gpd, Imm |
/// | 3 | Gpq, Gpq, GpbLo |
/// | 4 | Gpq, Gpq, Imm |
/// | 5 | Gpw, Gpw, GpbLo |
/// | 6 | Gpw, Gpw, Imm |
/// | 7 | Mem, Gpd, GpbLo |
/// | 8 | Mem, Gpd, Imm |
/// | 9 | Mem, Gpq, GpbLo |
/// | 10 | Mem, Gpq, Imm |
/// | 11 | Mem, Gpw, GpbLo |
/// | 12 | Mem, Gpw, Imm |
/// +----+-----------------+
/// ```
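///
/// A sketch of the multi-precision idiom the description refers to,
/// shifting the 128-bit value in `rdx:rax` left by five bits (`rdx`,
/// `rax`, and `imm` are illustrative):
///
/// ```text
/// asm.shld(rdx, rax, imm(5)); // high half receives the bits leaving rax
/// asm.shl(rax, imm(5));       // then shift the low half
/// ```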
pub trait ShldEmitter<A, B, C> {
fn shld(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> ShldEmitter<Gpw, Gpw, Imm> for Assembler<'a> {
fn shld(&mut self, op0: Gpw, op1: Gpw, op2: Imm) {
self.emit(SHLD16RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Mem, Gpw, Imm> for Assembler<'a> {
fn shld(&mut self, op0: Mem, op1: Gpw, op2: Imm) {
self.emit(SHLD16MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Gpd, Gpd, Imm> for Assembler<'a> {
fn shld(&mut self, op0: Gpd, op1: Gpd, op2: Imm) {
self.emit(SHLD32RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Mem, Gpd, Imm> for Assembler<'a> {
fn shld(&mut self, op0: Mem, op1: Gpd, op2: Imm) {
self.emit(SHLD32MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Gpq, Gpq, Imm> for Assembler<'a> {
fn shld(&mut self, op0: Gpq, op1: Gpq, op2: Imm) {
self.emit(SHLD64RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Mem, Gpq, Imm> for Assembler<'a> {
fn shld(&mut self, op0: Mem, op1: Gpq, op2: Imm) {
self.emit(SHLD64MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Gpw, Gpw, GpbLo> for Assembler<'a> {
fn shld(&mut self, op0: Gpw, op1: Gpw, op2: GpbLo) {
self.emit(SHLD16RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Mem, Gpw, GpbLo> for Assembler<'a> {
fn shld(&mut self, op0: Mem, op1: Gpw, op2: GpbLo) {
self.emit(SHLD16MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Gpd, Gpd, GpbLo> for Assembler<'a> {
fn shld(&mut self, op0: Gpd, op1: Gpd, op2: GpbLo) {
self.emit(SHLD32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Mem, Gpd, GpbLo> for Assembler<'a> {
fn shld(&mut self, op0: Mem, op1: Gpd, op2: GpbLo) {
self.emit(SHLD32MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Gpq, Gpq, GpbLo> for Assembler<'a> {
fn shld(&mut self, op0: Gpq, op1: Gpq, op2: GpbLo) {
self.emit(SHLD64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShldEmitter<Mem, Gpq, GpbLo> for Assembler<'a> {
fn shld(&mut self, op0: Mem, op1: Gpq, op2: GpbLo) {
self.emit(SHLD64MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `SHR` (SHR).
/// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
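///
/// A minimal usage sketch (`rax` and `imm` are illustrative):
///
/// ```text
/// asm.shr(rax, imm(4)); // unsigned divide by 16; the last bit out lands in CF
/// ```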
pub trait ShrEmitter<A, B> {
fn shr(&mut self, op0: A, op1: B);
}
impl<'a> ShrEmitter<GpbLo, Imm> for Assembler<'a> {
fn shr(&mut self, op0: GpbLo, op1: Imm) {
self.emit(SHR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Mem, Imm> for Assembler<'a> {
fn shr(&mut self, op0: Mem, op1: Imm) {
self.emit(SHR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Gpw, Imm> for Assembler<'a> {
fn shr(&mut self, op0: Gpw, op1: Imm) {
self.emit(SHR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Gpd, Imm> for Assembler<'a> {
fn shr(&mut self, op0: Gpd, op1: Imm) {
self.emit(SHR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Gpq, Imm> for Assembler<'a> {
fn shr(&mut self, op0: Gpq, op1: Imm) {
self.emit(SHR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn shr(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(SHR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Mem, GpbLo> for Assembler<'a> {
fn shr(&mut self, op0: Mem, op1: GpbLo) {
self.emit(SHR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Gpw, GpbLo> for Assembler<'a> {
fn shr(&mut self, op0: Gpw, op1: GpbLo) {
self.emit(SHR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Gpd, GpbLo> for Assembler<'a> {
fn shr(&mut self, op0: Gpd, op1: GpbLo) {
self.emit(SHR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> ShrEmitter<Gpq, GpbLo> for Assembler<'a> {
fn shr(&mut self, op0: Gpq, op1: GpbLo) {
self.emit(SHR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `SHRD` (SHRD).
/// The SHRD instruction is useful for multi-precision shifts of 64 bits or more.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHRD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+-----------------+
/// | # | Operands |
/// +----+-----------------+
/// | 1 | Gpd, Gpd, GpbLo |
/// | 2 | Gpd, Gpd, Imm |
/// | 3 | Gpq, Gpq, GpbLo |
/// | 4 | Gpq, Gpq, Imm |
/// | 5 | Gpw, Gpw, GpbLo |
/// | 6 | Gpw, Gpw, Imm |
/// | 7 | Mem, Gpd, GpbLo |
/// | 8 | Mem, Gpd, Imm |
/// | 9 | Mem, Gpq, GpbLo |
/// | 10 | Mem, Gpq, Imm |
/// | 11 | Mem, Gpw, GpbLo |
/// | 12 | Mem, Gpw, Imm |
/// +----+-----------------+
/// ```
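///
/// The right-shift counterpart of the SHLD idiom, shifting the 128-bit
/// value in `rdx:rax` right by five bits (`rdx`, `rax`, and `imm` are
/// illustrative):
///
/// ```text
/// asm.shrd(rax, rdx, imm(5)); // low half receives the bits leaving rdx
/// asm.shr(rdx, imm(5));       // then shift the high half
/// ```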
pub trait ShrdEmitter<A, B, C> {
fn shrd(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> ShrdEmitter<Gpw, Gpw, Imm> for Assembler<'a> {
fn shrd(&mut self, op0: Gpw, op1: Gpw, op2: Imm) {
self.emit(SHRD16RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Mem, Gpw, Imm> for Assembler<'a> {
fn shrd(&mut self, op0: Mem, op1: Gpw, op2: Imm) {
self.emit(SHRD16MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Gpd, Gpd, Imm> for Assembler<'a> {
fn shrd(&mut self, op0: Gpd, op1: Gpd, op2: Imm) {
self.emit(SHRD32RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Mem, Gpd, Imm> for Assembler<'a> {
fn shrd(&mut self, op0: Mem, op1: Gpd, op2: Imm) {
self.emit(SHRD32MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Gpq, Gpq, Imm> for Assembler<'a> {
fn shrd(&mut self, op0: Gpq, op1: Gpq, op2: Imm) {
self.emit(SHRD64RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Mem, Gpq, Imm> for Assembler<'a> {
fn shrd(&mut self, op0: Mem, op1: Gpq, op2: Imm) {
self.emit(SHRD64MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Gpw, Gpw, GpbLo> for Assembler<'a> {
fn shrd(&mut self, op0: Gpw, op1: Gpw, op2: GpbLo) {
self.emit(SHRD16RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Mem, Gpw, GpbLo> for Assembler<'a> {
fn shrd(&mut self, op0: Mem, op1: Gpw, op2: GpbLo) {
self.emit(SHRD16MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Gpd, Gpd, GpbLo> for Assembler<'a> {
fn shrd(&mut self, op0: Gpd, op1: Gpd, op2: GpbLo) {
self.emit(SHRD32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Mem, Gpd, GpbLo> for Assembler<'a> {
fn shrd(&mut self, op0: Mem, op1: Gpd, op2: GpbLo) {
self.emit(SHRD32MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Gpq, Gpq, GpbLo> for Assembler<'a> {
fn shrd(&mut self, op0: Gpq, op1: Gpq, op2: GpbLo) {
self.emit(SHRD64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> ShrdEmitter<Mem, Gpq, GpbLo> for Assembler<'a> {
fn shrd(&mut self, op0: Mem, op1: Gpq, op2: GpbLo) {
self.emit(SHRD64MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `SIDT`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait SidtEmitter<A> {
fn sidt(&mut self, op0: A);
}
impl<'a> SidtEmitter<Mem> for Assembler<'a> {
fn sidt(&mut self, op0: Mem) {
self.emit(SIDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SLDT` (SLDT).
/// Stores the segment selector from the local descriptor table register (LDTR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the segment descriptor (located in the GDT) for the current LDT. This instruction can only be executed in protected mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SLDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait SldtEmitter<A> {
fn sldt(&mut self, op0: A);
}
impl<'a> SldtEmitter<Gpd> for Assembler<'a> {
fn sldt(&mut self, op0: Gpd) {
self.emit(SLDTR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SldtEmitter<Mem> for Assembler<'a> {
fn sldt(&mut self, op0: Mem) {
self.emit(SLDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SMSW` (SMSW).
/// Stores the machine status word (bits 0 through 15 of control register CR0) into the destination operand. The destination operand can be a general-purpose register or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SMSW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Gpq |
/// | 3 | Gpw |
/// | 4 | Mem |
/// +---+----------+
/// ```
pub trait SmswEmitter<A> {
fn smsw(&mut self, op0: A);
}
impl<'a> SmswEmitter<Mem> for Assembler<'a> {
fn smsw(&mut self, op0: Mem) {
self.emit(SMSWM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SmswEmitter<Gpw> for Assembler<'a> {
fn smsw(&mut self, op0: Gpw) {
self.emit(SMSW16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SmswEmitter<Gpd> for Assembler<'a> {
fn smsw(&mut self, op0: Gpd) {
self.emit(SMSW32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> SmswEmitter<Gpq> for Assembler<'a> {
fn smsw(&mut self, op0: Gpq) {
self.emit(SMSW64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `STC` (STC).
/// Sets the CF flag in the EFLAGS register. Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait StcEmitter {
fn stc(&mut self);
}
impl<'a> StcEmitter for Assembler<'a> {
fn stc(&mut self) {
self.emit(STC, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `STD` (STD).
/// Sets the DF flag in the EFLAGS register. When the DF flag is set to 1, string operations decrement the index registers (ESI and/or EDI). Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait StdEmitter {
fn std(&mut self);
}
impl<'a> StdEmitter for Assembler<'a> {
fn std(&mut self) {
self.emit(STD, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `STI` (STI).
/// In most cases, STI sets the interrupt flag (IF) in the EFLAGS register. This allows the processor to respond to maskable hardware interrupts.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STI.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait StiEmitter {
fn sti(&mut self);
}
impl<'a> StiEmitter for Assembler<'a> {
fn sti(&mut self) {
self.emit(STI, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `STOS` (STOS).
/// In non-64-bit and default 64-bit mode, stores a byte, word, or doubleword from the AL, AX, or EAX register (respectively) into the destination operand. The destination operand is a memory location, the address of which is read from either the ES:EDI or ES:DI register (depending on the address-size attribute of the instruction and the mode of operation). The ES segment cannot be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STOS%3ASTOSB%3ASTOSW%3ASTOSD%3ASTOSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
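///
/// A minimal sketch pairing STOS with the direction flag; only emitters
/// defined in this module are used, and AL/RDI are assumed to have been
/// loaded beforehand:
///
/// ```text
/// asm.std();  // DF = 1: string stores decrement the index register
/// asm.stos(); // [rdi] = al, then rdi -= 1
/// ```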
pub trait StosEmitter {
fn stos(&mut self);
}
impl<'a> StosEmitter for Assembler<'a> {
fn stos(&mut self) {
self.emit(STOS8, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `STR` (STR).
/// Stores the segment selector from the task register (TR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the task state segment (TSS) for the currently running task.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
pub trait StrEmitter<A> {
fn str(&mut self, op0: A);
}
impl<'a> StrEmitter<Gpd> for Assembler<'a> {
fn str(&mut self, op0: Gpd) {
self.emit(STRR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> StrEmitter<Mem> for Assembler<'a> {
fn str(&mut self, op0: Mem) {
self.emit(STRM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `STTILECFG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
pub trait SttilecfgEmitter<A> {
fn sttilecfg(&mut self, op0: A);
}
impl<'a> SttilecfgEmitter<Mem> for Assembler<'a> {
fn sttilecfg(&mut self, op0: Mem) {
self.emit(STTILECFGM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `SUB` (SUB).
/// Subtracts the second operand (source operand) from the first operand (destination operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, register, or memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SUB.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
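///
/// A minimal usage sketch (`rax`, `rbx`, `eax`, and `imm` are
/// illustrative values, not constructors defined here):
///
/// ```text
/// asm.sub(rax, rbx);     // rax -= rbx
/// asm.sub(eax, imm(16)); // eax -= 16 (the immediate is sign-extended)
/// ```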
pub trait SubEmitter<A, B> {
fn sub(&mut self, op0: A, op1: B);
}
impl<'a> SubEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn sub(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(SUB8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Mem, GpbLo> for Assembler<'a> {
fn sub(&mut self, op0: Mem, op1: GpbLo) {
self.emit(SUB8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpw, Gpw> for Assembler<'a> {
fn sub(&mut self, op0: Gpw, op1: Gpw) {
self.emit(SUB16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Mem, Gpw> for Assembler<'a> {
fn sub(&mut self, op0: Mem, op1: Gpw) {
self.emit(SUB16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpd, Gpd> for Assembler<'a> {
fn sub(&mut self, op0: Gpd, op1: Gpd) {
self.emit(SUB32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Mem, Gpd> for Assembler<'a> {
fn sub(&mut self, op0: Mem, op1: Gpd) {
self.emit(SUB32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpq, Gpq> for Assembler<'a> {
fn sub(&mut self, op0: Gpq, op1: Gpq) {
self.emit(SUB64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Mem, Gpq> for Assembler<'a> {
fn sub(&mut self, op0: Mem, op1: Gpq) {
self.emit(SUB64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<GpbLo, Mem> for Assembler<'a> {
fn sub(&mut self, op0: GpbLo, op1: Mem) {
self.emit(SUB8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpw, Mem> for Assembler<'a> {
fn sub(&mut self, op0: Gpw, op1: Mem) {
self.emit(SUB16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpd, Mem> for Assembler<'a> {
fn sub(&mut self, op0: Gpd, op1: Mem) {
self.emit(SUB32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpq, Mem> for Assembler<'a> {
fn sub(&mut self, op0: Gpq, op1: Mem) {
self.emit(SUB64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<GpbLo, Imm> for Assembler<'a> {
fn sub(&mut self, op0: GpbLo, op1: Imm) {
self.emit(SUB8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpw, Imm> for Assembler<'a> {
fn sub(&mut self, op0: Gpw, op1: Imm) {
self.emit(SUB16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpd, Imm> for Assembler<'a> {
fn sub(&mut self, op0: Gpd, op1: Imm) {
self.emit(SUB32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Gpq, Imm> for Assembler<'a> {
fn sub(&mut self, op0: Gpq, op1: Imm) {
self.emit(SUB64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> SubEmitter<Mem, Imm> for Assembler<'a> {
fn sub(&mut self, op0: Mem, op1: Imm) {
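// Note: a bare `Mem` destination does not encode an operand size, so
// this variant emits the 8-bit form (SUB8MI).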
self.emit(SUB8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `SWAPGS` (SWAPGS).
/// SWAPGS exchanges the current GS base register value with the value contained in MSR address C0000102H (IA32_KERNEL_GS_BASE). The SWAPGS instruction is a privileged instruction intended for use by system software.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SWAPGS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait SwapgsEmitter {
fn swapgs(&mut self);
}
impl<'a> SwapgsEmitter for Assembler<'a> {
fn swapgs(&mut self) {
self.emit(SWAPGS, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `SYSCALL` (SYSCALL).
/// SYSCALL invokes an OS system-call handler at privilege level 0. It does so by loading RIP from the IA32_LSTAR MSR (after saving the address of the instruction following SYSCALL into RCX). (The WRMSR instruction ensures that the IA32_LSTAR MSR always contains a canonical address.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSCALL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
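///
/// A hedged sketch of a Linux x86-64 `exit(0)` call; the `mov` emitter
/// and the register values are assumptions, not definitions from this
/// file:
///
/// ```text
/// asm.mov(rax, imm(60)); // Linux syscall number for exit
/// asm.mov(rdi, imm(0));  // exit status
/// asm.syscall();         // clobbers RCX (return RIP) and R11 (RFLAGS)
/// ```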
pub trait SyscallEmitter {
fn syscall(&mut self);
}
impl<'a> SyscallEmitter for Assembler<'a> {
fn syscall(&mut self) {
self.emit(SYSCALL, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `SYSRET` (SYSRET).
/// SYSRET is a companion instruction to the SYSCALL instruction. It returns from an OS system-call handler to user code at privilege level 3. It does so by loading RIP from RCX and loading RFLAGS from R11. With a 64-bit operand size, SYSRET remains in 64-bit mode; otherwise, it enters compatibility mode and only the low 32 bits of the registers are loaded.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSRET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait SysretEmitter {
fn sysret(&mut self);
}
impl<'a> SysretEmitter for Assembler<'a> {
fn sysret(&mut self) {
self.emit(SYSRET, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `TCMMIMFP16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait Tcmmimfp16psEmitter<A, B, C> {
fn tcmmimfp16ps(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Tcmmimfp16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tcmmimfp16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TCMMIMFP16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TCMMRLFP16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait Tcmmrlfp16psEmitter<A, B, C> {
fn tcmmrlfp16ps(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Tcmmrlfp16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tcmmrlfp16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TCMMRLFP16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TDPBF16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait Tdpbf16psEmitter<A, B, C> {
fn tdpbf16ps(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Tdpbf16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tdpbf16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TDPBF16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TDPBSSD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait TdpbssdEmitter<A, B, C> {
fn tdpbssd(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> TdpbssdEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tdpbssd(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TDPBSSDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TDPBSUD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait TdpbsudEmitter<A, B, C> {
fn tdpbsud(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> TdpbsudEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tdpbsud(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TDPBSUDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TDPBUSD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait TdpbusdEmitter<A, B, C> {
fn tdpbusd(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> TdpbusdEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tdpbusd(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TDPBUSDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TDPBUUD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait TdpbuudEmitter<A, B, C> {
fn tdpbuud(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> TdpbuudEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tdpbuud(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TDPBUUDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TDPFP16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
pub trait Tdpfp16psEmitter<A, B, C> {
fn tdpfp16ps(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Tdpfp16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
fn tdpfp16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
self.emit(TDPFP16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `TEST` (TEST).
/// Computes the bit-wise logical AND of the first operand (source 1 operand) and the second operand (source 2 operand) and sets the SF, ZF, and PF status flags according to the result. The result is then discarded.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TEST.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, Gpd |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, Gpq |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, Gpw |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Gpd |
/// | 11 | Mem, Gpq |
/// | 12 | Mem, Gpw |
/// | 13 | Mem, Imm |
/// +----+--------------+
/// ```
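///
/// A minimal usage sketch of the common zero test (`rax` and `al` are
/// illustrative; `setz` is defined earlier in this module):
///
/// ```text
/// asm.test(rax, rax); // ZF = 1 iff rax == 0; rax itself is untouched
/// asm.setz(al);       // al = (rax == 0) as a 0/1 byte
/// ```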
pub trait TestEmitter<A, B> {
fn test(&mut self, op0: A, op1: B);
}
impl<'a> TestEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn test(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(TEST8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Mem, GpbLo> for Assembler<'a> {
fn test(&mut self, op0: Mem, op1: GpbLo) {
self.emit(TEST8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Gpw, Gpw> for Assembler<'a> {
fn test(&mut self, op0: Gpw, op1: Gpw) {
self.emit(TEST16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Mem, Gpw> for Assembler<'a> {
fn test(&mut self, op0: Mem, op1: Gpw) {
self.emit(TEST16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Gpd, Gpd> for Assembler<'a> {
fn test(&mut self, op0: Gpd, op1: Gpd) {
self.emit(TEST32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Mem, Gpd> for Assembler<'a> {
fn test(&mut self, op0: Mem, op1: Gpd) {
self.emit(TEST32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Gpq, Gpq> for Assembler<'a> {
fn test(&mut self, op0: Gpq, op1: Gpq) {
self.emit(TEST64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Mem, Gpq> for Assembler<'a> {
fn test(&mut self, op0: Mem, op1: Gpq) {
self.emit(TEST64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<GpbLo, Imm> for Assembler<'a> {
fn test(&mut self, op0: GpbLo, op1: Imm) {
self.emit(TEST8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Gpw, Imm> for Assembler<'a> {
fn test(&mut self, op0: Gpw, op1: Imm) {
self.emit(TEST16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Gpd, Imm> for Assembler<'a> {
fn test(&mut self, op0: Gpd, op1: Imm) {
self.emit(TEST32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Gpq, Imm> for Assembler<'a> {
fn test(&mut self, op0: Gpq, op1: Imm) {
self.emit(TEST64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> TestEmitter<Mem, Imm> for Assembler<'a> {
fn test(&mut self, op0: Mem, op1: Imm) {
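// Note: a bare `Mem` operand does not encode an operand size, so this
// variant emits the 8-bit form (TEST8MI).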
self.emit(TEST8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `TILELOADD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Tmm, Mem |
/// +---+----------+
/// ```
pub trait TileloaddEmitter<A, B> {
fn tileloadd(&mut self, op0: A, op1: B);
}
impl<'a> TileloaddEmitter<Tmm, Mem> for Assembler<'a> {
fn tileloadd(&mut self, op0: Tmm, op1: Mem) {
self.emit(TILELOADDRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `TILELOADDT1`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Tmm, Mem |
/// +---+----------+
/// ```
pub trait Tileloaddt1Emitter<A, B> {
fn tileloaddt1(&mut self, op0: A, op1: B);
}
impl<'a> Tileloaddt1Emitter<Tmm, Mem> for Assembler<'a> {
fn tileloaddt1(&mut self, op0: Tmm, op1: Mem) {
self.emit(TILELOADDT1RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `TILERELEASE` (TILERELEASE).
/// This instruction returns TILECFG and TILEDATA to the INIT state.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TILERELEASE.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait TilereleaseEmitter {
fn tilerelease(&mut self);
}
impl<'a> TilereleaseEmitter for Assembler<'a> {
fn tilerelease(&mut self) {
self.emit(TILERELEASE, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `TILESTORED`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Tmm |
/// +---+----------+
/// ```
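///
/// A sketch of a full tile round trip using emitters defined nearby; it
/// assumes the tile palette was configured first (via LDTILECFG) and
/// that `tmm0`, `tmm1`, `tmm2`, `src`, and `dst` are illustrative
/// operand values:
///
/// ```text
/// asm.tileloadd(tmm0, src);        // load the accumulator tile
/// asm.tdpbf16ps(tmm0, tmm1, tmm2); // tmm0 += tmm1 * tmm2 (BF16 dot-products)
/// asm.tilestored(dst, tmm0);       // write the result tile back to memory
/// asm.tilerelease();               // return tile state to INIT
/// ```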
pub trait TilestoredEmitter<A, B> {
fn tilestored(&mut self, op0: A, op1: B);
}
impl<'a> TilestoredEmitter<Mem, Tmm> for Assembler<'a> {
fn tilestored(&mut self, op0: Mem, op1: Tmm) {
self.emit(TILESTOREDMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `TILEZERO`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Tmm |
/// +---+----------+
/// ```
pub trait TilezeroEmitter<A> {
fn tilezero(&mut self, op0: A);
}
impl<'a> TilezeroEmitter<Tmm> for Assembler<'a> {
fn tilezero(&mut self, op0: Tmm) {
self.emit(TILEZEROR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `UD0`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait Ud0Emitter<A, B> {
fn ud0(&mut self, op0: A, op1: B);
}
impl<'a> Ud0Emitter<Gpw, Gpw> for Assembler<'a> {
fn ud0(&mut self, op0: Gpw, op1: Gpw) {
self.emit(UD0_16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud0Emitter<Gpw, Mem> for Assembler<'a> {
fn ud0(&mut self, op0: Gpw, op1: Mem) {
self.emit(UD0_16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud0Emitter<Gpd, Gpd> for Assembler<'a> {
fn ud0(&mut self, op0: Gpd, op1: Gpd) {
self.emit(UD0_32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud0Emitter<Gpd, Mem> for Assembler<'a> {
fn ud0(&mut self, op0: Gpd, op1: Mem) {
self.emit(UD0_32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud0Emitter<Gpq, Gpq> for Assembler<'a> {
fn ud0(&mut self, op0: Gpq, op1: Gpq) {
self.emit(UD0_64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud0Emitter<Gpq, Mem> for Assembler<'a> {
fn ud0(&mut self, op0: Gpq, op1: Mem) {
self.emit(UD0_64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `UD1` (UD1).
/// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
pub trait Ud1Emitter<A, B> {
fn ud1(&mut self, op0: A, op1: B);
}
impl<'a> Ud1Emitter<Gpw, Gpw> for Assembler<'a> {
fn ud1(&mut self, op0: Gpw, op1: Gpw) {
self.emit(UD1_16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud1Emitter<Gpw, Mem> for Assembler<'a> {
fn ud1(&mut self, op0: Gpw, op1: Mem) {
self.emit(UD1_16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud1Emitter<Gpd, Gpd> for Assembler<'a> {
fn ud1(&mut self, op0: Gpd, op1: Gpd) {
self.emit(UD1_32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud1Emitter<Gpd, Mem> for Assembler<'a> {
fn ud1(&mut self, op0: Gpd, op1: Mem) {
self.emit(UD1_32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud1Emitter<Gpq, Gpq> for Assembler<'a> {
fn ud1(&mut self, op0: Gpq, op1: Gpq) {
self.emit(UD1_64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Ud1Emitter<Gpq, Mem> for Assembler<'a> {
fn ud1(&mut self, op0: Gpq, op1: Mem) {
self.emit(UD1_64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `UD2` (UD2).
/// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
pub trait Ud2Emitter {
fn ud2(&mut self);
}
impl<'a> Ud2Emitter for Assembler<'a> {
fn ud2(&mut self) {
self.emit(UD2, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `VADDPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaddphEmitter<A, B, C> {
fn vaddph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaddph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VADDPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaddph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VADDPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaddph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VADDPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaddph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VADDPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaddph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VADDPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaddphErEmitter<A, B, C> {
fn vaddph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaddph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VADDPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaddphMaskEmitter<A, B, C> {
fn vaddph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaddph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VADDPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaddph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VADDPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaddph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VADDPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaddph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VADDPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaddph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VADDPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaddphMaskErEmitter<A, B, C> {
fn vaddph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaddph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VADDPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaddphMaskzEmitter<A, B, C> {
fn vaddph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaddph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VADDPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaddph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VADDPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaddph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VADDPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaddph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VADDPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaddph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VADDPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaddphMaskzErEmitter<A, B, C> {
fn vaddph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaddph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VADDPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VaddshEmitter<A, B, C> {
fn vaddsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaddsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VADDSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VaddshErEmitter<A, B, C> {
fn vaddsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VaddshMaskEmitter<A, B, C> {
fn vaddsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaddsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VADDSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VaddshMaskErEmitter<A, B, C> {
fn vaddsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VaddshMaskzEmitter<A, B, C> {
fn vaddsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaddshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaddsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VADDSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VADDSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VaddshMaskzErEmitter<A, B, C> {
fn vaddsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaddshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaddsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VADDSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VAESDEC` (VAESDEC).
/// This instruction performs a single round of the AES decryption flow using the Equivalent Inverse Cipher, combining one, two, or four (depending on vector length) 128-bit data blocks (state) from the first source operand with the corresponding round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDEC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
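///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and the
/// register values are assumed to exist in the surrounding code. Note that the
/// wider `Ymm`/`Zmm` forms process two/four independent 128-bit states and
/// require the VAES extension.
///
/// ```ignore
/// // One middle round of AES decryption: xmm0 holds the state, xmm1 the
/// // (InvMixColumns-transformed) round key.
/// asm.vaesdec(xmm0, xmm0, xmm1);
/// // The same round applied to four independent states at once.
/// asm.vaesdec(zmm0, zmm0, zmm1);
/// ```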
pub trait VaesdecEmitter<A, B, C> {
fn vaesdec(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaesdecEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaesdec(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VAESDEC128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdecEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaesdec(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VAESDEC128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdecEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaesdec(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VAESDEC256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdecEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaesdec(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VAESDEC256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdecEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaesdec(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VAESDEC512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdecEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaesdec(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VAESDEC512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VAESDECLAST` (VAESDECLAST).
/// This instruction performs the last round of the AES decryption flow using the Equivalent Inverse Cipher, combining one, two, or four (depending on vector length) 128-bit data blocks (state) from the first source operand with the corresponding round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDECLAST.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaesdeclastEmitter<A, B, C> {
fn vaesdeclast(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaesdeclastEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaesdeclast(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VAESDECLAST128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdeclastEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaesdeclast(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VAESDECLAST128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdeclastEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaesdeclast(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VAESDECLAST256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdeclastEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaesdeclast(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VAESDECLAST256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdeclastEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaesdeclast(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VAESDECLAST512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesdeclastEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaesdeclast(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VAESDECLAST512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VAESENC` (VAESENC).
/// This instruction performs a single round of an AES encryption flow, combining one, two, or four (depending on vector length) 128-bit data blocks (state) from the first source operand with the corresponding round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VaesencEmitter<A, B, C> {
fn vaesenc(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaesencEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaesenc(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VAESENC128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesencEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaesenc(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VAESENC128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesencEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaesenc(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VAESENC256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesencEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaesenc(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VAESENC256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesencEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaesenc(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VAESENC512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesencEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaesenc(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VAESENC512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VAESENCLAST` (VAESENCLAST).
/// This instruction performs the last round of an AES encryption flow, combining one, two, or four (depending on vector length) 128-bit data blocks (state) from the first source operand with the corresponding round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENCLAST.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
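///
/// A sketch of a full AES-128 encryption flow using this emitter together with
/// `vaesenc` (illustrative only, not compiled): `asm`, the state register
/// `xmm0`, and the hypothetical `round_keys` array of `Xmm` values are assumed
/// to be set up by the surrounding code, as is a `vpxor` emitter for the
/// initial AddRoundKey.
///
/// ```ignore
/// asm.vpxor(xmm0, xmm0, round_keys[0]);        // initial AddRoundKey
/// for i in 1..10 {
///     asm.vaesenc(xmm0, xmm0, round_keys[i]);  // rounds 1..=9
/// }
/// asm.vaesenclast(xmm0, xmm0, round_keys[10]); // final round (no MixColumns)
/// ```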
pub trait VaesenclastEmitter<A, B, C> {
fn vaesenclast(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaesenclastEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vaesenclast(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VAESENCLAST128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesenclastEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vaesenclast(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VAESENCLAST128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesenclastEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vaesenclast(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VAESENCLAST256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesenclastEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vaesenclast(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VAESENCLAST256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesenclastEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vaesenclast(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VAESENCLAST512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaesenclastEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vaesenclast(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VAESENCLAST512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VAESIMC` (VAESIMC).
/// Performs the InvMixColumns transformation on the source operand and stores the result in the destination operand. The destination operand is an XMM register. The source operand can be an XMM register or a 128-bit memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESIMC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// +---+----------+
/// ```
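///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and the
/// registers are assumed. This is the standard way to prepare decryption round
/// keys for `vaesdec`, which expects them in the Equivalent Inverse Cipher
/// (InvMixColumns-transformed) form; the first and last round keys are used
/// unchanged.
///
/// ```ignore
/// asm.vaesimc(xmm2, xmm1); // xmm2 = InvMixColumns(encryption round key in xmm1)
/// ```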
pub trait VaesimcEmitter<A, B> {
fn vaesimc(&mut self, op0: A, op1: B);
}
impl<'a> VaesimcEmitter<Xmm, Xmm> for Assembler<'a> {
fn vaesimc(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VAESIMCRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VaesimcEmitter<Xmm, Mem> for Assembler<'a> {
fn vaesimc(&mut self, op0: Xmm, op1: Mem) {
self.emit(VAESIMCRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VAESKEYGENASSIST` (VAESKEYGENASSIST).
/// Assists in expanding the AES cipher key by computing steps towards generating a round key for encryption, using 128-bit data specified in the source operand and an 8-bit round constant specified as an immediate, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESKEYGENASSIST.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// +---+---------------+
/// ```
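///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and `xmm1`
/// are assumed, and `Imm::from(0x01)` is a hypothetical stand-in for however
/// this crate builds an immediate. The immediate is the AES round constant
/// (RCON) for the key-expansion step being computed.
///
/// ```ignore
/// // One helper step of AES-128 key expansion from the previous round key.
/// asm.vaeskeygenassist(xmm2, xmm1, Imm::from(0x01));
/// ```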
pub trait VaeskeygenassistEmitter<A, B, C> {
fn vaeskeygenassist(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VaeskeygenassistEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vaeskeygenassist(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VAESKEYGENASSISTRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VaeskeygenassistEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vaeskeygenassist(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VAESKEYGENASSISTRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VBCSTNEBF162PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
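///
/// A minimal usage sketch (illustrative only, not compiled): `asm` is assumed,
/// and `Mem::base(rax)` is a hypothetical stand-in for however this crate
/// builds a memory operand.
///
/// ```ignore
/// // Load one BF16 element from [rax], convert it to FP32, and broadcast it
/// // into every lane of ymm0.
/// asm.vbcstnebf162ps(ymm0, Mem::base(rax));
/// ```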
pub trait Vbcstnebf162psEmitter<A, B> {
fn vbcstnebf162ps(&mut self, op0: A, op1: B);
}
impl<'a> Vbcstnebf162psEmitter<Xmm, Mem> for Assembler<'a> {
fn vbcstnebf162ps(&mut self, op0: Xmm, op1: Mem) {
self.emit(VBCSTNEBF162PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vbcstnebf162psEmitter<Ymm, Mem> for Assembler<'a> {
fn vbcstnebf162ps(&mut self, op0: Ymm, op1: Mem) {
self.emit(VBCSTNEBF162PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VBCSTNESH2PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
pub trait Vbcstnesh2psEmitter<A, B> {
fn vbcstnesh2ps(&mut self, op0: A, op1: B);
}
impl<'a> Vbcstnesh2psEmitter<Xmm, Mem> for Assembler<'a> {
fn vbcstnesh2ps(&mut self, op0: Xmm, op1: Mem) {
self.emit(VBCSTNESH2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vbcstnesh2psEmitter<Ymm, Mem> for Assembler<'a> {
fn vbcstnesh2ps(&mut self, op0: Ymm, op1: Mem) {
self.emit(VBCSTNESH2PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCMPPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// | 3 | KReg, Ymm, Mem, Imm |
/// | 4 | KReg, Ymm, Ymm, Imm |
/// | 5 | KReg, Zmm, Mem, Imm |
/// | 6 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
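///
/// A minimal usage sketch (illustrative only, not compiled): `asm`, the vector
/// registers, and the mask register `k1` are assumed, and `Imm::from(0)` is a
/// hypothetical immediate constructor. The immediate selects the compare
/// predicate using the usual AVX-512 encoding (0 = EQ_OQ, 1 = LT_OS, ...).
///
/// ```ignore
/// // k1[i] = (zmm0[i] == zmm1[i]) for each of the 32 FP16 lanes.
/// asm.vcmpph(k1, zmm0, zmm1, Imm::from(0));
/// ```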
pub trait VcmpphEmitter<A, B, C, D> {
fn vcmpph(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpphEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
fn vcmpph(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VCMPPH128KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
fn vcmpph(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VCMPPH128KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
fn vcmpph(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VCMPPH256KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
fn vcmpph(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VCMPPH256KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
fn vcmpph(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VCMPPH512KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
fn vcmpph(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VCMPPH512KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// | 3 | KReg, Ymm, Mem, Imm |
/// | 4 | KReg, Ymm, Ymm, Imm |
/// | 5 | KReg, Zmm, Mem, Imm |
/// | 6 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpphMaskEmitter<A, B, C, D> {
fn vcmpph_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpphMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
fn vcmpph_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VCMPPH128KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
fn vcmpph_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VCMPPH128KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphMaskEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
fn vcmpph_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VCMPPH256KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphMaskEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
fn vcmpph_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VCMPPH256KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphMaskEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
fn vcmpph_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VCMPPH512KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpphMaskEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
fn vcmpph_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VCMPPH512KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpphMaskSaeEmitter<A, B, C, D> {
fn vcmpph_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpphMaskSaeEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
fn vcmpph_mask_sae(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VCMPPH512KRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpphSaeEmitter<A, B, C, D> {
fn vcmpph_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpphSaeEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
fn vcmpph_sae(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VCMPPH512KRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpshEmitter<A, B, C, D> {
fn vcmpsh(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpshEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
fn vcmpsh(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VCMPSHKRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpshEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
fn vcmpsh(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VCMPSHKRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpshMaskEmitter<A, B, C, D> {
fn vcmpsh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpshMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
fn vcmpsh_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VCMPSHKRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VcmpshMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
fn vcmpsh_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VCMPSHKRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpshMaskSaeEmitter<A, B, C, D> {
fn vcmpsh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpshMaskSaeEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
fn vcmpsh_mask_sae(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VCMPSHKRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCMPSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
pub trait VcmpshSaeEmitter<A, B, C, D> {
fn vcmpsh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VcmpshSaeEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
fn vcmpsh_sae(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VCMPSHKRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VCOMISH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// +---+----------+
/// ```
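///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and the
/// registers are assumed. Like `COMISS`, this compares the low lanes and sets
/// ZF/PF/CF, so an ordinary conditional branch can consume the result.
///
/// ```ignore
/// asm.vcomish(xmm0, xmm1); // compare the low FP16 lanes, set EFLAGS
/// // ... follow with e.g. a `ja`/`jb`/`je` emitter, assumed available.
/// ```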
pub trait VcomishEmitter<A, B> {
fn vcomish(&mut self, op0: A, op1: B);
}
impl<'a> VcomishEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcomish(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCOMISHRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VcomishEmitter<Xmm, Mem> for Assembler<'a> {
fn vcomish(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCOMISHRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCOMISH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Xmm |
/// +---+----------+
/// ```
pub trait VcomishSaeEmitter<A, B> {
fn vcomish_sae(&mut self, op0: A, op1: B);
}
impl<'a> VcomishSaeEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcomish_sae(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCOMISHRR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTDQ2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
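///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and the
/// registers are assumed. Because each element shrinks from 32 to 16 bits, the
/// destination register is one size class below the source.
///
/// ```ignore
/// asm.vcvtdq2ph(ymm0, zmm1); // 16 x i32 in zmm1 -> 16 x FP16 in ymm0
/// asm.vcvtdq2ph(xmm0, ymm1); // 8 x i32 in ymm1  -> 8 x FP16 in xmm0
/// ```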
pub trait Vcvtdq2phEmitter<A, B> {
fn vcvtdq2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtdq2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtdq2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTDQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtdq2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTDQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtdq2ph(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTDQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtdq2ph(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTDQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtdq2ph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTDQ2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTDQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtdq2phErEmitter<A, B> {
fn vcvtdq2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtdq2phErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtdq2ph_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTDQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTDQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtdq2phMaskEmitter<A, B> {
fn vcvtdq2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtdq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtdq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTDQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtdq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTDQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtdq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTDQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtdq2ph_mask(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTDQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtdq2ph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTDQ2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTDQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtdq2phMaskErEmitter<A, B> {
fn vcvtdq2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtdq2phMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtdq2ph_mask_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTDQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTDQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtdq2phMaskzEmitter<A, B> {
fn vcvtdq2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtdq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtdq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTDQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtdq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTDQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtdq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTDQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtdq2ph_maskz(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTDQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtdq2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtdq2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTDQ2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTDQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtdq2phMaskzErEmitter<A, B> {
fn vcvtdq2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtdq2phMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtdq2ph_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTDQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTNEEBF162PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
pub trait Vcvtneebf162psEmitter<A, B> {
fn vcvtneebf162ps(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtneebf162psEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtneebf162ps(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTNEEBF162PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtneebf162psEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtneebf162ps(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTNEEBF162PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTNEEPH2PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
pub trait Vcvtneeph2psEmitter<A, B> {
fn vcvtneeph2ps(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtneeph2psEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtneeph2ps(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTNEEPH2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtneeph2psEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtneeph2ps(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTNEEPH2PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTNEOBF162PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
pub trait Vcvtneobf162psEmitter<A, B> {
fn vcvtneobf162ps(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtneobf162psEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtneobf162ps(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTNEOBF162PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtneobf162psEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtneobf162ps(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTNEOBF162PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTNEOPH2PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
pub trait Vcvtneoph2psEmitter<A, B> {
fn vcvtneoph2ps(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtneoph2psEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtneoph2ps(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTNEOPH2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtneoph2psEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtneoph2ps(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTNEOPH2PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPD2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtpd2phEmitter<A, B> {
fn vcvtpd2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtpd2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtpd2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPD2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtpd2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPD2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtpd2ph(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTPD2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtpd2ph(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTPD2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPD2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtpd2phErEmitter<A, B> {
fn vcvtpd2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtpd2phErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtpd2ph_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTPD2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPD2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtpd2phMaskEmitter<A, B> {
fn vcvtpd2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtpd2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPD2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPD2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTPD2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phMaskEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTPD2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPD2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtpd2phMaskErEmitter<A, B> {
fn vcvtpd2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtpd2phMaskErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtpd2ph_mask_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTPD2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPD2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtpd2phMaskzEmitter<A, B> {
fn vcvtpd2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPD2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPD2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTPD2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTPD2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPD2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtpd2phMaskzErEmitter<A, B> {
fn vcvtpd2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtpd2phMaskzErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtpd2ph_maskz_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTPD2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2DQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
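///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and the
/// registers are assumed. This is the inverse size relationship of
/// `vcvtdq2ph`: each element grows from 16 to 32 bits, so the destination is
/// one size class above the source.
///
/// ```ignore
/// asm.vcvtph2dq(zmm0, ymm1); // 16 x FP16 in ymm1 -> 16 x i32 in zmm0
/// asm.vcvtph2dq(ymm0, xmm1); // 8 x FP16 in xmm1  -> 8 x i32 in ymm0
/// ```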
pub trait Vcvtph2dqEmitter<A, B> {
fn vcvtph2dq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2dqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2dq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2DQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2dq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2DQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2dq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2DQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2dq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2DQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2dq(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2DQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2dq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2DQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2DQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2dqErEmitter<A, B> {
fn vcvtph2dq_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2dqErEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2dq_er(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2DQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2DQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2dqMaskEmitter<A, B> {
fn vcvtph2dq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2dqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2dq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2DQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2dq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2DQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2dq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2DQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2dq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2DQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2dq_mask(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2DQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2dq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2DQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2DQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2dqMaskErEmitter<A, B> {
fn vcvtph2dq_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2dqMaskErEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2dq_mask_er(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2DQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2DQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2dqMaskzEmitter<A, B> {
fn vcvtph2dq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2dqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2dq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2DQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2dq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2DQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2dq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2DQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2dq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2DQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2dq_maskz(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2DQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2dqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2dq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2DQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2DQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2dqMaskzErEmitter<A, B> {
fn vcvtph2dq_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2dqMaskzErEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2dq_maskz_er(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2DQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2pdEmitter<A, B> {
fn vcvtph2pd(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2pdEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2pd(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2PD128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2pd(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2PD128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2pd(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2PD256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2pd(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2PD256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2pd(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2PD512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2pd(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2PD512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PD_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2pdMaskEmitter<A, B> {
fn vcvtph2pd_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2pdMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2PD128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2pd_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2PD128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2pd_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2PD256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2pd_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2PD256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_mask(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2PD512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2pd_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2PD512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PD_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2pdMaskSaeEmitter<A, B> {
fn vcvtph2pd_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2pdMaskSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_mask_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2PD512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PD_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2pdMaskzEmitter<A, B> {
fn vcvtph2pd_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2pdMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2PD128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2pd_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2PD128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2pd_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2PD256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2pd_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2PD256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_maskz(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2PD512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2pdMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2pd_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2PD512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PD_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2pdMaskzSaeEmitter<A, B> {
fn vcvtph2pd_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2pdMaskzSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_maskz_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2PD512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PD_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2pdSaeEmitter<A, B> {
fn vcvtph2pd_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2pdSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2pd_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2PD512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PSX` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
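///
/// A minimal usage sketch (illustrative only, not compiled): `asm` and the
/// registers are assumed.
///
/// ```ignore
/// // Widen the 8 FP16 values in xmm1 to 8 FP32 values in ymm0.
/// asm.vcvtph2psx(ymm0, xmm1);
/// ```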
pub trait Vcvtph2psxEmitter<A, B> {
fn vcvtph2psx(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2psxEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2psx(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2PSX128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2psx(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2PSX128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2psx(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2PSX256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2psx(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2PSX256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2psx(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2PSX512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2psx(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2PSX512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PSX_MASK` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2psxMaskEmitter<A, B> {
fn vcvtph2psx_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2psxMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2psx_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2PSX128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2psx_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2PSX128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2psx_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2PSX256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2psx_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2PSX256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2psx_mask(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2PSX512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2psx_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2PSX512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PSX_MASK_SAE` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2psxMaskSaeEmitter<A, B> {
fn vcvtph2psx_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2psxMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2psx_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2PSX512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PSX_MASKZ` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2psxMaskzEmitter<A, B> {
fn vcvtph2psx_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2psxMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2psx_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2PSX128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2psx_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2PSX128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2psx_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2PSX256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2psx_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2PSX256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2psx_maskz(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2PSX512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2psxMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2psx_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2PSX512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PSX_MASKZ_SAE` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2psxMaskzSaeEmitter<A, B> {
fn vcvtph2psx_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2psxMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2psx_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2PSX512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2PSX_SAE` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2psxSaeEmitter<A, B> {
fn vcvtph2psx_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2psxSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2psx_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2PSX512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2QQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2qqEmitter<A, B> {
fn vcvtph2qq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2qqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2qq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2qq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2qq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2qq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2qq(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2qq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2QQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2qqErEmitter<A, B> {
fn vcvtph2qq_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2qqErEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_er(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2QQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2QQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2qqMaskEmitter<A, B> {
fn vcvtph2qq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2qq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2qq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2qq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_mask(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2qq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2QQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2qqMaskErEmitter<A, B> {
fn vcvtph2qq_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2qqMaskErEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_mask_er(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2QQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2QQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2qqMaskzEmitter<A, B> {
fn vcvtph2qq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2qq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_maskz(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2QQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2qqMaskzErEmitter<A, B> {
fn vcvtph2qq_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2qqMaskzErEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2qq_maskz_er(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2QQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UDQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
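///
/// Example (a sketch under the same assumptions as the other examples in this
/// file: `asm` is an assumed `&mut Assembler`, register names are hypothetical):
///
/// ```ignore
/// // Convert packed FP16 values to unsigned 32-bit integers; each output
/// // element is twice as wide as its input.
/// asm.vcvtph2udq(ymm0, xmm1); // 8 elements
/// asm.vcvtph2udq(zmm0, ymm1); // 16 elements
/// ```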
pub trait Vcvtph2udqEmitter<A, B> {
fn vcvtph2udq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2udqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2udq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UDQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2udq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UDQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2udq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2UDQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2udq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UDQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2udq(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2UDQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2udq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UDQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UDQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2udqErEmitter<A, B> {
fn vcvtph2udq_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2udqErEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2udq_er(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2UDQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UDQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2udqMaskEmitter<A, B> {
fn vcvtph2udq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2udqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2udq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UDQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2udq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UDQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2udq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2UDQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2udq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UDQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2udq_mask(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2UDQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2udq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UDQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UDQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2udqMaskErEmitter<A, B> {
fn vcvtph2udq_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2udqMaskErEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2udq_mask_er(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2UDQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UDQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2udqMaskzEmitter<A, B> {
fn vcvtph2udq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2udqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2udq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UDQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2udq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UDQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2udq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2UDQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2udq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UDQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2udq_maskz(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2UDQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2udqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2udq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UDQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UDQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvtph2udqMaskzErEmitter<A, B> {
fn vcvtph2udq_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2udqMaskzErEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvtph2udq_maskz_er(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTPH2UDQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UQQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
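///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Convert packed FP16 values to unsigned 64-bit integers.
/// asm.vcvtph2uqq(zmm0, xmm1); // 8 elements from the full 128 bits of xmm1
/// ```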
pub trait Vcvtph2uqqEmitter<A, B> {
fn vcvtph2uqq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uqqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UQQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2uqq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UQQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2uqq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2UQQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2uqq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UQQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2UQQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2uqq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UQQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UQQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uqqErEmitter<A, B> {
fn vcvtph2uqq_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uqqErEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_er(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2UQQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UQQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uqqMaskEmitter<A, B> {
fn vcvtph2uqq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uqqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UQQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2uqq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UQQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2UQQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2uqq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UQQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_mask(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2UQQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2uqq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UQQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UQQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uqqMaskErEmitter<A, B> {
fn vcvtph2uqq_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uqqMaskErEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_mask_er(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2UQQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UQQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uqqMaskzEmitter<A, B> {
fn vcvtph2uqq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uqqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UQQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2uqq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UQQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTPH2UQQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2uqq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UQQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_maskz(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2UQQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uqqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2uqq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UQQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UQQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uqqMaskzErEmitter<A, B> {
fn vcvtph2uqq_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uqqMaskzErEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvtph2uqq_maskz_er(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTPH2UQQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UW`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
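///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Convert packed FP16 values to unsigned 16-bit integers. FP16 and word
/// // elements have the same width, so source and destination match in size.
/// asm.vcvtph2uw(zmm0, zmm1); // 32 elements
/// ```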
pub trait Vcvtph2uwEmitter<A, B> {
fn vcvtph2uw(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uwEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2uw(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2uw(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtph2uw(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTPH2UW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2uw(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2uw(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2UW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2uw(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UW_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uwErEmitter<A, B> {
fn vcvtph2uw_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uwErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2uw_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2UW512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UW_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uwMaskEmitter<A, B> {
fn vcvtph2uw_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uwMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2uw_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2uw_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtph2uw_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTPH2UW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2uw_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2uw_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2UW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2uw_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UW_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uwMaskErEmitter<A, B> {
fn vcvtph2uw_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uwMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2uw_mask_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2UW512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UW_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uwMaskzEmitter<A, B> {
fn vcvtph2uw_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uwMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2uw_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2UW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2uw_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2UW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtph2uw_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTPH2UW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2uw_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2UW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2uw_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2UW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2uwMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2uw_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2UW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2UW_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2uwMaskzErEmitter<A, B> {
fn vcvtph2uw_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2uwMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2uw_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2UW512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2W`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
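///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Convert packed FP16 values to signed 16-bit integers; like VCVTPH2UW,
/// // source and destination vectors have the same width.
/// asm.vcvtph2w(ymm0, ymm1); // 16 elements
/// ```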
pub trait Vcvtph2wEmitter<A, B> {
fn vcvtph2w(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2wEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2w(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2W128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2w(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2W128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtph2w(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTPH2W256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2w(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2W256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2w(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2W512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2w(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2W512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2W_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2wErEmitter<A, B> {
fn vcvtph2w_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2wErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2w_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2W512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2W_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2wMaskEmitter<A, B> {
fn vcvtph2w_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2wMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2w_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2W128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2w_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2W128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtph2w_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTPH2W256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2w_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2W256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2w_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2W512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2w_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2W512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2W_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2wMaskErEmitter<A, B> {
fn vcvtph2w_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2wMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2w_mask_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2W512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2W_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2wMaskzEmitter<A, B> {
fn vcvtph2w_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2wMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtph2w_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPH2W128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtph2w_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPH2W128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtph2w_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTPH2W256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtph2w_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPH2W256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2w_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2W512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtph2wMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtph2w_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTPH2W512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPH2W_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtph2wMaskzErEmitter<A, B> {
fn vcvtph2w_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtph2wMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtph2w_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTPH2W512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPS2PHX` (VCVTPS2PHX).
/// This instruction converts packed single-precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
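///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Narrow packed FP32 values to FP16; the destination holds half as many
/// // bits as the source.
/// asm.vcvtps2phx(ymm0, zmm1); // 16 FP32 values -> 16 FP16 values
/// ```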
pub trait Vcvtps2phxEmitter<A, B> {
fn vcvtps2phx(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtps2phxEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtps2phx(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPS2PHX128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtps2phx(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPS2PHX128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtps2phx(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTPS2PHX256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtps2phx(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTPS2PHX512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtps2phx(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPS2PHX512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPS2PHX_ER` (VCVTPS2PHX).
/// This instruction converts packed single-precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtps2phxErEmitter<A, B> {
fn vcvtps2phx_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtps2phxErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtps2phx_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTPS2PHX512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPS2PHX_MASK` (VCVTPS2PHX).
/// This instruction converts packed single-precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtps2phxMaskEmitter<A, B> {
fn vcvtps2phx_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtps2phxMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtps2phx_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPS2PHX128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtps2phx_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPS2PHX128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtps2phx_mask(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTPS2PHX256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtps2phx_mask(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTPS2PHX512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtps2phx_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPS2PHX512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPS2PHX_MASK_ER` (VCVTPS2PHX).
/// This instruction converts packed single-precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtps2phxMaskErEmitter<A, B> {
fn vcvtps2phx_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtps2phxMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtps2phx_mask_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTPS2PHX512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPS2PHX_MASKZ` (VCVTPS2PHX).
/// This instruction converts packed single-precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtps2phxMaskzEmitter<A, B> {
fn vcvtps2phx_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtps2phxMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtps2phx_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTPS2PHX128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtps2phx_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTPS2PHX128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtps2phx_maskz(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTPS2PHX256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtps2phx_maskz(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTPS2PHX512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtps2phxMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtps2phx_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTPS2PHX512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTPS2PHX_MASKZ_ER` (VCVTPS2PHX).
/// This instruction converts packed single-precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtps2phxMaskzErEmitter<A, B> {
fn vcvtps2phx_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtps2phxMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtps2phx_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTPS2PHX512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTQQ2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
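///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Narrow packed signed 64-bit integers to FP16; the result always fits
/// // in an Xmm destination.
/// asm.vcvtqq2ph(xmm0, zmm1); // 8 elements -> 8 FP16 values (128 bits)
/// ```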
pub trait Vcvtqq2phEmitter<A, B> {
fn vcvtqq2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtqq2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtqq2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTQQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtqq2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTQQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtqq2ph(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTQQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtqq2ph(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTQQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTQQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtqq2phErEmitter<A, B> {
fn vcvtqq2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtqq2phErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtqq2ph_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTQQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTQQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtqq2phMaskEmitter<A, B> {
fn vcvtqq2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtqq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTQQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTQQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTQQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phMaskEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTQQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTQQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtqq2phMaskErEmitter<A, B> {
fn vcvtqq2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtqq2phMaskErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtqq2ph_mask_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTQQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTQQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtqq2phMaskzEmitter<A, B> {
fn vcvtqq2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTQQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTQQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTQQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTQQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTQQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtqq2phMaskzErEmitter<A, B> {
fn vcvtqq2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtqq2phMaskzErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtqq2ph_maskz_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTQQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTSD2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
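///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Convert the low FP64 value in xmm2 to an FP16 value in the low element
/// // of xmm0; bits 127:16 of xmm0 are taken from xmm1.
/// asm.vcvtsd2sh(xmm0, xmm1, xmm2);
/// ```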
pub trait Vcvtsd2shEmitter<A, B, C> {
fn vcvtsd2sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsd2shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsd2sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSD2SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsd2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsd2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSD2SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSD2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsd2shErEmitter<A, B, C> {
fn vcvtsd2sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsd2shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsd2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSD2SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSD2SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsd2shMaskEmitter<A, B, C> {
fn vcvtsd2sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsd2shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsd2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSD2SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsd2shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsd2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSD2SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSD2SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsd2shMaskErEmitter<A, B, C> {
fn vcvtsd2sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsd2shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsd2sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSD2SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSD2SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsd2shMaskzEmitter<A, B, C> {
fn vcvtsd2sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsd2shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsd2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSD2SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsd2shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsd2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSD2SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSD2SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsd2shMaskzErEmitter<A, B, C> {
fn vcvtsd2sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsd2shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsd2sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSD2SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
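///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Convert the low FP16 value in xmm2 to an FP64 value in the low element
/// // of xmm0; bits 127:64 of xmm0 are taken from xmm1.
/// asm.vcvtsh2sd(xmm0, xmm1, xmm2);
/// ```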
pub trait Vcvtsh2sdEmitter<A, B, C> {
fn vcvtsh2sd(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2sdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2sd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsh2sdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsh2sd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSH2SDRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SD_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2sdMaskEmitter<A, B, C> {
fn vcvtsh2sd_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2sdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2sd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SDRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsh2sdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsh2sd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSH2SDRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SD_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2sdMaskSaeEmitter<A, B, C> {
fn vcvtsh2sd_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2sdMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2sd_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SDRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SD_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2sdMaskzEmitter<A, B, C> {
fn vcvtsh2sd_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2sdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2sd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SDRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsh2sdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsh2sd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSH2SDRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SD_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2sdMaskzSaeEmitter<A, B, C> {
fn vcvtsh2sd_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2sdMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2sd_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SDRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SD_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2sdSaeEmitter<A, B, C> {
fn vcvtsh2sd_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2sdSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2sd_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SDRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SI`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
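///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`; `eax`/`rax` and
/// `xmm1` stand in for whatever register values this crate exposes):
///
/// ```ignore
/// // Convert the low FP16 value in xmm1 to a signed integer; the width of
/// // the destination register selects the 32- or 64-bit form.
/// asm.vcvtsh2si(eax, xmm1); // Gpd variant
/// asm.vcvtsh2si(rax, xmm1); // Gpq variant
/// ```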
pub trait Vcvtsh2siEmitter<A, B> {
fn vcvtsh2si(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtsh2siEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvtsh2si(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTSH2SI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2siEmitter<Gpd, Mem> for Assembler<'a> {
fn vcvtsh2si(&mut self, op0: Gpd, op1: Mem) {
self.emit(VCVTSH2SI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2siEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvtsh2si(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTSH2SI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2siEmitter<Gpq, Mem> for Assembler<'a> {
fn vcvtsh2si(&mut self, op0: Gpq, op1: Mem) {
self.emit(VCVTSH2SI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTSH2SI_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtsh2siErEmitter<A, B> {
fn vcvtsh2si_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtsh2siErEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvtsh2si_er(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTSH2SI32RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2siErEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvtsh2si_er(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTSH2SI64RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTSH2SS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
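///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`, register names
/// are hypothetical):
///
/// ```ignore
/// // Convert the low FP16 value in xmm2 to an FP32 value in the low element
/// // of xmm0; bits 127:32 of xmm0 are taken from xmm1.
/// asm.vcvtsh2ss(xmm0, xmm1, xmm2);
/// ```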
pub trait Vcvtsh2ssEmitter<A, B, C> {
fn vcvtsh2ss(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2ssEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2ss(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsh2ssEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsh2ss(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSH2SSRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SS_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2ssMaskEmitter<A, B, C> {
fn vcvtsh2ss_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2ssMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2ss_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SSRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsh2ssMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsh2ss_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSH2SSRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SS_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2ssMaskSaeEmitter<A, B, C> {
fn vcvtsh2ss_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2ssMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2ss_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SSRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SS_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2ssMaskzEmitter<A, B, C> {
fn vcvtsh2ss_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2ssMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2ss_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SSRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsh2ssMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsh2ss_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSH2SSRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SS_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2ssMaskzSaeEmitter<A, B, C> {
fn vcvtsh2ss_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2ssMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2ss_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SSRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2SS_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtsh2ssSaeEmitter<A, B, C> {
fn vcvtsh2ss_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsh2ssSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtsh2ss_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSH2SSRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSH2USI`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
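///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`; `eax`/`rax` and
/// `xmm1` are hypothetical register values):
///
/// ```ignore
/// // Convert the low FP16 value in xmm1 to an unsigned integer; the width
/// // of the destination register selects the 32- or 64-bit form.
/// asm.vcvtsh2usi(eax, xmm1); // Gpd variant
/// asm.vcvtsh2usi(rax, xmm1); // Gpq variant
/// ```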
pub trait Vcvtsh2usiEmitter<A, B> {
fn vcvtsh2usi(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtsh2usiEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvtsh2usi(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTSH2USI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2usiEmitter<Gpd, Mem> for Assembler<'a> {
fn vcvtsh2usi(&mut self, op0: Gpd, op1: Mem) {
self.emit(VCVTSH2USI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2usiEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvtsh2usi(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTSH2USI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2usiEmitter<Gpq, Mem> for Assembler<'a> {
fn vcvtsh2usi(&mut self, op0: Gpq, op1: Mem) {
self.emit(VCVTSH2USI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTSH2USI_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
pub trait Vcvtsh2usiErEmitter<A, B> {
fn vcvtsh2usi_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtsh2usiErEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvtsh2usi_er(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTSH2USI32RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtsh2usiErEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvtsh2usi_er(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTSH2USI64RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTSI2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// | 3 | Xmm, Xmm, Mem |
/// +---+---------------+
/// ```
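///
/// Example (a sketch; `asm` is an assumed `&mut Assembler`; `eax`/`rax`,
/// `xmm0`, and `xmm1` are hypothetical register values):
///
/// ```ignore
/// // Convert a signed integer to an FP16 value in the low element of xmm0;
/// // bits 127:16 of xmm0 are taken from xmm1.
/// asm.vcvtsi2sh(xmm0, xmm1, eax); // Gpd variant
/// asm.vcvtsi2sh(xmm0, xmm1, rax); // Gpq variant
/// ```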
pub trait Vcvtsi2shEmitter<A, B, C> {
fn vcvtsi2sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsi2shEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
fn vcvtsi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
self.emit(VCVTSI2SH32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsi2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtsi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSI2SH32RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsi2shEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
fn vcvtsi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
self.emit(VCVTSI2SH64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSI2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// +---+---------------+
/// ```
pub trait Vcvtsi2shErEmitter<A, B, C> {
fn vcvtsi2sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtsi2shErEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
fn vcvtsi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
self.emit(VCVTSI2SH32RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtsi2shErEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
fn vcvtsi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
self.emit(VCVTSI2SH64RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSS2SH`.
/// Converts the low FP32 value of the second source operand to an FP16 value in the low element of the destination; the upper elements are copied from the first source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtss2shEmitter<A, B, C> {
fn vcvtss2sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtss2shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtss2sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSS2SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtss2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtss2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSS2SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSS2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtss2shErEmitter<A, B, C> {
fn vcvtss2sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtss2shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtss2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSS2SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSS2SH_MASK`.
/// Merge-masked (`{k}`) form of `VCVTSS2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
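///
/// The masked form takes the same operand shapes as the unmasked
/// `vcvtss2sh`; only the selected opcode differs. A sketch with assumed
/// register values:
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvtss2sh(xmm0, xmm1, xmm2);      // VCVTSS2SHRRR
/// asm.vcvtss2sh_mask(xmm0, xmm1, xmm2); // VCVTSS2SHRRR_MASK
/// ```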
pub trait Vcvtss2shMaskEmitter<A, B, C> {
fn vcvtss2sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtss2shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtss2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSS2SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtss2shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtss2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSS2SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSS2SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtss2shMaskErEmitter<A, B, C> {
fn vcvtss2sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtss2shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtss2sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSS2SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSS2SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtss2shMaskzEmitter<A, B, C> {
fn vcvtss2sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtss2shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtss2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSS2SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtss2shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtss2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTSS2SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTSS2SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vcvtss2shMaskzErEmitter<A, B, C> {
fn vcvtss2sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtss2shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vcvtss2sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VCVTSS2SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTTPH2DQ`.
/// Converts packed FP16 values to signed doubleword integers using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
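///
/// Because a doubleword is twice as wide as an FP16 value, the register
/// source is one width category below the destination (the 512-bit form
/// reads a `Ymm`). A sketch with assumed register values:
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvttph2dq(xmm0, xmm1); // 4 lanes  -> VCVTTPH2DQ128RR
/// asm.vcvttph2dq(ymm0, xmm1); // 8 lanes  -> VCVTTPH2DQ256RR
/// asm.vcvttph2dq(zmm0, ymm1); // 16 lanes -> VCVTTPH2DQ512RR
/// ```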
pub trait Vcvttph2dqEmitter<A, B> {
fn vcvttph2dq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2dqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2dq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2DQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2dq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2DQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2dq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2DQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2dq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2DQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2dq(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2DQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2dq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2DQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2DQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2dqMaskEmitter<A, B> {
fn vcvttph2dq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2dqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2dq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2DQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2dq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2DQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2dq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2DQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2dq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2DQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2dq_mask(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2DQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2dq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2DQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2DQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2dqMaskSaeEmitter<A, B> {
fn vcvttph2dq_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2dqMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2dq_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2DQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2DQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2dqMaskzEmitter<A, B> {
fn vcvttph2dq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2dqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2dq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2DQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2dq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2DQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2dq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2DQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2dq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2DQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2dq_maskz(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2DQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2dqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2dq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2DQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2DQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2dqMaskzSaeEmitter<A, B> {
fn vcvttph2dq_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2dqMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2dq_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2DQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2DQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2dqSaeEmitter<A, B> {
fn vcvttph2dq_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2dqSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2dq_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2DQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2QQ`.
/// Converts packed FP16 values to signed quadword integers using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
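///
/// A quadword is four times as wide as an FP16 value, so even the 512-bit
/// form reads its eight source elements from an `Xmm`. A sketch with
/// assumed register values:
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvttph2qq(zmm0, xmm1); // 8 lanes -> VCVTTPH2QQ512RR
/// ```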
pub trait Vcvttph2qqEmitter<A, B> {
fn vcvttph2qq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2qqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2qq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2qq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2qq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2qq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2qq(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2qq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2QQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2qqMaskEmitter<A, B> {
fn vcvttph2qq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2qq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2qq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2qq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_mask(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2qq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2QQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2qqMaskSaeEmitter<A, B> {
fn vcvttph2qq_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2qqMaskSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_mask_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2QQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2QQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2qqMaskzEmitter<A, B> {
fn vcvttph2qq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2qq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_maskz(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2QQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2qqMaskzSaeEmitter<A, B> {
fn vcvttph2qq_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2qqMaskzSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_maskz_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2QQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2QQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2qqSaeEmitter<A, B> {
fn vcvttph2qq_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2qqSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2qq_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2QQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UDQ`.
/// Converts packed FP16 values to unsigned doubleword integers using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2udqEmitter<A, B> {
fn vcvttph2udq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2udqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2udq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UDQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2udq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UDQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2udq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2UDQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2udq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UDQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2udq(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2UDQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2udq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UDQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UDQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2udqMaskEmitter<A, B> {
fn vcvttph2udq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2udqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2udq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UDQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2udq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UDQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2udq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2UDQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2udq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UDQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2udq_mask(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2UDQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2udq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UDQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UDQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2udqMaskSaeEmitter<A, B> {
fn vcvttph2udq_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2udqMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2udq_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2UDQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UDQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2udqMaskzEmitter<A, B> {
fn vcvttph2udq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2udqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2udq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UDQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2udq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UDQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2udq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2UDQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2udq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UDQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2udq_maskz(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2UDQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2udqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2udq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UDQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UDQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2udqMaskzSaeEmitter<A, B> {
fn vcvttph2udq_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2udqMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2udq_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2UDQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UDQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
pub trait Vcvttph2udqSaeEmitter<A, B> {
fn vcvttph2udq_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2udqSaeEmitter<Zmm, Ymm> for Assembler<'a> {
fn vcvttph2udq_sae(&mut self, op0: Zmm, op1: Ymm) {
self.emit(VCVTTPH2UDQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UQQ`.
/// Converts packed FP16 values to unsigned quadword integers using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uqqEmitter<A, B> {
fn vcvttph2uqq(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uqqEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2uqq(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UQQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2uqq(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2UQQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2uqq(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UQQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2uqq(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UQQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UQQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uqqMaskEmitter<A, B> {
fn vcvttph2uqq_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uqqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2uqq_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UQQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_mask(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2UQQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2uqq_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UQQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_mask(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2uqq_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UQQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UQQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uqqMaskSaeEmitter<A, B> {
fn vcvttph2uqq_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uqqMaskSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_mask_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UQQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uqqMaskzEmitter<A, B> {
fn vcvttph2uqq_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uqqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2uqq_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UQQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_maskz(&mut self, op0: Ymm, op1: Xmm) {
self.emit(VCVTTPH2UQQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2uqq_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UQQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_maskz(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uqqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2uqq_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UQQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UQQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uqqMaskzSaeEmitter<A, B> {
fn vcvttph2uqq_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uqqMaskzSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_maskz_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UQQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uqqSaeEmitter<A, B> {
fn vcvttph2uqq_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uqqSaeEmitter<Zmm, Xmm> for Assembler<'a> {
fn vcvttph2uqq_sae(&mut self, op0: Zmm, op1: Xmm) {
self.emit(VCVTTPH2UQQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UW`.
/// Converts packed FP16 values to unsigned word integers using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
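///
/// Words and FP16 values are the same width, so source and destination
/// share a width category. A sketch with assumed register values:
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvttph2uw(ymm0, ymm1); // 16 lanes -> VCVTTPH2UW256RR
/// asm.vcvttph2uw(zmm0, zmm1); // 32 lanes -> VCVTTPH2UW512RR
/// ```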
pub trait Vcvttph2uwEmitter<A, B> {
fn vcvttph2uw(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uwEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2uw(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2uw(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvttph2uw(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTTPH2UW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2uw(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2uw(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2UW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2uw(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UW_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uwMaskEmitter<A, B> {
fn vcvttph2uw_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uwMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2uw_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2uw_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvttph2uw_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTTPH2UW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2uw_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2uw_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2UW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2uw_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UW_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uwMaskSaeEmitter<A, B> {
fn vcvttph2uw_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uwMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2uw_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2UW512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UW_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uwMaskzEmitter<A, B> {
fn vcvttph2uw_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uwMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2uw_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2UW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2uw_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2UW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvttph2uw_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTTPH2UW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2uw_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2UW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2uw_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2UW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2uwMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2uw_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2UW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UW_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uwMaskzSaeEmitter<A, B> {
fn vcvttph2uw_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uwMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2uw_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2UW512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2UW_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2uwSaeEmitter<A, B> {
fn vcvttph2uw_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2uwSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2uw_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2UW512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2W`.
/// Converts packed FP16 values to signed word integers using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2wEmitter<A, B> {
fn vcvttph2w(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2wEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2w(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2W128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2w(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2W128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvttph2w(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTTPH2W256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2w(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2W256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2w(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2W512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2w(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2W512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2W_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2wMaskEmitter<A, B> {
fn vcvttph2w_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2wMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2w_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2W128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2w_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2W128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvttph2w_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTTPH2W256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2w_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2W256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2w_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2W512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2w_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2W512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2W_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2wMaskSaeEmitter<A, B> {
fn vcvttph2w_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2wMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2w_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2W512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2W_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2wMaskzEmitter<A, B> {
fn vcvttph2w_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2wMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvttph2w_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTTPH2W128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvttph2w_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTTPH2W128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvttph2w_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTTPH2W256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvttph2w_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTTPH2W256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2w_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2W512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttph2wMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvttph2w_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTTPH2W512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2W_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2wMaskzSaeEmitter<A, B> {
fn vcvttph2w_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2wMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2w_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2W512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTPH2W_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvttph2wSaeEmitter<A, B> {
fn vcvttph2w_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttph2wSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvttph2w_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTTPH2W512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTSH2SI`.
/// Converts the low FP16 value in the source operand to a signed doubleword or quadword integer using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
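///
/// A minimal usage sketch (illustrative only; the register values are
/// assumed, not defined in this file):
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvttsh2si(eax, xmm1); // Gpd, Xmm -> VCVTTSH2SI32RR
/// asm.vcvttsh2si(rax, xmm1); // Gpq, Xmm -> VCVTTSH2SI64RR
/// ```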
pub trait Vcvttsh2siEmitter<A, B> {
fn vcvttsh2si(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttsh2siEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvttsh2si(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTTSH2SI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2siEmitter<Gpd, Mem> for Assembler<'a> {
fn vcvttsh2si(&mut self, op0: Gpd, op1: Mem) {
self.emit(VCVTTSH2SI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2siEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvttsh2si(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTTSH2SI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2siEmitter<Gpq, Mem> for Assembler<'a> {
fn vcvttsh2si(&mut self, op0: Gpq, op1: Mem) {
self.emit(VCVTTSH2SI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTSH2SI_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttsh2siSaeEmitter<A, B> {
fn vcvttsh2si_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttsh2siSaeEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvttsh2si_sae(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTTSH2SI32RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2siSaeEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvttsh2si_sae(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTTSH2SI64RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTSH2USI`.
/// Converts the low FP16 value in the source operand to an unsigned doubleword or quadword integer using truncation.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttsh2usiEmitter<A, B> {
fn vcvttsh2usi(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttsh2usiEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvttsh2usi(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTTSH2USI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2usiEmitter<Gpd, Mem> for Assembler<'a> {
fn vcvttsh2usi(&mut self, op0: Gpd, op1: Mem) {
self.emit(VCVTTSH2USI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2usiEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvttsh2usi(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTTSH2USI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2usiEmitter<Gpq, Mem> for Assembler<'a> {
fn vcvttsh2usi(&mut self, op0: Gpq, op1: Mem) {
self.emit(VCVTTSH2USI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTTSH2USI_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
pub trait Vcvttsh2usiSaeEmitter<A, B> {
fn vcvttsh2usi_sae(&mut self, op0: A, op1: B);
}
impl<'a> Vcvttsh2usiSaeEmitter<Gpd, Xmm> for Assembler<'a> {
fn vcvttsh2usi_sae(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VCVTTSH2USI32RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvttsh2usiSaeEmitter<Gpq, Xmm> for Assembler<'a> {
fn vcvttsh2usi_sae(&mut self, op0: Gpq, op1: Xmm) {
self.emit(VCVTTSH2USI64RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUDQ2PH`.
/// Converts packed unsigned doubleword integers to packed FP16 values.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
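///
/// Narrowing doublewords to FP16 halves the data width, so the destination
/// is one width category below the register source. A sketch with assumed
/// register values:
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvtudq2ph(xmm0, ymm1); // 8 lanes  -> VCVTUDQ2PH256RR
/// asm.vcvtudq2ph(ymm0, zmm1); // 16 lanes -> VCVTUDQ2PH512RR
/// ```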
pub trait Vcvtudq2phEmitter<A, B> {
fn vcvtudq2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtudq2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtudq2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUDQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtudq2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUDQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtudq2ph(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTUDQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtudq2ph(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTUDQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtudq2ph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTUDQ2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUDQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtudq2phErEmitter<A, B> {
fn vcvtudq2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtudq2phErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtudq2ph_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTUDQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUDQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtudq2phMaskEmitter<A, B> {
fn vcvtudq2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtudq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtudq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUDQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtudq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUDQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtudq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTUDQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtudq2ph_mask(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTUDQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtudq2ph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTUDQ2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUDQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtudq2phMaskErEmitter<A, B> {
fn vcvtudq2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtudq2phMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtudq2ph_mask_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTUDQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUDQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtudq2phMaskzEmitter<A, B> {
fn vcvtudq2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtudq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtudq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUDQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtudq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUDQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtudq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTUDQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtudq2ph_maskz(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTUDQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtudq2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtudq2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTUDQ2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUDQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtudq2phMaskzErEmitter<A, B> {
fn vcvtudq2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtudq2phMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
fn vcvtudq2ph_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
self.emit(VCVTUDQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUQQ2PH`.
/// Converts packed unsigned quadword integers to packed FP16 values.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuqq2phEmitter<A, B> {
fn vcvtuqq2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuqq2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUQQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUQQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTUQQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTUQQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUQQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuqq2phErEmitter<A, B> {
fn vcvtuqq2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuqq2phErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtuqq2ph_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTUQQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUQQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuqq2phMaskEmitter<A, B> {
fn vcvtuqq2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUQQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUQQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTUQQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTUQQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUQQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuqq2phMaskErEmitter<A, B> {
fn vcvtuqq2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuqq2phMaskErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtuqq2ph_mask_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTUQQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUQQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuqq2phMaskzEmitter<A, B> {
fn vcvtuqq2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUQQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUQQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
self.emit(VCVTUQQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTUQQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUQQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuqq2phMaskzErEmitter<A, B> {
fn vcvtuqq2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuqq2phMaskzErEmitter<Xmm, Zmm> for Assembler<'a> {
fn vcvtuqq2ph_maskz_er(&mut self, op0: Xmm, op1: Zmm) {
self.emit(VCVTUQQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUSI2SH`.
/// Converts an unsigned doubleword or quadword integer to an FP16 value in the low element of the destination; the upper elements are copied from the first source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// | 3 | Xmm, Xmm, Mem |
/// +---+---------------+
/// ```
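///
/// A minimal usage sketch (illustrative only; the register values are
/// assumed, not defined in this file):
///
/// ```text
/// // sketch: `asm` and the registers are assumed values
/// asm.vcvtusi2sh(xmm0, xmm1, eax); // Xmm, Xmm, Gpd -> VCVTUSI2SH32RRR
/// asm.vcvtusi2sh(xmm0, xmm1, rax); // Xmm, Xmm, Gpq -> VCVTUSI2SH64RRR
/// ```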
pub trait Vcvtusi2shEmitter<A, B, C> {
fn vcvtusi2sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtusi2shEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
fn vcvtusi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
self.emit(VCVTUSI2SH32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtusi2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vcvtusi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VCVTUSI2SH32RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtusi2shEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
fn vcvtusi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
self.emit(VCVTUSI2SH64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTUSI2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// +---+---------------+
/// ```
pub trait Vcvtusi2shErEmitter<A, B, C> {
fn vcvtusi2sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vcvtusi2shErEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
fn vcvtusi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
self.emit(VCVTUSI2SH32RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vcvtusi2shErEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
fn vcvtusi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
self.emit(VCVTUSI2SH64RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VCVTUW2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
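/// # Example
///
/// A hypothetical sketch of the packed form; operand names are placeholders:
///
/// ```ignore
/// // Convert 32 packed unsigned 16-bit integers in zmm1 to 32 FP16 values
/// // in zmm0.
/// asm.vcvtuw2ph(zmm0, zmm1);
/// ```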
pub trait Vcvtuw2phEmitter<A, B> {
fn vcvtuw2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuw2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtuw2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUW2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtuw2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUW2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtuw2ph(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTUW2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtuw2ph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTUW2PH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtuw2ph(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTUW2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtuw2ph(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTUW2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUW2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuw2phErEmitter<A, B> {
fn vcvtuw2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuw2phErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtuw2ph_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTUW2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUW2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuw2phMaskEmitter<A, B> {
fn vcvtuw2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuw2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtuw2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUW2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtuw2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUW2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtuw2ph_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTUW2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtuw2ph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTUW2PH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtuw2ph_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTUW2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtuw2ph_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTUW2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUW2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuw2phMaskErEmitter<A, B> {
fn vcvtuw2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuw2phMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtuw2ph_mask_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTUW2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUW2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuw2phMaskzEmitter<A, B> {
fn vcvtuw2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuw2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtuw2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTUW2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtuw2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTUW2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtuw2ph_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTUW2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtuw2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTUW2PH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtuw2ph_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTUW2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtuw2phMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtuw2ph_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTUW2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTUW2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtuw2phMaskzErEmitter<A, B> {
fn vcvtuw2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtuw2phMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtuw2ph_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTUW2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTW2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtw2phEmitter<A, B> {
fn vcvtw2ph(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtw2phEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtw2ph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTW2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtw2ph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTW2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtw2ph(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTW2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtw2ph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTW2PH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtw2ph(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTW2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtw2ph(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTW2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTW2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtw2phErEmitter<A, B> {
fn vcvtw2ph_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtw2phErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtw2ph_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTW2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTW2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtw2phMaskEmitter<A, B> {
fn vcvtw2ph_mask(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtw2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtw2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTW2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtw2ph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTW2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtw2ph_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTW2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtw2ph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTW2PH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtw2ph_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTW2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtw2ph_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTW2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTW2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtw2phMaskErEmitter<A, B> {
fn vcvtw2ph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtw2phMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtw2ph_mask_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTW2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTW2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtw2phMaskzEmitter<A, B> {
fn vcvtw2ph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtw2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vcvtw2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VCVTW2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vcvtw2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VCVTW2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vcvtw2ph_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VCVTW2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vcvtw2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VCVTW2PH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtw2ph_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTW2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Vcvtw2phMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vcvtw2ph_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VCVTW2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VCVTW2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait Vcvtw2phMaskzErEmitter<A, B> {
fn vcvtw2ph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> Vcvtw2phMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vcvtw2ph_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VCVTW2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VDIVPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
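/// # Example
///
/// A hypothetical sketch (placeholder operands): the packed form divides
/// lane-wise, `op0[i] = op1[i] / op2[i]` over FP16 lanes.
///
/// ```ignore
/// // ymm0 = ymm1 / ymm2, element by element (16 FP16 lanes).
/// asm.vdivph(ymm0, ymm1, ymm2);
/// ```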
pub trait VdivphEmitter<A, B, C> {
fn vdivph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vdivph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VDIVPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vdivph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VDIVPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vdivph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VDIVPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vdivph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VDIVPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vdivph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VDIVPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
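/// # Example
///
/// The `_ER` variants select the embedded-rounding (static rounding) form of
/// the 512-bit encoding. The rounding mode itself is not part of this call
/// signature and is assumed here to come from the surrounding assembler
/// state. A hypothetical sketch:
///
/// ```ignore
/// // 512-bit divide with an embedded rounding override.
/// asm.vdivph_er(zmm0, zmm1, zmm2);
/// ```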
pub trait VdivphErEmitter<A, B, C> {
fn vdivph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vdivph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VDIVPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
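/// # Example
///
/// The `_MASK` variants emit the merge-masking encoding (unselected
/// destination lanes are preserved), while the `_MASKZ` variants below emit
/// zero-masking (unselected lanes are zeroed). The write-mask register is not
/// a parameter of these methods and is assumed to come from assembler state.
///
/// ```ignore
/// // Merge-masked divide: lanes with a clear mask bit keep zmm0's old value.
/// asm.vdivph_mask(zmm0, zmm1, zmm2);
/// ```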
pub trait VdivphMaskEmitter<A, B, C> {
fn vdivph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vdivph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VDIVPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vdivph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VDIVPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vdivph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VDIVPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vdivph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VDIVPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vdivph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VDIVPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VdivphMaskErEmitter<A, B, C> {
fn vdivph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vdivph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VDIVPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VdivphMaskzEmitter<A, B, C> {
fn vdivph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vdivph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VDIVPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vdivph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VDIVPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vdivph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VDIVPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vdivph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VDIVPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vdivph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VDIVPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VdivphMaskzErEmitter<A, B, C> {
fn vdivph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vdivph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VDIVPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
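/// # Example
///
/// A hypothetical sketch of the scalar form:
///
/// ```ignore
/// // Low FP16 lane only: xmm0[15:0] = xmm1[15:0] / xmm2[15:0]; the upper
/// // bits of xmm0 are copied from xmm1.
/// asm.vdivsh(xmm0, xmm1, xmm2);
/// ```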
pub trait VdivshEmitter<A, B, C> {
fn vdivsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vdivsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VDIVSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VdivshErEmitter<A, B, C> {
fn vdivsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VdivshMaskEmitter<A, B, C> {
fn vdivsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vdivsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VDIVSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VdivshMaskErEmitter<A, B, C> {
fn vdivsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VdivshMaskzEmitter<A, B, C> {
fn vdivsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VdivshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vdivsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VDIVSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VDIVSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VdivshMaskzErEmitter<A, B, C> {
fn vdivsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VdivshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vdivsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VDIVSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VERR` (VERR).
/// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
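/// # Example
///
/// A hypothetical sketch; note that architecturally the operand is a 16-bit
/// selector (`r/m16`), which this binding exposes through the `Gpd` register
/// form:
///
/// ```ignore
/// // ZF is set if the selector in eax names a readable segment; follow with
/// // a conditional branch to act on the result.
/// asm.verr(eax);
/// ```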
pub trait VerrEmitter<A> {
fn verr(&mut self, op0: A);
}
impl<'a> VerrEmitter<Gpd> for Assembler<'a> {
fn verr(&mut self, op0: Gpd) {
self.emit(VERRR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> VerrEmitter<Mem> for Assembler<'a> {
fn verr(&mut self, op0: Mem) {
self.emit(VERRM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `VERW` (VERW).
/// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
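/// # Example
///
/// Mirrors the `VERR` sketch above, but tests writability:
///
/// ```ignore
/// // ZF is set if the selector in eax names a writable data segment.
/// asm.verw(eax);
/// ```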
pub trait VerwEmitter<A> {
fn verw(&mut self, op0: A);
}
impl<'a> VerwEmitter<Gpd> for Assembler<'a> {
fn verw(&mut self, op0: Gpd) {
self.emit(VERWR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
impl<'a> VerwEmitter<Mem> for Assembler<'a> {
fn verw(&mut self, op0: Mem) {
self.emit(VERWM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
}
}
/// `VFCMADDCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
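/// # Example
///
/// Each consecutive pair of FP16 lanes is treated as one complex number
/// (real part in the even lane, imaginary part in the odd lane); the leading
/// `C` marks the conjugate form of the complex multiply-accumulate. A
/// hypothetical sketch:
///
/// ```ignore
/// // Complex FMA over FP16 pairs: zmm0 accumulates the products of the
/// // complex elements of zmm1 and (conjugated) zmm2.
/// asm.vfcmaddcph(zmm0, zmm1, zmm2);
/// ```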
pub trait VfcmaddcphEmitter<A, B, C> {
fn vfcmaddcph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMADDCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfcmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFCMADDCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfcmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFCMADDCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMADDCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfcmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFCMADDCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcphErEmitter<A, B, C> {
fn vfcmaddcph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmaddcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMADDCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcphMaskEmitter<A, B, C> {
fn vfcmaddcph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMADDCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfcmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFCMADDCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfcmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFCMADDCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMADDCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfcmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFCMADDCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcphMaskErEmitter<A, B, C> {
fn vfcmaddcph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmaddcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMADDCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcphMaskzEmitter<A, B, C> {
fn vfcmaddcph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMADDCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfcmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFCMADDCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfcmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFCMADDCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMADDCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfcmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFCMADDCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcphMaskzErEmitter<A, B, C> {
fn vfcmaddcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmaddcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMADDCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcshEmitter<A, B, C> {
fn vfcmaddcsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMADDCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcshErEmitter<A, B, C> {
fn vfcmaddcsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcshMaskEmitter<A, B, C> {
fn vfcmaddcsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMADDCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcshMaskErEmitter<A, B, C> {
fn vfcmaddcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcshMaskzEmitter<A, B, C> {
fn vfcmaddcsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmaddcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMADDCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMADDCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmaddcshMaskzErEmitter<A, B, C> {
fn vfcmaddcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmaddcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmaddcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMADDCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
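/// # Example
///
/// Like `VFCMADDCPH` above, the operands are vectors of complex FP16 pairs,
/// but this form multiplies without accumulating into the destination. A
/// hypothetical sketch:
///
/// ```ignore
/// // Conjugate complex multiply over FP16 pairs: xmm0 = product of the
/// // complex elements of xmm1 and (conjugated) xmm2.
/// asm.vfcmulcph(xmm0, xmm1, xmm2);
/// ```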
pub trait VfcmulcphEmitter<A, B, C> {
fn vfcmulcph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMULCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfcmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFCMULCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfcmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFCMULCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMULCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfcmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFCMULCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmulcphErEmitter<A, B, C> {
fn vfcmulcph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmulcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMULCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmulcphMaskEmitter<A, B, C> {
fn vfcmulcph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMULCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfcmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFCMULCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfcmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFCMULCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMULCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfcmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFCMULCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmulcphMaskErEmitter<A, B, C> {
fn vfcmulcph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmulcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMULCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmulcphMaskzEmitter<A, B, C> {
fn vfcmulcph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMULCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfcmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFCMULCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfcmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFCMULCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMULCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfcmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFCMULCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfcmulcphMaskzErEmitter<A, B, C> {
fn vfcmulcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfcmulcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFCMULCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmulcshEmitter<A, B, C> {
fn vfcmulcsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMULCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmulcshErEmitter<A, B, C> {
fn vfcmulcsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmulcshMaskEmitter<A, B, C> {
fn vfcmulcsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMULCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmulcshMaskErEmitter<A, B, C> {
fn vfcmulcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmulcshMaskzEmitter<A, B, C> {
fn vfcmulcsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfcmulcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfcmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFCMULCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFCMULCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfcmulcshMaskzErEmitter<A, B, C> {
fn vfcmulcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfcmulcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfcmulcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFCMULCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
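/// # Example
///
/// In the FMA form digits, `132` names the operand order of the multiply-add:
/// `op0 = op0 * op2 + op1` per FP16 lane (the `213` and `231` forms permute
/// the roles accordingly). A hypothetical sketch:
///
/// ```ignore
/// // Per lane: zmm0 = zmm0 * zmm2 + zmm1.
/// asm.vfmadd132ph(zmm0, zmm1, zmm2);
/// ```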
pub trait Vfmadd132phEmitter<A, B, C> {
fn vfmadd132ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132phErEmitter<A, B, C> {
fn vfmadd132ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132phMaskEmitter<A, B, C> {
fn vfmadd132ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132phMaskErEmitter<A, B, C> {
fn vfmadd132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132phMaskzEmitter<A, B, C> {
fn vfmadd132ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132phMaskzErEmitter<A, B, C> {
fn vfmadd132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132shEmitter<A, B, C> {
fn vfmadd132sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132shErEmitter<A, B, C> {
fn vfmadd132sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132shMaskEmitter<A, B, C> {
fn vfmadd132sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132shMaskErEmitter<A, B, C> {
fn vfmadd132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
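///
/// `_MASKZ` is the zeroing-masking counterpart of `_MASK`: elements whose
/// write-mask bit is clear are zeroed instead of merged. Sketch under the
/// same assumptions as the `_MASK` example above:
///
/// ```ignore
/// asm.vfmadd132sh_maskz(xmm0, xmm1, xmm2);
/// ```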
pub trait Vfmadd132shMaskzEmitter<A, B, C> {
fn vfmadd132sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD132SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd132shMaskzErEmitter<A, B, C> {
fn vfmadd132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213PH` (Fused Multiply-Add of Packed FP16 Values).
/// Multiplies the packed FP16 values of the first and second operands, adds the packed FP16 values of the third operand, and stores the results in the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
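///
/// The digits in the mnemonic name the operand ordering of the fused
/// multiply-add: `213` computes `op0 = op1 * op0 + op2`, whereas `132`
/// computes `op0 = op0 * op2 + op1` and `231` computes `op0 = op1 * op2 + op0`.
/// A minimal sketch (illustrative `asm` and register names):
///
/// ```ignore
/// // Packed fp16: zmm0 = zmm1 * zmm0 + zmm2 across all 32 lanes.
/// asm.vfmadd213ph(zmm0, zmm1, zmm2);
/// ```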
pub trait Vfmadd213phEmitter<A, B, C> {
fn vfmadd213ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213phErEmitter<A, B, C> {
fn vfmadd213ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213phMaskEmitter<A, B, C> {
fn vfmadd213ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213phMaskErEmitter<A, B, C> {
fn vfmadd213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213phMaskzEmitter<A, B, C> {
fn vfmadd213ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213phMaskzErEmitter<A, B, C> {
fn vfmadd213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213SH` (Fused Multiply-Add of Scalar FP16 Values).
/// Multiplies the low FP16 values of the first and second operands, adds the low FP16 value of the third operand, and stores the result in the low FP16 element of the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213shEmitter<A, B, C> {
fn vfmadd213sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213shErEmitter<A, B, C> {
fn vfmadd213sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213shMaskEmitter<A, B, C> {
fn vfmadd213sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213shMaskErEmitter<A, B, C> {
fn vfmadd213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213shMaskzEmitter<A, B, C> {
fn vfmadd213sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd213shMaskzErEmitter<A, B, C> {
fn vfmadd213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231PH` (Fused Multiply-Add of Packed FP16 Values).
/// Multiplies the packed FP16 values of the second and third operands, adds the packed FP16 values of the first operand, and stores the results in the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
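///
/// The `231` form is the accumulator-friendly ordering: the destination is
/// added to the product of the two sources. Sketch (illustrative names):
///
/// ```ignore
/// // zmm0 += zmm1 * zmm2, lane-wise over 32 fp16 lanes.
/// asm.vfmadd231ph(zmm0, zmm1, zmm2);
/// ```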
pub trait Vfmadd231phEmitter<A, B, C> {
fn vfmadd231ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231phErEmitter<A, B, C> {
fn vfmadd231ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231phMaskEmitter<A, B, C> {
fn vfmadd231ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231phMaskErEmitter<A, B, C> {
fn vfmadd231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231phMaskzEmitter<A, B, C> {
fn vfmadd231ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADD231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADD231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADD231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231phMaskzErEmitter<A, B, C> {
fn vfmadd231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmadd231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADD231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231SH` (Fused Multiply-Add of Scalar FP16 Values).
/// Multiplies the low FP16 values of the second and third operands, adds the low FP16 value of the first operand, and stores the result in the low FP16 element of the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231shEmitter<A, B, C> {
fn vfmadd231sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231shErEmitter<A, B, C> {
fn vfmadd231sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231shMaskEmitter<A, B, C> {
fn vfmadd231sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231shMaskErEmitter<A, B, C> {
fn vfmadd231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231shMaskzEmitter<A, B, C> {
fn vfmadd231sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmadd231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADD231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADD231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmadd231shMaskzErEmitter<A, B, C> {
fn vfmadd231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmadd231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmadd231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADD231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCPH` (Complex Multiply and Accumulate FP16 Values).
/// Treats each pair of adjacent FP16 elements as one complex number and performs a complex multiply-accumulate into the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
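///
/// Each pair of adjacent FP16 elements is treated as one complex number
/// (real part in the even lane, imaginary part in the odd lane). Sketch
/// (illustrative names):
///
/// ```ignore
/// // Accumulate the complex products of zmm1 and zmm2 into zmm0
/// // (16 complex fp16 values per Zmm register).
/// asm.vfmaddcph(zmm0, zmm1, zmm2);
/// ```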
pub trait VfmaddcphEmitter<A, B, C> {
fn vfmaddcph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmaddcphErEmitter<A, B, C> {
fn vfmaddcph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmaddcphMaskEmitter<A, B, C> {
fn vfmaddcph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmaddcphMaskErEmitter<A, B, C> {
fn vfmaddcph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmaddcphMaskzEmitter<A, B, C> {
fn vfmaddcph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmaddcphMaskzErEmitter<A, B, C> {
fn vfmaddcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCSH` (Complex Multiply and Accumulate Scalar FP16 Values).
/// Performs a complex multiply-accumulate on the low pair of FP16 elements, treating the pair as one complex number.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
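///
/// Scalar counterpart of `VFMADDCPH`: only the low complex FP16 pair
/// participates. Sketch (illustrative names):
///
/// ```ignore
/// // xmm0's low complex value is accumulated with the complex product
/// // of the low complex values of xmm1 and xmm2.
/// asm.vfmaddcsh(xmm0, xmm1, xmm2);
/// ```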
pub trait VfmaddcshEmitter<A, B, C> {
fn vfmaddcsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmaddcshErEmitter<A, B, C> {
fn vfmaddcsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmaddcshMaskEmitter<A, B, C> {
fn vfmaddcsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmaddcshMaskErEmitter<A, B, C> {
fn vfmaddcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmaddcshMaskzEmitter<A, B, C> {
fn vfmaddcsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmaddcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmaddcshMaskzErEmitter<A, B, C> {
fn vfmaddcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmaddcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB132PH` (Fused Multiply-Alternating Add/Subtract of Packed FP16 Values).
/// Multiplies the packed FP16 values of the first and third operands, then adds the second operand's values on odd-indexed lanes and subtracts them on even-indexed lanes.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
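///
/// After the multiply, odd-indexed lanes add the third term while
/// even-indexed lanes subtract it. Sketch (illustrative names):
///
/// ```ignore
/// // Even lanes: zmm0 = zmm0 * zmm2 - zmm1; odd lanes: zmm0 = zmm0 * zmm2 + zmm1.
/// asm.vfmaddsub132ph(zmm0, zmm1, zmm2);
/// ```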
pub trait Vfmaddsub132phEmitter<A, B, C> {
fn vfmaddsub132ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub132phErEmitter<A, B, C> {
fn vfmaddsub132ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub132phMaskEmitter<A, B, C> {
fn vfmaddsub132ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub132phMaskErEmitter<A, B, C> {
fn vfmaddsub132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub132phMaskzEmitter<A, B, C> {
fn vfmaddsub132ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub132phMaskzErEmitter<A, B, C> {
fn vfmaddsub132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB213PH` (Fused Multiply-Alternating Add/Subtract of Packed FP16 Values).
/// Multiplies the packed FP16 values of the first and second operands, then adds the third operand's values on odd-indexed lanes and subtracts them on even-indexed lanes.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub213phEmitter<A, B, C> {
fn vfmaddsub213ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub213phErEmitter<A, B, C> {
fn vfmaddsub213ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub213phMaskEmitter<A, B, C> {
fn vfmaddsub213ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub213phMaskErEmitter<A, B, C> {
fn vfmaddsub213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub213phMaskzEmitter<A, B, C> {
fn vfmaddsub213ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub213phMaskzErEmitter<A, B, C> {
fn vfmaddsub213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB231PH` (Fused Multiply-Alternating Add/Subtract of Packed FP16 Values).
/// Multiplies the packed FP16 values of the second and third operands, then adds the first operand's values on odd-indexed lanes and subtracts them on even-indexed lanes.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub231phEmitter<A, B, C> {
fn vfmaddsub231ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub231phErEmitter<A, B, C> {
fn vfmaddsub231ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub231phMaskEmitter<A, B, C> {
fn vfmaddsub231ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub231phMaskErEmitter<A, B, C> {
fn vfmaddsub231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub231phMaskzEmitter<A, B, C> {
fn vfmaddsub231ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmaddsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMADDSUB231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmaddsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMADDSUB231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmaddsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMADDSUB231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmaddsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMADDSUB231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmaddsub231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmaddsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMADDSUB231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMADDSUB231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmaddsub231phMaskzErEmitter<A, B, C> {
fn vfmaddsub231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmaddsub231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmaddsub231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMADDSUB231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132PH` (Fused Multiply-Subtract of Packed FP16 Values).
/// Multiplies the packed FP16 values of the first and third operands, subtracts the packed FP16 values of the second operand, and stores the results in the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
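///
/// A minimal usage sketch (hedged: `asm` is an `Assembler` built elsewhere;
/// `xmm0`/`xmm1`/`xmm2`, `ymm0`/`ymm1`, and `mem` are illustrative operand
/// values, not items exported by this module):
///
/// ```text
/// // Per FP16 lane: xmm0 = xmm0 * xmm2 - xmm1 (the "132" operand order).
/// asm.vfmsub132ph(xmm0, xmm1, xmm2);
/// // Memory form: the third (multiplier) operand is loaded from memory.
/// asm.vfmsub132ph(ymm0, ymm1, mem);
/// ```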
pub trait Vfmsub132phEmitter<A, B, C> {
fn vfmsub132ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
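///
/// Embedded rounding (`_er`) overrides MXCSR with a static rounding mode and
/// is only encodable at the full 512-bit vector length, so only the `Zmm`
/// variant exists. Sketch (same illustrative names as above; how the rounding
/// mode itself is selected is left to the surrounding `Assembler` API):
///
/// ```text
/// asm.vfmsub132ph_er(zmm0, zmm1, zmm2);
/// ```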
pub trait Vfmsub132phErEmitter<A, B, C> {
fn vfmsub132ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132phMaskEmitter<A, B, C> {
fn vfmsub132ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132phMaskErEmitter<A, B, C> {
fn vfmsub132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132phMaskzEmitter<A, B, C> {
fn vfmsub132ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132phMaskzErEmitter<A, B, C> {
fn vfmsub132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132SH`.
/// Fused multiply-subtract of the low scalar FP16 value: `op0 = op0 * op2 - op1`
/// on the low element; the destination's upper elements are preserved.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
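///
/// A sketch for the scalar form (illustrative names; `asm` built elsewhere):
///
/// ```text
/// // Low FP16 element only: xmm0[0] = xmm0[0] * xmm2[0] - xmm1[0];
/// // the upper elements of xmm0 are left unchanged.
/// asm.vfmsub132sh(xmm0, xmm1, xmm2);
/// ```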
pub trait Vfmsub132shEmitter<A, B, C> {
fn vfmsub132sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132shErEmitter<A, B, C> {
fn vfmsub132sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132shMaskEmitter<A, B, C> {
fn vfmsub132sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132shMaskErEmitter<A, B, C> {
fn vfmsub132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132shMaskzEmitter<A, B, C> {
fn vfmsub132sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB132SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub132shMaskzErEmitter<A, B, C> {
fn vfmsub132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213PH`.
/// Fused multiply-subtract of packed FP16 values: each lane computes
/// `op0 = op1 * op0 - op2`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
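///
/// Sketch (illustrative names; `asm` built elsewhere):
///
/// ```text
/// // Per FP16 lane: ymm0 = ymm1 * ymm0 - ymm2 (the "213" operand order).
/// asm.vfmsub213ph(ymm0, ymm1, ymm2);
/// ```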
pub trait Vfmsub213phEmitter<A, B, C> {
fn vfmsub213ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213phErEmitter<A, B, C> {
fn vfmsub213ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213phMaskEmitter<A, B, C> {
fn vfmsub213ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213phMaskErEmitter<A, B, C> {
fn vfmsub213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213phMaskzEmitter<A, B, C> {
fn vfmsub213ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213phMaskzErEmitter<A, B, C> {
fn vfmsub213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213SH`.
/// Fused multiply-subtract of the low scalar FP16 value: `op0 = op1 * op0 - op2`
/// on the low element; the destination's upper elements are preserved.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213shEmitter<A, B, C> {
fn vfmsub213sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213shErEmitter<A, B, C> {
fn vfmsub213sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213shMaskEmitter<A, B, C> {
fn vfmsub213sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213shMaskErEmitter<A, B, C> {
fn vfmsub213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213shMaskzEmitter<A, B, C> {
fn vfmsub213sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub213shMaskzErEmitter<A, B, C> {
fn vfmsub213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231PH`.
/// Fused multiply-subtract of packed FP16 values: each lane computes
/// `op0 = op1 * op2 - op0`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
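///
/// Sketch (illustrative names; `asm` built elsewhere):
///
/// ```text
/// // Per FP16 lane: zmm0 = zmm1 * zmm2 - zmm0 (the "231" operand order),
/// // i.e. the destination holds the value being subtracted.
/// asm.vfmsub231ph(zmm0, zmm1, zmm2);
/// ```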
pub trait Vfmsub231phEmitter<A, B, C> {
fn vfmsub231ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231phErEmitter<A, B, C> {
fn vfmsub231ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231phMaskEmitter<A, B, C> {
fn vfmsub231ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231phMaskErEmitter<A, B, C> {
fn vfmsub231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231phMaskzEmitter<A, B, C> {
fn vfmsub231ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUB231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUB231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUB231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231phMaskzErEmitter<A, B, C> {
fn vfmsub231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsub231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUB231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231SH`.
/// Fused multiply-subtract of the low scalar FP16 value: `op0 = op1 * op2 - op0`
/// on the low element; the destination's upper elements are preserved.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231shEmitter<A, B, C> {
fn vfmsub231sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231shErEmitter<A, B, C> {
fn vfmsub231sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231shMaskEmitter<A, B, C> {
fn vfmsub231sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231shMaskErEmitter<A, B, C> {
fn vfmsub231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231shMaskzEmitter<A, B, C> {
fn vfmsub231sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsub231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUB231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUB231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfmsub231shMaskzErEmitter<A, B, C> {
fn vfmsub231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsub231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsub231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUB231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD132PH`.
/// Fused multiply with alternating add/subtract of packed FP16 values:
/// even lanes compute `op0 = op0 * op2 + op1`, odd lanes `op0 = op0 * op2 - op1`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
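///
/// Sketch (illustrative names; `asm` built elsewhere):
///
/// ```text
/// // Even FP16 lanes: xmm0 = xmm0 * xmm2 + xmm1
/// // Odd  FP16 lanes: xmm0 = xmm0 * xmm2 - xmm1
/// asm.vfmsubadd132ph(xmm0, xmm1, xmm2);
/// ```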
pub trait Vfmsubadd132phEmitter<A, B, C> {
fn vfmsubadd132ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd132phErEmitter<A, B, C> {
fn vfmsubadd132ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd132phMaskEmitter<A, B, C> {
fn vfmsubadd132ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd132phMaskErEmitter<A, B, C> {
fn vfmsubadd132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd132phMaskzEmitter<A, B, C> {
fn vfmsubadd132ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd132phMaskzErEmitter<A, B, C> {
fn vfmsubadd132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD213PH`.
/// Fused multiply with alternating add/subtract of packed FP16 values:
/// even lanes compute `op0 = op1 * op0 + op2`, odd lanes `op0 = op1 * op0 - op2`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd213phEmitter<A, B, C> {
fn vfmsubadd213ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd213phErEmitter<A, B, C> {
fn vfmsubadd213ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd213phMaskEmitter<A, B, C> {
fn vfmsubadd213ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd213phMaskErEmitter<A, B, C> {
fn vfmsubadd213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd213phMaskzEmitter<A, B, C> {
fn vfmsubadd213ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd213phMaskzErEmitter<A, B, C> {
fn vfmsubadd213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD231PH`.
/// Fused multiply with alternating add/subtract of packed FP16 values:
/// even lanes compute `op0 = op1 * op2 + op0`, odd lanes `op0 = op1 * op2 - op0`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd231phEmitter<A, B, C> {
fn vfmsubadd231ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd231phErEmitter<A, B, C> {
fn vfmsubadd231ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd231phMaskEmitter<A, B, C> {
fn vfmsubadd231ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd231phMaskErEmitter<A, B, C> {
fn vfmsubadd231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd231phMaskzEmitter<A, B, C> {
fn vfmsubadd231ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmsubadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMSUBADD231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmsubadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMSUBADD231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmsubadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMSUBADD231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmsubadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMSUBADD231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfmsubadd231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmsubadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMSUBADD231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMSUBADD231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfmsubadd231phMaskzErEmitter<A, B, C> {
fn vfmsubadd231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfmsubadd231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmsubadd231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMSUBADD231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCPH`.
/// Complex multiply of packed FP16 values: each adjacent pair of FP16 elements
/// (even index = real part, odd index = imaginary part) is treated as one
/// complex number, and `op0 = op1 * op2` is computed as a complex product.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
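///
/// Sketch (illustrative names; `asm` built elsewhere). Note that the hardware
/// requires the destination register to be distinct from both sources:
///
/// ```text
/// // Each adjacent FP16 pair (even = real, odd = imaginary) forms one complex
/// // number: xmm0 = xmm1 * xmm2 as packed complex FP16 products.
/// asm.vfmulcph(xmm0, xmm1, xmm2);
/// ```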
pub trait VfmulcphEmitter<A, B, C> {
fn vfmulcph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMULCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMULCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMULCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMULCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMULCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmulcphErEmitter<A, B, C> {
fn vfmulcph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmulcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMULCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmulcphMaskEmitter<A, B, C> {
fn vfmulcph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMULCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMULCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMULCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMULCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMULCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCPH_MASK_ER`.
/// Merge-masking form of `VFMULCPH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmulcphMaskErEmitter<A, B, C> {
fn vfmulcph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmulcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMULCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCPH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFMULCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmulcphMaskzEmitter<A, B, C> {
fn vfmulcph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMULCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFMULCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFMULCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMULCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFMULCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCPH_MASKZ_ER`.
/// Zero-masking form of `VFMULCPH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VfmulcphMaskzErEmitter<A, B, C> {
fn vfmulcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfmulcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFMULCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCSH` (Complex Multiply Scalar FP16 Values).
/// Multiplies the low complex FP16 value (bits 31:0) of the two source operands and stores the complex product in the low 32 bits of the destination (first) operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
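///
/// A minimal sketch of the scalar form (assuming an `Assembler` value `a` and
/// register values `xmm0`..`xmm2`; illustrative, not a doctest):
///
/// ```text
/// // low complex pair only: op0[31:0] <- op1[31:0] * op2[31:0]
/// a.vfmulcsh(xmm0, xmm1, xmm2);
/// ```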
pub trait VfmulcshEmitter<A, B, C> {
fn vfmulcsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMULCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCSH_ER`.
/// Embedded-rounding form of `VFMULCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmulcshErEmitter<A, B, C> {
fn vfmulcsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCSH_MASK`.
/// Merge-masking (`{k}`) form of `VFMULCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmulcshMaskEmitter<A, B, C> {
fn vfmulcsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMULCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCSH_MASK_ER`.
/// Merge-masking form of `VFMULCSH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmulcshMaskErEmitter<A, B, C> {
fn vfmulcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCSH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFMULCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmulcshMaskzEmitter<A, B, C> {
fn vfmulcsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfmulcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFMULCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFMULCSH_MASKZ_ER`.
/// Zero-masking form of `VFMULCSH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VfmulcshMaskzErEmitter<A, B, C> {
fn vfmulcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfmulcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfmulcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFMULCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132PH` (Fused Negative Multiply-Add of Packed FP16 Values).
/// Multiplies the packed FP16 values of the first and third operands, negates the products, adds the packed FP16 values of the second operand, and stores the results in the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
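///
/// The `132` suffix gives the operand order: multiply the first and third
/// operands, negate the product, add the second. A minimal sketch (assuming an
/// `Assembler` value `a` and register values; illustrative, not a doctest):
///
/// ```text
/// // per FP16 lane: op0 <- -(op0 * op2) + op1
/// a.vfnmadd132ph(zmm0, zmm1, zmm2);
/// ```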
pub trait Vfnmadd132phEmitter<A, B, C> {
fn vfnmadd132ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132PH_ER`.
/// Embedded-rounding form of `VFNMADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132phErEmitter<A, B, C> {
fn vfnmadd132ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132PH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132phMaskEmitter<A, B, C> {
fn vfnmadd132ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132PH_MASK_ER`.
/// Merge-masking form of `VFNMADD132PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132phMaskErEmitter<A, B, C> {
fn vfnmadd132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132PH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132phMaskzEmitter<A, B, C> {
fn vfnmadd132ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132PH_MASKZ_ER`.
/// Zero-masking form of `VFNMADD132PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132phMaskzErEmitter<A, B, C> {
fn vfnmadd132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132SH` (Fused Negative Multiply-Add of Scalar FP16 Values).
/// Multiplies the low FP16 values of the first and third operands, negates the product, adds the low FP16 value of the second operand, and stores the result in the low FP16 lane of the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
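///
/// Scalar counterpart of `VFNMADD132PH`: only the low FP16 lane is computed.
/// A minimal sketch (assuming an `Assembler` value `a` and register values;
/// illustrative, not a doctest):
///
/// ```text
/// // low lane only: op0[15:0] <- -(op0[15:0] * op2[15:0]) + op1[15:0]
/// a.vfnmadd132sh(xmm0, xmm1, xmm2);
/// ```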
pub trait Vfnmadd132shEmitter<A, B, C> {
fn vfnmadd132sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132SH_ER`.
/// Embedded-rounding form of `VFNMADD132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132shErEmitter<A, B, C> {
fn vfnmadd132sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132SH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMADD132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132shMaskEmitter<A, B, C> {
fn vfnmadd132sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132SH_MASK_ER`.
/// Merge-masking form of `VFNMADD132SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132shMaskErEmitter<A, B, C> {
fn vfnmadd132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132SH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMADD132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132shMaskzEmitter<A, B, C> {
fn vfnmadd132sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD132SH_MASKZ_ER`.
/// Zero-masking form of `VFNMADD132SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd132shMaskzErEmitter<A, B, C> {
fn vfnmadd132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213PH` (Fused Negative Multiply-Add of Packed FP16 Values).
/// Multiplies the packed FP16 values of the second and first operands, negates the products, adds the packed FP16 values of the third operand, and stores the results in the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
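///
/// The `213` order multiplies the second and first operands, negates the
/// product, and adds the third. A minimal sketch (assuming an `Assembler`
/// value `a` and register values; illustrative, not a doctest):
///
/// ```text
/// // per FP16 lane: op0 <- -(op1 * op0) + op2
/// a.vfnmadd213ph(ymm0, ymm1, ymm2);
/// ```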
pub trait Vfnmadd213phEmitter<A, B, C> {
fn vfnmadd213ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213PH_ER`.
/// Embedded-rounding form of `VFNMADD213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213phErEmitter<A, B, C> {
fn vfnmadd213ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213PH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMADD213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213phMaskEmitter<A, B, C> {
fn vfnmadd213ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213PH_MASK_ER`.
/// Merge-masking form of `VFNMADD213PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213phMaskErEmitter<A, B, C> {
fn vfnmadd213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213PH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMADD213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213phMaskzEmitter<A, B, C> {
fn vfnmadd213ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213PH_MASKZ_ER`.
/// Zero-masking form of `VFNMADD213PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213phMaskzErEmitter<A, B, C> {
fn vfnmadd213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213SH` (Fused Negative Multiply-Add of Scalar FP16 Values).
/// Multiplies the low FP16 values of the second and first operands, negates the product, adds the low FP16 value of the third operand, and stores the result in the low FP16 lane of the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
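///
/// Scalar counterpart of `VFNMADD213PH` (assuming an `Assembler` value `a`
/// and register values; illustrative, not a doctest):
///
/// ```text
/// // low lane only: op0[15:0] <- -(op1[15:0] * op0[15:0]) + op2[15:0]
/// a.vfnmadd213sh(xmm0, xmm1, xmm2);
/// ```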
pub trait Vfnmadd213shEmitter<A, B, C> {
fn vfnmadd213sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213SH_ER`.
/// Embedded-rounding form of `VFNMADD213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213shErEmitter<A, B, C> {
fn vfnmadd213sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213SH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMADD213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213shMaskEmitter<A, B, C> {
fn vfnmadd213sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213SH_MASK_ER`.
/// Merge-masking form of `VFNMADD213SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213shMaskErEmitter<A, B, C> {
fn vfnmadd213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213SH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMADD213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213shMaskzEmitter<A, B, C> {
fn vfnmadd213sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD213SH_MASKZ_ER`.
/// Zero-masking form of `VFNMADD213SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd213shMaskzErEmitter<A, B, C> {
fn vfnmadd213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231PH` (Fused Negative Multiply-Add of Packed FP16 Values).
/// Multiplies the packed FP16 values of the second and third operands, negates the products, adds the packed FP16 values of the first operand, and stores the results in the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
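///
/// The `231` order multiplies the second and third operands, negates the
/// product, and adds the first, which makes it convenient for accumulating
/// into `op0`. A minimal sketch (assuming an `Assembler` value `a` and
/// register values; illustrative, not a doctest):
///
/// ```text
/// // per FP16 lane: op0 <- -(op1 * op2) + op0
/// a.vfnmadd231ph(zmm0, zmm1, zmm2);
/// ```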
pub trait Vfnmadd231phEmitter<A, B, C> {
fn vfnmadd231ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231PH_ER`.
/// Embedded-rounding form of `VFNMADD231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231phErEmitter<A, B, C> {
fn vfnmadd231ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231PH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMADD231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231phMaskEmitter<A, B, C> {
fn vfnmadd231ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231PH_MASK_ER`.
/// Merge-masking form of `VFNMADD231PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231phMaskErEmitter<A, B, C> {
fn vfnmadd231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231PH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMADD231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231phMaskzEmitter<A, B, C> {
fn vfnmadd231ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMADD231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMADD231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMADD231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231PH_MASKZ_ER`.
/// Zero-masking form of `VFNMADD231PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231phMaskzErEmitter<A, B, C> {
fn vfnmadd231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmadd231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMADD231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231SH` (Fused Negative Multiply-Add of Scalar FP16 Values).
/// Multiplies the low FP16 values of the second and third operands, negates the product, adds the low FP16 value of the first operand, and stores the result in the low FP16 lane of the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
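///
/// Scalar counterpart of `VFNMADD231PH` (assuming an `Assembler` value `a`
/// and register values; illustrative, not a doctest):
///
/// ```text
/// // low lane only: op0[15:0] <- -(op1[15:0] * op2[15:0]) + op0[15:0]
/// a.vfnmadd231sh(xmm0, xmm1, xmm2);
/// ```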
pub trait Vfnmadd231shEmitter<A, B, C> {
fn vfnmadd231sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231SH_ER`.
/// Embedded-rounding form of `VFNMADD231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231shErEmitter<A, B, C> {
fn vfnmadd231sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231SH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMADD231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231shMaskEmitter<A, B, C> {
fn vfnmadd231sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231SH_MASK_ER`.
/// Merge-masking form of `VFNMADD231SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231shMaskErEmitter<A, B, C> {
fn vfnmadd231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231SH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMADD231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231shMaskzEmitter<A, B, C> {
fn vfnmadd231sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmadd231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMADD231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMADD231SH_MASKZ_ER`.
/// Zero-masking form of `VFNMADD231SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmadd231shMaskzErEmitter<A, B, C> {
fn vfnmadd231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmadd231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmadd231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMADD231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132PH` (Fused Negative Multiply-Subtract of Packed FP16 Values).
/// Multiplies the packed FP16 values of the first and third operands, negates the products, subtracts the packed FP16 values of the second operand, and stores the results in the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
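///
/// Like `VFNMADD132PH`, but the second operand is subtracted from the negated
/// product rather than added. A minimal sketch (assuming an `Assembler` value
/// `a` and register values; illustrative, not a doctest):
///
/// ```text
/// // per FP16 lane: op0 <- -(op0 * op2) - op1
/// a.vfnmsub132ph(xmm0, xmm1, xmm2);
/// ```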
pub trait Vfnmsub132phEmitter<A, B, C> {
fn vfnmsub132ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132PH_ER`.
/// Embedded-rounding form of `VFNMSUB132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132phErEmitter<A, B, C> {
fn vfnmsub132ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132PH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMSUB132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132phMaskEmitter<A, B, C> {
fn vfnmsub132ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132PH_MASK_ER`.
/// Merge-masking form of `VFNMSUB132PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132phMaskErEmitter<A, B, C> {
fn vfnmsub132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132PH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMSUB132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132phMaskzEmitter<A, B, C> {
fn vfnmsub132ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132PH_MASKZ_ER`.
/// Zero-masking form of `VFNMSUB132PH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132phMaskzErEmitter<A, B, C> {
fn vfnmsub132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132SH` (Fused Negative Multiply-Subtract of Scalar FP16 Values).
/// Multiplies the low FP16 values of the first and third operands, negates the product, subtracts the low FP16 value of the second operand, and stores the result in the low FP16 lane of the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
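///
/// Scalar counterpart of `VFNMSUB132PH` (assuming an `Assembler` value `a`
/// and register values; illustrative, not a doctest):
///
/// ```text
/// // low lane only: op0[15:0] <- -(op0[15:0] * op2[15:0]) - op1[15:0]
/// a.vfnmsub132sh(xmm0, xmm1, xmm2);
/// ```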
pub trait Vfnmsub132shEmitter<A, B, C> {
fn vfnmsub132sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132SH_ER`.
/// Embedded-rounding form of `VFNMSUB132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132shErEmitter<A, B, C> {
fn vfnmsub132sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132SH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMSUB132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132shMaskEmitter<A, B, C> {
fn vfnmsub132sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132SH_MASK_ER`.
/// Merge-masking form of `VFNMSUB132SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132shMaskErEmitter<A, B, C> {
fn vfnmsub132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132SH_MASKZ`.
/// Zero-masking (`{k}{z}`) form of `VFNMSUB132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132shMaskzEmitter<A, B, C> {
fn vfnmsub132sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB132SH_MASKZ_ER`.
/// Zero-masking form of `VFNMSUB132SH` with embedded rounding.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub132shMaskzErEmitter<A, B, C> {
fn vfnmsub132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213PH` (Fused Negative Multiply-Subtract of Packed FP16 Values).
/// Multiplies the packed FP16 values of the second and first operands, negates the products, subtracts the packed FP16 values of the third operand, and stores the results in the first operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
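///
/// The `213` order multiplies the second and first operands, negates the
/// product, and subtracts the third. A minimal sketch (assuming an
/// `Assembler` value `a` and register values; illustrative, not a doctest):
///
/// ```text
/// // per FP16 lane: op0 <- -(op1 * op0) - op2
/// a.vfnmsub213ph(zmm0, zmm1, zmm2);
/// ```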
pub trait Vfnmsub213phEmitter<A, B, C> {
fn vfnmsub213ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213PH_ER`.
/// Embedded-rounding form of `VFNMSUB213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213phErEmitter<A, B, C> {
fn vfnmsub213ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213PH_MASK`.
/// Merge-masking (`{k}`) form of `VFNMSUB213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213phMaskEmitter<A, B, C> {
fn vfnmsub213ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213phMaskErEmitter<A, B, C> {
fn vfnmsub213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
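///
/// `_MASKZ` is the zeroing counterpart of `_MASK`: destination lanes whose
/// mask bit is clear are set to zero instead of being preserved. Hedged
/// sketch (register names are assumptions):
///
/// ```ignore
/// asm.vfnmsub213ph_maskz(ymm0, ymm1, ymm2);
/// ```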
pub trait Vfnmsub213phMaskzEmitter<A, B, C> {
fn vfnmsub213ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213phMaskzErEmitter<A, B, C> {
fn vfnmsub213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
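///
/// The `SH` form is scalar: it operates on the lowest FP16 element only,
/// leaving the upper elements of the destination unchanged. Hedged sketch
/// (register names are assumptions):
///
/// ```ignore
/// // xmm0[0] = -(xmm1[0] * xmm0[0]) - xmm2[0]; upper lanes unchanged.
/// asm.vfnmsub213sh(xmm0, xmm1, xmm2);
/// ```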
pub trait Vfnmsub213shEmitter<A, B, C> {
fn vfnmsub213sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213shErEmitter<A, B, C> {
fn vfnmsub213sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213shMaskEmitter<A, B, C> {
fn vfnmsub213sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213shMaskErEmitter<A, B, C> {
fn vfnmsub213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213shMaskzEmitter<A, B, C> {
fn vfnmsub213sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub213shMaskzErEmitter<A, B, C> {
fn vfnmsub213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
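///
/// The `231` ordering multiplies the two source operands and subtracts the
/// destination, i.e. op0 = -(op1 * op2) - op0 per FP16 lane, whereas the
/// `213` form multiplies op1 by the destination. Hedged sketch (register
/// names are assumptions):
///
/// ```ignore
/// asm.vfnmsub231ph(zmm0, zmm1, zmm2); // zmm0 = -(zmm1 * zmm2) - zmm0
/// ```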
pub trait Vfnmsub231phEmitter<A, B, C> {
fn vfnmsub231ph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231phErEmitter<A, B, C> {
fn vfnmsub231ph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231phMaskEmitter<A, B, C> {
fn vfnmsub231ph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231phMaskErEmitter<A, B, C> {
fn vfnmsub231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231phMaskzEmitter<A, B, C> {
fn vfnmsub231ph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vfnmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VFNMSUB231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vfnmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VFNMSUB231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vfnmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VFNMSUB231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231phMaskzErEmitter<A, B, C> {
fn vfnmsub231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vfnmsub231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VFNMSUB231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231shEmitter<A, B, C> {
fn vfnmsub231sh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231shErEmitter<A, B, C> {
fn vfnmsub231sh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231shMaskEmitter<A, B, C> {
fn vfnmsub231sh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231shMaskErEmitter<A, B, C> {
fn vfnmsub231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231shMaskzEmitter<A, B, C> {
fn vfnmsub231sh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vfnmsub231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vfnmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VFNMSUB231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFNMSUB231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait Vfnmsub231shMaskzErEmitter<A, B, C> {
fn vfnmsub231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vfnmsub231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vfnmsub231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VFNMSUB231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFPCLASSPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// | 3 | KReg, Ymm, Imm |
/// | 4 | KReg, Zmm, Imm |
/// +---+----------------+
/// ```
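///
/// `VFPCLASSPH` tests every FP16 lane against the special-value categories
/// selected by the immediate (QNaN, SNaN, +/-0, +/-Inf, denormal, negative)
/// and writes one result bit per lane into the destination mask register.
/// Hedged sketch (register names and the `Imm` constructor are assumptions):
///
/// ```ignore
/// // imm8 = 0x18 selects bits 3 and 4: lanes that are +Inf or -Inf.
/// asm.vfpclassph(k1, zmm0, Imm::from(0x18));
/// ```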
pub trait VfpclassphEmitter<A, B, C> {
fn vfpclassph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfpclassphEmitter<KReg, Xmm, Imm> for Assembler<'a> {
fn vfpclassph(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
self.emit(VFPCLASSPH128KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassphEmitter<KReg, Mem, Imm> for Assembler<'a> {
fn vfpclassph(&mut self, op0: KReg, op1: Mem, op2: Imm) {
self.emit(VFPCLASSPH128KMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassphEmitter<KReg, Ymm, Imm> for Assembler<'a> {
fn vfpclassph(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
self.emit(VFPCLASSPH256KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassphEmitter<KReg, Zmm, Imm> for Assembler<'a> {
fn vfpclassph(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
self.emit(VFPCLASSPH512KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFPCLASSPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// | 3 | KReg, Ymm, Imm |
/// | 4 | KReg, Zmm, Imm |
/// +---+----------------+
/// ```
pub trait VfpclassphMaskEmitter<A, B, C> {
fn vfpclassph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfpclassphMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
fn vfpclassph_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
self.emit(VFPCLASSPH128KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassphMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
fn vfpclassph_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
self.emit(VFPCLASSPH128KMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassphMaskEmitter<KReg, Ymm, Imm> for Assembler<'a> {
fn vfpclassph_mask(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
self.emit(VFPCLASSPH256KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassphMaskEmitter<KReg, Zmm, Imm> for Assembler<'a> {
fn vfpclassph_mask(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
self.emit(VFPCLASSPH512KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFPCLASSSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// +---+----------------+
/// ```
pub trait VfpclassshEmitter<A, B, C> {
fn vfpclasssh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfpclassshEmitter<KReg, Xmm, Imm> for Assembler<'a> {
fn vfpclasssh(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
self.emit(VFPCLASSSHKRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassshEmitter<KReg, Mem, Imm> for Assembler<'a> {
fn vfpclasssh(&mut self, op0: KReg, op1: Mem, op2: Imm) {
self.emit(VFPCLASSSHKMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VFPCLASSSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// +---+----------------+
/// ```
pub trait VfpclassshMaskEmitter<A, B, C> {
fn vfpclasssh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VfpclassshMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
fn vfpclasssh_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
self.emit(VFPCLASSSHKRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VfpclassshMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
fn vfpclasssh_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
self.emit(VFPCLASSSHKMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETEXPPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
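///
/// `VGETEXPPH` writes, for each FP16 source lane, the unbiased exponent as
/// a half-precision value, i.e. floor(log2(|src|)); for example, 8.0 yields
/// 3.0. Hedged sketch (register names are assumptions):
///
/// ```ignore
/// asm.vgetexpph(xmm0, xmm1); // xmm0[i] = floor(log2(|xmm1[i]|))
/// ```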
pub trait VgetexpphEmitter<A, B> {
fn vgetexpph(&mut self, op0: A, op1: B);
}
impl<'a> VgetexpphEmitter<Xmm, Xmm> for Assembler<'a> {
fn vgetexpph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VGETEXPPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphEmitter<Xmm, Mem> for Assembler<'a> {
fn vgetexpph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VGETEXPPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphEmitter<Ymm, Ymm> for Assembler<'a> {
fn vgetexpph(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VGETEXPPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphEmitter<Ymm, Mem> for Assembler<'a> {
fn vgetexpph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VGETEXPPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphEmitter<Zmm, Zmm> for Assembler<'a> {
fn vgetexpph(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VGETEXPPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphEmitter<Zmm, Mem> for Assembler<'a> {
fn vgetexpph(&mut self, op0: Zmm, op1: Mem) {
self.emit(VGETEXPPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VGETEXPPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VgetexpphMaskEmitter<A, B> {
fn vgetexpph_mask(&mut self, op0: A, op1: B);
}
impl<'a> VgetexpphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vgetexpph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VGETEXPPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vgetexpph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VGETEXPPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vgetexpph_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VGETEXPPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vgetexpph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VGETEXPPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vgetexpph_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VGETEXPPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vgetexpph_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VGETEXPPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VGETEXPPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VgetexpphMaskSaeEmitter<A, B> {
fn vgetexpph_mask_sae(&mut self, op0: A, op1: B);
}
impl<'a> VgetexpphMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vgetexpph_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VGETEXPPH512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VGETEXPPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VgetexpphMaskzEmitter<A, B> {
fn vgetexpph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> VgetexpphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vgetexpph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VGETEXPPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vgetexpph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VGETEXPPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vgetexpph_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VGETEXPPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vgetexpph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VGETEXPPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vgetexpph_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VGETEXPPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VgetexpphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vgetexpph_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VGETEXPPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VGETEXPPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VgetexpphMaskzSaeEmitter<A, B> {
fn vgetexpph_maskz_sae(&mut self, op0: A, op1: B);
}
impl<'a> VgetexpphMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vgetexpph_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VGETEXPPH512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VGETEXPPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
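///
/// `_SAE` ("suppress all exceptions") encodes the instruction so that it
/// raises no floating-point exception flags, which is why only the
/// full-width `Zmm` variant exists. Hedged sketch (register names are
/// assumptions):
///
/// ```ignore
/// asm.vgetexpph_sae(zmm0, zmm1);
/// ```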
pub trait VgetexpphSaeEmitter<A, B> {
fn vgetexpph_sae(&mut self, op0: A, op1: B);
}
impl<'a> VgetexpphSaeEmitter<Zmm, Zmm> for Assembler<'a> {
fn vgetexpph_sae(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VGETEXPPH512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VGETEXPSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VgetexpshEmitter<A, B, C> {
fn vgetexpsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetexpshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgetexpsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGETEXPSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetexpshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vgetexpsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VGETEXPSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETEXPSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VgetexpshMaskEmitter<A, B, C> {
fn vgetexpsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetexpshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgetexpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGETEXPSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetexpshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vgetexpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VGETEXPSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETEXPSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VgetexpshMaskSaeEmitter<A, B, C> {
fn vgetexpsh_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetexpshMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgetexpsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGETEXPSHRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETEXPSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VgetexpshMaskzEmitter<A, B, C> {
fn vgetexpsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetexpshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgetexpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGETEXPSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetexpshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vgetexpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VGETEXPSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETEXPSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VgetexpshMaskzSaeEmitter<A, B, C> {
fn vgetexpsh_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetexpshMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgetexpsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGETEXPSHRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETEXPSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VgetexpshSaeEmitter<A, B, C> {
fn vgetexpsh_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetexpshSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgetexpsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGETEXPSHRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
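///
/// `VGETMANTPH` extracts each lane's mantissa, normalized into an interval
/// chosen by the low two immediate bits (e.g. [1, 2)), with sign handling
/// selected by the next two bits. Hedged sketch (register names and the
/// `Imm` constructor are assumptions):
///
/// ```ignore
/// // imm8 = 0x00: normalize mantissas into [1, 2) and keep the source sign.
/// asm.vgetmantph(xmm0, xmm1, Imm::from(0x00));
/// ```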
pub trait VgetmantphEmitter<A, B, C> {
fn vgetmantph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetmantphEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantph(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VGETMANTPH128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vgetmantph(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vgetmantph(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VGETMANTPH256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vgetmantph(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vgetmantph(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VGETMANTPH512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vgetmantph(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VgetmantphMaskEmitter<A, B, C> {
fn vgetmantph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetmantphMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VGETMANTPH128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vgetmantph_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vgetmantph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VGETMANTPH256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vgetmantph_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vgetmantph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VGETMANTPH512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vgetmantph_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VgetmantphMaskSaeEmitter<A, B, C> {
fn vgetmantph_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetmantphMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vgetmantph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VGETMANTPH512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VgetmantphMaskzEmitter<A, B, C> {
fn vgetmantph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetmantphMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VGETMANTPH128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vgetmantph_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vgetmantph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VGETMANTPH256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vgetmantph_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vgetmantph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VGETMANTPH512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VgetmantphMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vgetmantph_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VGETMANTPH512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VgetmantphMaskzSaeEmitter<A, B, C> {
fn vgetmantph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetmantphMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vgetmantph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VGETMANTPH512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VgetmantphSaeEmitter<A, B, C> {
fn vgetmantph_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VgetmantphSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vgetmantph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VGETMANTPH512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGETMANTSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VgetmantshEmitter<A, B, C, D> {
fn vgetmantsh(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VgetmantshEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGETMANTSHRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VgetmantshEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgetmantsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGETMANTSHRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGETMANTSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VgetmantshMaskEmitter<A, B, C, D> {
fn vgetmantsh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VgetmantshMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGETMANTSHRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VgetmantshMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgetmantsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGETMANTSHRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGETMANTSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VgetmantshMaskSaeEmitter<A, B, C, D> {
fn vgetmantsh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VgetmantshMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGETMANTSHRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGETMANTSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VgetmantshMaskzEmitter<A, B, C, D> {
fn vgetmantsh_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VgetmantshMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGETMANTSHRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VgetmantshMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgetmantsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGETMANTSHRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGETMANTSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VgetmantshMaskzSaeEmitter<A, B, C, D> {
fn vgetmantsh_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VgetmantshMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGETMANTSHRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGETMANTSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VgetmantshSaeEmitter<A, B, C, D> {
fn vgetmantsh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VgetmantshSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgetmantsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGETMANTSHRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8AFFINEINVQB` (VGF2P8AFFINEINVQB).
/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
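///
/// Hedged usage sketch (register names and the `Imm` constructor are
/// assumptions about the surrounding crate):
///
/// ```ignore
/// // Per byte: op0 = A * inv(x) + b, with x taken from op1, the matrix
/// // rows A from op2, and the constant vector b from the immediate byte.
/// asm.vgf2p8affineinvqb(xmm0, xmm1, xmm2, Imm::from(0x00));
/// ```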
pub trait Vgf2p8affineinvqbEmitter<A, B, C, D> {
fn vgf2p8affineinvqb(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> Vgf2p8affineinvqbEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8AFFINEINVQB_MASK` (VGF2P8AFFINEINVQB).
/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
pub trait Vgf2p8affineinvqbMaskEmitter<A, B, C, D> {
fn vgf2p8affineinvqb_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> Vgf2p8affineinvqbMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8AFFINEINVQB_MASKZ` (VGF2P8AFFINEINVQB).
/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
pub trait Vgf2p8affineinvqbMaskzEmitter<A, B, C, D> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> Vgf2p8affineinvqbMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineinvqbMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineinvqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEINVQB512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8AFFINEQB` (VGF2P8AFFINEQB).
/// The AFFINEB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
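///
/// Hedged usage sketch (register names and the `Imm` constructor are
/// assumptions). With an identity matrix loaded into the second source, the
/// transform reduces to XOR-ing every byte of the first source with the
/// immediate:
///
/// ```ignore
/// asm.vgf2p8affineqb(xmm0, xmm1, xmm2, Imm::from(0x63)); // I * x + 0x63
/// ```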
pub trait Vgf2p8affineqbEmitter<A, B, C, D> {
fn vgf2p8affineqb(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> Vgf2p8affineqbEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgf2p8affineqb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGF2P8AFFINEQB128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vgf2p8affineqb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VGF2P8AFFINEQB256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vgf2p8affineqb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VGF2P8AFFINEQB512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8AFFINEQB_MASK` (VGF2P8AFFINEQB).
/// The AFFINEB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
pub trait Vgf2p8affineqbMaskEmitter<A, B, C, D> {
fn vgf2p8affineqb_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> Vgf2p8affineqbMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgf2p8affineqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGF2P8AFFINEQB128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vgf2p8affineqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VGF2P8AFFINEQB256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vgf2p8affineqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VGF2P8AFFINEQB512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8AFFINEQB_MASKZ` (VGF2P8AFFINEQB).
/// The AFFINEB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * x + b, where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD register (operand 2) or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
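/// This `_MASKZ` form uses zero-masking: destination elements not selected by the write mask are zeroed.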
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
pub trait Vgf2p8affineqbMaskzEmitter<A, B, C, D> {
fn vgf2p8affineqb_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> Vgf2p8affineqbMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vgf2p8affineqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VGF2P8AFFINEQB128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vgf2p8affineqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VGF2P8AFFINEQB256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vgf2p8affineqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VGF2P8AFFINEQB512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> Vgf2p8affineqbMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vgf2p8affineqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VGF2P8AFFINEQB512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VGF2P8MULB` (VGF2P8MULB).
/// The instruction multiplies elements in the finite field GF(2^8), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
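///
/// As a reference for the arithmetic, a scalar GF(2^8) multiply with the same
/// reduction polynomial (0x11B) looks like this (an illustrative sketch, not
/// this crate's API):
///
/// ```
/// fn gf2p8_mul(mut a: u8, mut b: u8) -> u8 {
///     let mut r: u8 = 0;
///     for _ in 0..8 {
///         if (b & 1) != 0 {
///             r ^= a;
///         }
///         let carry = (a & 0x80) != 0;
///         a <<= 1;
///         if carry {
///             a ^= 0x1B; // reduce: x^8 == x^4 + x^3 + x + 1 (mod the polynomial)
///         }
///         b >>= 1;
///     }
///     r
/// }
/// assert_eq!(gf2p8_mul(0x02, 0x87), 0x15);
/// ```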
pub trait Vgf2p8mulbEmitter<A, B, C> {
fn vgf2p8mulb(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vgf2p8mulbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgf2p8mulb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGF2P8MULB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vgf2p8mulb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VGF2P8MULB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vgf2p8mulb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VGF2P8MULB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vgf2p8mulb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VGF2P8MULB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vgf2p8mulb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VGF2P8MULB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vgf2p8mulb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VGF2P8MULB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGF2P8MULB_MASK` (VGF2P8MULB).
/// The instruction multiplies elements in the finite field GF(2^8), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vgf2p8mulbMaskEmitter<A, B, C> {
fn vgf2p8mulb_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vgf2p8mulbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgf2p8mulb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGF2P8MULB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vgf2p8mulb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VGF2P8MULB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vgf2p8mulb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VGF2P8MULB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vgf2p8mulb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VGF2P8MULB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vgf2p8mulb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VGF2P8MULB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vgf2p8mulb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VGF2P8MULB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VGF2P8MULB_MASKZ` (VGF2P8MULB).
/// The instruction multiplies elements in the finite field GF(2^8), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vgf2p8mulbMaskzEmitter<A, B, C> {
fn vgf2p8mulb_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vgf2p8mulbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vgf2p8mulb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VGF2P8MULB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vgf2p8mulb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VGF2P8MULB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vgf2p8mulb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VGF2P8MULB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vgf2p8mulb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VGF2P8MULB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vgf2p8mulb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VGF2P8MULB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vgf2p8mulbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vgf2p8mulb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VGF2P8MULB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmaxphEmitter<A, B, C> {
fn vmaxph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmaxph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMAXPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vmaxph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMAXPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vmaxph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMAXPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmaxph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMAXPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vmaxph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMAXPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmaxphMaskEmitter<A, B, C> {
fn vmaxph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmaxph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMAXPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vmaxph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMAXPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vmaxph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMAXPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmaxph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMAXPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vmaxph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMAXPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXPH_MASK_SAE`.
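/// The `_SAE` suffix selects the suppress-all-exceptions encoding, which is only available in the register-only 512-bit form.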
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmaxphMaskSaeEmitter<A, B, C> {
fn vmaxph_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxphMaskSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmaxph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMAXPH512RRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmaxphMaskzEmitter<A, B, C> {
fn vmaxph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmaxph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMAXPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vmaxph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMAXPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vmaxph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMAXPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmaxph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMAXPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vmaxph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMAXPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmaxphMaskzSaeEmitter<A, B, C> {
fn vmaxph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxphMaskzSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmaxph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMAXPH512RRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmaxphSaeEmitter<A, B, C> {
fn vmaxph_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxphSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmaxph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMAXPH512RRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmaxshEmitter<A, B, C> {
fn vmaxsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmaxsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMAXSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmaxshMaskEmitter<A, B, C> {
fn vmaxsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmaxsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMAXSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmaxshMaskSaeEmitter<A, B, C> {
fn vmaxsh_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxshMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXSHRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmaxshMaskzEmitter<A, B, C> {
fn vmaxsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmaxshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmaxsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMAXSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmaxshMaskzSaeEmitter<A, B, C> {
fn vmaxsh_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxshMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXSHRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMAXSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmaxshSaeEmitter<A, B, C> {
fn vmaxsh_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmaxshSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmaxsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMAXSHRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VminphEmitter<A, B, C> {
fn vminph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vminph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMINPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vminph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMINPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vminph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMINPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vminph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMINPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vminph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMINPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VminphMaskEmitter<A, B, C> {
fn vminph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vminph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMINPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vminph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMINPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vminph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMINPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vminph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMINPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vminph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMINPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VminphMaskSaeEmitter<A, B, C> {
fn vminph_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminphMaskSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vminph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMINPH512RRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VminphMaskzEmitter<A, B, C> {
fn vminph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vminph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMINPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vminph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMINPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vminph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMINPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vminph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMINPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vminph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMINPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VminphMaskzSaeEmitter<A, B, C> {
fn vminph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminphMaskzSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vminph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMINPH512RRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VminphSaeEmitter<A, B, C> {
fn vminph_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminphSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vminph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMINPH512RRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VminshEmitter<A, B, C> {
fn vminsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vminsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMINSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VminshMaskEmitter<A, B, C> {
fn vminsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vminsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMINSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VminshMaskSaeEmitter<A, B, C> {
fn vminsh_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminshMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINSHRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VminshMaskzEmitter<A, B, C> {
fn vminsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VminshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vminsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMINSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VminshMaskzSaeEmitter<A, B, C> {
fn vminsh_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminshMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINSHRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMINSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VminshSaeEmitter<A, B, C> {
fn vminsh_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VminshSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vminsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMINSHRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMOVSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Xmm |
/// | 2 | Xmm, Mem |
/// +---+----------+
/// ```
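///
/// The `_2` suffix distinguishes this two-operand (load/store) form from the
/// three-operand register form exposed by `VmovshEmitter_3` below. A
/// hypothetical usage sketch (the operand constructors are assumed names, not
/// this crate's verified API):
///
/// ```ignore
/// // `asm` is an `Assembler`; `xmm0`..`xmm2` and `mem` come from this
/// // crate's register/memory constructors (names assumed).
/// asm.vmovsh_2(xmm0, mem);        // memory form: load one half-precision element
/// asm.vmovsh_3(xmm0, xmm1, xmm2); // register-to-register form
/// ```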
pub trait VmovshEmitter_2<A, B> {
fn vmovsh_2(&mut self, op0: A, op1: B);
}
impl<'a> VmovshEmitter_2<Xmm, Mem> for Assembler<'a> {
fn vmovsh_2(&mut self, op0: Xmm, op1: Mem) {
self.emit(VMOVSHRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VmovshEmitter_2<Mem, Xmm> for Assembler<'a> {
fn vmovsh_2(&mut self, op0: Mem, op1: Xmm) {
self.emit(VMOVSHMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VMOVSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmovshEmitter_3<A, B, C> {
fn vmovsh_3(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmovshEmitter_3<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmovsh_3(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMOVSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMOVSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Xmm |
/// | 2 | Xmm, Mem |
/// +---+----------+
/// ```
pub trait VmovshMaskEmitter_2<A, B> {
fn vmovsh_mask_2(&mut self, op0: A, op1: B);
}
impl<'a> VmovshMaskEmitter_2<Xmm, Mem> for Assembler<'a> {
fn vmovsh_mask_2(&mut self, op0: Xmm, op1: Mem) {
self.emit(VMOVSHRM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VmovshMaskEmitter_2<Mem, Xmm> for Assembler<'a> {
fn vmovsh_mask_2(&mut self, op0: Mem, op1: Xmm) {
self.emit(VMOVSHMR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VMOVSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmovshMaskEmitter_3<A, B, C> {
fn vmovsh_mask_3(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmovshMaskEmitter_3<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmovsh_mask_3(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMOVSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMOVSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// +---+----------+
/// ```
pub trait VmovshMaskzEmitter_2<A, B> {
fn vmovsh_maskz_2(&mut self, op0: A, op1: B);
}
impl<'a> VmovshMaskzEmitter_2<Xmm, Mem> for Assembler<'a> {
fn vmovsh_maskz_2(&mut self, op0: Xmm, op1: Mem) {
self.emit(VMOVSHRM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VMOVSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmovshMaskzEmitter_3<A, B, C> {
fn vmovsh_maskz_3(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmovshMaskzEmitter_3<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmovsh_maskz_3(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMOVSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMOVW_G2X`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Gpd |
/// | 2 | Xmm, Mem |
/// +---+----------+
/// ```
pub trait VmovwG2xEmitter<A, B> {
fn vmovw_g2x(&mut self, op0: A, op1: B);
}
impl<'a> VmovwG2xEmitter<Xmm, Gpd> for Assembler<'a> {
fn vmovw_g2x(&mut self, op0: Xmm, op1: Gpd) {
self.emit(VMOVW_G2XRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VmovwG2xEmitter<Xmm, Mem> for Assembler<'a> {
fn vmovw_g2x(&mut self, op0: Xmm, op1: Mem) {
self.emit(VMOVW_G2XRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VMOVW_X2G`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Mem, Xmm |
/// +---+----------+
/// ```
pub trait VmovwX2gEmitter<A, B> {
fn vmovw_x2g(&mut self, op0: A, op1: B);
}
impl<'a> VmovwX2gEmitter<Gpd, Xmm> for Assembler<'a> {
fn vmovw_x2g(&mut self, op0: Gpd, op1: Xmm) {
self.emit(VMOVW_X2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VmovwX2gEmitter<Mem, Xmm> for Assembler<'a> {
fn vmovw_x2g(&mut self, op0: Mem, op1: Xmm) {
self.emit(VMOVW_X2GMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VMULPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmulphEmitter<A, B, C> {
fn vmulph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmulph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMULPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vmulph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMULPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vmulph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMULPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmulph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMULPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vmulph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMULPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULPH_ER`.
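/// The `_ER` suffix selects the embedded-rounding encoding, which is only available in the register-only 512-bit form.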
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmulphErEmitter<A, B, C> {
fn vmulph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmulph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMULPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmulphMaskEmitter<A, B, C> {
fn vmulph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmulph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMULPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vmulph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMULPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vmulph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMULPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmulph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMULPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vmulph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMULPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmulphMaskErEmitter<A, B, C> {
fn vmulph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmulph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMULPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmulphMaskzEmitter<A, B, C> {
fn vmulph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmulph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMULPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vmulph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VMULPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vmulph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VMULPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmulph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMULPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vmulph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VMULPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VmulphMaskzErEmitter<A, B, C> {
fn vmulph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vmulph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VMULPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmulshEmitter<A, B, C> {
fn vmulsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmulsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMULSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmulshErEmitter<A, B, C> {
fn vmulsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmulshMaskEmitter<A, B, C> {
fn vmulsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmulsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMULSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmulshMaskErEmitter<A, B, C> {
fn vmulsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmulshMaskzEmitter<A, B, C> {
fn vmulsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VmulshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vmulsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VMULSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VMULSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VmulshMaskzErEmitter<A, B, C> {
fn vmulsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VmulshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vmulsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VMULSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VPCLMULQDQ` (VPCLMULQDQ).
/// Performs a carry-less multiplication of two quadwords, selected from the first source and second source operand according to the value of the immediate byte. Bits 4 and 0 are used to select which 64-bit half of each operand to use, per Table 4-13 of the Intel manual; the other bits of the immediate byte are ignored.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PCLMULQDQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
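///
/// A scalar model of one 64 x 64 -> 128-bit carry-less multiplication
/// (illustrative only; bit 0 of the immediate picks the quadword of the
/// first source and bit 4 that of the second, as described above):
///
/// ```
/// fn clmul(a: u64, b: u64) -> u128 {
///     let mut r: u128 = 0;
///     for i in 0..64 {
///         if ((b >> i) & 1) == 1 {
///             r ^= (a as u128) << i; // XOR instead of add: no carries
///         }
///     }
///     r
/// }
/// assert_eq!(clmul(0b101, 0b11), 0b1111);
/// ```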
pub trait VpclmulqdqEmitter<A, B, C, D> {
fn vpclmulqdq(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VpclmulqdqEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vpclmulqdq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VPCLMULQDQ128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VpclmulqdqEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vpclmulqdq(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VPCLMULQDQ128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VpclmulqdqEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
fn vpclmulqdq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
self.emit(VPCLMULQDQ256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VpclmulqdqEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
fn vpclmulqdq(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
self.emit(VPCLMULQDQ256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VpclmulqdqEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
fn vpclmulqdq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
self.emit(VPCLMULQDQ512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VpclmulqdqEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
fn vpclmulqdq(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
self.emit(VPCLMULQDQ512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VPDPBSSD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
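///
/// As a rough per-lane model (stated as an assumption, following the
/// AVX-VNNI-INT8 definition of VPDPBSSD: each destination dword accumulates
/// the sum of four signed-byte products):
///
/// ```
/// fn dpbssd_lane(acc: i32, a: [i8; 4], b: [i8; 4]) -> i32 {
///     let dot: i32 = a.iter().zip(b.iter()).map(|(&x, &y)| x as i32 * y as i32).sum();
///     acc.wrapping_add(dot) // VPDPBSSDS is the saturating counterpart
/// }
/// assert_eq!(dpbssd_lane(10, [1, -2, 3, -4], [5, 6, 7, 8]), -8);
/// ```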
pub trait VpdpbssdEmitter<A, B, C> {
fn vpdpbssd(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VpdpbssdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vpdpbssd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VPDPBSSD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbssdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vpdpbssd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VPDPBSSD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbssdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vpdpbssd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VPDPBSSD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbssdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vpdpbssd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VPDPBSSD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VPDPBSSDS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
pub trait VpdpbssdsEmitter<A, B, C> {
fn vpdpbssds(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VpdpbssdsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vpdpbssds(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VPDPBSSDS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbssdsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vpdpbssds(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VPDPBSSDS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbssdsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vpdpbssds(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VPDPBSSDS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbssdsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vpdpbssds(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VPDPBSSDS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VPDPBSUD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
pub trait VpdpbsudEmitter<A, B, C> {
fn vpdpbsud(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VpdpbsudEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vpdpbsud(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VPDPBSUD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbsudEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vpdpbsud(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VPDPBSUD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbsudEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vpdpbsud(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VPDPBSUD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbsudEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vpdpbsud(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VPDPBSUD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VPDPBSUDS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
pub trait VpdpbsudsEmitter<A, B, C> {
fn vpdpbsuds(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VpdpbsudsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vpdpbsuds(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VPDPBSUDS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbsudsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vpdpbsuds(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VPDPBSUDS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbsudsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vpdpbsuds(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VPDPBSUDS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbsudsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vpdpbsuds(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VPDPBSUDS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VPDPBUUD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
pub trait VpdpbuudEmitter<A, B, C> {
fn vpdpbuud(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VpdpbuudEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vpdpbuud(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VPDPBUUD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbuudEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vpdpbuud(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VPDPBUUD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbuudEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vpdpbuud(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VPDPBUUD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbuudEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vpdpbuud(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VPDPBUUD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VPDPBUUDS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
pub trait VpdpbuudsEmitter<A, B, C> {
fn vpdpbuuds(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VpdpbuudsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vpdpbuuds(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VPDPBUUDS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbuudsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vpdpbuuds(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VPDPBUUDS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbuudsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vpdpbuuds(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VPDPBUUDS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VpdpbuudsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vpdpbuuds(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VPDPBUUDS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRCPPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VrcpphEmitter<A, B> {
fn vrcpph(&mut self, op0: A, op1: B);
}
impl<'a> VrcpphEmitter<Xmm, Xmm> for Assembler<'a> {
fn vrcpph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VRCPPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphEmitter<Xmm, Mem> for Assembler<'a> {
fn vrcpph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VRCPPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphEmitter<Ymm, Ymm> for Assembler<'a> {
fn vrcpph(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VRCPPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphEmitter<Ymm, Mem> for Assembler<'a> {
fn vrcpph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VRCPPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphEmitter<Zmm, Zmm> for Assembler<'a> {
fn vrcpph(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VRCPPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphEmitter<Zmm, Mem> for Assembler<'a> {
fn vrcpph(&mut self, op0: Zmm, op1: Mem) {
self.emit(VRCPPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VRCPPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VrcpphMaskEmitter<A, B> {
fn vrcpph_mask(&mut self, op0: A, op1: B);
}
impl<'a> VrcpphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vrcpph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VRCPPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vrcpph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VRCPPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vrcpph_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VRCPPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vrcpph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VRCPPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vrcpph_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VRCPPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vrcpph_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VRCPPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VRCPPH_MASKZ`.
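/// Zero-masking form of `VRCPPH`: destination elements not selected by the write mask are zeroed.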
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VrcpphMaskzEmitter<A, B> {
fn vrcpph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> VrcpphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vrcpph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VRCPPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vrcpph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VRCPPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vrcpph_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VRCPPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vrcpph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VRCPPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vrcpph_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VRCPPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrcpphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vrcpph_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VRCPPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VRCPSH`.
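/// Computes the approximate reciprocal of the low FP16 value of the second source operand; the upper elements of the destination are copied from the first source operand.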
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VrcpshEmitter<A, B, C> {
fn vrcpsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrcpshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vrcpsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VRCPSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrcpshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vrcpsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VRCPSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRCPSH_MASK`.
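/// Merge-masking form of `VRCPSH`.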
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VrcpshMaskEmitter<A, B, C> {
fn vrcpsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrcpshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vrcpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VRCPSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrcpshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vrcpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VRCPSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRCPSH_MASKZ`.
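/// Zero-masking form of `VRCPSH`.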
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VrcpshMaskzEmitter<A, B, C> {
fn vrcpsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrcpshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vrcpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VRCPSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrcpshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vrcpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VRCPSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCEPH`.
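/// Performs the reduction transformation on packed FP16 values: each result is the source element minus that element rounded under the control of the immediate.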
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VreducephEmitter<A, B, C> {
fn vreduceph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VreducephEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vreduceph(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VREDUCEPH128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vreduceph(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vreduceph(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VREDUCEPH256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vreduceph(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vreduceph(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VREDUCEPH512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vreduceph(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCEPH_MASK`.
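/// Merge-masking form of `VREDUCEPH`.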
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VreducephMaskEmitter<A, B, C> {
fn vreduceph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VreducephMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vreduceph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VREDUCEPH128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vreduceph_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vreduceph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VREDUCEPH256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vreduceph_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vreduceph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VREDUCEPH512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vreduceph_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCEPH_MASK_SAE`.
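/// Merge-masking form of `VREDUCEPH` with suppress-all-exceptions (`{sae}`) semantics, so no floating-point exception flags are raised.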
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VreducephMaskSaeEmitter<A, B, C> {
fn vreduceph_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VreducephMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vreduceph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VREDUCEPH512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCEPH_MASKZ`.
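/// Zero-masking form of `VREDUCEPH`.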
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VreducephMaskzEmitter<A, B, C> {
fn vreduceph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VreducephMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vreduceph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VREDUCEPH128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vreduceph_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vreduceph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VREDUCEPH256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vreduceph_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vreduceph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VREDUCEPH512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VreducephMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vreduceph_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VREDUCEPH512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCEPH_MASKZ_SAE`.
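/// Zero-masking form of `VREDUCEPH` with suppress-all-exceptions (`{sae}`) semantics.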
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VreducephMaskzSaeEmitter<A, B, C> {
fn vreduceph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VreducephMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vreduceph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VREDUCEPH512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCEPH_SAE`.
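/// Form of `VREDUCEPH` with suppress-all-exceptions (`{sae}`) semantics.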
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VreducephSaeEmitter<A, B, C> {
fn vreduceph_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VreducephSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vreduceph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VREDUCEPH512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VREDUCESH`.
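/// Scalar form of `VREDUCEPH`: operates on the low FP16 element of the second source operand; the upper elements of the destination are copied from the first source operand.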
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VreduceshEmitter<A, B, C, D> {
fn vreducesh(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VreduceshEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vreducesh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VREDUCESHRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VreduceshEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vreducesh(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VREDUCESHRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VREDUCESH_MASK`.
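/// Merge-masking form of `VREDUCESH`.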
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VreduceshMaskEmitter<A, B, C, D> {
fn vreducesh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VreduceshMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vreducesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VREDUCESHRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VreduceshMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vreducesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VREDUCESHRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VREDUCESH_MASK_SAE`.
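/// Merge-masking form of `VREDUCESH` with suppress-all-exceptions (`{sae}`) semantics.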
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VreduceshMaskSaeEmitter<A, B, C, D> {
fn vreducesh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VreduceshMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vreducesh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VREDUCESHRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VREDUCESH_MASKZ`.
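/// Zero-masking form of `VREDUCESH`.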
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VreduceshMaskzEmitter<A, B, C, D> {
fn vreducesh_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VreduceshMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vreducesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VREDUCESHRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VreduceshMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vreducesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VREDUCESHRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VREDUCESH_MASKZ_SAE`.
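/// Zero-masking form of `VREDUCESH` with suppress-all-exceptions (`{sae}`) semantics.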
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VreduceshMaskzSaeEmitter<A, B, C, D> {
fn vreducesh_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VreduceshMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vreducesh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VREDUCESHRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VREDUCESH_SAE`.
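/// Form of `VREDUCESH` with suppress-all-exceptions (`{sae}`) semantics.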
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VreduceshSaeEmitter<A, B, C, D> {
fn vreducesh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VreduceshSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vreducesh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VREDUCESHRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRNDSCALEPH`.
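/// Rounds the packed FP16 source elements to the number of fraction bits specified by the immediate, which also selects the rounding mode.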
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
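///
/// A minimal usage sketch, not a verbatim API reference: it assumes `asm`,
/// `Zmm` constants `zmm0`/`zmm1`, and an `Imm` built from a raw `imm8`
/// (hypothetical names; the exact `Imm` constructor may differ).
///
/// ```ignore
/// // Round every FP16 lane of zmm1 to 4 fraction bits (imm8 = 0x04).
/// asm.vrndscaleph(zmm0, zmm1, Imm::from(0x04));
/// ```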
pub trait VrndscalephEmitter<A, B, C> {
fn vrndscaleph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrndscalephEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscaleph(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VRNDSCALEPH128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vrndscaleph(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VRNDSCALEPH256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vrndscaleph(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VRNDSCALEPH512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRNDSCALEPH_MASK`.
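/// Merge-masking form of `VRNDSCALEPH`.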
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VrndscalephMaskEmitter<A, B, C> {
fn vrndscaleph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrndscalephMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscaleph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VRNDSCALEPH128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vrndscaleph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VRNDSCALEPH256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vrndscaleph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VRNDSCALEPH512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRNDSCALEPH_MASK_SAE`.
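/// Merge-masking form of `VRNDSCALEPH` with suppress-all-exceptions (`{sae}`) semantics.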
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VrndscalephMaskSaeEmitter<A, B, C> {
fn vrndscaleph_mask_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrndscalephMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vrndscaleph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VRNDSCALEPH512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRNDSCALEPH_MASKZ`.
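/// Zero-masking form of `VRNDSCALEPH`.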
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VrndscalephMaskzEmitter<A, B, C> {
fn vrndscaleph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrndscalephMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscaleph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
self.emit(VRNDSCALEPH128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
fn vrndscaleph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
self.emit(VRNDSCALEPH256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vrndscaleph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VRNDSCALEPH512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrndscalephMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
fn vrndscaleph_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
self.emit(VRNDSCALEPH512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRNDSCALEPH_MASKZ_SAE`.
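/// Zero-masking form of `VRNDSCALEPH` with suppress-all-exceptions (`{sae}`) semantics.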
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VrndscalephMaskzSaeEmitter<A, B, C> {
fn vrndscaleph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrndscalephMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vrndscaleph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VRNDSCALEPH512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRNDSCALEPH_SAE`.
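/// Form of `VRNDSCALEPH` with suppress-all-exceptions (`{sae}`) semantics.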
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
pub trait VrndscalephSaeEmitter<A, B, C> {
fn vrndscaleph_sae(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrndscalephSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
fn vrndscaleph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
self.emit(VRNDSCALEPH512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRNDSCALESH`.
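/// Scalar form of `VRNDSCALEPH`: rounds the low FP16 element of the second source operand; the upper elements of the destination are copied from the first source operand.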
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VrndscaleshEmitter<A, B, C, D> {
fn vrndscalesh(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VrndscaleshEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscalesh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VRNDSCALESHRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VrndscaleshEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vrndscalesh(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VRNDSCALESHRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRNDSCALESH_MASK`.
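/// Merge-masking form of `VRNDSCALESH`.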
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VrndscaleshMaskEmitter<A, B, C, D> {
fn vrndscalesh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VrndscaleshMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscalesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VRNDSCALESHRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VrndscaleshMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vrndscalesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VRNDSCALESHRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRNDSCALESH_MASK_SAE`.
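/// Merge-masking form of `VRNDSCALESH` with suppress-all-exceptions (`{sae}`) semantics.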
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VrndscaleshMaskSaeEmitter<A, B, C, D> {
fn vrndscalesh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VrndscaleshMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscalesh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VRNDSCALESHRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRNDSCALESH_MASKZ`.
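/// Zero-masking form of `VRNDSCALESH`.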
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VrndscaleshMaskzEmitter<A, B, C, D> {
fn vrndscalesh_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VrndscaleshMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscalesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VRNDSCALESHRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
impl<'a> VrndscaleshMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
fn vrndscalesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
self.emit(VRNDSCALESHRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRNDSCALESH_MASKZ_SAE`.
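/// Zero-masking form of `VRNDSCALESH` with suppress-all-exceptions (`{sae}`) semantics.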
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VrndscaleshMaskzSaeEmitter<A, B, C, D> {
fn vrndscalesh_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VrndscaleshMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscalesh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VRNDSCALESHRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRNDSCALESH_SAE`.
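/// Form of `VRNDSCALESH` with suppress-all-exceptions (`{sae}`) semantics.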
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
pub trait VrndscaleshSaeEmitter<A, B, C, D> {
fn vrndscalesh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
}
impl<'a> VrndscaleshSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
fn vrndscalesh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
self.emit(VRNDSCALESHRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
}
}
/// `VRSQRTPH`.
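/// Computes the approximate reciprocal square roots of the packed FP16 values in the source operand.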
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VrsqrtphEmitter<A, B> {
fn vrsqrtph(&mut self, op0: A, op1: B);
}
impl<'a> VrsqrtphEmitter<Xmm, Xmm> for Assembler<'a> {
fn vrsqrtph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VRSQRTPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphEmitter<Xmm, Mem> for Assembler<'a> {
fn vrsqrtph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VRSQRTPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphEmitter<Ymm, Ymm> for Assembler<'a> {
fn vrsqrtph(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VRSQRTPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphEmitter<Ymm, Mem> for Assembler<'a> {
fn vrsqrtph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VRSQRTPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphEmitter<Zmm, Zmm> for Assembler<'a> {
fn vrsqrtph(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VRSQRTPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphEmitter<Zmm, Mem> for Assembler<'a> {
fn vrsqrtph(&mut self, op0: Zmm, op1: Mem) {
self.emit(VRSQRTPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VRSQRTPH_MASK`.
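/// Merge-masking form of `VRSQRTPH`.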
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VrsqrtphMaskEmitter<A, B> {
fn vrsqrtph_mask(&mut self, op0: A, op1: B);
}
impl<'a> VrsqrtphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vrsqrtph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VRSQRTPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vrsqrtph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VRSQRTPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vrsqrtph_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VRSQRTPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vrsqrtph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VRSQRTPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vrsqrtph_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VRSQRTPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vrsqrtph_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VRSQRTPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VRSQRTPH_MASKZ`.
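/// Zero-masking form of `VRSQRTPH`.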
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VrsqrtphMaskzEmitter<A, B> {
fn vrsqrtph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> VrsqrtphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vrsqrtph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VRSQRTPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vrsqrtph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VRSQRTPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vrsqrtph_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VRSQRTPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vrsqrtph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VRSQRTPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vrsqrtph_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VRSQRTPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VrsqrtphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vrsqrtph_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VRSQRTPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VRSQRTSH`.
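/// Computes the approximate reciprocal square root of the low FP16 value of the second source operand; the upper elements of the destination are copied from the first source operand.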
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VrsqrtshEmitter<A, B, C> {
fn vrsqrtsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrsqrtshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vrsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VRSQRTSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrsqrtshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vrsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VRSQRTSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRSQRTSH_MASK`.
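/// Merge-masking form of `VRSQRTSH`.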
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VrsqrtshMaskEmitter<A, B, C> {
fn vrsqrtsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrsqrtshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vrsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VRSQRTSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrsqrtshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vrsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VRSQRTSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VRSQRTSH_MASKZ`.
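/// Zero-masking form of `VRSQRTSH`.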
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VrsqrtshMaskzEmitter<A, B, C> {
fn vrsqrtsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VrsqrtshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vrsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VRSQRTSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VrsqrtshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vrsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VRSQRTSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFPH`.
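/// Scales the packed FP16 elements of the first source operand by two raised to the power of the floor of the corresponding elements in the second source operand.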
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VscalefphEmitter<A, B, C> {
fn vscalefph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vscalefph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSCALEFPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vscalefph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSCALEFPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vscalefph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSCALEFPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vscalefph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSCALEFPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vscalefph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSCALEFPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFPH_ER`.
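/// Form of `VSCALEFPH` with embedded rounding control (`{er}`), which overrides the MXCSR rounding mode.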
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VscalefphErEmitter<A, B, C> {
fn vscalefph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vscalefph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSCALEFPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFPH_MASK`.
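/// Merge-masking form of `VSCALEFPH`.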
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VscalefphMaskEmitter<A, B, C> {
fn vscalefph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vscalefph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSCALEFPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vscalefph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSCALEFPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vscalefph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSCALEFPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vscalefph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSCALEFPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vscalefph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSCALEFPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFPH_MASK_ER`.
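/// Merge-masking form of `VSCALEFPH` with embedded rounding control (`{er}`).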
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VscalefphMaskErEmitter<A, B, C> {
fn vscalefph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vscalefph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSCALEFPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFPH_MASKZ`.
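/// Zero-masking form of `VSCALEFPH`.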
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VscalefphMaskzEmitter<A, B, C> {
fn vscalefph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vscalefph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSCALEFPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vscalefph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSCALEFPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vscalefph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSCALEFPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vscalefph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSCALEFPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vscalefph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSCALEFPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFPH_MASKZ_ER`.
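/// Zero-masking form of `VSCALEFPH` with embedded rounding control (`{er}`).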
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VscalefphMaskzErEmitter<A, B, C> {
fn vscalefph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vscalefph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSCALEFPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFSH`.
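/// Scalar form of `VSCALEFPH`: scales the low FP16 element of the first source operand by the low element of the second; the upper elements of the destination are copied from the first source operand.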
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VscalefshEmitter<A, B, C> {
fn vscalefsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vscalefsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSCALEFSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFSH_ER`.
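/// Form of `VSCALEFSH` with embedded rounding control (`{er}`).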
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VscalefshErEmitter<A, B, C> {
fn vscalefsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFSH_MASK`.
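/// Merge-masking form of `VSCALEFSH`.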
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VscalefshMaskEmitter<A, B, C> {
fn vscalefsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vscalefsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSCALEFSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFSH_MASK_ER`.
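/// Merge-masking form of `VSCALEFSH` with embedded rounding control (`{er}`).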
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VscalefshMaskErEmitter<A, B, C> {
fn vscalefsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFSH_MASKZ`.
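/// Zero-masking form of `VSCALEFSH`.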
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VscalefshMaskzEmitter<A, B, C> {
fn vscalefsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VscalefshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vscalefsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSCALEFSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSCALEFSH_MASKZ_ER`.
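/// Zero-masking form of `VSCALEFSH` with embedded rounding control (`{er}`).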
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VscalefshMaskzErEmitter<A, B, C> {
fn vscalefsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VscalefshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vscalefsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSCALEFSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSM4KEY4`.
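/// Performs four rounds of SM4 key expansion, operating on each 128-bit lane independently.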
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vsm4key4Emitter<A, B, C> {
fn vsm4key4(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vsm4key4Emitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsm4key4(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSM4KEY4_128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4key4Emitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsm4key4(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSM4KEY4_128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4key4Emitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vsm4key4(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSM4KEY4_256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4key4Emitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vsm4key4(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSM4KEY4_256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4key4Emitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsm4key4(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSM4KEY4_512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4key4Emitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vsm4key4(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSM4KEY4_512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSM4RNDS4`.
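/// Performs four rounds of SM4 encryption, operating on each 128-bit lane independently.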
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait Vsm4rnds4Emitter<A, B, C> {
fn vsm4rnds4(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> Vsm4rnds4Emitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsm4rnds4(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSM4RNDS4_128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4rnds4Emitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsm4rnds4(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSM4RNDS4_128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4rnds4Emitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vsm4rnds4(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSM4RNDS4_256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4rnds4Emitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vsm4rnds4(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSM4RNDS4_256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4rnds4Emitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsm4rnds4(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSM4RNDS4_512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> Vsm4rnds4Emitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vsm4rnds4(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSM4RNDS4_512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSQRTPH`.
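/// Computes the square roots of the packed FP16 values in the source operand.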
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VsqrtphEmitter<A, B> {
fn vsqrtph(&mut self, op0: A, op1: B);
}
impl<'a> VsqrtphEmitter<Xmm, Xmm> for Assembler<'a> {
fn vsqrtph(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VSQRTPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphEmitter<Xmm, Mem> for Assembler<'a> {
fn vsqrtph(&mut self, op0: Xmm, op1: Mem) {
self.emit(VSQRTPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphEmitter<Ymm, Ymm> for Assembler<'a> {
fn vsqrtph(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VSQRTPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphEmitter<Ymm, Mem> for Assembler<'a> {
fn vsqrtph(&mut self, op0: Ymm, op1: Mem) {
self.emit(VSQRTPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphEmitter<Zmm, Zmm> for Assembler<'a> {
fn vsqrtph(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VSQRTPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphEmitter<Zmm, Mem> for Assembler<'a> {
fn vsqrtph(&mut self, op0: Zmm, op1: Mem) {
self.emit(VSQRTPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VSQRTPH_ER`.
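/// Form of `VSQRTPH` with embedded rounding control (`{er}`).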
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VsqrtphErEmitter<A, B> {
fn vsqrtph_er(&mut self, op0: A, op1: B);
}
impl<'a> VsqrtphErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vsqrtph_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VSQRTPH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VSQRTPH_MASK`.
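/// Merge-masking form of `VSQRTPH`.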
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VsqrtphMaskEmitter<A, B> {
fn vsqrtph_mask(&mut self, op0: A, op1: B);
}
impl<'a> VsqrtphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
fn vsqrtph_mask(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VSQRTPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskEmitter<Xmm, Mem> for Assembler<'a> {
fn vsqrtph_mask(&mut self, op0: Xmm, op1: Mem) {
self.emit(VSQRTPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
fn vsqrtph_mask(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VSQRTPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskEmitter<Ymm, Mem> for Assembler<'a> {
fn vsqrtph_mask(&mut self, op0: Ymm, op1: Mem) {
self.emit(VSQRTPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
fn vsqrtph_mask(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VSQRTPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskEmitter<Zmm, Mem> for Assembler<'a> {
fn vsqrtph_mask(&mut self, op0: Zmm, op1: Mem) {
self.emit(VSQRTPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VSQRTPH_MASK_ER`.
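/// Merge-masking form of `VSQRTPH` with embedded rounding control (`{er}`).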
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VsqrtphMaskErEmitter<A, B> {
fn vsqrtph_mask_er(&mut self, op0: A, op1: B);
}
impl<'a> VsqrtphMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vsqrtph_mask_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VSQRTPH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VSQRTPH_MASKZ`.
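/// Zero-masking form of `VSQRTPH`.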
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VsqrtphMaskzEmitter<A, B> {
fn vsqrtph_maskz(&mut self, op0: A, op1: B);
}
impl<'a> VsqrtphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
fn vsqrtph_maskz(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VSQRTPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
fn vsqrtph_maskz(&mut self, op0: Xmm, op1: Mem) {
self.emit(VSQRTPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
fn vsqrtph_maskz(&mut self, op0: Ymm, op1: Ymm) {
self.emit(VSQRTPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
fn vsqrtph_maskz(&mut self, op0: Ymm, op1: Mem) {
self.emit(VSQRTPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
fn vsqrtph_maskz(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VSQRTPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VsqrtphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
fn vsqrtph_maskz(&mut self, op0: Zmm, op1: Mem) {
self.emit(VSQRTPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VSQRTPH_MASKZ_ER`.
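/// Zero-masking form of `VSQRTPH` with embedded rounding control (`{er}`).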
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
pub trait VsqrtphMaskzErEmitter<A, B> {
fn vsqrtph_maskz_er(&mut self, op0: A, op1: B);
}
impl<'a> VsqrtphMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
fn vsqrtph_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
self.emit(VSQRTPH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VSQRTSH`.
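/// Computes the square root of the low FP16 value of the second source operand; the upper elements of the destination are copied from the first source operand.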
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsqrtshEmitter<A, B, C> {
fn vsqrtsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsqrtshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSQRTSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsqrtshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSQRTSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSQRTSH_ER`.
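/// Form of `VSQRTSH` with embedded rounding control (`{er}`).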
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsqrtshErEmitter<A, B, C> {
fn vsqrtsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsqrtshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsqrtsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSQRTSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSQRTSH_MASK`.
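/// Merge-masking form of `VSQRTSH`.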
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsqrtshMaskEmitter<A, B, C> {
fn vsqrtsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsqrtshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSQRTSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsqrtshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSQRTSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSQRTSH_MASK_ER`.
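/// Merge-masking form of `VSQRTSH` with embedded rounding control (`{er}`).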
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsqrtshMaskErEmitter<A, B, C> {
fn vsqrtsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsqrtshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsqrtsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSQRTSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSQRTSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsqrtshMaskzEmitter<A, B, C> {
fn vsqrtsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsqrtshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSQRTSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsqrtshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSQRTSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSQRTSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsqrtshMaskzErEmitter<A, B, C> {
fn vsqrtsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsqrtshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsqrtsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSQRTSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
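///
/// A sketch of the three vector widths (register helpers assumed, not defined in this file):
///
/// ```ignore
/// asm.vsubph(xmm0(), xmm1(), xmm2()); // 128-bit: 8 half-precision lanes
/// asm.vsubph(ymm0(), ymm1(), ymm2()); // 256-bit: 16 lanes
/// asm.vsubph(zmm0(), zmm1(), zmm2()); // 512-bit: 32 lanes
/// ```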
pub trait VsubphEmitter<A, B, C> {
fn vsubph(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsubph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSUBPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vsubph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSUBPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vsubph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSUBPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsubph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSUBPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vsubph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSUBPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VsubphErEmitter<A, B, C> {
fn vsubph_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsubph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSUBPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VsubphMaskEmitter<A, B, C> {
fn vsubph_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsubph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSUBPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vsubph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSUBPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vsubph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSUBPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsubph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSUBPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vsubph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSUBPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VsubphMaskErEmitter<A, B, C> {
fn vsubph_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsubph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSUBPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VsubphMaskzEmitter<A, B, C> {
fn vsubph_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsubph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSUBPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
fn vsubph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
self.emit(VSUBPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
fn vsubph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
self.emit(VSUBPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsubph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSUBPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
fn vsubph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
self.emit(VSUBPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VsubphMaskzErEmitter<A, B, C> {
fn vsubph_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
fn vsubph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
self.emit(VSUBPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsubshEmitter<A, B, C> {
fn vsubsh(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsubsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSUBSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsubshErEmitter<A, B, C> {
fn vsubsh_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsubshMaskEmitter<A, B, C> {
fn vsubsh_mask(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsubsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSUBSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsubshMaskErEmitter<A, B, C> {
fn vsubsh_mask_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsubshMaskzEmitter<A, B, C> {
fn vsubsh_maskz(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
impl<'a> VsubshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
fn vsubsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
self.emit(VSUBSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VSUBSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
pub trait VsubshMaskzErEmitter<A, B, C> {
fn vsubsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
}
impl<'a> VsubshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
fn vsubsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
self.emit(VSUBSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
}
}
/// `VUCOMISH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// +---+----------+
/// ```
pub trait VucomishEmitter<A, B> {
fn vucomish(&mut self, op0: A, op1: B);
}
impl<'a> VucomishEmitter<Xmm, Xmm> for Assembler<'a> {
fn vucomish(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VUCOMISHRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> VucomishEmitter<Xmm, Mem> for Assembler<'a> {
fn vucomish(&mut self, op0: Xmm, op1: Mem) {
self.emit(VUCOMISHRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `VUCOMISH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Xmm |
/// +---+----------+
/// ```
pub trait VucomishSaeEmitter<A, B> {
fn vucomish_sae(&mut self, op0: A, op1: B);
}
impl<'a> VucomishSaeEmitter<Xmm, Xmm> for Assembler<'a> {
fn vucomish_sae(&mut self, op0: Xmm, op1: Xmm) {
self.emit(VUCOMISHRR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `XCHG` (XCHG).
/// Exchanges the contents of the destination (first) and source (second) operands. The operands can be two general-purpose registers or a register and a memory location. If a memory operand is referenced, the processor’s locking protocol is automatically implemented for the duration of the exchange operation, regardless of the presence or absence of the LOCK prefix or of the value of the IOPL. (See the LOCK prefix description in the Intel manual for more information on the locking protocol.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XCHG.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------+
/// | # | Operands |
/// +---+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | Gpd, Gpd |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpw, Gpw |
/// | 5 | Mem, GpbLo |
/// | 6 | Mem, Gpd |
/// | 7 | Mem, Gpq |
/// | 8 | Mem, Gpw |
/// +---+--------------+
/// ```
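///
/// A sketch; `rax()`/`rbx()` and `Mem::base` stand in for this crate's real register and memory-operand constructors:
///
/// ```ignore
/// asm.xchg(rax(), rbx());            // swap two registers
/// asm.xchg(Mem::base(rdi()), rax()); // memory form: implicitly locked
/// ```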
pub trait XchgEmitter<A, B> {
fn xchg(&mut self, op0: A, op1: B);
}
impl<'a> XchgEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn xchg(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(XCHG8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Mem, GpbLo> for Assembler<'a> {
fn xchg(&mut self, op0: Mem, op1: GpbLo) {
self.emit(XCHG8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Gpw, Gpw> for Assembler<'a> {
fn xchg(&mut self, op0: Gpw, op1: Gpw) {
self.emit(XCHG16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Mem, Gpw> for Assembler<'a> {
fn xchg(&mut self, op0: Mem, op1: Gpw) {
self.emit(XCHG16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Gpd, Gpd> for Assembler<'a> {
fn xchg(&mut self, op0: Gpd, op1: Gpd) {
self.emit(XCHG32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Mem, Gpd> for Assembler<'a> {
fn xchg(&mut self, op0: Mem, op1: Gpd) {
self.emit(XCHG32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Gpq, Gpq> for Assembler<'a> {
fn xchg(&mut self, op0: Gpq, op1: Gpq) {
self.emit(XCHG64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XchgEmitter<Mem, Gpq> for Assembler<'a> {
fn xchg(&mut self, op0: Mem, op1: Gpq) {
self.emit(XCHG64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
/// `XLATB` (XLATB).
/// Locates a byte entry in a table in memory, using the contents of the AL register as a table index, then copies the contents of the table entry back into the AL register. The index in the AL register is treated as an unsigned integer. The XLAT and XLATB instructions get the base address of the table in memory from either the DS:EBX or the DS:BX registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). (The DS segment may be overridden with a segment override prefix.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XLAT%3AXLATB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
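///
/// A sketch; AL (the index) and the table base register are assumed to have been loaded beforehand:
///
/// ```ignore
/// asm.xlatb(); // AL = table[AL]
/// ```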
pub trait XlatbEmitter {
fn xlatb(&mut self);
}
impl<'a> XlatbEmitter for Assembler<'a> {
fn xlatb(&mut self) {
self.emit(XLATB, &NOREG, &NOREG, &NOREG, &NOREG);
}
}
/// `XOR` (XOR).
/// Performs a bitwise exclusive OR (XOR) operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is 1 if the corresponding bits of the operands are different; each bit is 0 if the corresponding bits are the same.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XOR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
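///
/// A sketch; `eax()`, `ecx()`, and `Imm::from` are placeholders for the crate's register and immediate constructors:
///
/// ```ignore
/// asm.xor(eax(), eax());           // classic zeroing idiom: eax = 0
/// asm.xor(ecx(), Imm::from(0xFF)); // flip the low 8 bits of ecx
/// ```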
pub trait XorEmitter<A, B> {
fn xor(&mut self, op0: A, op1: B);
}
impl<'a> XorEmitter<GpbLo, GpbLo> for Assembler<'a> {
fn xor(&mut self, op0: GpbLo, op1: GpbLo) {
self.emit(XOR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Mem, GpbLo> for Assembler<'a> {
fn xor(&mut self, op0: Mem, op1: GpbLo) {
self.emit(XOR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpw, Gpw> for Assembler<'a> {
fn xor(&mut self, op0: Gpw, op1: Gpw) {
self.emit(XOR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Mem, Gpw> for Assembler<'a> {
fn xor(&mut self, op0: Mem, op1: Gpw) {
self.emit(XOR16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpd, Gpd> for Assembler<'a> {
fn xor(&mut self, op0: Gpd, op1: Gpd) {
self.emit(XOR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Mem, Gpd> for Assembler<'a> {
fn xor(&mut self, op0: Mem, op1: Gpd) {
self.emit(XOR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpq, Gpq> for Assembler<'a> {
fn xor(&mut self, op0: Gpq, op1: Gpq) {
self.emit(XOR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Mem, Gpq> for Assembler<'a> {
fn xor(&mut self, op0: Mem, op1: Gpq) {
self.emit(XOR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<GpbLo, Mem> for Assembler<'a> {
fn xor(&mut self, op0: GpbLo, op1: Mem) {
self.emit(XOR8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpw, Mem> for Assembler<'a> {
fn xor(&mut self, op0: Gpw, op1: Mem) {
self.emit(XOR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpd, Mem> for Assembler<'a> {
fn xor(&mut self, op0: Gpd, op1: Mem) {
self.emit(XOR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpq, Mem> for Assembler<'a> {
fn xor(&mut self, op0: Gpq, op1: Mem) {
self.emit(XOR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<GpbLo, Imm> for Assembler<'a> {
fn xor(&mut self, op0: GpbLo, op1: Imm) {
self.emit(XOR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpw, Imm> for Assembler<'a> {
fn xor(&mut self, op0: Gpw, op1: Imm) {
self.emit(XOR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpd, Imm> for Assembler<'a> {
fn xor(&mut self, op0: Gpd, op1: Imm) {
self.emit(XOR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Gpq, Imm> for Assembler<'a> {
fn xor(&mut self, op0: Gpq, op1: Imm) {
self.emit(XOR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> XorEmitter<Mem, Imm> for Assembler<'a> {
fn xor(&mut self, op0: Mem, op1: Imm) {
self.emit(XOR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
}
}
impl<'a> Assembler<'a> {
/// `AADD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
#[inline]
pub fn aadd<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AaddEmitter<A, B> {
<Self as AaddEmitter<A, B>>::aadd(self, op0, op1);
}
/// `AAND`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
#[inline]
pub fn aand<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AandEmitter<A, B> {
<Self as AandEmitter<A, B>>::aand(self, op0, op1);
}
/// `ADC` (ADC).
/// Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADC.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
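///
/// Because ADC folds CF into the sum, it chains with `add` for multi-word arithmetic. A sketch of a 128-bit add (RDX:RAX += RCX:RBX; register helpers assumed):
///
/// ```ignore
/// asm.add(rax(), rbx()); // low halves; sets CF on unsigned overflow
/// asm.adc(rdx(), rcx()); // high halves plus the carry
/// ```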
#[inline]
pub fn adc<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AdcEmitter<A, B> {
<Self as AdcEmitter<A, B>>::adc(self, op0, op1);
}
/// `ADD` (ADD).
/// Adds the destination operand (first operand) and the source operand (second operand) and then stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn add<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AddEmitter<A, B> {
<Self as AddEmitter<A, B>>::add(self, op0, op1);
}
/// `AND` (AND).
/// Performs a bitwise AND operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is set to 1 if both corresponding bits of the first and second operands are 1; otherwise, it is set to 0.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AND.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
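///
/// A sketch of the common masking use (helpers like `eax()` and `Imm::from` are assumed):
///
/// ```ignore
/// asm.and(eax(), Imm::from(0x0F)); // keep only the low nibble of eax
/// ```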
#[inline]
pub fn and<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AndEmitter<A, B> {
<Self as AndEmitter<A, B>>::and(self, op0, op1);
}
/// `AOR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
#[inline]
pub fn aor<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AorEmitter<A, B> {
<Self as AorEmitter<A, B>>::aor(self, op0, op1);
}
/// `AXOR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Gpd |
/// | 2 | Mem, Gpq |
/// +---+----------+
/// ```
#[inline]
pub fn axor<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: AxorEmitter<A, B> {
<Self as AxorEmitter<A, B>>::axor(self, op0, op1);
}
/// `BSF` (BSF).
/// Searches the source operand (second operand) for the least significant set bit (1 bit). If a least significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content of the source operand is 0, the content of the destination operand is undefined.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSF.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
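///
/// A sketch (register helpers assumed):
///
/// ```ignore
/// // ecx = index of the lowest set bit of eax (undefined when eax == 0).
/// asm.bsf(ecx(), eax());
/// ```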
#[inline]
pub fn bsf<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: BsfEmitter<A, B> {
<Self as BsfEmitter<A, B>>::bsf(self, op0, op1);
}
/// `BSR` (BSR).
/// Searches the source operand (second operand) for the most significant set bit (1 bit). If a most significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content of the source operand is 0, the content of the destination operand is undefined.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn bsr<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: BsrEmitter<A, B> {
<Self as BsrEmitter<A, B>>::bsr(self, op0, op1);
}
/// `BT` (BT).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset (specified by the second operand) and stores the value of the bit in the CF flag. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BT.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
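///
/// Since BT copies the selected bit into CF, it pairs naturally with `jc`. A sketch; the helpers and the `bit_set` label handle are assumed:
///
/// ```ignore
/// asm.bt(eax(), Imm::from(3)); // CF = bit 3 of eax
/// asm.jc(bit_set);             // taken when that bit was 1
/// ```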
#[inline]
pub fn bt<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: BtEmitter<A, B> {
<Self as BtEmitter<A, B>>::bt(self, op0, op1);
}
/// `BTC` (BTC).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTC.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
#[inline]
pub fn btc<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: BtcEmitter<A, B> {
<Self as BtcEmitter<A, B>>::btc(self, op0, op1);
}
/// `BTR` (BTR).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and clears the selected bit in the bit string to 0. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
#[inline]
pub fn btr<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: BtrEmitter<A, B> {
<Self as BtrEmitter<A, B>>::btr(self, op0, op1);
}
/// `BTS` (BTS).
/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and sets the selected bit in the bit string to 1. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTS.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+----------+
/// | # | Operands |
/// +----+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Imm |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Imm |
/// | 7 | Mem, Gpd |
/// | 8 | Mem, Gpq |
/// | 9 | Mem, Gpw |
/// | 10 | Mem, Imm |
/// +----+----------+
/// ```
#[inline]
pub fn bts<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: BtsEmitter<A, B> {
<Self as BtsEmitter<A, B>>::bts(self, op0, op1);
}
/// `CALL` (CALL).
/// Saves procedure linking information on the stack and branches to the called procedure specified using the target operand. The target operand specifies the address of the first instruction in the called procedure. The operand can be an immediate value, a general-purpose register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CALL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Imm |
/// | 3 | Label |
/// | 4 | Mem |
/// | 5 | Sym |
/// +---+----------+
/// ```
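///
/// A sketch; `target` is a `Label` obtained from this assembler's label API (not shown in this file), and `rax()` is a placeholder helper:
///
/// ```ignore
/// asm.call(target); // direct call to a labeled location
/// asm.call(rax());  // indirect call through a register
/// ```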
#[inline]
pub fn call<A>(&mut self, op0: A)
where Assembler<'a>: CallEmitter<A> {
<Self as CallEmitter<A>>::call(self, op0);
}
/// `CALLF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn callf<A>(&mut self, op0: A)
where Assembler<'a>: CallfEmitter<A> {
<Self as CallfEmitter<A>>::callf(self, op0);
}
/// `CBW`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cbw(&mut self)
where Assembler<'a>: CbwEmitter {
<Self as CbwEmitter>::cbw(self);
}
/// `CDQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cdq(&mut self)
where Assembler<'a>: CdqEmitter {
<Self as CdqEmitter>::cdq(self);
}
/// `CDQE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cdqe(&mut self)
where Assembler<'a>: CdqeEmitter {
<Self as CdqeEmitter>::cdqe(self);
}
/// `CLC` (CLC).
/// Clears the CF flag in the EFLAGS register. Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn clc(&mut self)
where Assembler<'a>: ClcEmitter {
<Self as ClcEmitter>::clc(self);
}
/// `CLD` (CLD).
/// Clears the DF flag in the EFLAGS register. When the DF flag is set to 0, string operations increment the index registers (ESI and/or EDI). Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cld(&mut self)
where Assembler<'a>: CldEmitter {
<Self as CldEmitter>::cld(self);
}
/// `CLFLUSH` (CLFLUSH).
/// Invalidates from every level of the cache hierarchy in the cache coherence domain the cache line that contains the linear address specified with the memory operand. If that cache line contains modified data at any level of the cache hierarchy, that data is written back to memory. The source operand is a byte memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLFLUSH.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn clflush<A>(&mut self, op0: A)
where Assembler<'a>: ClflushEmitter<A> {
<Self as ClflushEmitter<A>>::clflush(self, op0);
}
/// `CLI` (CLI).
/// In most cases, CLI clears the IF flag in the EFLAGS register and no other flags are affected. Clearing the IF flag causes the processor to ignore maskable external interrupts. The IF flag and the CLI and STI instructions have no effect on the generation of exceptions and NMI interrupts.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLI.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cli(&mut self)
where Assembler<'a>: CliEmitter {
<Self as CliEmitter>::cli(self);
}
/// `CLTS` (CLTS).
/// Clears the task-switched (TS) flag in the CR0 register. This instruction is intended for use in operating-system procedures. It is a privileged instruction that can only be executed at a CPL of 0. It is allowed to be executed in real-address mode to allow initialization for protected mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLTS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn clts(&mut self)
where Assembler<'a>: CltsEmitter {
<Self as CltsEmitter>::clts(self);
}
/// `CMC` (CMC).
/// Complements the CF flag in the EFLAGS register. CMC operation is the same in non-64-bit modes and 64-bit mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cmc(&mut self)
where Assembler<'a>: CmcEmitter {
<Self as CmcEmitter>::cmc(self);
}
/// `CMP` (CMP).
/// Compares the first source operand with the second source operand and sets the status flags in the EFLAGS register according to the results. The comparison is performed by subtracting the second operand from the first operand and then setting the status flags in the same manner as the SUB instruction. When an immediate value is used as an operand, it is sign-extended to the length of the first operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMP.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
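///
/// CMP only sets flags, so it is normally followed by a conditional jump or move. A sketch (helpers and the `greater` label handle are assumed):
///
/// ```ignore
/// asm.cmp(eax(), Imm::from(10));
/// asm.jg(greater); // taken when eax > 10 (signed compare)
/// ```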
#[inline]
pub fn cmp<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: CmpEmitter<A, B> {
<Self as CmpEmitter<A, B>>::cmp(self, op0, op1);
}
/// `CMPS` (CMPS).
/// Compares the byte, word, doubleword, or quadword specified with the first source operand with the byte, word, doubleword, or quadword specified with the second source operand and sets the status flags in the EFLAGS register according to the results.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMPS%3ACMPSB%3ACMPSW%3ACMPSD%3ACMPSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cmps(&mut self)
where Assembler<'a>: CmpsEmitter {
<Self as CmpsEmitter>::cmps(self);
}
/// `CQO`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cqo(&mut self)
where Assembler<'a>: CqoEmitter {
<Self as CqoEmitter>::cqo(self);
}
/// `CWD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cwd(&mut self)
where Assembler<'a>: CwdEmitter {
<Self as CwdEmitter>::cwd(self);
}
/// `CWDE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn cwde(&mut self)
where Assembler<'a>: CwdeEmitter {
<Self as CwdeEmitter>::cwde(self);
}
/// `C_EX`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn c_ex(&mut self)
where Assembler<'a>: CExEmitter {
<Self as CExEmitter>::c_ex(self);
}
/// `C_SEP`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn c_sep(&mut self)
where Assembler<'a>: CSepEmitter {
<Self as CSepEmitter>::c_sep(self);
}
/// `DEC` (DEC).
/// Subtracts 1 from the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (To perform a decrement operation that updates the CF flag, use a SUB instruction with an immediate operand of 1.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DEC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn dec<A>(&mut self, op0: A)
where Assembler<'a>: DecEmitter<A> {
<Self as DecEmitter<A>>::dec(self, op0);
}
/// `DIV` (DIV).
/// Divides (unsigned) the value in the AX, DX:AX, EDX:EAX, or RDX:RAX registers (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, EDX:EAX, or RDX:RAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor). Division using a 64-bit operand is available only in 64-bit mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DIV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
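///
/// The dividend is implicit, so 32-bit code typically clears EDX first. A sketch (register helpers assumed):
///
/// ```ignore
/// asm.xor(edx(), edx()); // EDX:EAX = zero-extended EAX
/// asm.div(ecx());        // EAX = quotient, EDX = remainder
/// ```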
#[inline]
pub fn div<A>(&mut self, op0: A)
where Assembler<'a>: DivEmitter<A> {
<Self as DivEmitter<A>>::div(self, op0);
}
/// `ENTER` (ENTER).
/// Creates a stack frame (comprising space for dynamic storage and storage for 1 to 32 frame pointers) for a procedure. The first operand (imm16) specifies the size of the dynamic storage in the stack frame (that is, the number of bytes dynamically allocated on the stack for the procedure). The second operand (imm8) gives the lexical nesting level (0 to 31) of the procedure. The nesting level (imm8 mod 32) and the OperandSize attribute determine the size in bytes of the storage space for frame pointers.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ENTER.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
#[inline]
pub fn enter<A>(&mut self, op0: A)
where Assembler<'a>: EnterEmitter<A> {
<Self as EnterEmitter<A>>::enter(self, op0);
}
/// `FWAIT` (FWAIT).
/// Causes the processor to check for and handle pending, unmasked, floating-point exceptions before proceeding. (FWAIT is an alternate mnemonic for WAIT.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/WAIT%3AFWAIT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn fwait(&mut self)
where Assembler<'a>: FwaitEmitter {
<Self as FwaitEmitter>::fwait(self);
}
/// `HLT` (HLT).
/// Stops instruction execution and places the processor in a HALT state. An enabled interrupt (including NMI and SMI), a debug exception, the BINIT# signal, the INIT# signal, or the RESET# signal will resume execution. If an interrupt (including NMI) is used to resume execution after a HLT instruction, the saved instruction pointer (CS:EIP) points to the instruction following the HLT instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/HLT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn hlt(&mut self)
where Assembler<'a>: HltEmitter {
<Self as HltEmitter>::hlt(self);
}
/// `IDIV` (IDIV).
/// Divides the (signed) value in the AX, DX:AX, EDX:EAX, or RDX:RAX registers (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, EDX:EAX, or RDX:RAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IDIV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
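///
/// Signed division wants the dividend sign-extended into EDX first, which is exactly what `cdq` does. A sketch (register helpers assumed):
///
/// ```ignore
/// asm.cdq();       // EDX:EAX = sign-extended EAX
/// asm.idiv(ecx()); // EAX = quotient, EDX = remainder
/// ```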
#[inline]
pub fn idiv<A>(&mut self, op0: A)
where Assembler<'a>: IdivEmitter<A> {
<Self as IdivEmitter<A>>::idiv(self, op0);
}
/// `IMUL` (IMUL).
/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn imul_1<A>(&mut self, op0: A)
where Assembler<'a>: ImulEmitter_1<A> {
<Self as ImulEmitter_1<A>>::imul_1(self, op0);
}
/// `IMUL` (IMUL).
/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn imul_2<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: ImulEmitter_2<A, B> {
<Self as ImulEmitter_2<A, B>>::imul_2(self, op0, op1);
}
/// `IMUL` (IMUL).
/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Gpd, Gpd, Imm |
/// | 2 | Gpd, Mem, Imm |
/// | 3 | Gpq, Gpq, Imm |
/// | 4 | Gpq, Mem, Imm |
/// | 5 | Gpw, Gpw, Imm |
/// | 6 | Gpw, Mem, Imm |
/// +---+---------------+
/// ```
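///
/// The three encoding forms are exposed as `imul_1`, `imul_2`, and `imul_3`. A sketch (register and immediate helpers assumed):
///
/// ```ignore
/// asm.imul_1(ecx());                       // EDX:EAX = EAX * ecx
/// asm.imul_2(eax(), ecx());                // eax = eax * ecx (truncated)
/// asm.imul_3(eax(), ecx(), Imm::from(10)); // eax = ecx * 10
/// ```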
#[inline]
pub fn imul_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: ImulEmitter_3<A, B, C> {
<Self as ImulEmitter_3<A, B, C>>::imul_3(self, op0, op1, op2);
}
/// `IN` (IN).
/// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn r#in(&mut self)
where Assembler<'a>: InEmitter {
<Self as InEmitter>::r#in(self);
}
/// `IN` (IN).
/// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands |
/// +---+------------+
/// | 1 | GpbLo, Imm |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Imm |
/// | 4 | Gpw, Imm |
/// +---+------------+
/// ```
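///
/// `in` is a Rust keyword, hence the raw-identifier method names. A sketch (`al()` and `Imm::from` are placeholder helpers):
///
/// ```ignore
/// asm.r#in_2(al(), Imm::from(0x60)); // AL = byte read from port 0x60
/// ```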
#[inline]
pub fn r#in_2<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: InEmitter_2<A, B> {
<Self as InEmitter_2<A, B>>::r#in_2(self, op0, op1);
}
/// `INC` (INC).
/// Adds 1 to the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (Use an ADD instruction with an immediate operand of 1 to perform an increment operation that does update the CF flag.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn inc<A>(&mut self, op0: A)
where Assembler<'a>: IncEmitter<A> {
<Self as IncEmitter<A>>::inc(self, op0);
}
/// `INS` (INS).
/// Copies the data from the I/O port specified with the source operand (second operand) to the destination operand (first operand). The source operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The destination operand is a memory location, the address of which is read from either the ES:DI, ES:EDI or the RDI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The ES segment cannot be overridden with a segment override prefix.) The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INS%3AINSB%3AINSW%3AINSD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn ins(&mut self)
where Assembler<'a>: InsEmitter {
<Self as InsEmitter>::ins(self);
}
/// `INT` (INT).
/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned immediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
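///
/// A sketch (`Imm::from` is a placeholder constructor):
///
/// ```ignore
/// asm.int(Imm::from(3)); // software interrupt 3; see also `int3` below
/// ```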
#[inline]
pub fn int<A>(&mut self, op0: A)
where Assembler<'a>: IntEmitter<A> {
<Self as IntEmitter<A>>::int(self, op0);
}
/// `INT1` (INT1).
/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned immediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn int1(&mut self)
where Assembler<'a>: Int1Emitter {
<Self as Int1Emitter>::int1(self);
}
/// `INT3` (INT3).
/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned immediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn int3(&mut self)
where Assembler<'a>: Int3Emitter {
<Self as Int3Emitter>::int3(self);
}
/// `IRET` (IRET).
/// Returns program control from an exception or interrupt handler to a program or procedure that was interrupted by an exception, an external interrupt, or a software-generated interrupt. These instructions are also used to perform a return from a nested task. (A nested task is created when a CALL instruction is used to initiate a task switch or when an interrupt or exception causes a task switch to an interrupt or exception handler.) See the section titled “Task Linking” in Chapter 8 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 3A.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IRET%3AIRETD%3AIRETQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn iret(&mut self)
where Assembler<'a>: IretEmitter {
<Self as IretEmitter>::iret(self);
}
/// `JA` (JA).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
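///
/// A sketch of an unsigned compare-and-branch; `above` is a `Label` created via the assembler's label API (not shown here):
///
/// ```ignore
/// asm.cmp(eax(), ebx());
/// asm.ja(above); // taken when eax > ebx (unsigned compare)
/// ```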
#[inline]
pub fn ja<A>(&mut self, op0: A)
where Assembler<'a>: JaEmitter<A> {
<Self as JaEmitter<A>>::ja(self, op0);
}
/// `JBE` (JBE).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jbe<A>(&mut self, op0: A)
where Assembler<'a>: JbeEmitter<A> {
<Self as JbeEmitter<A>>::jbe(self, op0);
}
/// `JC` (JC).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jc<A>(&mut self, op0: A)
where Assembler<'a>: JcEmitter<A> {
<Self as JcEmitter<A>>::jc(self, op0);
}
/// `JCXZ` (JCXZ).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction. JCXZ itself is the exception in this family: it jumps when the CX register is zero rather than testing the status flags.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jcxz<A>(&mut self, op0: A)
where Assembler<'a>: JcxzEmitter<A> {
<Self as JcxzEmitter<A>>::jcxz(self, op0);
}
/// `JG` (JG).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jg<A>(&mut self, op0: A)
where Assembler<'a>: JgEmitter<A> {
<Self as JgEmitter<A>>::jg(self, op0);
}
/// `JGE` (JGE).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jge<A>(&mut self, op0: A)
where Assembler<'a>: JgeEmitter<A> {
<Self as JgeEmitter<A>>::jge(self, op0);
}
/// `JL` (JL).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jl<A>(&mut self, op0: A)
where Assembler<'a>: JlEmitter<A> {
<Self as JlEmitter<A>>::jl(self, op0);
}
/// `JLE` (JLE).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jle<A>(&mut self, op0: A)
where Assembler<'a>: JleEmitter<A> {
<Self as JleEmitter<A>>::jle(self, op0);
}
/// `JMP` (JMP).
/// Transfers program control to a different point in the instruction stream without recording return information. The destination (target) operand specifies the address of the instruction being jumped to. This operand can be an immediate value, a general-purpose register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/JMP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Imm |
/// | 3 | Label |
/// | 4 | Mem |
/// | 5 | Sym |
/// +---+----------+
/// ```
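///
/// A minimal sketch of the direct and indirect forms; the `target` label and
/// the `rax` register constant are assumptions, not defined in this file:
///
/// ```ignore
/// asm.jmp(target);   // relative jump to a label
/// asm.jmp(rax);      // indirect jump through a Gpq register
/// ```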
#[inline]
pub fn jmp<A>(&mut self, op0: A)
where Assembler<'a>: JmpEmitter<A> {
<Self as JmpEmitter<A>>::jmp(self, op0);
}
/// `JMPF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn jmpf<A>(&mut self, op0: A)
where Assembler<'a>: JmpfEmitter<A> {
<Self as JmpfEmitter<A>>::jmpf(self, op0);
}
/// `JNC` (JNC).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jnc<A>(&mut self, op0: A)
where Assembler<'a>: JncEmitter<A> {
<Self as JncEmitter<A>>::jnc(self, op0);
}
/// `JNO` (JNO).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jno<A>(&mut self, op0: A)
where Assembler<'a>: JnoEmitter<A> {
<Self as JnoEmitter<A>>::jno(self, op0);
}
/// `JNP` (JNP).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jnp<A>(&mut self, op0: A)
where Assembler<'a>: JnpEmitter<A> {
<Self as JnpEmitter<A>>::jnp(self, op0);
}
/// `JNS` (JNS).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jns<A>(&mut self, op0: A)
where Assembler<'a>: JnsEmitter<A> {
<Self as JnsEmitter<A>>::jns(self, op0);
}
/// `JNZ` (JNZ).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jnz<A>(&mut self, op0: A)
where Assembler<'a>: JnzEmitter<A> {
<Self as JnzEmitter<A>>::jnz(self, op0);
}
/// `JO` (JO).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jo<A>(&mut self, op0: A)
where Assembler<'a>: JoEmitter<A> {
<Self as JoEmitter<A>>::jo(self, op0);
}
/// `JP` (JP).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jp<A>(&mut self, op0: A)
where Assembler<'a>: JpEmitter<A> {
<Self as JpEmitter<A>>::jp(self, op0);
}
/// `JS` (JS).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn js<A>(&mut self, op0: A)
where Assembler<'a>: JsEmitter<A> {
<Self as JsEmitter<A>>::js(self, op0);
}
/// `JZ` (JZ).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jz<A>(&mut self, op0: A)
where Assembler<'a>: JzEmitter<A> {
<Self as JzEmitter<A>>::jz(self, op0);
}
/// `JCC` (Jcc).
/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn jcc<A>(&mut self, op0: A)
where Assembler<'a>: JccEmitter<A> {
<Self as JccEmitter<A>>::jcc(self, op0);
}
/// `LAHF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn lahf(&mut self)
where Assembler<'a>: LahfEmitter {
<Self as LahfEmitter>::lahf(self);
}
/// `LAR` (LAR).
/// Loads the access rights from the segment descriptor specified by the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the flag register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. If the source operand is a memory address, only 16 bits of data are accessed. The destination operand is a general-purpose register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LAR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpw |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpw |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lar<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: LarEmitter<A, B> {
<Self as LarEmitter<A, B>>::lar(self, op0, op1);
}
/// `LDTILECFG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn ldtilecfg<A>(&mut self, op0: A)
where Assembler<'a>: LdtilecfgEmitter<A> {
<Self as LdtilecfgEmitter<A>>::ldtilecfg(self, op0);
}
/// `LEA` (LEA).
/// Computes the effective address of the second operand (the source operand) and stores it in the first operand (destination operand). The source operand is a memory address (offset part) specified with one of the processor's addressing modes; the destination operand is a general-purpose register. The address-size and operand-size attributes affect the action performed by this instruction, as shown in the table in the Intel manual. The operand-size attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the attribute of the code segment.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEA.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
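///
/// A minimal sketch of address arithmetic with LEA; the register constants
/// and the `Mem` constructor name are assumptions, not taken from this file:
///
/// ```ignore
/// // rdi = rsi + rcx*8 + 16, computed without touching EFLAGS.
/// asm.lea(rdi, Mem::base_index_scale_disp(rsi, rcx, 8, 16));
/// ```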
#[inline]
pub fn lea<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: LeaEmitter<A, B> {
<Self as LeaEmitter<A, B>>::lea(self, op0, op1);
}
/// `LEAVE` (LEAVE).
/// Releases the stack frame set up by an earlier ENTER instruction. The LEAVE instruction copies the frame pointer (in the EBP register) into the stack pointer register (ESP), which releases the stack space allocated to the stack frame. The old frame pointer (the frame pointer for the calling procedure that was saved by the ENTER instruction) is then popped from the stack into the EBP register, restoring the calling procedure’s stack frame.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEAVE.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
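///
/// A minimal sketch of a conventional epilogue; pairing LEAVE with RET is
/// equivalent to `mov rsp, rbp; pop rbp; ret`:
///
/// ```ignore
/// asm.leave();   // tear down the frame (restores rsp and rbp)
/// asm.ret();     // return to the caller
/// ```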
#[inline]
pub fn leave(&mut self)
where Assembler<'a>: LeaveEmitter {
<Self as LeaveEmitter>::leave(self);
}
/// `LFS` (LFS).
/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lfs<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: LfsEmitter<A, B> {
<Self as LfsEmitter<A, B>>::lfs(self, op0, op1);
}
/// `LGDT` (LGDT).
/// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If the operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lgdt<A>(&mut self, op0: A)
where Assembler<'a>: LgdtEmitter<A> {
<Self as LgdtEmitter<A>>::lgdt(self, op0);
}
/// `LGS` (LGS).
/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lgs<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: LgsEmitter<A, B> {
<Self as LgsEmitter<A, B>>::lgs(self, op0, op1);
}
/// `LIDT` (LIDT).
/// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If the operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lidt<A>(&mut self, op0: A)
where Assembler<'a>: LidtEmitter<A> {
<Self as LidtEmitter<A>>::lidt(self, op0);
}
/// `LLDT` (LLDT).
/// Loads the source operand into the segment selector field of the local descriptor table register (LDTR). The source operand (a general-purpose register or a memory location) contains a segment selector that points to a local descriptor table (LDT). After the segment selector is loaded in the LDTR, the processor uses the segment selector to locate the segment descriptor for the LDT in the global descriptor table (GDT). It then loads the segment limit and base address for the LDT from the segment descriptor into the LDTR. The segment registers DS, ES, SS, FS, GS, and CS are not affected by this instruction, nor is the LDTR field in the task state segment (TSS) for the current task.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LLDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lldt<A>(&mut self, op0: A)
where Assembler<'a>: LldtEmitter<A> {
<Self as LldtEmitter<A>>::lldt(self, op0);
}
/// `LMSW` (LMSW).
/// Loads the source operand into the machine status word, bits 0 through 15 of register CR0. The source operand can be a 16-bit general-purpose register or a memory location. Only the low-order 4 bits of the source operand (which contain the PE, MP, EM, and TS flags) are loaded into CR0. The PG, CD, NW, AM, WP, NE, and ET flags of CR0 are not affected. The operand-size attribute has no effect on this instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LMSW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lmsw<A>(&mut self, op0: A)
where Assembler<'a>: LmswEmitter<A> {
<Self as LmswEmitter<A>>::lmsw(self, op0);
}
/// `LODS` (LODS).
/// Loads a byte, word, or doubleword from the source operand into the AL, AX, or EAX register, respectively. The source operand is a memory location, the address of which is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The DS segment may be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LODS%3ALODSB%3ALODSW%3ALODSD%3ALODSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn lods(&mut self)
where Assembler<'a>: LodsEmitter {
<Self as LodsEmitter>::lods(self);
}
/// `LOOP` (LOOP).
/// Performs a loop operation using the RCX, ECX, or CX register as a counter (depending on whether the address size is 64 bits, 32 bits, or 16 bits). Note that the LOOP instruction ignores REX.W, but the 64-bit address size can be overridden using a 67H prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LOOP%3ALOOPcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
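///
/// A minimal sketch of a counted loop; the label helpers, register constant,
/// and `Imm` conversion are assumptions, not defined in this file:
///
/// ```ignore
/// let head = asm.new_label();     // hypothetical label constructor
/// asm.mov(rcx, Imm::from(10));    // counter in RCX
/// asm.bind(head);                 // hypothetical label-binding helper
/// // ... loop body ...
/// asm.r#loop(head);               // dec rcx; jump to head while rcx != 0
/// ```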
#[inline]
pub fn r#loop<A>(&mut self, op0: A)
where Assembler<'a>: LoopEmitter<A> {
<Self as LoopEmitter<A>>::r#loop(self, op0);
}
/// `LOOPNZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn loopnz<A>(&mut self, op0: A)
where Assembler<'a>: LoopnzEmitter<A> {
<Self as LoopnzEmitter<A>>::loopnz(self, op0);
}
/// `LOOPZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// | 2 | Label |
/// | 3 | Sym |
/// +---+----------+
/// ```
#[inline]
pub fn loopz<A>(&mut self, op0: A)
where Assembler<'a>: LoopzEmitter<A> {
<Self as LoopzEmitter<A>>::loopz(self, op0);
}
/// `LSL` (LSL).
/// Loads the unscrambled segment limit from the segment descriptor specified with the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the EFLAGS register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. The destination operand is a general-purpose register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LSL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpw |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpw |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lsl<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: LslEmitter<A, B> {
<Self as LslEmitter<A, B>>::lsl(self, op0, op1);
}
/// `LSS` (LSS).
/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpq, Mem |
/// | 3 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn lss<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: LssEmitter<A, B> {
<Self as LssEmitter<A, B>>::lss(self, op0, op1);
}
/// `LTR` (LTR).
/// Loads the source operand into the segment selector field of the task register. The source operand (a general-purpose register or a memory location) contains a segment selector that points to a task state segment (TSS). After the segment selector is loaded in the task register, the processor uses the segment selector to locate the segment descriptor for the TSS in the global descriptor table (GDT). It then loads the segment limit and base address for the TSS from the segment descriptor into the task register. The task pointed to by the task register is marked busy, but a switch to the task does not occur.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LTR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn ltr<A>(&mut self, op0: A)
where Assembler<'a>: LtrEmitter<A> {
<Self as LtrEmitter<A>>::ltr(self, op0);
}
/// `MOV` (MOV).
/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination operand can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+------------------------+
/// | # | Operands |
/// +----+------------------------+
/// | 1 | AbsoluteAddress, GpbLo |
/// | 2 | AbsoluteAddress, Gpd |
/// | 3 | AbsoluteAddress, Gpq |
/// | 4 | AbsoluteAddress, Gpw |
/// | 5 | GpbLo, AbsoluteAddress |
/// | 6 | GpbLo, GpbLo |
/// | 7 | GpbLo, Imm |
/// | 8 | GpbLo, Mem |
/// | 9 | Gpd, AbsoluteAddress |
/// | 10 | Gpd, Gpd |
/// | 11 | Gpd, Imm |
/// | 12 | Gpd, Mem |
/// | 13 | Gpq, AbsoluteAddress |
/// | 14 | Gpq, Gpq |
/// | 15 | Gpq, Imm |
/// | 16 | Gpq, Mem |
/// | 17 | Gpw, AbsoluteAddress |
/// | 18 | Gpw, Gpw |
/// | 19 | Gpw, Imm |
/// | 20 | Gpw, Mem |
/// | 21 | Mem, GpbLo |
/// | 22 | Mem, Gpd |
/// | 23 | Mem, Gpq |
/// | 24 | Mem, Gpw |
/// | 25 | Mem, Imm |
/// +----+------------------------+
/// ```
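///
/// A minimal sketch covering a few of the variants above; the register
/// constants, `Mem` constructor, and `Imm` conversion are assumptions, not
/// taken from this file:
///
/// ```ignore
/// asm.mov(rax, Imm::from(42));            // Gpq, Imm
/// asm.mov(rbx, rax);                      // Gpq, Gpq
/// asm.mov(Mem::base_disp(rsp, 8), eax);   // Mem, Gpd
/// ```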
#[inline]
pub fn mov<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovEmitter<A, B> {
<Self as MovEmitter<A, B>>::mov(self, op0, op1);
}
/// `MOVS` (MOVS).
/// Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified with the first operand (destination operand). Both the source and destination operands are located in memory. The address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be overridden with a segment override prefix, but the ES segment cannot be overridden.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVS%3AMOVSB%3AMOVSW%3AMOVSD%3AMOVSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
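///
/// A minimal sketch; the register constants and pointer operands are
/// assumptions, and the setup uses plain MOVs:
///
/// ```ignore
/// asm.mov(rsi, src_ptr);   // source address for DS:RSI (hypothetical operand)
/// asm.mov(rdi, dst_ptr);   // destination address for ES:RDI (hypothetical operand)
/// asm.movs();              // copy one element; RSI/RDI advance per DF
/// ```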
#[inline]
pub fn movs(&mut self)
where Assembler<'a>: MovsEmitter {
<Self as MovsEmitter>::movs(self);
}
/// `MOVSX` (MOVSX).
/// Copies the contents of the source operand (register or memory location) to the destination operand (register) and sign extends the value to 16 or 32 bits (see Figure 7-6 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The size of the converted value depends on the operand-size attribute.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVSX%3AMOVSXD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+------------+
/// | # | Operands |
/// +----+------------+
/// | 1 | Gpd, GpbLo |
/// | 2 | Gpd, Gpd |
/// | 3 | Gpd, Gpw |
/// | 4 | Gpd, Mem |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Gpd |
/// | 7 | Gpq, Gpw |
/// | 8 | Gpq, Mem |
/// | 9 | Gpw, GpbLo |
/// | 10 | Gpw, Gpd |
/// | 11 | Gpw, Gpw |
/// | 12 | Gpw, Mem |
/// +----+------------+
/// ```
#[inline]
pub fn movsx<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovsxEmitter<A, B> {
<Self as MovsxEmitter<A, B>>::movsx(self, op0, op1);
}
/// `MOVZX` (MOVZX).
/// Copies the contents of the source operand (register or memory location) to the destination operand (register) and zero extends the value. The size of the converted value depends on the operand-size attribute.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVZX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands |
/// +---+------------+
/// | 1 | Gpd, GpbLo |
/// | 2 | Gpd, Gpw |
/// | 3 | Gpd, Mem |
/// | 4 | Gpq, GpbLo |
/// | 5 | Gpq, Gpw |
/// | 6 | Gpq, Mem |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Gpw |
/// | 9 | Gpw, Mem |
/// +---+------------+
/// ```
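///
/// A minimal sketch; the register constants and `Mem` constructor are
/// assumptions, not taken from this file:
///
/// ```ignore
/// // Zero-extend a byte load into EAX; in 64-bit mode the 32-bit write also
/// // clears the upper half of RAX. MOVSX follows the same shape for sign
/// // extension.
/// asm.movzx(eax, Mem::base(rdi));
/// ```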
#[inline]
pub fn movzx<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovzxEmitter<A, B> {
<Self as MovzxEmitter<A, B>>::movzx(self, op0, op1);
}
/// `MOV_CR2G`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | Gpq, CReg |
/// +---+-----------+
/// ```
#[inline]
pub fn mov_cr2g<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovCr2gEmitter<A, B> {
<Self as MovCr2gEmitter<A, B>>::mov_cr2g(self, op0, op1);
}
/// `MOV_DR2G`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | Gpq, DReg |
/// +---+-----------+
/// ```
#[inline]
pub fn mov_dr2g<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovDr2gEmitter<A, B> {
<Self as MovDr2gEmitter<A, B>>::mov_dr2g(self, op0, op1);
}
/// `MOV_G2CR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | CReg, Gpq |
/// +---+-----------+
/// ```
#[inline]
pub fn mov_g2cr<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovG2crEmitter<A, B> {
<Self as MovG2crEmitter<A, B>>::mov_g2cr(self, op0, op1);
}
/// `MOV_G2DR`.
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | DReg, Gpq |
/// +---+-----------+
/// ```
#[inline]
pub fn mov_g2dr<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovG2drEmitter<A, B> {
<Self as MovG2drEmitter<A, B>>::mov_g2dr(self, op0, op1);
}
/// `MOV_G2S` (MOV).
/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination operand can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | SReg, Gpd |
/// | 2 | SReg, Mem |
/// +---+-----------+
/// ```
#[inline]
pub fn mov_g2s<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovG2sEmitter<A, B> {
<Self as MovG2sEmitter<A, B>>::mov_g2s(self, op0, op1);
}
/// `MOV_S2G` (MOV).
/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination operand can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+-----------+
/// | # | Operands |
/// +---+-----------+
/// | 1 | Gpd, SReg |
/// | 2 | Mem, SReg |
/// +---+-----------+
/// ```
#[inline]
pub fn mov_s2g<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: MovS2gEmitter<A, B> {
<Self as MovS2gEmitter<A, B>>::mov_s2g(self, op0, op1);
}
/// `MUL` (MUL).
/// Performs an unsigned multiplication of the first operand (destination operand) and the second operand (source operand) and stores the result in the destination operand. The destination operand is an implied operand located in register AL, AX, or EAX (depending on the size of the operand); the source operand is located in a general-purpose register or a memory location. The action of this instruction and the location of the result depend on the opcode and the operand size, as shown in Table 4-9 of the Intel manual.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MUL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn mul<A>(&mut self, op0: A)
where Assembler<'a>: MulEmitter<A> {
<Self as MulEmitter<A>>::mul(self, op0);
}
/// `NEG` (NEG).
/// Replaces the value of the operand (the destination operand) with its two's complement. (This operation is equivalent to subtracting the operand from 0.) The destination operand is located in a general-purpose register or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NEG.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn neg<A>(&mut self, op0: A)
where Assembler<'a>: NegEmitter<A> {
<Self as NegEmitter<A>>::neg(self, op0);
}
/// `NOP` (NOP).
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn nop(&mut self)
where Assembler<'a>: NopEmitter {
<Self as NopEmitter>::nop(self);
}
/// `NOP` (NOP).
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Gpq |
/// | 3 | Gpw |
/// | 4 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn nop_1<A>(&mut self, op0: A)
where Assembler<'a>: NopEmitter_1<A> {
<Self as NopEmitter_1<A>>::nop_1(self, op0);
}
/// `NOT` (NOT).
/// Performs a bitwise NOT operation (each 1 is set to 0, and each 0 is set to 1) on the destination operand and stores the result in the destination operand location. The destination operand can be a register or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Gpd |
/// | 3 | Gpq |
/// | 4 | Gpw |
/// | 5 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn not<A>(&mut self, op0: A)
where Assembler<'a>: NotEmitter<A> {
<Self as NotEmitter<A>>::not(self, op0);
}
/// `OR` (OR).
/// Performs a bitwise inclusive OR operation between the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result of the OR instruction is set to 0 if both corresponding bits of the first and second operands are 0; otherwise, each bit is set to 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn or<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: OrEmitter<A, B> {
<Self as OrEmitter<A, B>>::or(self, op0, op1);
}
/// `OUT` (OUT).
/// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as the destination operand allows I/O ports from 0 to 65,535 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn r#out(&mut self)
where Assembler<'a>: OutEmitter {
<Self as OutEmitter>::r#out(self);
}
/// `OUT` (OUT).
/// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as the destination operand allows I/O ports from 0 to 65,535 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands |
/// +---+------------+
/// | 1 | GpbLo, Imm |
/// | 2 | Gpd, Imm |
/// | 3 | Gpq, Imm |
/// | 4 | Gpw, Imm |
/// +---+------------+
/// ```
#[inline]
pub fn r#out_2<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: OutEmitter_2<A, B> {
<Self as OutEmitter_2<A, B>>::r#out_2(self, op0, op1);
}
/// `OUTS` (OUTS).
/// Copies data from the source operand (second operand) to the I/O port specified with the destination operand (first operand). The source operand is a memory location, the address of which is read from the DS:SI, DS:ESI, or RSI registers (depending on the address-size attribute of the instruction: 16, 32, or 64, respectively). (The DS segment may be overridden with a segment override prefix.) The destination operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUTS%3AOUTSB%3AOUTSW%3AOUTSD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn outs(&mut self)
where Assembler<'a>: OutsEmitter {
<Self as OutsEmitter>::outs(self);
}
/// `PAUSE` (NOP).
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register. PAUSE additionally acts as a spin-wait hint to the processor and can improve the performance of spin-wait loops.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn pause(&mut self)
where Assembler<'a>: PauseEmitter {
<Self as PauseEmitter>::pause(self);
}
/// `POP` (POP).
/// Loads the value from the top of the stack to the location specified with the destination operand (or explicit opcode) and then increments the stack pointer. The destination operand can be a general-purpose register, memory location, or segment register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Gpw |
/// | 3 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn pop<A>(&mut self, op0: A)
where Assembler<'a>: PopEmitter<A> {
<Self as PopEmitter<A>>::pop(self, op0);
}
/// `POPF` (POPF).
/// Pops a doubleword (POPFD) from the top of the stack (if the current operand-size attribute is 32) and stores the value in the EFLAGS register, or pops a word from the top of the stack (if the operand-size attribute is 16) and stores it in the lower 16 bits of the EFLAGS register (that is, the FLAGS register). These instructions reverse the operation of the PUSHF/PUSHFD/PUSHFQ instructions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POPF%3APOPFD%3APOPFQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn popf(&mut self)
where Assembler<'a>: PopfEmitter {
<Self as PopfEmitter>::popf(self);
}
/// `POP_SEG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | SReg |
/// +---+----------+
/// ```
#[inline]
pub fn pop_seg<A>(&mut self, op0: A)
where Assembler<'a>: PopSegEmitter<A> {
<Self as PopSegEmitter<A>>::pop_seg(self, op0);
}
/// `PUSH` (PUSH).
/// Decrements the stack pointer and then stores the source operand on the top of the stack. Address and operand sizes are determined and used as described in the Intel manual.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSH.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpq |
/// | 2 | Gpw |
/// | 3 | Imm |
/// | 4 | Mem |
/// +---+----------+
/// ```
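///
/// A minimal sketch pairing PUSH with the POP helper above to preserve a
/// callee-saved register; the `rbx` constant is an assumption:
///
/// ```ignore
/// asm.push(rbx);   // save
/// // ... code that clobbers rbx ...
/// asm.pop(rbx);    // restore
/// ```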
#[inline]
pub fn push<A>(&mut self, op0: A)
where Assembler<'a>: PushEmitter<A> {
<Self as PushEmitter<A>>::push(self, op0);
}
/// `PUSHF` (PUSHF).
/// Decrements the stack pointer by 4 (if the current operand-size attribute is 32) and pushes the entire contents of the EFLAGS register onto the stack, or decrements the stack pointer by 2 (if the operand-size attribute is 16) and pushes the lower 16 bits of the EFLAGS register (that is, the FLAGS register) onto the stack. These instructions reverse the operation of the POPF/POPFD instructions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSHF%3APUSHFD%3APUSHFQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn pushf(&mut self)
where Assembler<'a>: PushfEmitter {
<Self as PushfEmitter>::pushf(self);
}
/// `PUSH_SEG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | SReg |
/// +---+----------+
/// ```
#[inline]
pub fn push_seg<A>(&mut self, op0: A)
where Assembler<'a>: PushSegEmitter<A> {
<Self as PushSegEmitter<A>>::push_seg(self, op0);
}
/// `RCL` (RCL).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn rcl<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: RclEmitter<A, B> {
<Self as RclEmitter<A, B>>::rcl(self, op0, op1);
}
/// `RCR` (RCR).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn rcr<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: RcrEmitter<A, B> {
<Self as RcrEmitter<A, B>>::rcr(self, op0, op1);
}
/// `RET` (RET).
/// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn ret(&mut self)
where Assembler<'a>: RetEmitter {
<Self as RetEmitter>::ret(self);
}
/// `RET` (RET).
/// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
#[inline]
pub fn ret_1<A>(&mut self, op0: A)
where Assembler<'a>: RetEmitter_1<A> {
<Self as RetEmitter_1<A>>::ret_1(self, op0);
}
/// `RETF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn retf(&mut self)
where Assembler<'a>: RetfEmitter {
<Self as RetfEmitter>::retf(self);
}
/// `RETF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm |
/// +---+----------+
/// ```
#[inline]
pub fn retf_1<A>(&mut self, op0: A)
where Assembler<'a>: RetfEmitter_1<A> {
<Self as RetfEmitter_1<A>>::retf_1(self, op0);
}
/// `ROL` (ROL).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
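///
/// A minimal sketch of both count forms; the register constants and `Imm`
/// conversion are assumptions, not taken from this file:
///
/// ```ignore
/// asm.rol(eax, Imm::from(8));   // rotate by an immediate (count masked to 5 bits)
/// asm.rol(eax, cl);             // rotate by the count held in CL
/// ```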
#[inline]
pub fn rol<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: RolEmitter<A, B> {
<Self as RolEmitter<A, B>>::rol(self, op0, op1);
}
/// `ROR` (ROR).
/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn ror<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: RorEmitter<A, B> {
<Self as RorEmitter<A, B>>::ror(self, op0, op1);
}
/// `SAHF` (SAHF).
/// Loads the SF, ZF, AF, PF, and CF flags of the EFLAGS register with values from the corresponding bits in the AH register (bits 7, 6, 4, 2, and 0, respectively). Bits 1, 3, and 5 of register AH are ignored; the corresponding reserved bits (1, 3, and 5) in the EFLAGS register keep their fixed, architecturally defined values (see the “Operation” section of the Intel manual linked below).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAHF.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn sahf(&mut self)
where Assembler<'a>: SahfEmitter {
<Self as SahfEmitter>::sahf(self);
}
/// `SAR` (SAR).
/// Shifts the bits in the first operand (destination operand) to the right by the number of bits specified in the second operand (count operand); the shift is arithmetic, so copies of the sign bit are shifted in from the left. Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
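///
/// A minimal sketch contrasting the arithmetic and logical right shifts
/// (hedged: `eax` and `imm(..)` are assumed operand constructors):
///
/// ```text
/// asm.sar(eax, imm(4)); // arithmetic: shifts in copies of the sign bit
/// asm.shr(eax, imm(4)); // logical: shifts in zeros (see `shr` below)
/// ```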
#[inline]
pub fn sar<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: SarEmitter<A, B> {
<Self as SarEmitter<A, B>>::sar(self, op0, op1);
}
/// `SBB` (SBB).
/// Adds the source operand (second operand) and the carry (CF) flag, and subtracts the result from the destination operand (first operand). The result of the subtraction is stored in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SBB.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
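///
/// SBB's main use is multi-precision subtraction: subtract the low halves with
/// `SUB`, then the high halves with `SBB` so the borrow propagates. A sketch,
/// assuming `rax`/`rdx`/`rbx`/`rcx` register constants from the operand module:
///
/// ```text
/// // (rdx:rax) -= (rcx:rbx), 128-bit subtraction
/// asm.sub(rax, rbx); // low 64 bits; sets CF on borrow
/// asm.sbb(rdx, rcx); // high 64 bits, minus the borrow in CF
/// ```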
#[inline]
pub fn sbb<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: SbbEmitter<A, B> {
<Self as SbbEmitter<A, B>>::sbb(self, op0, op1);
}
/// `SCAS` (SCAS).
/// In non-64-bit modes and in default 64-bit mode: this instruction compares a byte, word, doubleword or quadword specified using a memory operand with the value in AL, AX, or EAX. It then sets status flags in EFLAGS recording the results. The memory operand address is read from ES:(E)DI register (depending on the address-size attribute of the instruction and the current operational mode). Note that ES cannot be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SCAS%3ASCASB%3ASCASW%3ASCASD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn scas(&mut self)
where Assembler<'a>: ScasEmitter {
<Self as ScasEmitter>::scas(self);
}
/// `SETA` (SETA).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
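///
/// All `SET*` variants follow the same pattern: they materialize one EFLAGS
/// condition as a 0/1 byte. A sketch, assuming a generated `cmp` method
/// elsewhere in this file and `eax`/`ecx`/`al` operand constants:
///
/// ```text
/// asm.cmp(eax, ecx); // unsigned compare
/// asm.seta(al);      // AL = 1 if EAX > ECX (above), else 0
/// ```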
#[inline]
pub fn seta<A>(&mut self, op0: A)
where Assembler<'a>: SetaEmitter<A> {
<Self as SetaEmitter<A>>::seta(self, op0);
}
/// `SETBE` (SETBE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setbe<A>(&mut self, op0: A)
where Assembler<'a>: SetbeEmitter<A> {
<Self as SetbeEmitter<A>>::setbe(self, op0);
}
/// `SETC` (SETC).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setc<A>(&mut self, op0: A)
where Assembler<'a>: SetcEmitter<A> {
<Self as SetcEmitter<A>>::setc(self, op0);
}
/// `SETG` (SETG).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setg<A>(&mut self, op0: A)
where Assembler<'a>: SetgEmitter<A> {
<Self as SetgEmitter<A>>::setg(self, op0);
}
/// `SETGE` (SETGE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setge<A>(&mut self, op0: A)
where Assembler<'a>: SetgeEmitter<A> {
<Self as SetgeEmitter<A>>::setge(self, op0);
}
/// `SETL` (SETL).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setl<A>(&mut self, op0: A)
where Assembler<'a>: SetlEmitter<A> {
<Self as SetlEmitter<A>>::setl(self, op0);
}
/// `SETLE` (SETLE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setle<A>(&mut self, op0: A)
where Assembler<'a>: SetleEmitter<A> {
<Self as SetleEmitter<A>>::setle(self, op0);
}
/// `SETNC` (SETNC).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setnc<A>(&mut self, op0: A)
where Assembler<'a>: SetncEmitter<A> {
<Self as SetncEmitter<A>>::setnc(self, op0);
}
/// `SETNO` (SETNO).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setno<A>(&mut self, op0: A)
where Assembler<'a>: SetnoEmitter<A> {
<Self as SetnoEmitter<A>>::setno(self, op0);
}
/// `SETNP` (SETNP).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setnp<A>(&mut self, op0: A)
where Assembler<'a>: SetnpEmitter<A> {
<Self as SetnpEmitter<A>>::setnp(self, op0);
}
/// `SETNS` (SETNS).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setns<A>(&mut self, op0: A)
where Assembler<'a>: SetnsEmitter<A> {
<Self as SetnsEmitter<A>>::setns(self, op0);
}
/// `SETNZ` (SETNZ).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setnz<A>(&mut self, op0: A)
where Assembler<'a>: SetnzEmitter<A> {
<Self as SetnzEmitter<A>>::setnz(self, op0);
}
/// `SETO` (SETO).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn seto<A>(&mut self, op0: A)
where Assembler<'a>: SetoEmitter<A> {
<Self as SetoEmitter<A>>::seto(self, op0);
}
/// `SETP` (SETP).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setp<A>(&mut self, op0: A)
where Assembler<'a>: SetpEmitter<A> {
<Self as SetpEmitter<A>>::setp(self, op0);
}
/// `SETS` (SETS).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn sets<A>(&mut self, op0: A)
where Assembler<'a>: SetsEmitter<A> {
<Self as SetsEmitter<A>>::sets(self, op0);
}
/// `SETZ` (SETZ).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setz<A>(&mut self, op0: A)
where Assembler<'a>: SetzEmitter<A> {
<Self as SetzEmitter<A>>::setz(self, op0);
}
/// `SETCC` (SETcc).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn setcc<A>(&mut self, op0: A)
where Assembler<'a>: SetccEmitter<A> {
<Self as SetccEmitter<A>>::setcc(self, op0);
}
/// `SGDT`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn sgdt<A>(&mut self, op0: A)
where Assembler<'a>: SgdtEmitter<A> {
<Self as SgdtEmitter<A>>::sgdt(self, op0);
}
/// `SHL` (SHL).
/// Shifts the bits in the first operand (destination operand) to the left by the number of bits specified in the second operand (count operand), shifting zeros in from the right. Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn shl<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: ShlEmitter<A, B> {
<Self as ShlEmitter<A, B>>::shl(self, op0, op1);
}
/// `SHLD` (SHLD).
/// Shifts the first operand (destination operand) to the left by the number of bits specified in the count operand; the second operand (source operand) provides the bits shifted in from the right. The SHLD instruction is useful for multi-precision shifts of 64 bits or more.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHLD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+-----------------+
/// | # | Operands |
/// +----+-----------------+
/// | 1 | Gpd, Gpd, GpbLo |
/// | 2 | Gpd, Gpd, Imm |
/// | 3 | Gpq, Gpq, GpbLo |
/// | 4 | Gpq, Gpq, Imm |
/// | 5 | Gpw, Gpw, GpbLo |
/// | 6 | Gpw, Gpw, Imm |
/// | 7 | Mem, Gpd, GpbLo |
/// | 8 | Mem, Gpd, Imm |
/// | 9 | Mem, Gpq, GpbLo |
/// | 10 | Mem, Gpq, Imm |
/// | 11 | Mem, Gpw, GpbLo |
/// | 12 | Mem, Gpw, Imm |
/// +----+-----------------+
/// ```
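///
/// A sketch of the multi-precision use case: shifting a 128-bit value held in
/// `rdx:rax` left by CL, for counts below 64 (hedged: `rdx`, `rax`, and `cl`
/// are assumed register constants):
///
/// ```text
/// asm.shld(rdx, rax, cl); // high half receives bits shifted out of rax
/// asm.shl(rax, cl);       // then shift the low half
/// ```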
#[inline]
pub fn shld<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: ShldEmitter<A, B, C> {
<Self as ShldEmitter<A, B, C>>::shld(self, op0, op1, op2);
}
/// `SHR` (SHR).
/// Shifts the bits in the first operand (destination operand) to the right by the number of bits specified in the second operand (count operand), shifting zeros in from the left (logical shift). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, GpbLo |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, GpbLo |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, GpbLo |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Imm |
/// +----+--------------+
/// ```
#[inline]
pub fn shr<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: ShrEmitter<A, B> {
<Self as ShrEmitter<A, B>>::shr(self, op0, op1);
}
/// `SHRD` (SHRD).
/// Shifts the first operand (destination operand) to the right by the number of bits specified in the count operand; the second operand (source operand) provides the bits shifted in from the left. The SHRD instruction is useful for multi-precision shifts of 64 bits or more.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHRD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+-----------------+
/// | # | Operands |
/// +----+-----------------+
/// | 1 | Gpd, Gpd, GpbLo |
/// | 2 | Gpd, Gpd, Imm |
/// | 3 | Gpq, Gpq, GpbLo |
/// | 4 | Gpq, Gpq, Imm |
/// | 5 | Gpw, Gpw, GpbLo |
/// | 6 | Gpw, Gpw, Imm |
/// | 7 | Mem, Gpd, GpbLo |
/// | 8 | Mem, Gpd, Imm |
/// | 9 | Mem, Gpq, GpbLo |
/// | 10 | Mem, Gpq, Imm |
/// | 11 | Mem, Gpw, GpbLo |
/// | 12 | Mem, Gpw, Imm |
/// +----+-----------------+
/// ```
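///
/// The mirror image of the `SHLD` sketch above: shifting the 128-bit value in
/// `rdx:rax` right by CL (same assumed register constants):
///
/// ```text
/// asm.shrd(rax, rdx, cl); // low half receives bits shifted out of rdx
/// asm.shr(rdx, cl);       // then shift the high half
/// ```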
#[inline]
pub fn shrd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: ShrdEmitter<A, B, C> {
<Self as ShrdEmitter<A, B, C>>::shrd(self, op0, op1, op2);
}
/// `SIDT`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn sidt<A>(&mut self, op0: A)
where Assembler<'a>: SidtEmitter<A> {
<Self as SidtEmitter<A>>::sidt(self, op0);
}
/// `SLDT` (SLDT).
/// Stores the segment selector from the local descriptor table register (LDTR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the segment descriptor (located in the GDT) for the current LDT. This instruction can only be executed in protected mode.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SLDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn sldt<A>(&mut self, op0: A)
where Assembler<'a>: SldtEmitter<A> {
<Self as SldtEmitter<A>>::sldt(self, op0);
}
/// `SMSW` (SMSW).
/// Stores the machine status word (bits 0 through 15 of control register CR0) into the destination operand. The destination operand can be a general-purpose register or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SMSW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Gpq |
/// | 3 | Gpw |
/// | 4 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn smsw<A>(&mut self, op0: A)
where Assembler<'a>: SmswEmitter<A> {
<Self as SmswEmitter<A>>::smsw(self, op0);
}
/// `STC` (STC).
/// Sets the CF flag in the EFLAGS register. Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn stc(&mut self)
where Assembler<'a>: StcEmitter {
<Self as StcEmitter>::stc(self);
}
/// `STD` (STD).
/// Sets the DF flag in the EFLAGS register. When the DF flag is set to 1, string operations decrement the index registers (ESI and/or EDI). Operation is the same in all modes.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn std(&mut self)
where Assembler<'a>: StdEmitter {
<Self as StdEmitter>::std(self);
}
/// `STI` (STI).
/// In most cases, STI sets the interrupt flag (IF) in the EFLAGS register. This allows the processor to respond to maskable hardware interrupts.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STI.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn sti(&mut self)
where Assembler<'a>: StiEmitter {
<Self as StiEmitter>::sti(self);
}
/// `STOS` (STOS).
/// In non-64-bit modes and in default 64-bit mode: stores a byte, word, or doubleword from the AL, AX, or EAX register (respectively) into the destination operand. The destination operand is a memory location, the address of which is read from either the ES:EDI or ES:DI register (depending on the address-size attribute of the instruction and the mode of operation). The ES segment cannot be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STOS%3ASTOSB%3ASTOSW%3ASTOSD%3ASTOSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn stos(&mut self)
where Assembler<'a>: StosEmitter {
<Self as StosEmitter>::stos(self);
}
/// `STR` (STR).
/// Stores the segment selector from the task register (TR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the task state segment (TSS) for the currently running task.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STR.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn str<A>(&mut self, op0: A)
where Assembler<'a>: StrEmitter<A> {
<Self as StrEmitter<A>>::str(self, op0);
}
/// `STTILECFG`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn sttilecfg<A>(&mut self, op0: A)
where Assembler<'a>: SttilecfgEmitter<A> {
<Self as SttilecfgEmitter<A>>::sttilecfg(self, op0);
}
/// `SUB` (SUB).
/// Subtracts the second operand (source operand) from the first operand (destination operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, register, or memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SUB.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
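///
/// A minimal sketch (hedged: `rsp` and `imm(..)` are assumed operand
/// constructors from the operand module):
///
/// ```text
/// asm.sub(rsp, imm(32)); // reserve 32 bytes of stack; the immediate is
///                        // sign-extended to the operand width
/// ```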
#[inline]
pub fn sub<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: SubEmitter<A, B> {
<Self as SubEmitter<A, B>>::sub(self, op0, op1);
}
/// `SWAPGS` (SWAPGS).
/// SWAPGS exchanges the current GS base register value with the value contained in MSR address C0000102H (IA32_KERNEL_GS_BASE). The SWAPGS instruction is a privileged instruction intended for use by system software.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SWAPGS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn swapgs(&mut self)
where Assembler<'a>: SwapgsEmitter {
<Self as SwapgsEmitter>::swapgs(self);
}
/// `SYSCALL` (SYSCALL).
/// SYSCALL invokes an OS system-call handler at privilege level 0. It does so by loading RIP from the IA32_LSTAR MSR (after saving the address of the instruction following SYSCALL into RCX). (The WRMSR instruction ensures that the IA32_LSTAR MSR always contains a canonical address.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSCALL.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
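///
/// A hedged sketch of a Linux x86-64 `exit(0)` system call; the `mov`/`xor`
/// methods and the `eax`/`edi`/`imm(..)` operands are assumptions about the
/// rest of this generated file:
///
/// ```text
/// asm.mov(eax, imm(60)); // __NR_exit
/// asm.xor(edi, edi);     // status = 0
/// asm.syscall();
/// ```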
#[inline]
pub fn syscall(&mut self)
where Assembler<'a>: SyscallEmitter {
<Self as SyscallEmitter>::syscall(self);
}
/// `SYSRET` (SYSRET).
/// SYSRET is a companion instruction to the SYSCALL instruction. It returns from an OS system-call handler to user code at privilege level 3. It does so by loading RIP from RCX and loading RFLAGS from R11. With a 64-bit operand size, SYSRET remains in 64-bit mode; otherwise, it enters compatibility mode and only the low 32 bits of the registers are loaded.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSRET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn sysret(&mut self)
where Assembler<'a>: SysretEmitter {
<Self as SysretEmitter>::sysret(self);
}
/// `TCMMIMFP16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tcmmimfp16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Tcmmimfp16psEmitter<A, B, C> {
<Self as Tcmmimfp16psEmitter<A, B, C>>::tcmmimfp16ps(self, op0, op1, op2);
}
/// `TCMMRLFP16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tcmmrlfp16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Tcmmrlfp16psEmitter<A, B, C> {
<Self as Tcmmrlfp16psEmitter<A, B, C>>::tcmmrlfp16ps(self, op0, op1, op2);
}
/// `TDPBF16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tdpbf16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Tdpbf16psEmitter<A, B, C> {
<Self as Tdpbf16psEmitter<A, B, C>>::tdpbf16ps(self, op0, op1, op2);
}
/// `TDPBSSD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tdpbssd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: TdpbssdEmitter<A, B, C> {
<Self as TdpbssdEmitter<A, B, C>>::tdpbssd(self, op0, op1, op2);
}
/// `TDPBSUD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tdpbsud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: TdpbsudEmitter<A, B, C> {
<Self as TdpbsudEmitter<A, B, C>>::tdpbsud(self, op0, op1, op2);
}
/// `TDPBUSD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tdpbusd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: TdpbusdEmitter<A, B, C> {
<Self as TdpbusdEmitter<A, B, C>>::tdpbusd(self, op0, op1, op2);
}
/// `TDPBUUD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tdpbuud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: TdpbuudEmitter<A, B, C> {
<Self as TdpbuudEmitter<A, B, C>>::tdpbuud(self, op0, op1, op2);
}
/// `TDPFP16PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Tmm, Tmm, Tmm |
/// +---+---------------+
/// ```
#[inline]
pub fn tdpfp16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Tdpfp16psEmitter<A, B, C> {
<Self as Tdpfp16psEmitter<A, B, C>>::tdpfp16ps(self, op0, op1, op2);
}
/// `TEST` (TEST).
/// Computes the bit-wise logical AND of the first operand (source 1 operand) and the second operand (source 2 operand) and sets the SF, ZF, and PF status flags according to the result. The result is then discarded.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TEST.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | Gpd, Gpd |
/// | 4 | Gpd, Imm |
/// | 5 | Gpq, Gpq |
/// | 6 | Gpq, Imm |
/// | 7 | Gpw, Gpw |
/// | 8 | Gpw, Imm |
/// | 9 | Mem, GpbLo |
/// | 10 | Mem, Gpd |
/// | 11 | Mem, Gpq |
/// | 12 | Mem, Gpw |
/// | 13 | Mem, Imm |
/// +----+--------------+
/// ```
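///
/// The classic use is a zero test without a separate compare. A sketch,
/// assuming `eax` and `al` operand constants:
///
/// ```text
/// asm.test(eax, eax); // ZF = 1 iff EAX == 0; the AND result is discarded
/// asm.setnz(al);      // AL = (EAX != 0) as a 0/1 byte
/// ```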
#[inline]
pub fn test<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: TestEmitter<A, B> {
<Self as TestEmitter<A, B>>::test(self, op0, op1);
}
/// `TILELOADD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Tmm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn tileloadd<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: TileloaddEmitter<A, B> {
<Self as TileloaddEmitter<A, B>>::tileloadd(self, op0, op1);
}
/// `TILELOADDT1`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Tmm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn tileloaddt1<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Tileloaddt1Emitter<A, B> {
<Self as Tileloaddt1Emitter<A, B>>::tileloaddt1(self, op0, op1);
}
/// `TILERELEASE` (TILERELEASE).
/// This instruction returns TILECFG and TILEDATA to the INIT state.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TILERELEASE.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn tilerelease(&mut self)
where Assembler<'a>: TilereleaseEmitter {
<Self as TilereleaseEmitter>::tilerelease(self);
}
/// `TILESTORED`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Tmm |
/// +---+----------+
/// ```
#[inline]
pub fn tilestored<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: TilestoredEmitter<A, B> {
<Self as TilestoredEmitter<A, B>>::tilestored(self, op0, op1);
}
/// `TILEZERO`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Tmm |
/// +---+----------+
/// ```
#[inline]
pub fn tilezero<A>(&mut self, op0: A)
where Assembler<'a>: TilezeroEmitter<A> {
<Self as TilezeroEmitter<A>>::tilezero(self, op0);
}
/// `UD0`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn ud0<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Ud0Emitter<A, B> {
<Self as Ud0Emitter<A, B>>::ud0(self, op0, op1);
}
/// `UD1` (UD1).
/// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Gpd |
/// | 2 | Gpd, Mem |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpq, Mem |
/// | 5 | Gpw, Gpw |
/// | 6 | Gpw, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn ud1<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Ud1Emitter<A, B> {
<Self as Ud1Emitter<A, B>>::ud1(self, op0, op1);
}
/// `UD2` (UD2).
/// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
#[inline]
pub fn ud2(&mut self)
where Assembler<'a>: Ud2Emitter {
<Self as Ud2Emitter>::ud2(self);
}
/// `VADDPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddphEmitter<A, B, C> {
<Self as VaddphEmitter<A, B, C>>::vaddph(self, op0, op1, op2);
}
/// `VADDPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddphErEmitter<A, B, C> {
<Self as VaddphErEmitter<A, B, C>>::vaddph_er(self, op0, op1, op2);
}
/// `VADDPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddphMaskEmitter<A, B, C> {
<Self as VaddphMaskEmitter<A, B, C>>::vaddph_mask(self, op0, op1, op2);
}
/// `VADDPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddphMaskErEmitter<A, B, C> {
<Self as VaddphMaskErEmitter<A, B, C>>::vaddph_mask_er(self, op0, op1, op2);
}
/// `VADDPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddphMaskzEmitter<A, B, C> {
<Self as VaddphMaskzEmitter<A, B, C>>::vaddph_maskz(self, op0, op1, op2);
}
/// `VADDPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddphMaskzErEmitter<A, B, C> {
<Self as VaddphMaskzErEmitter<A, B, C>>::vaddph_maskz_er(self, op0, op1, op2);
}
/// `VADDSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddshEmitter<A, B, C> {
<Self as VaddshEmitter<A, B, C>>::vaddsh(self, op0, op1, op2);
}
/// `VADDSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddshErEmitter<A, B, C> {
<Self as VaddshErEmitter<A, B, C>>::vaddsh_er(self, op0, op1, op2);
}
/// `VADDSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddshMaskEmitter<A, B, C> {
<Self as VaddshMaskEmitter<A, B, C>>::vaddsh_mask(self, op0, op1, op2);
}
/// `VADDSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddshMaskErEmitter<A, B, C> {
<Self as VaddshMaskErEmitter<A, B, C>>::vaddsh_mask_er(self, op0, op1, op2);
}
/// `VADDSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddshMaskzEmitter<A, B, C> {
<Self as VaddshMaskzEmitter<A, B, C>>::vaddsh_maskz(self, op0, op1, op2);
}
/// `VADDSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaddsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaddshMaskzErEmitter<A, B, C> {
<Self as VaddshMaskzErEmitter<A, B, C>>::vaddsh_maskz_er(self, op0, op1, op2);
}
/// `VAESDEC` (VAESDEC).
/// This instruction performs a single round of the AES decryption flow using the Equivalent Inverse Cipher, using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDEC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaesdec<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaesdecEmitter<A, B, C> {
<Self as VaesdecEmitter<A, B, C>>::vaesdec(self, op0, op1, op2);
}
/// `VAESDECLAST` (VAESDECLAST).
/// This instruction performs the last round of the AES decryption flow using the Equivalent Inverse Cipher, using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDECLAST.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaesdeclast<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaesdeclastEmitter<A, B, C> {
<Self as VaesdeclastEmitter<A, B, C>>::vaesdeclast(self, op0, op1, op2);
}
/// `VAESENC` (VAESENC).
/// This instruction performs a single round of an AES encryption flow using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
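///
/// A hedged sketch of one middle round plus the final round of an AES
/// encryption flow (`xmm0`..`xmm2` are assumed register constants; real code
/// would loop `vaesenc` over all but the last round key):
///
/// ```text
/// asm.vaesenc(xmm0, xmm0, xmm1);     // state = one encryption round with key 1
/// asm.vaesenclast(xmm0, xmm0, xmm2); // final round with the last round key
/// ```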
#[inline]
pub fn vaesenc<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaesencEmitter<A, B, C> {
<Self as VaesencEmitter<A, B, C>>::vaesenc(self, op0, op1, op2);
}
/// `VAESENCLAST` (VAESENCLAST).
/// This instruction performs the last round of an AES encryption flow using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENCLAST.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaesenclast<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaesenclastEmitter<A, B, C> {
<Self as VaesenclastEmitter<A, B, C>>::vaesenclast(self, op0, op1, op2);
}
/// `VAESIMC` (VAESIMC).
/// Performs the InvMixColumns transformation on the source operand and stores the result in the destination operand. The destination operand is an XMM register. The source operand can be an XMM register or a 128-bit memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESIMC.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vaesimc<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VaesimcEmitter<A, B> {
<Self as VaesimcEmitter<A, B>>::vaesimc(self, op0, op1);
}
/// `VAESKEYGENASSIST` (VAESKEYGENASSIST).
/// Assists in expanding the AES cipher key by computing steps towards generating a round key for encryption, using 128-bit data specified in the source operand and an 8-bit round constant specified as an immediate, and stores the result in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESKEYGENASSIST.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vaeskeygenassist<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VaeskeygenassistEmitter<A, B, C> {
<Self as VaeskeygenassistEmitter<A, B, C>>::vaeskeygenassist(self, op0, op1, op2);
}
/// `VBCSTNEBF162PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vbcstnebf162ps<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vbcstnebf162psEmitter<A, B> {
<Self as Vbcstnebf162psEmitter<A, B>>::vbcstnebf162ps(self, op0, op1);
}
/// `VBCSTNESH2PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vbcstnesh2ps<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vbcstnesh2psEmitter<A, B> {
<Self as Vbcstnesh2psEmitter<A, B>>::vbcstnesh2ps(self, op0, op1);
}
/// `VCMPPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// | 3 | KReg, Ymm, Mem, Imm |
/// | 4 | KReg, Ymm, Ymm, Imm |
/// | 5 | KReg, Zmm, Mem, Imm |
/// | 6 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpph<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpphEmitter<A, B, C, D> {
<Self as VcmpphEmitter<A, B, C, D>>::vcmpph(self, op0, op1, op2, op3);
}
/// `VCMPPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// | 3 | KReg, Ymm, Mem, Imm |
/// | 4 | KReg, Ymm, Ymm, Imm |
/// | 5 | KReg, Zmm, Mem, Imm |
/// | 6 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpph_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpphMaskEmitter<A, B, C, D> {
<Self as VcmpphMaskEmitter<A, B, C, D>>::vcmpph_mask(self, op0, op1, op2, op3);
}
/// `VCMPPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpph_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpphMaskSaeEmitter<A, B, C, D> {
<Self as VcmpphMaskSaeEmitter<A, B, C, D>>::vcmpph_mask_sae(self, op0, op1, op2, op3);
}
/// `VCMPPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Zmm, Zmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpph_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpphSaeEmitter<A, B, C, D> {
<Self as VcmpphSaeEmitter<A, B, C, D>>::vcmpph_sae(self, op0, op1, op2, op3);
}
/// `VCMPSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpsh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpshEmitter<A, B, C, D> {
<Self as VcmpshEmitter<A, B, C, D>>::vcmpsh(self, op0, op1, op2, op3);
}
/// `VCMPSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Mem, Imm |
/// | 2 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpsh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpshMaskEmitter<A, B, C, D> {
<Self as VcmpshMaskEmitter<A, B, C, D>>::vcmpsh_mask(self, op0, op1, op2, op3);
}
/// `VCMPSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpsh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpshMaskSaeEmitter<A, B, C, D> {
<Self as VcmpshMaskSaeEmitter<A, B, C, D>>::vcmpsh_mask_sae(self, op0, op1, op2, op3);
}
/// `VCMPSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------------+
/// | # | Operands |
/// +---+---------------------+
/// | 1 | KReg, Xmm, Xmm, Imm |
/// +---+---------------------+
/// ```
#[inline]
pub fn vcmpsh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VcmpshSaeEmitter<A, B, C, D> {
<Self as VcmpshSaeEmitter<A, B, C, D>>::vcmpsh_sae(self, op0, op1, op2, op3);
}
/// `VCOMISH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcomish<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VcomishEmitter<A, B> {
<Self as VcomishEmitter<A, B>>::vcomish(self, op0, op1);
}
/// `VCOMISH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcomish_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VcomishSaeEmitter<A, B> {
<Self as VcomishSaeEmitter<A, B>>::vcomish_sae(self, op0, op1);
}
/// `VCVTDQ2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtdq2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtdq2phEmitter<A, B> {
<Self as Vcvtdq2phEmitter<A, B>>::vcvtdq2ph(self, op0, op1);
}
/// `VCVTDQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtdq2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtdq2phErEmitter<A, B> {
<Self as Vcvtdq2phErEmitter<A, B>>::vcvtdq2ph_er(self, op0, op1);
}
/// `VCVTDQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtdq2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtdq2phMaskEmitter<A, B> {
<Self as Vcvtdq2phMaskEmitter<A, B>>::vcvtdq2ph_mask(self, op0, op1);
}
/// `VCVTDQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtdq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtdq2phMaskErEmitter<A, B> {
<Self as Vcvtdq2phMaskErEmitter<A, B>>::vcvtdq2ph_mask_er(self, op0, op1);
}
/// `VCVTDQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtdq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtdq2phMaskzEmitter<A, B> {
<Self as Vcvtdq2phMaskzEmitter<A, B>>::vcvtdq2ph_maskz(self, op0, op1);
}
/// `VCVTDQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtdq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtdq2phMaskzErEmitter<A, B> {
<Self as Vcvtdq2phMaskzErEmitter<A, B>>::vcvtdq2ph_maskz_er(self, op0, op1);
}
/// `VCVTNEEBF162PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtneebf162ps<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtneebf162psEmitter<A, B> {
<Self as Vcvtneebf162psEmitter<A, B>>::vcvtneebf162ps(self, op0, op1);
}
/// `VCVTNEEPH2PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtneeph2ps<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtneeph2psEmitter<A, B> {
<Self as Vcvtneeph2psEmitter<A, B>>::vcvtneeph2ps(self, op0, op1);
}
/// `VCVTNEOBF162PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtneobf162ps<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtneobf162psEmitter<A, B> {
<Self as Vcvtneobf162psEmitter<A, B>>::vcvtneobf162ps(self, op0, op1);
}
/// `VCVTNEOPH2PS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Ymm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtneoph2ps<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtneoph2psEmitter<A, B> {
<Self as Vcvtneoph2psEmitter<A, B>>::vcvtneoph2ps(self, op0, op1);
}
/// `VCVTPD2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtpd2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtpd2phEmitter<A, B> {
<Self as Vcvtpd2phEmitter<A, B>>::vcvtpd2ph(self, op0, op1);
}
/// `VCVTPD2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtpd2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtpd2phErEmitter<A, B> {
<Self as Vcvtpd2phErEmitter<A, B>>::vcvtpd2ph_er(self, op0, op1);
}
/// `VCVTPD2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtpd2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtpd2phMaskEmitter<A, B> {
<Self as Vcvtpd2phMaskEmitter<A, B>>::vcvtpd2ph_mask(self, op0, op1);
}
/// `VCVTPD2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtpd2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtpd2phMaskErEmitter<A, B> {
<Self as Vcvtpd2phMaskErEmitter<A, B>>::vcvtpd2ph_mask_er(self, op0, op1);
}
/// `VCVTPD2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtpd2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtpd2phMaskzEmitter<A, B> {
<Self as Vcvtpd2phMaskzEmitter<A, B>>::vcvtpd2ph_maskz(self, op0, op1);
}
/// `VCVTPD2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtpd2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtpd2phMaskzErEmitter<A, B> {
<Self as Vcvtpd2phMaskzErEmitter<A, B>>::vcvtpd2ph_maskz_er(self, op0, op1);
}
/// `VCVTPH2DQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2dq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2dqEmitter<A, B> {
<Self as Vcvtph2dqEmitter<A, B>>::vcvtph2dq(self, op0, op1);
}
/// `VCVTPH2DQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2dq_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2dqErEmitter<A, B> {
<Self as Vcvtph2dqErEmitter<A, B>>::vcvtph2dq_er(self, op0, op1);
}
/// `VCVTPH2DQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2dq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2dqMaskEmitter<A, B> {
<Self as Vcvtph2dqMaskEmitter<A, B>>::vcvtph2dq_mask(self, op0, op1);
}
/// `VCVTPH2DQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2dq_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2dqMaskErEmitter<A, B> {
<Self as Vcvtph2dqMaskErEmitter<A, B>>::vcvtph2dq_mask_er(self, op0, op1);
}
/// `VCVTPH2DQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2dq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2dqMaskzEmitter<A, B> {
<Self as Vcvtph2dqMaskzEmitter<A, B>>::vcvtph2dq_maskz(self, op0, op1);
}
/// `VCVTPH2DQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2dq_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2dqMaskzErEmitter<A, B> {
<Self as Vcvtph2dqMaskzErEmitter<A, B>>::vcvtph2dq_maskz_er(self, op0, op1);
}
/// `VCVTPH2PD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
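///
/// A minimal usage sketch (hedged: operand names are placeholders for values
/// of this crate's register types):
///
/// ```ignore
/// // Eight FP16 values in the low 128 bits of xmm1 -> eight FP64 in zmm0.
/// asm.vcvtph2pd(zmm0, xmm1);
/// ```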
#[inline]
pub fn vcvtph2pd<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2pdEmitter<A, B> {
<Self as Vcvtph2pdEmitter<A, B>>::vcvtph2pd(self, op0, op1);
}
/// `VCVTPH2PD_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2pd_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2pdMaskEmitter<A, B> {
<Self as Vcvtph2pdMaskEmitter<A, B>>::vcvtph2pd_mask(self, op0, op1);
}
/// `VCVTPH2PD_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2pd_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2pdMaskSaeEmitter<A, B> {
<Self as Vcvtph2pdMaskSaeEmitter<A, B>>::vcvtph2pd_mask_sae(self, op0, op1);
}
/// `VCVTPH2PD_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2pd_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2pdMaskzEmitter<A, B> {
<Self as Vcvtph2pdMaskzEmitter<A, B>>::vcvtph2pd_maskz(self, op0, op1);
}
/// `VCVTPH2PD_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2pd_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2pdMaskzSaeEmitter<A, B> {
<Self as Vcvtph2pdMaskzSaeEmitter<A, B>>::vcvtph2pd_maskz_sae(self, op0, op1);
}
/// `VCVTPH2PD_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2pd_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2pdSaeEmitter<A, B> {
<Self as Vcvtph2pdSaeEmitter<A, B>>::vcvtph2pd_sae(self, op0, op1);
}
/// `VCVTPH2PSX` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
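///
/// A minimal usage sketch (hedged: `asm` is assumed to be a configured
/// `Assembler`; register names are placeholders):
///
/// ```ignore
/// // Sixteen packed FP16 values in ymm1 -> sixteen FP32 values in zmm0.
/// asm.vcvtph2psx(zmm0, ymm1);
/// // 128-bit form: four FP16 values in the low bits of xmm1 -> four FP32.
/// asm.vcvtph2psx(xmm0, xmm1);
/// ```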
#[inline]
pub fn vcvtph2psx<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2psxEmitter<A, B> {
<Self as Vcvtph2psxEmitter<A, B>>::vcvtph2psx(self, op0, op1);
}
/// `VCVTPH2PSX_MASK` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2psx_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2psxMaskEmitter<A, B> {
<Self as Vcvtph2psxMaskEmitter<A, B>>::vcvtph2psx_mask(self, op0, op1);
}
/// `VCVTPH2PSX_MASK_SAE` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2psx_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2psxMaskSaeEmitter<A, B> {
<Self as Vcvtph2psxMaskSaeEmitter<A, B>>::vcvtph2psx_mask_sae(self, op0, op1);
}
/// `VCVTPH2PSX_MASKZ` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2psx_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2psxMaskzEmitter<A, B> {
<Self as Vcvtph2psxMaskzEmitter<A, B>>::vcvtph2psx_maskz(self, op0, op1);
}
/// `VCVTPH2PSX_MASKZ_SAE` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2psx_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2psxMaskzSaeEmitter<A, B> {
<Self as Vcvtph2psxMaskzSaeEmitter<A, B>>::vcvtph2psx_maskz_sae(self, op0, op1);
}
/// `VCVTPH2PSX_SAE` (VCVTPH2PSX).
/// This instruction converts packed half precision (16-bit) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2psx_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2psxSaeEmitter<A, B> {
<Self as Vcvtph2psxSaeEmitter<A, B>>::vcvtph2psx_sae(self, op0, op1);
}
/// `VCVTPH2QQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2qq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2qqEmitter<A, B> {
<Self as Vcvtph2qqEmitter<A, B>>::vcvtph2qq(self, op0, op1);
}
/// `VCVTPH2QQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2qq_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2qqErEmitter<A, B> {
<Self as Vcvtph2qqErEmitter<A, B>>::vcvtph2qq_er(self, op0, op1);
}
/// `VCVTPH2QQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2qq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2qqMaskEmitter<A, B> {
<Self as Vcvtph2qqMaskEmitter<A, B>>::vcvtph2qq_mask(self, op0, op1);
}
/// `VCVTPH2QQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2qq_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2qqMaskErEmitter<A, B> {
<Self as Vcvtph2qqMaskErEmitter<A, B>>::vcvtph2qq_mask_er(self, op0, op1);
}
/// `VCVTPH2QQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2qq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2qqMaskzEmitter<A, B> {
<Self as Vcvtph2qqMaskzEmitter<A, B>>::vcvtph2qq_maskz(self, op0, op1);
}
/// `VCVTPH2QQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2qq_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2qqMaskzErEmitter<A, B> {
<Self as Vcvtph2qqMaskzErEmitter<A, B>>::vcvtph2qq_maskz_er(self, op0, op1);
}
/// `VCVTPH2UDQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2udq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2udqEmitter<A, B> {
<Self as Vcvtph2udqEmitter<A, B>>::vcvtph2udq(self, op0, op1);
}
/// `VCVTPH2UDQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2udq_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2udqErEmitter<A, B> {
<Self as Vcvtph2udqErEmitter<A, B>>::vcvtph2udq_er(self, op0, op1);
}
/// `VCVTPH2UDQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2udq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2udqMaskEmitter<A, B> {
<Self as Vcvtph2udqMaskEmitter<A, B>>::vcvtph2udq_mask(self, op0, op1);
}
/// `VCVTPH2UDQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2udq_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2udqMaskErEmitter<A, B> {
<Self as Vcvtph2udqMaskErEmitter<A, B>>::vcvtph2udq_mask_er(self, op0, op1);
}
/// `VCVTPH2UDQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2udq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2udqMaskzEmitter<A, B> {
<Self as Vcvtph2udqMaskzEmitter<A, B>>::vcvtph2udq_maskz(self, op0, op1);
}
/// `VCVTPH2UDQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2udq_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2udqMaskzErEmitter<A, B> {
<Self as Vcvtph2udqMaskzErEmitter<A, B>>::vcvtph2udq_maskz_er(self, op0, op1);
}
/// `VCVTPH2UQQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uqq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uqqEmitter<A, B> {
<Self as Vcvtph2uqqEmitter<A, B>>::vcvtph2uqq(self, op0, op1);
}
/// `VCVTPH2UQQ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uqq_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uqqErEmitter<A, B> {
<Self as Vcvtph2uqqErEmitter<A, B>>::vcvtph2uqq_er(self, op0, op1);
}
/// `VCVTPH2UQQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uqq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uqqMaskEmitter<A, B> {
<Self as Vcvtph2uqqMaskEmitter<A, B>>::vcvtph2uqq_mask(self, op0, op1);
}
/// `VCVTPH2UQQ_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uqq_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uqqMaskErEmitter<A, B> {
<Self as Vcvtph2uqqMaskErEmitter<A, B>>::vcvtph2uqq_mask_er(self, op0, op1);
}
/// `VCVTPH2UQQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uqq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uqqMaskzEmitter<A, B> {
<Self as Vcvtph2uqqMaskzEmitter<A, B>>::vcvtph2uqq_maskz(self, op0, op1);
}
/// `VCVTPH2UQQ_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uqq_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uqqMaskzErEmitter<A, B> {
<Self as Vcvtph2uqqMaskzErEmitter<A, B>>::vcvtph2uqq_maskz_er(self, op0, op1);
}
/// `VCVTPH2UW`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uw<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uwEmitter<A, B> {
<Self as Vcvtph2uwEmitter<A, B>>::vcvtph2uw(self, op0, op1);
}
/// `VCVTPH2UW_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uw_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uwErEmitter<A, B> {
<Self as Vcvtph2uwErEmitter<A, B>>::vcvtph2uw_er(self, op0, op1);
}
/// `VCVTPH2UW_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uw_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uwMaskEmitter<A, B> {
<Self as Vcvtph2uwMaskEmitter<A, B>>::vcvtph2uw_mask(self, op0, op1);
}
/// `VCVTPH2UW_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uw_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uwMaskErEmitter<A, B> {
<Self as Vcvtph2uwMaskErEmitter<A, B>>::vcvtph2uw_mask_er(self, op0, op1);
}
/// `VCVTPH2UW_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uw_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uwMaskzEmitter<A, B> {
<Self as Vcvtph2uwMaskzEmitter<A, B>>::vcvtph2uw_maskz(self, op0, op1);
}
/// `VCVTPH2UW_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2uw_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2uwMaskzErEmitter<A, B> {
<Self as Vcvtph2uwMaskzErEmitter<A, B>>::vcvtph2uw_maskz_er(self, op0, op1);
}
/// `VCVTPH2W`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2w<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2wEmitter<A, B> {
<Self as Vcvtph2wEmitter<A, B>>::vcvtph2w(self, op0, op1);
}
/// `VCVTPH2W_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2w_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2wErEmitter<A, B> {
<Self as Vcvtph2wErEmitter<A, B>>::vcvtph2w_er(self, op0, op1);
}
/// `VCVTPH2W_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2w_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2wMaskEmitter<A, B> {
<Self as Vcvtph2wMaskEmitter<A, B>>::vcvtph2w_mask(self, op0, op1);
}
/// `VCVTPH2W_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2w_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2wMaskErEmitter<A, B> {
<Self as Vcvtph2wMaskErEmitter<A, B>>::vcvtph2w_mask_er(self, op0, op1);
}
/// `VCVTPH2W_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2w_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2wMaskzEmitter<A, B> {
<Self as Vcvtph2wMaskzEmitter<A, B>>::vcvtph2w_maskz(self, op0, op1);
}
/// `VCVTPH2W_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtph2w_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtph2wMaskzErEmitter<A, B> {
<Self as Vcvtph2wMaskzErEmitter<A, B>>::vcvtph2w_maskz_er(self, op0, op1);
}
/// `VCVTPS2PHX` (VCVTPS2PHX).
/// This instruction converts packed single precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
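///
/// A minimal usage sketch (hedged: operand names are placeholders):
///
/// ```ignore
/// // Sixteen packed FP32 values in zmm1 -> sixteen FP16 values in ymm0.
/// asm.vcvtps2phx(ymm0, zmm1);
/// ```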
#[inline]
pub fn vcvtps2phx<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtps2phxEmitter<A, B> {
<Self as Vcvtps2phxEmitter<A, B>>::vcvtps2phx(self, op0, op1);
}
/// `VCVTPS2PHX_ER` (VCVTPS2PHX).
/// This instruction converts packed single precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtps2phx_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtps2phxErEmitter<A, B> {
<Self as Vcvtps2phxErEmitter<A, B>>::vcvtps2phx_er(self, op0, op1);
}
/// `VCVTPS2PHX_MASK` (VCVTPS2PHX).
/// This instruction converts packed single precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtps2phx_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtps2phxMaskEmitter<A, B> {
<Self as Vcvtps2phxMaskEmitter<A, B>>::vcvtps2phx_mask(self, op0, op1);
}
/// `VCVTPS2PHX_MASK_ER` (VCVTPS2PHX).
/// This instruction converts packed single precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtps2phx_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtps2phxMaskErEmitter<A, B> {
<Self as Vcvtps2phxMaskErEmitter<A, B>>::vcvtps2phx_mask_er(self, op0, op1);
}
/// `VCVTPS2PHX_MASKZ` (VCVTPS2PHX).
/// This instruction converts packed single precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtps2phx_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtps2phxMaskzEmitter<A, B> {
<Self as Vcvtps2phxMaskzEmitter<A, B>>::vcvtps2phx_maskz(self, op0, op1);
}
/// `VCVTPS2PHX_MASKZ_ER` (VCVTPS2PHX).
/// This instruction converts packed single precision floating-point values in the source operand to FP16 values and stores them in the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtps2phx_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtps2phxMaskzErEmitter<A, B> {
<Self as Vcvtps2phxMaskzErEmitter<A, B>>::vcvtps2phx_maskz_er(self, op0, op1);
}
/// `VCVTQQ2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtqq2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtqq2phEmitter<A, B> {
<Self as Vcvtqq2phEmitter<A, B>>::vcvtqq2ph(self, op0, op1);
}
/// `VCVTQQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtqq2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtqq2phErEmitter<A, B> {
<Self as Vcvtqq2phErEmitter<A, B>>::vcvtqq2ph_er(self, op0, op1);
}
/// `VCVTQQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtqq2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtqq2phMaskEmitter<A, B> {
<Self as Vcvtqq2phMaskEmitter<A, B>>::vcvtqq2ph_mask(self, op0, op1);
}
/// `VCVTQQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtqq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtqq2phMaskErEmitter<A, B> {
<Self as Vcvtqq2phMaskErEmitter<A, B>>::vcvtqq2ph_mask_er(self, op0, op1);
}
/// `VCVTQQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtqq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtqq2phMaskzEmitter<A, B> {
<Self as Vcvtqq2phMaskzEmitter<A, B>>::vcvtqq2ph_maskz(self, op0, op1);
}
/// `VCVTQQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtqq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtqq2phMaskzErEmitter<A, B> {
<Self as Vcvtqq2phMaskzErEmitter<A, B>>::vcvtqq2ph_maskz_er(self, op0, op1);
}
/// `VCVTSD2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
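///
/// A minimal usage sketch (hedged: operand names are placeholders). The
/// second operand supplies the untouched upper bits of the destination; the
/// third holds the FP64 scalar that is converted to FP16:
///
/// ```ignore
/// asm.vcvtsd2sh(xmm0, xmm1, xmm2);
/// ```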
#[inline]
pub fn vcvtsd2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsd2shEmitter<A, B, C> {
<Self as Vcvtsd2shEmitter<A, B, C>>::vcvtsd2sh(self, op0, op1, op2);
}
/// `VCVTSD2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsd2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsd2shErEmitter<A, B, C> {
<Self as Vcvtsd2shErEmitter<A, B, C>>::vcvtsd2sh_er(self, op0, op1, op2);
}
/// `VCVTSD2SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsd2sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsd2shMaskEmitter<A, B, C> {
<Self as Vcvtsd2shMaskEmitter<A, B, C>>::vcvtsd2sh_mask(self, op0, op1, op2);
}
/// `VCVTSD2SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsd2sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsd2shMaskErEmitter<A, B, C> {
<Self as Vcvtsd2shMaskErEmitter<A, B, C>>::vcvtsd2sh_mask_er(self, op0, op1, op2);
}
/// `VCVTSD2SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsd2sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsd2shMaskzEmitter<A, B, C> {
<Self as Vcvtsd2shMaskzEmitter<A, B, C>>::vcvtsd2sh_maskz(self, op0, op1, op2);
}
/// `VCVTSD2SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsd2sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsd2shMaskzErEmitter<A, B, C> {
<Self as Vcvtsd2shMaskzErEmitter<A, B, C>>::vcvtsd2sh_maskz_er(self, op0, op1, op2);
}
/// `VCVTSH2SD`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2sd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2sdEmitter<A, B, C> {
<Self as Vcvtsh2sdEmitter<A, B, C>>::vcvtsh2sd(self, op0, op1, op2);
}
/// `VCVTSH2SD_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2sd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2sdMaskEmitter<A, B, C> {
<Self as Vcvtsh2sdMaskEmitter<A, B, C>>::vcvtsh2sd_mask(self, op0, op1, op2);
}
/// `VCVTSH2SD_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2sd_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2sdMaskSaeEmitter<A, B, C> {
<Self as Vcvtsh2sdMaskSaeEmitter<A, B, C>>::vcvtsh2sd_mask_sae(self, op0, op1, op2);
}
/// `VCVTSH2SD_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2sd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2sdMaskzEmitter<A, B, C> {
<Self as Vcvtsh2sdMaskzEmitter<A, B, C>>::vcvtsh2sd_maskz(self, op0, op1, op2);
}
/// `VCVTSH2SD_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2sd_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2sdMaskzSaeEmitter<A, B, C> {
<Self as Vcvtsh2sdMaskzSaeEmitter<A, B, C>>::vcvtsh2sd_maskz_sae(self, op0, op1, op2);
}
/// `VCVTSH2SD_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2sd_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2sdSaeEmitter<A, B, C> {
<Self as Vcvtsh2sdSaeEmitter<A, B, C>>::vcvtsh2sd_sae(self, op0, op1, op2);
}
/// `VCVTSH2SI`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
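///
/// A minimal usage sketch (hedged: `eax` stands in for a value of this
/// crate's `Gpd` type; the other names are placeholders too):
///
/// ```ignore
/// // Convert the low FP16 element of xmm0 to a signed 32-bit integer.
/// asm.vcvtsh2si(eax, xmm0);
/// ```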
#[inline]
pub fn vcvtsh2si<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtsh2siEmitter<A, B> {
<Self as Vcvtsh2siEmitter<A, B>>::vcvtsh2si(self, op0, op1);
}
/// `VCVTSH2SI_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtsh2si_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtsh2siErEmitter<A, B> {
<Self as Vcvtsh2siErEmitter<A, B>>::vcvtsh2si_er(self, op0, op1);
}
/// `VCVTSH2SS`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2ss<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2ssEmitter<A, B, C> {
<Self as Vcvtsh2ssEmitter<A, B, C>>::vcvtsh2ss(self, op0, op1, op2);
}
/// `VCVTSH2SS_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2ss_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2ssMaskEmitter<A, B, C> {
<Self as Vcvtsh2ssMaskEmitter<A, B, C>>::vcvtsh2ss_mask(self, op0, op1, op2);
}
/// `VCVTSH2SS_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2ss_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2ssMaskSaeEmitter<A, B, C> {
<Self as Vcvtsh2ssMaskSaeEmitter<A, B, C>>::vcvtsh2ss_mask_sae(self, op0, op1, op2);
}
/// `VCVTSH2SS_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2ss_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2ssMaskzEmitter<A, B, C> {
<Self as Vcvtsh2ssMaskzEmitter<A, B, C>>::vcvtsh2ss_maskz(self, op0, op1, op2);
}
/// `VCVTSH2SS_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2ss_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2ssMaskzSaeEmitter<A, B, C> {
<Self as Vcvtsh2ssMaskzSaeEmitter<A, B, C>>::vcvtsh2ss_maskz_sae(self, op0, op1, op2);
}
/// `VCVTSH2SS_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsh2ss_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsh2ssSaeEmitter<A, B, C> {
<Self as Vcvtsh2ssSaeEmitter<A, B, C>>::vcvtsh2ss_sae(self, op0, op1, op2);
}
/// `VCVTSH2USI`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtsh2usi<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtsh2usiEmitter<A, B> {
<Self as Vcvtsh2usiEmitter<A, B>>::vcvtsh2usi(self, op0, op1);
}
/// `VCVTSH2USI_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtsh2usi_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtsh2usiErEmitter<A, B> {
<Self as Vcvtsh2usiErEmitter<A, B>>::vcvtsh2usi_er(self, op0, op1);
}
/// `VCVTSI2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// | 3 | Xmm, Xmm, Mem |
/// +---+---------------+
/// ```
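///
/// A minimal usage sketch (hedged: operand names are placeholders). The
/// general-purpose source is converted to an FP16 scalar in the low element
/// of the destination; the upper bits are copied from the second operand:
///
/// ```ignore
/// asm.vcvtsi2sh(xmm0, xmm1, eax);
/// ```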
#[inline]
pub fn vcvtsi2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsi2shEmitter<A, B, C> {
<Self as Vcvtsi2shEmitter<A, B, C>>::vcvtsi2sh(self, op0, op1, op2);
}
/// `VCVTSI2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtsi2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtsi2shErEmitter<A, B, C> {
<Self as Vcvtsi2shErEmitter<A, B, C>>::vcvtsi2sh_er(self, op0, op1, op2);
}
/// `VCVTSS2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtss2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtss2shEmitter<A, B, C> {
<Self as Vcvtss2shEmitter<A, B, C>>::vcvtss2sh(self, op0, op1, op2);
}
/// `VCVTSS2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtss2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtss2shErEmitter<A, B, C> {
<Self as Vcvtss2shErEmitter<A, B, C>>::vcvtss2sh_er(self, op0, op1, op2);
}
/// `VCVTSS2SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtss2sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtss2shMaskEmitter<A, B, C> {
<Self as Vcvtss2shMaskEmitter<A, B, C>>::vcvtss2sh_mask(self, op0, op1, op2);
}
/// `VCVTSS2SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtss2sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtss2shMaskErEmitter<A, B, C> {
<Self as Vcvtss2shMaskErEmitter<A, B, C>>::vcvtss2sh_mask_er(self, op0, op1, op2);
}
/// `VCVTSS2SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtss2sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtss2shMaskzEmitter<A, B, C> {
<Self as Vcvtss2shMaskzEmitter<A, B, C>>::vcvtss2sh_maskz(self, op0, op1, op2);
}
/// `VCVTSS2SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtss2sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtss2shMaskzErEmitter<A, B, C> {
<Self as Vcvtss2shMaskzErEmitter<A, B, C>>::vcvtss2sh_maskz_er(self, op0, op1, op2);
}
/// `VCVTTPH2DQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
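///
/// A minimal usage sketch (hedged: operand names are placeholders). Unlike
/// `VCVTPH2DQ`, this truncating form rounds toward zero regardless of the
/// current rounding mode:
///
/// ```ignore
/// // Sixteen FP16 values in ymm1 -> sixteen truncated i32 values in zmm0.
/// asm.vcvttph2dq(zmm0, ymm1);
/// ```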
#[inline]
pub fn vcvttph2dq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2dqEmitter<A, B> {
<Self as Vcvttph2dqEmitter<A, B>>::vcvttph2dq(self, op0, op1);
}
/// `VCVTTPH2DQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2dq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2dqMaskEmitter<A, B> {
<Self as Vcvttph2dqMaskEmitter<A, B>>::vcvttph2dq_mask(self, op0, op1);
}
/// `VCVTTPH2DQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2dq_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2dqMaskSaeEmitter<A, B> {
<Self as Vcvttph2dqMaskSaeEmitter<A, B>>::vcvttph2dq_mask_sae(self, op0, op1);
}
/// `VCVTTPH2DQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2dq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2dqMaskzEmitter<A, B> {
<Self as Vcvttph2dqMaskzEmitter<A, B>>::vcvttph2dq_maskz(self, op0, op1);
}
/// `VCVTTPH2DQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2dq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2dqMaskzSaeEmitter<A, B> {
<Self as Vcvttph2dqMaskzSaeEmitter<A, B>>::vcvttph2dq_maskz_sae(self, op0, op1);
}
/// `VCVTTPH2DQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2dq_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2dqSaeEmitter<A, B> {
<Self as Vcvttph2dqSaeEmitter<A, B>>::vcvttph2dq_sae(self, op0, op1);
}
/// `VCVTTPH2QQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2qq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2qqEmitter<A, B> {
<Self as Vcvttph2qqEmitter<A, B>>::vcvttph2qq(self, op0, op1);
}
/// `VCVTTPH2QQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2qq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2qqMaskEmitter<A, B> {
<Self as Vcvttph2qqMaskEmitter<A, B>>::vcvttph2qq_mask(self, op0, op1);
}
/// `VCVTTPH2QQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2qq_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2qqMaskSaeEmitter<A, B> {
<Self as Vcvttph2qqMaskSaeEmitter<A, B>>::vcvttph2qq_mask_sae(self, op0, op1);
}
/// `VCVTTPH2QQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2qq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2qqMaskzEmitter<A, B> {
<Self as Vcvttph2qqMaskzEmitter<A, B>>::vcvttph2qq_maskz(self, op0, op1);
}
/// `VCVTTPH2QQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2qq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2qqMaskzSaeEmitter<A, B> {
<Self as Vcvttph2qqMaskzSaeEmitter<A, B>>::vcvttph2qq_maskz_sae(self, op0, op1);
}
/// `VCVTTPH2QQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2qq_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2qqSaeEmitter<A, B> {
<Self as Vcvttph2qqSaeEmitter<A, B>>::vcvttph2qq_sae(self, op0, op1);
}
/// `VCVTTPH2UDQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2udq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2udqEmitter<A, B> {
<Self as Vcvttph2udqEmitter<A, B>>::vcvttph2udq(self, op0, op1);
}
/// `VCVTTPH2UDQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2udq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2udqMaskEmitter<A, B> {
<Self as Vcvttph2udqMaskEmitter<A, B>>::vcvttph2udq_mask(self, op0, op1);
}
/// `VCVTTPH2UDQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2udq_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2udqMaskSaeEmitter<A, B> {
<Self as Vcvttph2udqMaskSaeEmitter<A, B>>::vcvttph2udq_mask_sae(self, op0, op1);
}
/// `VCVTTPH2UDQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2udq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2udqMaskzEmitter<A, B> {
<Self as Vcvttph2udqMaskzEmitter<A, B>>::vcvttph2udq_maskz(self, op0, op1);
}
/// `VCVTTPH2UDQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2udq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2udqMaskzSaeEmitter<A, B> {
<Self as Vcvttph2udqMaskzSaeEmitter<A, B>>::vcvttph2udq_maskz_sae(self, op0, op1);
}
/// `VCVTTPH2UDQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Ymm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2udq_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2udqSaeEmitter<A, B> {
<Self as Vcvttph2udqSaeEmitter<A, B>>::vcvttph2udq_sae(self, op0, op1);
}
/// `VCVTTPH2UQQ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uqq<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uqqEmitter<A, B> {
<Self as Vcvttph2uqqEmitter<A, B>>::vcvttph2uqq(self, op0, op1);
}
/// `VCVTTPH2UQQ_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uqq_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uqqMaskEmitter<A, B> {
<Self as Vcvttph2uqqMaskEmitter<A, B>>::vcvttph2uqq_mask(self, op0, op1);
}
/// `VCVTTPH2UQQ_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uqq_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uqqMaskSaeEmitter<A, B> {
<Self as Vcvttph2uqqMaskSaeEmitter<A, B>>::vcvttph2uqq_mask_sae(self, op0, op1);
}
/// `VCVTTPH2UQQ_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Xmm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uqq_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uqqMaskzEmitter<A, B> {
<Self as Vcvttph2uqqMaskzEmitter<A, B>>::vcvttph2uqq_maskz(self, op0, op1);
}
/// `VCVTTPH2UQQ_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uqq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uqqMaskzSaeEmitter<A, B> {
<Self as Vcvttph2uqqMaskzSaeEmitter<A, B>>::vcvttph2uqq_maskz_sae(self, op0, op1);
}
/// `VCVTTPH2UQQ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uqq_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uqqSaeEmitter<A, B> {
<Self as Vcvttph2uqqSaeEmitter<A, B>>::vcvttph2uqq_sae(self, op0, op1);
}
/// `VCVTTPH2UW`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uw<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uwEmitter<A, B> {
<Self as Vcvttph2uwEmitter<A, B>>::vcvttph2uw(self, op0, op1);
}
/// `VCVTTPH2UW_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uw_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uwMaskEmitter<A, B> {
<Self as Vcvttph2uwMaskEmitter<A, B>>::vcvttph2uw_mask(self, op0, op1);
}
/// `VCVTTPH2UW_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uw_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uwMaskSaeEmitter<A, B> {
<Self as Vcvttph2uwMaskSaeEmitter<A, B>>::vcvttph2uw_mask_sae(self, op0, op1);
}
/// `VCVTTPH2UW_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uw_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uwMaskzEmitter<A, B> {
<Self as Vcvttph2uwMaskzEmitter<A, B>>::vcvttph2uw_maskz(self, op0, op1);
}
/// `VCVTTPH2UW_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uw_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uwMaskzSaeEmitter<A, B> {
<Self as Vcvttph2uwMaskzSaeEmitter<A, B>>::vcvttph2uw_maskz_sae(self, op0, op1);
}
/// `VCVTTPH2UW_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2uw_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2uwSaeEmitter<A, B> {
<Self as Vcvttph2uwSaeEmitter<A, B>>::vcvttph2uw_sae(self, op0, op1);
}
/// `VCVTTPH2W`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2w<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2wEmitter<A, B> {
<Self as Vcvttph2wEmitter<A, B>>::vcvttph2w(self, op0, op1);
}
/// `VCVTTPH2W_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2w_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2wMaskEmitter<A, B> {
<Self as Vcvttph2wMaskEmitter<A, B>>::vcvttph2w_mask(self, op0, op1);
}
/// `VCVTTPH2W_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2w_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2wMaskSaeEmitter<A, B> {
<Self as Vcvttph2wMaskSaeEmitter<A, B>>::vcvttph2w_mask_sae(self, op0, op1);
}
/// `VCVTTPH2W_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2w_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2wMaskzEmitter<A, B> {
<Self as Vcvttph2wMaskzEmitter<A, B>>::vcvttph2w_maskz(self, op0, op1);
}
/// `VCVTTPH2W_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2w_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2wMaskzSaeEmitter<A, B> {
<Self as Vcvttph2wMaskzSaeEmitter<A, B>>::vcvttph2w_maskz_sae(self, op0, op1);
}
/// `VCVTTPH2W_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttph2w_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttph2wSaeEmitter<A, B> {
<Self as Vcvttph2wSaeEmitter<A, B>>::vcvttph2w_sae(self, op0, op1);
}
/// `VCVTTSH2SI`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
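///
/// Scalar form: the low fp16 element is truncated into a general-purpose
/// register. A hedged sketch (wrapper and parameter names are
/// illustrative):
///
/// ```ignore
/// // Variant 4 (Gpq, Xmm): low fp16 element of `src` -> signed 64-bit
/// // integer in `dst`, rounding toward zero.
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Gpq, src: Xmm) {
///     asm.vcvttsh2si(dst, src);
/// }
/// ```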
#[inline]
pub fn vcvttsh2si<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttsh2siEmitter<A, B> {
<Self as Vcvttsh2siEmitter<A, B>>::vcvttsh2si(self, op0, op1);
}
/// `VCVTTSH2SI_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttsh2si_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttsh2siSaeEmitter<A, B> {
<Self as Vcvttsh2siSaeEmitter<A, B>>::vcvttsh2si_sae(self, op0, op1);
}
/// `VCVTTSH2USI`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Mem |
/// | 2 | Gpd, Xmm |
/// | 3 | Gpq, Mem |
/// | 4 | Gpq, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttsh2usi<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttsh2usiEmitter<A, B> {
<Self as Vcvttsh2usiEmitter<A, B>>::vcvttsh2usi(self, op0, op1);
}
/// `VCVTTSH2USI_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Gpq, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvttsh2usi_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvttsh2usiSaeEmitter<A, B> {
<Self as Vcvttsh2usiSaeEmitter<A, B>>::vcvttsh2usi_sae(self, op0, op1);
}
/// `VCVTUDQ2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
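///
/// Because fp16 is half the width of a dword, the destination register is
/// one size class below the source, which is why `Ymm` destinations pair
/// with `Zmm` sources here. A hedged sketch (wrapper is illustrative):
///
/// ```ignore
/// // Variant 5 (Ymm, Zmm): 16 unsigned dwords -> 16 fp16 values.
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Ymm, src: Zmm) {
///     asm.vcvtudq2ph(dst, src);
/// }
/// ```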
#[inline]
pub fn vcvtudq2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtudq2phEmitter<A, B> {
<Self as Vcvtudq2phEmitter<A, B>>::vcvtudq2ph(self, op0, op1);
}
/// `VCVTUDQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
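///
/// The `_er` suffix corresponds to EVEX embedded rounding `{er}`, which is
/// only encodable on the 512-bit-source form; how the concrete rounding
/// mode is chosen is not visible in this signature. An illustrative
/// sketch:
///
/// ```ignore
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Ymm, src: Zmm) {
///     // Rounding comes from the instruction encoding, not MXCSR.
///     asm.vcvtudq2ph_er(dst, src);
/// }
/// ```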
#[inline]
pub fn vcvtudq2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtudq2phErEmitter<A, B> {
<Self as Vcvtudq2phErEmitter<A, B>>::vcvtudq2ph_er(self, op0, op1);
}
/// `VCVTUDQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtudq2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtudq2phMaskEmitter<A, B> {
<Self as Vcvtudq2phMaskEmitter<A, B>>::vcvtudq2ph_mask(self, op0, op1);
}
/// `VCVTUDQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtudq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtudq2phMaskErEmitter<A, B> {
<Self as Vcvtudq2phMaskErEmitter<A, B>>::vcvtudq2ph_mask_er(self, op0, op1);
}
/// `VCVTUDQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Ymm, Mem |
/// | 5 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtudq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtudq2phMaskzEmitter<A, B> {
<Self as Vcvtudq2phMaskzEmitter<A, B>>::vcvtudq2ph_maskz(self, op0, op1);
}
/// `VCVTUDQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Ymm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtudq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtudq2phMaskzErEmitter<A, B> {
<Self as Vcvtudq2phMaskzErEmitter<A, B>>::vcvtudq2ph_maskz_er(self, op0, op1);
}
/// `VCVTUQQ2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuqq2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuqq2phEmitter<A, B> {
<Self as Vcvtuqq2phEmitter<A, B>>::vcvtuqq2ph(self, op0, op1);
}
/// `VCVTUQQ2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuqq2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuqq2phErEmitter<A, B> {
<Self as Vcvtuqq2phErEmitter<A, B>>::vcvtuqq2ph_er(self, op0, op1);
}
/// `VCVTUQQ2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuqq2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuqq2phMaskEmitter<A, B> {
<Self as Vcvtuqq2phMaskEmitter<A, B>>::vcvtuqq2ph_mask(self, op0, op1);
}
/// `VCVTUQQ2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuqq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuqq2phMaskErEmitter<A, B> {
<Self as Vcvtuqq2phMaskErEmitter<A, B>>::vcvtuqq2ph_mask_er(self, op0, op1);
}
/// `VCVTUQQ2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Xmm, Ymm |
/// | 4 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuqq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuqq2phMaskzEmitter<A, B> {
<Self as Vcvtuqq2phMaskzEmitter<A, B>>::vcvtuqq2ph_maskz(self, op0, op1);
}
/// `VCVTUQQ2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuqq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuqq2phMaskzErEmitter<A, B> {
<Self as Vcvtuqq2phMaskzErEmitter<A, B>>::vcvtuqq2ph_maskz_er(self, op0, op1);
}
/// `VCVTUSI2SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// | 3 | Xmm, Xmm, Mem |
/// +---+---------------+
/// ```
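///
/// Three-operand scalar convert: the integer source lands in the low fp16
/// element of the destination, while the remaining elements are taken
/// from the middle operand. A hedged sketch (wrapper and names are
/// illustrative):
///
/// ```ignore
/// // Variant 2 (Xmm, Xmm, Gpq): dst.low = fp16(src); dst.rest = upper.rest.
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Xmm, upper: Xmm, src: Gpq) {
///     asm.vcvtusi2sh(dst, upper, src);
/// }
/// ```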
#[inline]
pub fn vcvtusi2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtusi2shEmitter<A, B, C> {
<Self as Vcvtusi2shEmitter<A, B, C>>::vcvtusi2sh(self, op0, op1, op2);
}
/// `VCVTUSI2SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Gpd |
/// | 2 | Xmm, Xmm, Gpq |
/// +---+---------------+
/// ```
#[inline]
pub fn vcvtusi2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vcvtusi2shErEmitter<A, B, C> {
<Self as Vcvtusi2shErEmitter<A, B, C>>::vcvtusi2sh_er(self, op0, op1, op2);
}
/// `VCVTUW2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuw2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuw2phEmitter<A, B> {
<Self as Vcvtuw2phEmitter<A, B>>::vcvtuw2ph(self, op0, op1);
}
/// `VCVTUW2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuw2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuw2phErEmitter<A, B> {
<Self as Vcvtuw2phErEmitter<A, B>>::vcvtuw2ph_er(self, op0, op1);
}
/// `VCVTUW2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuw2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuw2phMaskEmitter<A, B> {
<Self as Vcvtuw2phMaskEmitter<A, B>>::vcvtuw2ph_mask(self, op0, op1);
}
/// `VCVTUW2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuw2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuw2phMaskErEmitter<A, B> {
<Self as Vcvtuw2phMaskErEmitter<A, B>>::vcvtuw2ph_mask_er(self, op0, op1);
}
/// `VCVTUW2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuw2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuw2phMaskzEmitter<A, B> {
<Self as Vcvtuw2phMaskzEmitter<A, B>>::vcvtuw2ph_maskz(self, op0, op1);
}
/// `VCVTUW2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtuw2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtuw2phMaskzErEmitter<A, B> {
<Self as Vcvtuw2phMaskzErEmitter<A, B>>::vcvtuw2ph_maskz_er(self, op0, op1);
}
/// `VCVTW2PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
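///
/// Signed counterpart of `VCVTUW2PH`; source and destination are the same
/// size class since words and fp16 values are both 16 bits wide. A
/// minimal sketch (wrapper is illustrative):
///
/// ```ignore
/// // Variant 6 (Zmm, Zmm): 32 signed words -> 32 fp16 values.
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Zmm, src: Zmm) {
///     asm.vcvtw2ph(dst, src);
/// }
/// ```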
#[inline]
pub fn vcvtw2ph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtw2phEmitter<A, B> {
<Self as Vcvtw2phEmitter<A, B>>::vcvtw2ph(self, op0, op1);
}
/// `VCVTW2PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtw2ph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtw2phErEmitter<A, B> {
<Self as Vcvtw2phErEmitter<A, B>>::vcvtw2ph_er(self, op0, op1);
}
/// `VCVTW2PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtw2ph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtw2phMaskEmitter<A, B> {
<Self as Vcvtw2phMaskEmitter<A, B>>::vcvtw2ph_mask(self, op0, op1);
}
/// `VCVTW2PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtw2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtw2phMaskErEmitter<A, B> {
<Self as Vcvtw2phMaskErEmitter<A, B>>::vcvtw2ph_mask_er(self, op0, op1);
}
/// `VCVTW2PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtw2ph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtw2phMaskzEmitter<A, B> {
<Self as Vcvtw2phMaskzEmitter<A, B>>::vcvtw2ph_maskz(self, op0, op1);
}
/// `VCVTW2PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vcvtw2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: Vcvtw2phMaskzErEmitter<A, B> {
<Self as Vcvtw2phMaskzErEmitter<A, B>>::vcvtw2ph_maskz_er(self, op0, op1);
}
/// `VDIVPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
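///
/// Lanewise fp16 division in the usual non-destructive three-operand AVX
/// shape: the first operand receives the quotient of the second and
/// third. A hedged sketch (wrapper and names are illustrative):
///
/// ```ignore
/// // Variant 6 (Zmm, Zmm, Zmm): dst = a / b, lane by lane over fp16.
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Zmm, a: Zmm, b: Zmm) {
///     asm.vdivph(dst, a, b);
/// }
/// ```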
#[inline]
pub fn vdivph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivphEmitter<A, B, C> {
<Self as VdivphEmitter<A, B, C>>::vdivph(self, op0, op1, op2);
}
/// `VDIVPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivphErEmitter<A, B, C> {
<Self as VdivphErEmitter<A, B, C>>::vdivph_er(self, op0, op1, op2);
}
/// `VDIVPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivphMaskEmitter<A, B, C> {
<Self as VdivphMaskEmitter<A, B, C>>::vdivph_mask(self, op0, op1, op2);
}
/// `VDIVPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivphMaskErEmitter<A, B, C> {
<Self as VdivphMaskErEmitter<A, B, C>>::vdivph_mask_er(self, op0, op1, op2);
}
/// `VDIVPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivphMaskzEmitter<A, B, C> {
<Self as VdivphMaskzEmitter<A, B, C>>::vdivph_maskz(self, op0, op1, op2);
}
/// `VDIVPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivphMaskzErEmitter<A, B, C> {
<Self as VdivphMaskzErEmitter<A, B, C>>::vdivph_maskz_er(self, op0, op1, op2);
}
/// `VDIVSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
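///
/// Scalar form: only the low fp16 element is divided; the upper elements
/// of the destination are copied from the second operand. A sketch using
/// the memory variant (wrapper and names are illustrative):
///
/// ```ignore
/// // Variant 1 (Xmm, Xmm, Mem): dst.low = a.low / mem; dst.rest = a.rest.
/// fn example<'a>(asm: &mut Assembler<'a>, dst: Xmm, a: Xmm, divisor: Mem) {
///     asm.vdivsh(dst, a, divisor);
/// }
/// ```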
#[inline]
pub fn vdivsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivshEmitter<A, B, C> {
<Self as VdivshEmitter<A, B, C>>::vdivsh(self, op0, op1, op2);
}
/// `VDIVSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivshErEmitter<A, B, C> {
<Self as VdivshErEmitter<A, B, C>>::vdivsh_er(self, op0, op1, op2);
}
/// `VDIVSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivshMaskEmitter<A, B, C> {
<Self as VdivshMaskEmitter<A, B, C>>::vdivsh_mask(self, op0, op1, op2);
}
/// `VDIVSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivshMaskErEmitter<A, B, C> {
<Self as VdivshMaskErEmitter<A, B, C>>::vdivsh_mask_er(self, op0, op1, op2);
}
/// `VDIVSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivshMaskzEmitter<A, B, C> {
<Self as VdivshMaskzEmitter<A, B, C>>::vdivsh_maskz(self, op0, op1, op2);
}
/// `VDIVSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vdivsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VdivshMaskzErEmitter<A, B, C> {
<Self as VdivshMaskzErEmitter<A, B, C>>::vdivsh_maskz_er(self, op0, op1, op2);
}
/// `VERR` (VERR).
/// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
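///
/// A minimal sketch (the `example` wrapper is illustrative; the selector
/// here is loaded from memory, per variant 2):
///
/// ```ignore
/// fn example<'a>(asm: &mut Assembler<'a>, selector: Mem) {
///     // Sets ZF iff the selected segment is readable at the current CPL.
///     asm.verr(selector);
/// }
/// ```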
#[inline]
pub fn verr<A>(&mut self, op0: A)
where Assembler<'a>: VerrEmitter<A> {
<Self as VerrEmitter<A>>::verr(self, op0);
}
/// `VERW` (VERW).
/// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd |
/// | 2 | Mem |
/// +---+----------+
/// ```
#[inline]
pub fn verw<A>(&mut self, op0: A)
where Assembler<'a>: VerwEmitter<A> {
<Self as VerwEmitter<A>>::verw(self, op0);
}
/// `VFCMADDCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
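///
/// Complex multiply-accumulate over fp16, where each complex value
/// occupies a pair of adjacent fp16 lanes; the leading `C` marks the
/// conjugating variant of `VFMADDCPH`. A hedged sketch (wrapper and
/// names are illustrative):
///
/// ```ignore
/// // Variant 6 (Zmm, Zmm, Zmm): acc += a * b with one source conjugated.
/// fn example<'a>(asm: &mut Assembler<'a>, acc: Zmm, a: Zmm, b: Zmm) {
///     asm.vfcmaddcph(acc, a, b);
/// }
/// ```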
#[inline]
pub fn vfcmaddcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcphEmitter<A, B, C> {
<Self as VfcmaddcphEmitter<A, B, C>>::vfcmaddcph(self, op0, op1, op2);
}
/// `VFCMADDCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcphErEmitter<A, B, C> {
<Self as VfcmaddcphErEmitter<A, B, C>>::vfcmaddcph_er(self, op0, op1, op2);
}
/// `VFCMADDCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcphMaskEmitter<A, B, C> {
<Self as VfcmaddcphMaskEmitter<A, B, C>>::vfcmaddcph_mask(self, op0, op1, op2);
}
/// `VFCMADDCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcphMaskErEmitter<A, B, C> {
<Self as VfcmaddcphMaskErEmitter<A, B, C>>::vfcmaddcph_mask_er(self, op0, op1, op2);
}
/// `VFCMADDCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcphMaskzEmitter<A, B, C> {
<Self as VfcmaddcphMaskzEmitter<A, B, C>>::vfcmaddcph_maskz(self, op0, op1, op2);
}
/// `VFCMADDCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcphMaskzErEmitter<A, B, C> {
<Self as VfcmaddcphMaskzErEmitter<A, B, C>>::vfcmaddcph_maskz_er(self, op0, op1, op2);
}
/// `VFCMADDCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcshEmitter<A, B, C> {
<Self as VfcmaddcshEmitter<A, B, C>>::vfcmaddcsh(self, op0, op1, op2);
}
/// `VFCMADDCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcshErEmitter<A, B, C> {
<Self as VfcmaddcshErEmitter<A, B, C>>::vfcmaddcsh_er(self, op0, op1, op2);
}
/// `VFCMADDCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcshMaskEmitter<A, B, C> {
<Self as VfcmaddcshMaskEmitter<A, B, C>>::vfcmaddcsh_mask(self, op0, op1, op2);
}
/// `VFCMADDCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcshMaskErEmitter<A, B, C> {
<Self as VfcmaddcshMaskErEmitter<A, B, C>>::vfcmaddcsh_mask_er(self, op0, op1, op2);
}
/// `VFCMADDCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcshMaskzEmitter<A, B, C> {
<Self as VfcmaddcshMaskzEmitter<A, B, C>>::vfcmaddcsh_maskz(self, op0, op1, op2);
}
/// `VFCMADDCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmaddcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmaddcshMaskzErEmitter<A, B, C> {
<Self as VfcmaddcshMaskzErEmitter<A, B, C>>::vfcmaddcsh_maskz_er(self, op0, op1, op2);
}
/// `VFCMULCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcphEmitter<A, B, C> {
<Self as VfcmulcphEmitter<A, B, C>>::vfcmulcph(self, op0, op1, op2);
}
/// `VFCMULCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcphErEmitter<A, B, C> {
<Self as VfcmulcphErEmitter<A, B, C>>::vfcmulcph_er(self, op0, op1, op2);
}
/// `VFCMULCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcphMaskEmitter<A, B, C> {
<Self as VfcmulcphMaskEmitter<A, B, C>>::vfcmulcph_mask(self, op0, op1, op2);
}
/// `VFCMULCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcphMaskErEmitter<A, B, C> {
<Self as VfcmulcphMaskErEmitter<A, B, C>>::vfcmulcph_mask_er(self, op0, op1, op2);
}
/// `VFCMULCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcphMaskzEmitter<A, B, C> {
<Self as VfcmulcphMaskzEmitter<A, B, C>>::vfcmulcph_maskz(self, op0, op1, op2);
}
/// `VFCMULCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcphMaskzErEmitter<A, B, C> {
<Self as VfcmulcphMaskzErEmitter<A, B, C>>::vfcmulcph_maskz_er(self, op0, op1, op2);
}
/// `VFCMULCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcshEmitter<A, B, C> {
<Self as VfcmulcshEmitter<A, B, C>>::vfcmulcsh(self, op0, op1, op2);
}
/// `VFCMULCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcshErEmitter<A, B, C> {
<Self as VfcmulcshErEmitter<A, B, C>>::vfcmulcsh_er(self, op0, op1, op2);
}
/// `VFCMULCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcshMaskEmitter<A, B, C> {
<Self as VfcmulcshMaskEmitter<A, B, C>>::vfcmulcsh_mask(self, op0, op1, op2);
}
/// `VFCMULCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcshMaskErEmitter<A, B, C> {
<Self as VfcmulcshMaskErEmitter<A, B, C>>::vfcmulcsh_mask_er(self, op0, op1, op2);
}
/// `VFCMULCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcshMaskzEmitter<A, B, C> {
<Self as VfcmulcshMaskzEmitter<A, B, C>>::vfcmulcsh_maskz(self, op0, op1, op2);
}
/// `VFCMULCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfcmulcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfcmulcshMaskzErEmitter<A, B, C> {
<Self as VfcmulcshMaskzErEmitter<A, B, C>>::vfcmulcsh_maskz_er(self, op0, op1, op2);
}
/// `VFMADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
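///
/// The `132`/`213`/`231` digits name the operand ordering of the fused
/// multiply-add; in the `132` form the first operand is multiplied by the
/// third and the second is added, i.e. `op0 = op0 * op2 + op1`. A hedged
/// sketch (wrapper and names are illustrative):
///
/// ```ignore
/// // Variant 6 (Zmm, Zmm, Zmm): acc = acc * factor + addend, per fp16 lane.
/// fn example<'a>(asm: &mut Assembler<'a>, acc: Zmm, addend: Zmm, factor: Zmm) {
///     asm.vfmadd132ph(acc, addend, factor);
/// }
/// ```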
#[inline]
pub fn vfmadd132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132phEmitter<A, B, C> {
<Self as Vfmadd132phEmitter<A, B, C>>::vfmadd132ph(self, op0, op1, op2);
}
/// `VFMADD132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132phErEmitter<A, B, C> {
<Self as Vfmadd132phErEmitter<A, B, C>>::vfmadd132ph_er(self, op0, op1, op2);
}
/// `VFMADD132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132phMaskEmitter<A, B, C> {
<Self as Vfmadd132phMaskEmitter<A, B, C>>::vfmadd132ph_mask(self, op0, op1, op2);
}
/// `VFMADD132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132phMaskErEmitter<A, B, C> {
<Self as Vfmadd132phMaskErEmitter<A, B, C>>::vfmadd132ph_mask_er(self, op0, op1, op2);
}
/// `VFMADD132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132phMaskzEmitter<A, B, C> {
<Self as Vfmadd132phMaskzEmitter<A, B, C>>::vfmadd132ph_maskz(self, op0, op1, op2);
}
/// `VFMADD132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132phMaskzErEmitter<A, B, C> {
<Self as Vfmadd132phMaskzErEmitter<A, B, C>>::vfmadd132ph_maskz_er(self, op0, op1, op2);
}
/// `VFMADD132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132shEmitter<A, B, C> {
<Self as Vfmadd132shEmitter<A, B, C>>::vfmadd132sh(self, op0, op1, op2);
}
/// `VFMADD132SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132shErEmitter<A, B, C> {
<Self as Vfmadd132shErEmitter<A, B, C>>::vfmadd132sh_er(self, op0, op1, op2);
}
/// `VFMADD132SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132shMaskEmitter<A, B, C> {
<Self as Vfmadd132shMaskEmitter<A, B, C>>::vfmadd132sh_mask(self, op0, op1, op2);
}
/// `VFMADD132SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132shMaskErEmitter<A, B, C> {
<Self as Vfmadd132shMaskErEmitter<A, B, C>>::vfmadd132sh_mask_er(self, op0, op1, op2);
}
/// `VFMADD132SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132shMaskzEmitter<A, B, C> {
<Self as Vfmadd132shMaskzEmitter<A, B, C>>::vfmadd132sh_maskz(self, op0, op1, op2);
}
/// `VFMADD132SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd132shMaskzErEmitter<A, B, C> {
<Self as Vfmadd132shMaskzErEmitter<A, B, C>>::vfmadd132sh_maskz_er(self, op0, op1, op2);
}
/// `VFMADD213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213phEmitter<A, B, C> {
<Self as Vfmadd213phEmitter<A, B, C>>::vfmadd213ph(self, op0, op1, op2);
}
/// `VFMADD213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213phErEmitter<A, B, C> {
<Self as Vfmadd213phErEmitter<A, B, C>>::vfmadd213ph_er(self, op0, op1, op2);
}
/// `VFMADD213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213phMaskEmitter<A, B, C> {
<Self as Vfmadd213phMaskEmitter<A, B, C>>::vfmadd213ph_mask(self, op0, op1, op2);
}
/// `VFMADD213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213phMaskErEmitter<A, B, C> {
<Self as Vfmadd213phMaskErEmitter<A, B, C>>::vfmadd213ph_mask_er(self, op0, op1, op2);
}
/// `VFMADD213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213phMaskzEmitter<A, B, C> {
<Self as Vfmadd213phMaskzEmitter<A, B, C>>::vfmadd213ph_maskz(self, op0, op1, op2);
}
/// `VFMADD213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213phMaskzErEmitter<A, B, C> {
<Self as Vfmadd213phMaskzErEmitter<A, B, C>>::vfmadd213ph_maskz_er(self, op0, op1, op2);
}
/// `VFMADD213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213shEmitter<A, B, C> {
<Self as Vfmadd213shEmitter<A, B, C>>::vfmadd213sh(self, op0, op1, op2);
}
/// `VFMADD213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213shErEmitter<A, B, C> {
<Self as Vfmadd213shErEmitter<A, B, C>>::vfmadd213sh_er(self, op0, op1, op2);
}
/// `VFMADD213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213shMaskEmitter<A, B, C> {
<Self as Vfmadd213shMaskEmitter<A, B, C>>::vfmadd213sh_mask(self, op0, op1, op2);
}
/// `VFMADD213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213shMaskErEmitter<A, B, C> {
<Self as Vfmadd213shMaskErEmitter<A, B, C>>::vfmadd213sh_mask_er(self, op0, op1, op2);
}
/// `VFMADD213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213shMaskzEmitter<A, B, C> {
<Self as Vfmadd213shMaskzEmitter<A, B, C>>::vfmadd213sh_maskz(self, op0, op1, op2);
}
/// `VFMADD213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd213shMaskzErEmitter<A, B, C> {
<Self as Vfmadd213shMaskzErEmitter<A, B, C>>::vfmadd213sh_maskz_er(self, op0, op1, op2);
}
/// `VFMADD231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231phEmitter<A, B, C> {
<Self as Vfmadd231phEmitter<A, B, C>>::vfmadd231ph(self, op0, op1, op2);
}
/// `VFMADD231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231phErEmitter<A, B, C> {
<Self as Vfmadd231phErEmitter<A, B, C>>::vfmadd231ph_er(self, op0, op1, op2);
}
/// `VFMADD231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231phMaskEmitter<A, B, C> {
<Self as Vfmadd231phMaskEmitter<A, B, C>>::vfmadd231ph_mask(self, op0, op1, op2);
}
/// `VFMADD231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231phMaskErEmitter<A, B, C> {
<Self as Vfmadd231phMaskErEmitter<A, B, C>>::vfmadd231ph_mask_er(self, op0, op1, op2);
}
/// `VFMADD231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231phMaskzEmitter<A, B, C> {
<Self as Vfmadd231phMaskzEmitter<A, B, C>>::vfmadd231ph_maskz(self, op0, op1, op2);
}
/// `VFMADD231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231phMaskzErEmitter<A, B, C> {
<Self as Vfmadd231phMaskzErEmitter<A, B, C>>::vfmadd231ph_maskz_er(self, op0, op1, op2);
}
/// `VFMADD231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231shEmitter<A, B, C> {
<Self as Vfmadd231shEmitter<A, B, C>>::vfmadd231sh(self, op0, op1, op2);
}
/// `VFMADD231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231shErEmitter<A, B, C> {
<Self as Vfmadd231shErEmitter<A, B, C>>::vfmadd231sh_er(self, op0, op1, op2);
}
/// `VFMADD231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231shMaskEmitter<A, B, C> {
<Self as Vfmadd231shMaskEmitter<A, B, C>>::vfmadd231sh_mask(self, op0, op1, op2);
}
/// `VFMADD231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231shMaskErEmitter<A, B, C> {
<Self as Vfmadd231shMaskErEmitter<A, B, C>>::vfmadd231sh_mask_er(self, op0, op1, op2);
}
/// `VFMADD231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231shMaskzEmitter<A, B, C> {
<Self as Vfmadd231shMaskzEmitter<A, B, C>>::vfmadd231sh_maskz(self, op0, op1, op2);
}
/// `VFMADD231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmadd231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmadd231shMaskzErEmitter<A, B, C> {
<Self as Vfmadd231shMaskzErEmitter<A, B, C>>::vfmadd231sh_maskz_er(self, op0, op1, op2);
}
/// `VFMADDCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcphEmitter<A, B, C> {
<Self as VfmaddcphEmitter<A, B, C>>::vfmaddcph(self, op0, op1, op2);
}
/// `VFMADDCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcphErEmitter<A, B, C> {
<Self as VfmaddcphErEmitter<A, B, C>>::vfmaddcph_er(self, op0, op1, op2);
}
/// `VFMADDCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcphMaskEmitter<A, B, C> {
<Self as VfmaddcphMaskEmitter<A, B, C>>::vfmaddcph_mask(self, op0, op1, op2);
}
/// `VFMADDCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcphMaskErEmitter<A, B, C> {
<Self as VfmaddcphMaskErEmitter<A, B, C>>::vfmaddcph_mask_er(self, op0, op1, op2);
}
/// `VFMADDCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcphMaskzEmitter<A, B, C> {
<Self as VfmaddcphMaskzEmitter<A, B, C>>::vfmaddcph_maskz(self, op0, op1, op2);
}
/// `VFMADDCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcphMaskzErEmitter<A, B, C> {
<Self as VfmaddcphMaskzErEmitter<A, B, C>>::vfmaddcph_maskz_er(self, op0, op1, op2);
}
/// `VFMADDCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcshEmitter<A, B, C> {
<Self as VfmaddcshEmitter<A, B, C>>::vfmaddcsh(self, op0, op1, op2);
}
/// `VFMADDCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcshErEmitter<A, B, C> {
<Self as VfmaddcshErEmitter<A, B, C>>::vfmaddcsh_er(self, op0, op1, op2);
}
/// `VFMADDCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcshMaskEmitter<A, B, C> {
<Self as VfmaddcshMaskEmitter<A, B, C>>::vfmaddcsh_mask(self, op0, op1, op2);
}
/// `VFMADDCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcshMaskErEmitter<A, B, C> {
<Self as VfmaddcshMaskErEmitter<A, B, C>>::vfmaddcsh_mask_er(self, op0, op1, op2);
}
/// `VFMADDCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcshMaskzEmitter<A, B, C> {
<Self as VfmaddcshMaskzEmitter<A, B, C>>::vfmaddcsh_maskz(self, op0, op1, op2);
}
/// `VFMADDCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmaddcshMaskzErEmitter<A, B, C> {
<Self as VfmaddcshMaskzErEmitter<A, B, C>>::vfmaddcsh_maskz_er(self, op0, op1, op2);
}
/// `VFMADDSUB132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
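///
/// Interleaved add/subtract FMA: odd-indexed fp16 lanes get a fused
/// multiply-add while even-indexed lanes get a fused multiply-subtract,
/// using the `132` operand ordering described for `VFMADD132PH`. A
/// hedged sketch (wrapper and names are illustrative):
///
/// ```ignore
/// // Variant 6 (Zmm, Zmm, Zmm): odd lanes = acc*factor + addend,
/// // even lanes = acc*factor - addend.
/// fn example<'a>(asm: &mut Assembler<'a>, acc: Zmm, addend: Zmm, factor: Zmm) {
///     asm.vfmaddsub132ph(acc, addend, factor);
/// }
/// ```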
#[inline]
pub fn vfmaddsub132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub132phEmitter<A, B, C> {
<Self as Vfmaddsub132phEmitter<A, B, C>>::vfmaddsub132ph(self, op0, op1, op2);
}
/// `VFMADDSUB132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub132phErEmitter<A, B, C> {
<Self as Vfmaddsub132phErEmitter<A, B, C>>::vfmaddsub132ph_er(self, op0, op1, op2);
}
/// `VFMADDSUB132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub132phMaskEmitter<A, B, C> {
<Self as Vfmaddsub132phMaskEmitter<A, B, C>>::vfmaddsub132ph_mask(self, op0, op1, op2);
}
/// `VFMADDSUB132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub132phMaskErEmitter<A, B, C> {
<Self as Vfmaddsub132phMaskErEmitter<A, B, C>>::vfmaddsub132ph_mask_er(self, op0, op1, op2);
}
/// `VFMADDSUB132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub132phMaskzEmitter<A, B, C> {
<Self as Vfmaddsub132phMaskzEmitter<A, B, C>>::vfmaddsub132ph_maskz(self, op0, op1, op2);
}
/// `VFMADDSUB132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub132phMaskzErEmitter<A, B, C> {
<Self as Vfmaddsub132phMaskzErEmitter<A, B, C>>::vfmaddsub132ph_maskz_er(self, op0, op1, op2);
}
/// `VFMADDSUB213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub213phEmitter<A, B, C> {
<Self as Vfmaddsub213phEmitter<A, B, C>>::vfmaddsub213ph(self, op0, op1, op2);
}
/// `VFMADDSUB213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub213phErEmitter<A, B, C> {
<Self as Vfmaddsub213phErEmitter<A, B, C>>::vfmaddsub213ph_er(self, op0, op1, op2);
}
/// `VFMADDSUB213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub213phMaskEmitter<A, B, C> {
<Self as Vfmaddsub213phMaskEmitter<A, B, C>>::vfmaddsub213ph_mask(self, op0, op1, op2);
}
/// `VFMADDSUB213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub213phMaskErEmitter<A, B, C> {
<Self as Vfmaddsub213phMaskErEmitter<A, B, C>>::vfmaddsub213ph_mask_er(self, op0, op1, op2);
}
/// `VFMADDSUB213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub213phMaskzEmitter<A, B, C> {
<Self as Vfmaddsub213phMaskzEmitter<A, B, C>>::vfmaddsub213ph_maskz(self, op0, op1, op2);
}
/// `VFMADDSUB213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub213phMaskzErEmitter<A, B, C> {
<Self as Vfmaddsub213phMaskzErEmitter<A, B, C>>::vfmaddsub213ph_maskz_er(self, op0, op1, op2);
}
/// `VFMADDSUB231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub231phEmitter<A, B, C> {
<Self as Vfmaddsub231phEmitter<A, B, C>>::vfmaddsub231ph(self, op0, op1, op2);
}
/// `VFMADDSUB231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub231phErEmitter<A, B, C> {
<Self as Vfmaddsub231phErEmitter<A, B, C>>::vfmaddsub231ph_er(self, op0, op1, op2);
}
/// `VFMADDSUB231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub231phMaskEmitter<A, B, C> {
<Self as Vfmaddsub231phMaskEmitter<A, B, C>>::vfmaddsub231ph_mask(self, op0, op1, op2);
}
/// `VFMADDSUB231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub231phMaskErEmitter<A, B, C> {
<Self as Vfmaddsub231phMaskErEmitter<A, B, C>>::vfmaddsub231ph_mask_er(self, op0, op1, op2);
}
/// `VFMADDSUB231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub231phMaskzEmitter<A, B, C> {
<Self as Vfmaddsub231phMaskzEmitter<A, B, C>>::vfmaddsub231ph_maskz(self, op0, op1, op2);
}
/// `VFMADDSUB231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmaddsub231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmaddsub231phMaskzErEmitter<A, B, C> {
<Self as Vfmaddsub231phMaskzErEmitter<A, B, C>>::vfmaddsub231ph_maskz_er(self, op0, op1, op2);
}
/// `VFMSUB132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
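///
/// The `132`/`213`/`231` digits give the operand roles: the `132` form
/// computes `op0 = op0 * op2 - op1`, `213` computes
/// `op0 = op1 * op0 - op2`, and `231` computes `op0 = op1 * op2 - op0`.
/// A sketch, assuming YMM register values in scope:
///
/// ```text
/// // ymm0 = ymm0 * ymm2 - ymm1 (packed FP16 fused multiply-subtract)
/// asm.vfmsub132ph(ymm0, ymm1, ymm2);
/// ```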
#[inline]
pub fn vfmsub132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132phEmitter<A, B, C> {
<Self as Vfmsub132phEmitter<A, B, C>>::vfmsub132ph(self, op0, op1, op2);
}
/// `VFMSUB132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132phErEmitter<A, B, C> {
<Self as Vfmsub132phErEmitter<A, B, C>>::vfmsub132ph_er(self, op0, op1, op2);
}
/// `VFMSUB132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132phMaskEmitter<A, B, C> {
<Self as Vfmsub132phMaskEmitter<A, B, C>>::vfmsub132ph_mask(self, op0, op1, op2);
}
/// `VFMSUB132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132phMaskErEmitter<A, B, C> {
<Self as Vfmsub132phMaskErEmitter<A, B, C>>::vfmsub132ph_mask_er(self, op0, op1, op2);
}
/// `VFMSUB132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132phMaskzEmitter<A, B, C> {
<Self as Vfmsub132phMaskzEmitter<A, B, C>>::vfmsub132ph_maskz(self, op0, op1, op2);
}
/// `VFMSUB132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132phMaskzErEmitter<A, B, C> {
<Self as Vfmsub132phMaskzErEmitter<A, B, C>>::vfmsub132ph_maskz_er(self, op0, op1, op2);
}
/// `VFMSUB132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
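///
/// The `SH` forms operate on the low FP16 element only; the upper lanes
/// of the destination are left unchanged, as in other destructive AVX-512
/// scalar FMA instructions. A sketch, assuming XMM values and a `Mem`
/// operand built elsewhere:
///
/// ```text
/// // Low half-precision lane only: xmm0 = xmm0 * [mem] - xmm1
/// asm.vfmsub132sh(xmm0, xmm1, mem);
/// ```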
#[inline]
pub fn vfmsub132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132shEmitter<A, B, C> {
<Self as Vfmsub132shEmitter<A, B, C>>::vfmsub132sh(self, op0, op1, op2);
}
/// `VFMSUB132SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132shErEmitter<A, B, C> {
<Self as Vfmsub132shErEmitter<A, B, C>>::vfmsub132sh_er(self, op0, op1, op2);
}
/// `VFMSUB132SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132shMaskEmitter<A, B, C> {
<Self as Vfmsub132shMaskEmitter<A, B, C>>::vfmsub132sh_mask(self, op0, op1, op2);
}
/// `VFMSUB132SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132shMaskErEmitter<A, B, C> {
<Self as Vfmsub132shMaskErEmitter<A, B, C>>::vfmsub132sh_mask_er(self, op0, op1, op2);
}
/// `VFMSUB132SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132shMaskzEmitter<A, B, C> {
<Self as Vfmsub132shMaskzEmitter<A, B, C>>::vfmsub132sh_maskz(self, op0, op1, op2);
}
/// `VFMSUB132SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub132shMaskzErEmitter<A, B, C> {
<Self as Vfmsub132shMaskzErEmitter<A, B, C>>::vfmsub132sh_maskz_er(self, op0, op1, op2);
}
/// `VFMSUB213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213phEmitter<A, B, C> {
<Self as Vfmsub213phEmitter<A, B, C>>::vfmsub213ph(self, op0, op1, op2);
}
/// `VFMSUB213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213phErEmitter<A, B, C> {
<Self as Vfmsub213phErEmitter<A, B, C>>::vfmsub213ph_er(self, op0, op1, op2);
}
/// `VFMSUB213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213phMaskEmitter<A, B, C> {
<Self as Vfmsub213phMaskEmitter<A, B, C>>::vfmsub213ph_mask(self, op0, op1, op2);
}
/// `VFMSUB213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213phMaskErEmitter<A, B, C> {
<Self as Vfmsub213phMaskErEmitter<A, B, C>>::vfmsub213ph_mask_er(self, op0, op1, op2);
}
/// `VFMSUB213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213phMaskzEmitter<A, B, C> {
<Self as Vfmsub213phMaskzEmitter<A, B, C>>::vfmsub213ph_maskz(self, op0, op1, op2);
}
/// `VFMSUB213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213phMaskzErEmitter<A, B, C> {
<Self as Vfmsub213phMaskzErEmitter<A, B, C>>::vfmsub213ph_maskz_er(self, op0, op1, op2);
}
/// `VFMSUB213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213shEmitter<A, B, C> {
<Self as Vfmsub213shEmitter<A, B, C>>::vfmsub213sh(self, op0, op1, op2);
}
/// `VFMSUB213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213shErEmitter<A, B, C> {
<Self as Vfmsub213shErEmitter<A, B, C>>::vfmsub213sh_er(self, op0, op1, op2);
}
/// `VFMSUB213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213shMaskEmitter<A, B, C> {
<Self as Vfmsub213shMaskEmitter<A, B, C>>::vfmsub213sh_mask(self, op0, op1, op2);
}
/// `VFMSUB213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213shMaskErEmitter<A, B, C> {
<Self as Vfmsub213shMaskErEmitter<A, B, C>>::vfmsub213sh_mask_er(self, op0, op1, op2);
}
/// `VFMSUB213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213shMaskzEmitter<A, B, C> {
<Self as Vfmsub213shMaskzEmitter<A, B, C>>::vfmsub213sh_maskz(self, op0, op1, op2);
}
/// `VFMSUB213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub213shMaskzErEmitter<A, B, C> {
<Self as Vfmsub213shMaskzErEmitter<A, B, C>>::vfmsub213sh_maskz_er(self, op0, op1, op2);
}
/// `VFMSUB231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231phEmitter<A, B, C> {
<Self as Vfmsub231phEmitter<A, B, C>>::vfmsub231ph(self, op0, op1, op2);
}
/// `VFMSUB231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231phErEmitter<A, B, C> {
<Self as Vfmsub231phErEmitter<A, B, C>>::vfmsub231ph_er(self, op0, op1, op2);
}
/// `VFMSUB231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231phMaskEmitter<A, B, C> {
<Self as Vfmsub231phMaskEmitter<A, B, C>>::vfmsub231ph_mask(self, op0, op1, op2);
}
/// `VFMSUB231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231phMaskErEmitter<A, B, C> {
<Self as Vfmsub231phMaskErEmitter<A, B, C>>::vfmsub231ph_mask_er(self, op0, op1, op2);
}
/// `VFMSUB231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231phMaskzEmitter<A, B, C> {
<Self as Vfmsub231phMaskzEmitter<A, B, C>>::vfmsub231ph_maskz(self, op0, op1, op2);
}
/// `VFMSUB231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231phMaskzErEmitter<A, B, C> {
<Self as Vfmsub231phMaskzErEmitter<A, B, C>>::vfmsub231ph_maskz_er(self, op0, op1, op2);
}
/// `VFMSUB231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231shEmitter<A, B, C> {
<Self as Vfmsub231shEmitter<A, B, C>>::vfmsub231sh(self, op0, op1, op2);
}
/// `VFMSUB231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231shErEmitter<A, B, C> {
<Self as Vfmsub231shErEmitter<A, B, C>>::vfmsub231sh_er(self, op0, op1, op2);
}
/// `VFMSUB231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231shMaskEmitter<A, B, C> {
<Self as Vfmsub231shMaskEmitter<A, B, C>>::vfmsub231sh_mask(self, op0, op1, op2);
}
/// `VFMSUB231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231shMaskErEmitter<A, B, C> {
<Self as Vfmsub231shMaskErEmitter<A, B, C>>::vfmsub231sh_mask_er(self, op0, op1, op2);
}
/// `VFMSUB231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231shMaskzEmitter<A, B, C> {
<Self as Vfmsub231shMaskzEmitter<A, B, C>>::vfmsub231sh_maskz(self, op0, op1, op2);
}
/// `VFMSUB231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsub231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsub231shMaskzErEmitter<A, B, C> {
<Self as Vfmsub231shMaskzErEmitter<A, B, C>>::vfmsub231sh_maskz_er(self, op0, op1, op2);
}
/// `VFMSUBADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
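///
/// `VFMSUBADD` is the mirror image of `VFMADDSUB`: even-indexed FP16
/// elements add `op1` after the multiply and odd-indexed elements
/// subtract it, i.e. `op0 = op0 * op2 ± op1` for the `132` form.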
#[inline]
pub fn vfmsubadd132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd132phEmitter<A, B, C> {
<Self as Vfmsubadd132phEmitter<A, B, C>>::vfmsubadd132ph(self, op0, op1, op2);
}
/// `VFMSUBADD132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd132phErEmitter<A, B, C> {
<Self as Vfmsubadd132phErEmitter<A, B, C>>::vfmsubadd132ph_er(self, op0, op1, op2);
}
/// `VFMSUBADD132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd132phMaskEmitter<A, B, C> {
<Self as Vfmsubadd132phMaskEmitter<A, B, C>>::vfmsubadd132ph_mask(self, op0, op1, op2);
}
/// `VFMSUBADD132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd132phMaskErEmitter<A, B, C> {
<Self as Vfmsubadd132phMaskErEmitter<A, B, C>>::vfmsubadd132ph_mask_er(self, op0, op1, op2);
}
/// `VFMSUBADD132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd132phMaskzEmitter<A, B, C> {
<Self as Vfmsubadd132phMaskzEmitter<A, B, C>>::vfmsubadd132ph_maskz(self, op0, op1, op2);
}
/// `VFMSUBADD132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd132phMaskzErEmitter<A, B, C> {
<Self as Vfmsubadd132phMaskzErEmitter<A, B, C>>::vfmsubadd132ph_maskz_er(self, op0, op1, op2);
}
/// `VFMSUBADD213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd213phEmitter<A, B, C> {
<Self as Vfmsubadd213phEmitter<A, B, C>>::vfmsubadd213ph(self, op0, op1, op2);
}
/// `VFMSUBADD213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd213phErEmitter<A, B, C> {
<Self as Vfmsubadd213phErEmitter<A, B, C>>::vfmsubadd213ph_er(self, op0, op1, op2);
}
/// `VFMSUBADD213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd213phMaskEmitter<A, B, C> {
<Self as Vfmsubadd213phMaskEmitter<A, B, C>>::vfmsubadd213ph_mask(self, op0, op1, op2);
}
/// `VFMSUBADD213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd213phMaskErEmitter<A, B, C> {
<Self as Vfmsubadd213phMaskErEmitter<A, B, C>>::vfmsubadd213ph_mask_er(self, op0, op1, op2);
}
/// `VFMSUBADD213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd213phMaskzEmitter<A, B, C> {
<Self as Vfmsubadd213phMaskzEmitter<A, B, C>>::vfmsubadd213ph_maskz(self, op0, op1, op2);
}
/// `VFMSUBADD213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd213phMaskzErEmitter<A, B, C> {
<Self as Vfmsubadd213phMaskzErEmitter<A, B, C>>::vfmsubadd213ph_maskz_er(self, op0, op1, op2);
}
/// `VFMSUBADD231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd231phEmitter<A, B, C> {
<Self as Vfmsubadd231phEmitter<A, B, C>>::vfmsubadd231ph(self, op0, op1, op2);
}
/// `VFMSUBADD231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd231phErEmitter<A, B, C> {
<Self as Vfmsubadd231phErEmitter<A, B, C>>::vfmsubadd231ph_er(self, op0, op1, op2);
}
/// `VFMSUBADD231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd231phMaskEmitter<A, B, C> {
<Self as Vfmsubadd231phMaskEmitter<A, B, C>>::vfmsubadd231ph_mask(self, op0, op1, op2);
}
/// `VFMSUBADD231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd231phMaskErEmitter<A, B, C> {
<Self as Vfmsubadd231phMaskErEmitter<A, B, C>>::vfmsubadd231ph_mask_er(self, op0, op1, op2);
}
/// `VFMSUBADD231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd231phMaskzEmitter<A, B, C> {
<Self as Vfmsubadd231phMaskzEmitter<A, B, C>>::vfmsubadd231ph_maskz(self, op0, op1, op2);
}
/// `VFMSUBADD231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmsubadd231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfmsubadd231phMaskzErEmitter<A, B, C> {
<Self as Vfmsubadd231phMaskzErEmitter<A, B, C>>::vfmsubadd231ph_maskz_er(self, op0, op1, op2);
}
/// `VFMULCPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
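///
/// `VFMULCPH` multiplies packed complex FP16 pairs (each pair is one real
/// and one imaginary half-precision value), performing a full complex
/// multiplication per pair. A sketch, assuming XMM register values in
/// scope:
///
/// ```text
/// // Four complex FP16 products per 128-bit register
/// asm.vfmulcph(xmm0, xmm1, xmm2);
/// ```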
#[inline]
pub fn vfmulcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcphEmitter<A, B, C> {
<Self as VfmulcphEmitter<A, B, C>>::vfmulcph(self, op0, op1, op2);
}
/// `VFMULCPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcphErEmitter<A, B, C> {
<Self as VfmulcphErEmitter<A, B, C>>::vfmulcph_er(self, op0, op1, op2);
}
/// `VFMULCPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcphMaskEmitter<A, B, C> {
<Self as VfmulcphMaskEmitter<A, B, C>>::vfmulcph_mask(self, op0, op1, op2);
}
/// `VFMULCPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcphMaskErEmitter<A, B, C> {
<Self as VfmulcphMaskErEmitter<A, B, C>>::vfmulcph_mask_er(self, op0, op1, op2);
}
/// `VFMULCPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcphMaskzEmitter<A, B, C> {
<Self as VfmulcphMaskzEmitter<A, B, C>>::vfmulcph_maskz(self, op0, op1, op2);
}
/// `VFMULCPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcphMaskzErEmitter<A, B, C> {
<Self as VfmulcphMaskzErEmitter<A, B, C>>::vfmulcph_maskz_er(self, op0, op1, op2);
}
/// `VFMULCSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
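///
/// `VFMULCSH` is the scalar counterpart: it produces one complex FP16
/// product from the low pairs of `op1` and `op2`.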
#[inline]
pub fn vfmulcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcshEmitter<A, B, C> {
<Self as VfmulcshEmitter<A, B, C>>::vfmulcsh(self, op0, op1, op2);
}
/// `VFMULCSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcshErEmitter<A, B, C> {
<Self as VfmulcshErEmitter<A, B, C>>::vfmulcsh_er(self, op0, op1, op2);
}
/// `VFMULCSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcshMaskEmitter<A, B, C> {
<Self as VfmulcshMaskEmitter<A, B, C>>::vfmulcsh_mask(self, op0, op1, op2);
}
/// `VFMULCSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcshMaskErEmitter<A, B, C> {
<Self as VfmulcshMaskErEmitter<A, B, C>>::vfmulcsh_mask_er(self, op0, op1, op2);
}
/// `VFMULCSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcshMaskzEmitter<A, B, C> {
<Self as VfmulcshMaskzEmitter<A, B, C>>::vfmulcsh_maskz(self, op0, op1, op2);
}
/// `VFMULCSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfmulcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfmulcshMaskzErEmitter<A, B, C> {
<Self as VfmulcshMaskzErEmitter<A, B, C>>::vfmulcsh_maskz_er(self, op0, op1, op2);
}
/// `VFNMADD132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
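///
/// The `N` in `VFNMADD` negates the product: the `132` form computes
/// `op0 = -(op0 * op2) + op1` on packed FP16 elements. A sketch, assuming
/// ZMM register values in scope:
///
/// ```text
/// // zmm0 = -(zmm0 * zmm2) + zmm1
/// asm.vfnmadd132ph(zmm0, zmm1, zmm2);
/// ```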
#[inline]
pub fn vfnmadd132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132phEmitter<A, B, C> {
<Self as Vfnmadd132phEmitter<A, B, C>>::vfnmadd132ph(self, op0, op1, op2);
}
/// `VFNMADD132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132phErEmitter<A, B, C> {
<Self as Vfnmadd132phErEmitter<A, B, C>>::vfnmadd132ph_er(self, op0, op1, op2);
}
/// `VFNMADD132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132phMaskEmitter<A, B, C> {
<Self as Vfnmadd132phMaskEmitter<A, B, C>>::vfnmadd132ph_mask(self, op0, op1, op2);
}
/// `VFNMADD132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132phMaskErEmitter<A, B, C> {
<Self as Vfnmadd132phMaskErEmitter<A, B, C>>::vfnmadd132ph_mask_er(self, op0, op1, op2);
}
/// `VFNMADD132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132phMaskzEmitter<A, B, C> {
<Self as Vfnmadd132phMaskzEmitter<A, B, C>>::vfnmadd132ph_maskz(self, op0, op1, op2);
}
/// `VFNMADD132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132phMaskzErEmitter<A, B, C> {
<Self as Vfnmadd132phMaskzErEmitter<A, B, C>>::vfnmadd132ph_maskz_er(self, op0, op1, op2);
}
/// `VFNMADD132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132shEmitter<A, B, C> {
<Self as Vfnmadd132shEmitter<A, B, C>>::vfnmadd132sh(self, op0, op1, op2);
}
/// `VFNMADD132SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132shErEmitter<A, B, C> {
<Self as Vfnmadd132shErEmitter<A, B, C>>::vfnmadd132sh_er(self, op0, op1, op2);
}
/// `VFNMADD132SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132shMaskEmitter<A, B, C> {
<Self as Vfnmadd132shMaskEmitter<A, B, C>>::vfnmadd132sh_mask(self, op0, op1, op2);
}
/// `VFNMADD132SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132shMaskErEmitter<A, B, C> {
<Self as Vfnmadd132shMaskErEmitter<A, B, C>>::vfnmadd132sh_mask_er(self, op0, op1, op2);
}
/// `VFNMADD132SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132shMaskzEmitter<A, B, C> {
<Self as Vfnmadd132shMaskzEmitter<A, B, C>>::vfnmadd132sh_maskz(self, op0, op1, op2);
}
/// `VFNMADD132SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd132shMaskzErEmitter<A, B, C> {
<Self as Vfnmadd132shMaskzErEmitter<A, B, C>>::vfnmadd132sh_maskz_er(self, op0, op1, op2);
}
/// `VFNMADD213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213phEmitter<A, B, C> {
<Self as Vfnmadd213phEmitter<A, B, C>>::vfnmadd213ph(self, op0, op1, op2);
}
/// `VFNMADD213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213phErEmitter<A, B, C> {
<Self as Vfnmadd213phErEmitter<A, B, C>>::vfnmadd213ph_er(self, op0, op1, op2);
}
/// `VFNMADD213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213phMaskEmitter<A, B, C> {
<Self as Vfnmadd213phMaskEmitter<A, B, C>>::vfnmadd213ph_mask(self, op0, op1, op2);
}
/// `VFNMADD213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213phMaskErEmitter<A, B, C> {
<Self as Vfnmadd213phMaskErEmitter<A, B, C>>::vfnmadd213ph_mask_er(self, op0, op1, op2);
}
/// `VFNMADD213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213phMaskzEmitter<A, B, C> {
<Self as Vfnmadd213phMaskzEmitter<A, B, C>>::vfnmadd213ph_maskz(self, op0, op1, op2);
}
/// `VFNMADD213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213phMaskzErEmitter<A, B, C> {
<Self as Vfnmadd213phMaskzErEmitter<A, B, C>>::vfnmadd213ph_maskz_er(self, op0, op1, op2);
}
/// `VFNMADD213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213shEmitter<A, B, C> {
<Self as Vfnmadd213shEmitter<A, B, C>>::vfnmadd213sh(self, op0, op1, op2);
}
/// `VFNMADD213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213shErEmitter<A, B, C> {
<Self as Vfnmadd213shErEmitter<A, B, C>>::vfnmadd213sh_er(self, op0, op1, op2);
}
/// `VFNMADD213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213shMaskEmitter<A, B, C> {
<Self as Vfnmadd213shMaskEmitter<A, B, C>>::vfnmadd213sh_mask(self, op0, op1, op2);
}
/// `VFNMADD213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213shMaskErEmitter<A, B, C> {
<Self as Vfnmadd213shMaskErEmitter<A, B, C>>::vfnmadd213sh_mask_er(self, op0, op1, op2);
}
/// `VFNMADD213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213shMaskzEmitter<A, B, C> {
<Self as Vfnmadd213shMaskzEmitter<A, B, C>>::vfnmadd213sh_maskz(self, op0, op1, op2);
}
/// `VFNMADD213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd213shMaskzErEmitter<A, B, C> {
<Self as Vfnmadd213shMaskzErEmitter<A, B, C>>::vfnmadd213sh_maskz_er(self, op0, op1, op2);
}
/// `VFNMADD231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231phEmitter<A, B, C> {
<Self as Vfnmadd231phEmitter<A, B, C>>::vfnmadd231ph(self, op0, op1, op2);
}
/// `VFNMADD231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231phErEmitter<A, B, C> {
<Self as Vfnmadd231phErEmitter<A, B, C>>::vfnmadd231ph_er(self, op0, op1, op2);
}
/// `VFNMADD231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231phMaskEmitter<A, B, C> {
<Self as Vfnmadd231phMaskEmitter<A, B, C>>::vfnmadd231ph_mask(self, op0, op1, op2);
}
/// `VFNMADD231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231phMaskErEmitter<A, B, C> {
<Self as Vfnmadd231phMaskErEmitter<A, B, C>>::vfnmadd231ph_mask_er(self, op0, op1, op2);
}
/// `VFNMADD231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231phMaskzEmitter<A, B, C> {
<Self as Vfnmadd231phMaskzEmitter<A, B, C>>::vfnmadd231ph_maskz(self, op0, op1, op2);
}
/// `VFNMADD231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231phMaskzErEmitter<A, B, C> {
<Self as Vfnmadd231phMaskzErEmitter<A, B, C>>::vfnmadd231ph_maskz_er(self, op0, op1, op2);
}
/// `VFNMADD231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231shEmitter<A, B, C> {
<Self as Vfnmadd231shEmitter<A, B, C>>::vfnmadd231sh(self, op0, op1, op2);
}
/// `VFNMADD231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231shErEmitter<A, B, C> {
<Self as Vfnmadd231shErEmitter<A, B, C>>::vfnmadd231sh_er(self, op0, op1, op2);
}
/// `VFNMADD231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231shMaskEmitter<A, B, C> {
<Self as Vfnmadd231shMaskEmitter<A, B, C>>::vfnmadd231sh_mask(self, op0, op1, op2);
}
/// `VFNMADD231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231shMaskErEmitter<A, B, C> {
<Self as Vfnmadd231shMaskErEmitter<A, B, C>>::vfnmadd231sh_mask_er(self, op0, op1, op2);
}
/// `VFNMADD231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231shMaskzEmitter<A, B, C> {
<Self as Vfnmadd231shMaskzEmitter<A, B, C>>::vfnmadd231sh_maskz(self, op0, op1, op2);
}
/// `VFNMADD231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmadd231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmadd231shMaskzErEmitter<A, B, C> {
<Self as Vfnmadd231shMaskzErEmitter<A, B, C>>::vfnmadd231sh_maskz_er(self, op0, op1, op2);
}
/// `VFNMSUB132PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132phEmitter<A, B, C> {
<Self as Vfnmsub132phEmitter<A, B, C>>::vfnmsub132ph(self, op0, op1, op2);
}
/// `VFNMSUB132PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132phErEmitter<A, B, C> {
<Self as Vfnmsub132phErEmitter<A, B, C>>::vfnmsub132ph_er(self, op0, op1, op2);
}
/// `VFNMSUB132PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132phMaskEmitter<A, B, C> {
<Self as Vfnmsub132phMaskEmitter<A, B, C>>::vfnmsub132ph_mask(self, op0, op1, op2);
}
/// `VFNMSUB132PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132phMaskErEmitter<A, B, C> {
<Self as Vfnmsub132phMaskErEmitter<A, B, C>>::vfnmsub132ph_mask_er(self, op0, op1, op2);
}
/// `VFNMSUB132PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132phMaskzEmitter<A, B, C> {
<Self as Vfnmsub132phMaskzEmitter<A, B, C>>::vfnmsub132ph_maskz(self, op0, op1, op2);
}
/// `VFNMSUB132PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132phMaskzErEmitter<A, B, C> {
<Self as Vfnmsub132phMaskzErEmitter<A, B, C>>::vfnmsub132ph_maskz_er(self, op0, op1, op2);
}
/// `VFNMSUB132SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132shEmitter<A, B, C> {
<Self as Vfnmsub132shEmitter<A, B, C>>::vfnmsub132sh(self, op0, op1, op2);
}
/// `VFNMSUB132SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132shErEmitter<A, B, C> {
<Self as Vfnmsub132shErEmitter<A, B, C>>::vfnmsub132sh_er(self, op0, op1, op2);
}
/// `VFNMSUB132SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132shMaskEmitter<A, B, C> {
<Self as Vfnmsub132shMaskEmitter<A, B, C>>::vfnmsub132sh_mask(self, op0, op1, op2);
}
/// `VFNMSUB132SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132shMaskErEmitter<A, B, C> {
<Self as Vfnmsub132shMaskErEmitter<A, B, C>>::vfnmsub132sh_mask_er(self, op0, op1, op2);
}
/// `VFNMSUB132SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132shMaskzEmitter<A, B, C> {
<Self as Vfnmsub132shMaskzEmitter<A, B, C>>::vfnmsub132sh_maskz(self, op0, op1, op2);
}
/// `VFNMSUB132SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub132shMaskzErEmitter<A, B, C> {
<Self as Vfnmsub132shMaskzErEmitter<A, B, C>>::vfnmsub132sh_maskz_er(self, op0, op1, op2);
}
/// `VFNMSUB213PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213phEmitter<A, B, C> {
<Self as Vfnmsub213phEmitter<A, B, C>>::vfnmsub213ph(self, op0, op1, op2);
}
/// `VFNMSUB213PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213phErEmitter<A, B, C> {
<Self as Vfnmsub213phErEmitter<A, B, C>>::vfnmsub213ph_er(self, op0, op1, op2);
}
/// `VFNMSUB213PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213phMaskEmitter<A, B, C> {
<Self as Vfnmsub213phMaskEmitter<A, B, C>>::vfnmsub213ph_mask(self, op0, op1, op2);
}
/// `VFNMSUB213PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213phMaskErEmitter<A, B, C> {
<Self as Vfnmsub213phMaskErEmitter<A, B, C>>::vfnmsub213ph_mask_er(self, op0, op1, op2);
}
/// `VFNMSUB213PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213phMaskzEmitter<A, B, C> {
<Self as Vfnmsub213phMaskzEmitter<A, B, C>>::vfnmsub213ph_maskz(self, op0, op1, op2);
}
/// `VFNMSUB213PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213phMaskzErEmitter<A, B, C> {
<Self as Vfnmsub213phMaskzErEmitter<A, B, C>>::vfnmsub213ph_maskz_er(self, op0, op1, op2);
}
/// `VFNMSUB213SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213shEmitter<A, B, C> {
<Self as Vfnmsub213shEmitter<A, B, C>>::vfnmsub213sh(self, op0, op1, op2);
}
/// `VFNMSUB213SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213shErEmitter<A, B, C> {
<Self as Vfnmsub213shErEmitter<A, B, C>>::vfnmsub213sh_er(self, op0, op1, op2);
}
/// `VFNMSUB213SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213shMaskEmitter<A, B, C> {
<Self as Vfnmsub213shMaskEmitter<A, B, C>>::vfnmsub213sh_mask(self, op0, op1, op2);
}
/// `VFNMSUB213SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213shMaskErEmitter<A, B, C> {
<Self as Vfnmsub213shMaskErEmitter<A, B, C>>::vfnmsub213sh_mask_er(self, op0, op1, op2);
}
/// `VFNMSUB213SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213shMaskzEmitter<A, B, C> {
<Self as Vfnmsub213shMaskzEmitter<A, B, C>>::vfnmsub213sh_maskz(self, op0, op1, op2);
}
/// `VFNMSUB213SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub213shMaskzErEmitter<A, B, C> {
<Self as Vfnmsub213shMaskzErEmitter<A, B, C>>::vfnmsub213sh_maskz_er(self, op0, op1, op2);
}
/// `VFNMSUB231PH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231phEmitter<A, B, C> {
<Self as Vfnmsub231phEmitter<A, B, C>>::vfnmsub231ph(self, op0, op1, op2);
}
/// `VFNMSUB231PH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231phErEmitter<A, B, C> {
<Self as Vfnmsub231phErEmitter<A, B, C>>::vfnmsub231ph_er(self, op0, op1, op2);
}
/// `VFNMSUB231PH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231phMaskEmitter<A, B, C> {
<Self as Vfnmsub231phMaskEmitter<A, B, C>>::vfnmsub231ph_mask(self, op0, op1, op2);
}
/// `VFNMSUB231PH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231phMaskErEmitter<A, B, C> {
<Self as Vfnmsub231phMaskErEmitter<A, B, C>>::vfnmsub231ph_mask_er(self, op0, op1, op2);
}
/// `VFNMSUB231PH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231phMaskzEmitter<A, B, C> {
<Self as Vfnmsub231phMaskzEmitter<A, B, C>>::vfnmsub231ph_maskz(self, op0, op1, op2);
}
/// `VFNMSUB231PH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231phMaskzErEmitter<A, B, C> {
<Self as Vfnmsub231phMaskzErEmitter<A, B, C>>::vfnmsub231ph_maskz_er(self, op0, op1, op2);
}
/// `VFNMSUB231SH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231shEmitter<A, B, C> {
<Self as Vfnmsub231shEmitter<A, B, C>>::vfnmsub231sh(self, op0, op1, op2);
}
/// `VFNMSUB231SH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231shErEmitter<A, B, C> {
<Self as Vfnmsub231shErEmitter<A, B, C>>::vfnmsub231sh_er(self, op0, op1, op2);
}
/// `VFNMSUB231SH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231shMaskEmitter<A, B, C> {
<Self as Vfnmsub231shMaskEmitter<A, B, C>>::vfnmsub231sh_mask(self, op0, op1, op2);
}
/// `VFNMSUB231SH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231shMaskErEmitter<A, B, C> {
<Self as Vfnmsub231shMaskErEmitter<A, B, C>>::vfnmsub231sh_mask_er(self, op0, op1, op2);
}
/// `VFNMSUB231SH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231shMaskzEmitter<A, B, C> {
<Self as Vfnmsub231shMaskzEmitter<A, B, C>>::vfnmsub231sh_maskz(self, op0, op1, op2);
}
/// `VFNMSUB231SH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vfnmsub231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vfnmsub231shMaskzErEmitter<A, B, C> {
<Self as Vfnmsub231shMaskzErEmitter<A, B, C>>::vfnmsub231sh_maskz_er(self, op0, op1, op2);
}
/// `VFPCLASSPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// | 3 | KReg, Ymm, Imm |
/// | 4 | KReg, Zmm, Imm |
/// +---+----------------+
/// ```
#[inline]
pub fn vfpclassph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfpclassphEmitter<A, B, C> {
<Self as VfpclassphEmitter<A, B, C>>::vfpclassph(self, op0, op1, op2);
}
/// `VFPCLASSPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// | 3 | KReg, Ymm, Imm |
/// | 4 | KReg, Zmm, Imm |
/// +---+----------------+
/// ```
#[inline]
pub fn vfpclassph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfpclassphMaskEmitter<A, B, C> {
<Self as VfpclassphMaskEmitter<A, B, C>>::vfpclassph_mask(self, op0, op1, op2);
}
/// `VFPCLASSSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// +---+----------------+
/// ```
#[inline]
pub fn vfpclasssh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfpclassshEmitter<A, B, C> {
<Self as VfpclassshEmitter<A, B, C>>::vfpclasssh(self, op0, op1, op2);
}
/// `VFPCLASSSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------------+
/// | # | Operands |
/// +---+----------------+
/// | 1 | KReg, Mem, Imm |
/// | 2 | KReg, Xmm, Imm |
/// +---+----------------+
/// ```
#[inline]
pub fn vfpclasssh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VfpclassshMaskEmitter<A, B, C> {
<Self as VfpclassshMaskEmitter<A, B, C>>::vfpclasssh_mask(self, op0, op1, op2);
}
/// `VGETEXPPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vgetexpph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VgetexpphEmitter<A, B> {
<Self as VgetexpphEmitter<A, B>>::vgetexpph(self, op0, op1);
}
/// `VGETEXPPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vgetexpph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VgetexpphMaskEmitter<A, B> {
<Self as VgetexpphMaskEmitter<A, B>>::vgetexpph_mask(self, op0, op1);
}
/// `VGETEXPPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vgetexpph_mask_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VgetexpphMaskSaeEmitter<A, B> {
<Self as VgetexpphMaskSaeEmitter<A, B>>::vgetexpph_mask_sae(self, op0, op1);
}
/// `VGETEXPPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vgetexpph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VgetexpphMaskzEmitter<A, B> {
<Self as VgetexpphMaskzEmitter<A, B>>::vgetexpph_maskz(self, op0, op1);
}
/// `VGETEXPPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vgetexpph_maskz_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VgetexpphMaskzSaeEmitter<A, B> {
<Self as VgetexpphMaskzSaeEmitter<A, B>>::vgetexpph_maskz_sae(self, op0, op1);
}
/// `VGETEXPPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vgetexpph_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VgetexpphSaeEmitter<A, B> {
<Self as VgetexpphSaeEmitter<A, B>>::vgetexpph_sae(self, op0, op1);
}
/// `VGETEXPSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetexpsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetexpshEmitter<A, B, C> {
<Self as VgetexpshEmitter<A, B, C>>::vgetexpsh(self, op0, op1, op2);
}
/// `VGETEXPSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetexpsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetexpshMaskEmitter<A, B, C> {
<Self as VgetexpshMaskEmitter<A, B, C>>::vgetexpsh_mask(self, op0, op1, op2);
}
/// `VGETEXPSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetexpsh_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetexpshMaskSaeEmitter<A, B, C> {
<Self as VgetexpshMaskSaeEmitter<A, B, C>>::vgetexpsh_mask_sae(self, op0, op1, op2);
}
/// `VGETEXPSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetexpsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetexpshMaskzEmitter<A, B, C> {
<Self as VgetexpshMaskzEmitter<A, B, C>>::vgetexpsh_maskz(self, op0, op1, op2);
}
/// `VGETEXPSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetexpsh_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetexpshMaskzSaeEmitter<A, B, C> {
<Self as VgetexpshMaskzSaeEmitter<A, B, C>>::vgetexpsh_maskz_sae(self, op0, op1, op2);
}
/// `VGETEXPSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetexpsh_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetexpshSaeEmitter<A, B, C> {
<Self as VgetexpshSaeEmitter<A, B, C>>::vgetexpsh_sae(self, op0, op1, op2);
}
/// `VGETMANTPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetmantph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetmantphEmitter<A, B, C> {
<Self as VgetmantphEmitter<A, B, C>>::vgetmantph(self, op0, op1, op2);
}
/// `VGETMANTPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetmantph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetmantphMaskEmitter<A, B, C> {
<Self as VgetmantphMaskEmitter<A, B, C>>::vgetmantph_mask(self, op0, op1, op2);
}
/// `VGETMANTPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetmantph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetmantphMaskSaeEmitter<A, B, C> {
<Self as VgetmantphMaskSaeEmitter<A, B, C>>::vgetmantph_mask_sae(self, op0, op1, op2);
}
/// `VGETMANTPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetmantph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetmantphMaskzEmitter<A, B, C> {
<Self as VgetmantphMaskzEmitter<A, B, C>>::vgetmantph_maskz(self, op0, op1, op2);
}
/// `VGETMANTPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetmantph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetmantphMaskzSaeEmitter<A, B, C> {
<Self as VgetmantphMaskzSaeEmitter<A, B, C>>::vgetmantph_maskz_sae(self, op0, op1, op2);
}
/// `VGETMANTPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgetmantph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VgetmantphSaeEmitter<A, B, C> {
<Self as VgetmantphSaeEmitter<A, B, C>>::vgetmantph_sae(self, op0, op1, op2);
}
/// `VGETMANTSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgetmantsh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VgetmantshEmitter<A, B, C, D> {
<Self as VgetmantshEmitter<A, B, C, D>>::vgetmantsh(self, op0, op1, op2, op3);
}
/// `VGETMANTSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgetmantsh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VgetmantshMaskEmitter<A, B, C, D> {
<Self as VgetmantshMaskEmitter<A, B, C, D>>::vgetmantsh_mask(self, op0, op1, op2, op3);
}
/// `VGETMANTSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgetmantsh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VgetmantshMaskSaeEmitter<A, B, C, D> {
<Self as VgetmantshMaskSaeEmitter<A, B, C, D>>::vgetmantsh_mask_sae(self, op0, op1, op2, op3);
}
/// `VGETMANTSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgetmantsh_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VgetmantshMaskzEmitter<A, B, C, D> {
<Self as VgetmantshMaskzEmitter<A, B, C, D>>::vgetmantsh_maskz(self, op0, op1, op2, op3);
}
/// `VGETMANTSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgetmantsh_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VgetmantshMaskzSaeEmitter<A, B, C, D> {
<Self as VgetmantshMaskzSaeEmitter<A, B, C, D>>::vgetmantsh_maskz_sae(self, op0, op1, op2, op3);
}
/// `VGETMANTSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgetmantsh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VgetmantshSaeEmitter<A, B, C, D> {
<Self as VgetmantshSaeEmitter<A, B, C, D>>::vgetmantsh_sae(self, op0, op1, op2, op3);
}
/// `VGF2P8AFFINEINVQB` (VGF2P8AFFINEINVQB).
/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8-by-8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
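///
/// As a plain-Rust sketch of the inversion semantics on a single byte
/// (reference only, not this crate's API; the real instruction operates on
/// whole vectors and packs the matrix rows as specified in the SDM):
///
/// ```
/// // GF(2^8) multiply with reduction polynomial x^8 + x^4 + x^3 + x + 1.
/// fn gf_mul(mut a: u8, b: u8) -> u8 {
///     let mut acc = 0u8;
///     for i in 0..8 {
///         if (b >> i) & 1 == 1 {
///             acc ^= a; // carry-less "add" of the current partial product
///         }
///         let carry = a & 0x80 != 0;
///         a <<= 1;
///         if carry {
///             a ^= 0x1B; // fold x^8 back into the low byte
///         }
///     }
///     acc
/// }
/// // Brute-force inverse; the instruction defines inv(0) = 0.
/// fn gf_inv(x: u8) -> u8 {
///     if x == 0 { return 0; }
///     (1..=255u8).find(|&c| gf_mul(x, c) == 1).unwrap()
/// }
/// assert_eq!(gf_mul(0x53, gf_inv(0x53)), 0x01);
/// ```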
#[inline]
pub fn vgf2p8affineinvqb<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: Vgf2p8affineinvqbEmitter<A, B, C, D> {
<Self as Vgf2p8affineinvqbEmitter<A, B, C, D>>::vgf2p8affineinvqb(self, op0, op1, op2, op3);
}
/// `VGF2P8AFFINEINVQB_MASK` (VGF2P8AFFINEINVQB).
/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8-by-8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgf2p8affineinvqb_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: Vgf2p8affineinvqbMaskEmitter<A, B, C, D> {
<Self as Vgf2p8affineinvqbMaskEmitter<A, B, C, D>>::vgf2p8affineinvqb_mask(self, op0, op1, op2, op3);
}
/// `VGF2P8AFFINEINVQB_MASKZ` (VGF2P8AFFINEINVQB).
/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8-by-8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgf2p8affineinvqb_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: Vgf2p8affineinvqbMaskzEmitter<A, B, C, D> {
<Self as Vgf2p8affineinvqbMaskzEmitter<A, B, C, D>>::vgf2p8affineinvqb_maskz(self, op0, op1, op2, op3);
}
/// `VGF2P8AFFINEQB` (VGF2P8AFFINEQB).
/// The AFFINEB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8-by-8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32, or 64 8-bit vectors. A second SIMD register or memory operand (operand 2) contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
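///
/// A per-byte sketch of the affine map itself (illustrative only; the row
/// packing and bit order used by the real instruction are defined in the
/// SDM, and the row layout below is a simplifying assumption):
///
/// ```
/// // y = A*x + b over GF(2): each output bit is the parity of one matrix
/// // row ANDed with x, and "+" is XOR.
/// fn affine_byte(rows: [u8; 8], x: u8, b: u8) -> u8 {
///     let mut y = 0u8;
///     for i in 0..8 {
///         let bit = ((rows[i] & x).count_ones() & 1) as u8;
///         y |= bit << i;
///     }
///     y ^ b
/// }
/// // With identity rows, A*x = x, so the result is x ^ b.
/// let identity = [0x01u8, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80];
/// assert_eq!(affine_byte(identity, 0xA5, 0x0F), 0xA5 ^ 0x0F);
/// ```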
#[inline]
pub fn vgf2p8affineqb<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: Vgf2p8affineqbEmitter<A, B, C, D> {
<Self as Vgf2p8affineqbEmitter<A, B, C, D>>::vgf2p8affineqb(self, op0, op1, op2, op3);
}
/// `VGF2P8AFFINEQB_MASK` (VGF2P8AFFINEQB).
/// The AFFINEB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8-by-8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32, or 64 8-bit vectors. A second SIMD register or memory operand (operand 2) contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgf2p8affineqb_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: Vgf2p8affineqbMaskEmitter<A, B, C, D> {
<Self as Vgf2p8affineqbMaskEmitter<A, B, C, D>>::vgf2p8affineqb_mask(self, op0, op1, op2, op3);
}
/// `VGF2P8AFFINEQB_MASKZ` (VGF2P8AFFINEQB).
/// The AFFINEB instruction computes an affine transformation in the Galois Field 2^8. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8-by-8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32, or 64 8-bit vectors. A second SIMD register or memory operand (operand 2) contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vgf2p8affineqb_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: Vgf2p8affineqbMaskzEmitter<A, B, C, D> {
<Self as Vgf2p8affineqbMaskzEmitter<A, B, C, D>>::vgf2p8affineqb_maskz(self, op0, op1, op2, op3);
}
/// `VGF2P8MULB` (VGF2P8MULB).
/// The instruction multiplies elements in the finite field GF(2^8), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
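///
/// Scalar reference for one byte lane (not this crate's API): shift-and-
/// reduce multiplication modulo x^8 + x^4 + x^3 + x + 1 (0x11B):
///
/// ```
/// fn gf2p8_mul(mut a: u8, mut b: u8) -> u8 {
///     let mut acc = 0u8;
///     while b != 0 {
///         if b & 1 == 1 {
///             acc ^= a; // carry-less "add" of the current partial product
///         }
///         let overflow = a & 0x80 != 0;
///         a <<= 1;
///         if overflow {
///             a ^= 0x1B; // reduce: x^8 = x^4 + x^3 + x + 1
///         }
///         b >>= 1;
///     }
///     acc
/// }
/// assert_eq!(gf2p8_mul(0x02, 0x87), 0x15);
/// ```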
#[inline]
pub fn vgf2p8mulb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vgf2p8mulbEmitter<A, B, C> {
<Self as Vgf2p8mulbEmitter<A, B, C>>::vgf2p8mulb(self, op0, op1, op2);
}
/// `VGF2P8MULB_MASK` (VGF2P8MULB).
/// The instruction multiplies elements in the finite field GF(2^8), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgf2p8mulb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vgf2p8mulbMaskEmitter<A, B, C> {
<Self as Vgf2p8mulbMaskEmitter<A, B, C>>::vgf2p8mulb_mask(self, op0, op1, op2);
}
/// `VGF2P8MULB_MASKZ` (VGF2P8MULB).
/// The instruction multiplies elements in the finite field GF(2^8), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vgf2p8mulb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vgf2p8mulbMaskzEmitter<A, B, C> {
<Self as Vgf2p8mulbMaskzEmitter<A, B, C>>::vgf2p8mulb_maskz(self, op0, op1, op2);
}
/// `VMAXPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
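///
/// A hypothetical call site (the operand constructor names below are
/// illustrative and not verified against this crate's exports):
///
/// ```ignore
/// // Each wrapper dispatches on the operand types, so the one method
/// // name covers every variant listed in the table above.
/// asm.vmaxph(zmm0, zmm1, zmm2);        // Zmm, Zmm, Zmm
/// asm.vmaxph(ymm0, ymm1, ptr(rax, 0)); // Ymm, Ymm, Mem
/// ```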
#[inline]
pub fn vmaxph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxphEmitter<A, B, C> {
<Self as VmaxphEmitter<A, B, C>>::vmaxph(self, op0, op1, op2);
}
/// `VMAXPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxphMaskEmitter<A, B, C> {
<Self as VmaxphMaskEmitter<A, B, C>>::vmaxph_mask(self, op0, op1, op2);
}
/// `VMAXPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxphMaskSaeEmitter<A, B, C> {
<Self as VmaxphMaskSaeEmitter<A, B, C>>::vmaxph_mask_sae(self, op0, op1, op2);
}
/// `VMAXPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxphMaskzEmitter<A, B, C> {
<Self as VmaxphMaskzEmitter<A, B, C>>::vmaxph_maskz(self, op0, op1, op2);
}
/// `VMAXPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxphMaskzSaeEmitter<A, B, C> {
<Self as VmaxphMaskzSaeEmitter<A, B, C>>::vmaxph_maskz_sae(self, op0, op1, op2);
}
/// `VMAXPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxphSaeEmitter<A, B, C> {
<Self as VmaxphSaeEmitter<A, B, C>>::vmaxph_sae(self, op0, op1, op2);
}
/// `VMAXSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxshEmitter<A, B, C> {
<Self as VmaxshEmitter<A, B, C>>::vmaxsh(self, op0, op1, op2);
}
/// `VMAXSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxshMaskEmitter<A, B, C> {
<Self as VmaxshMaskEmitter<A, B, C>>::vmaxsh_mask(self, op0, op1, op2);
}
/// `VMAXSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxsh_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxshMaskSaeEmitter<A, B, C> {
<Self as VmaxshMaskSaeEmitter<A, B, C>>::vmaxsh_mask_sae(self, op0, op1, op2);
}
/// `VMAXSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxshMaskzEmitter<A, B, C> {
<Self as VmaxshMaskzEmitter<A, B, C>>::vmaxsh_maskz(self, op0, op1, op2);
}
/// `VMAXSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxsh_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxshMaskzSaeEmitter<A, B, C> {
<Self as VmaxshMaskzSaeEmitter<A, B, C>>::vmaxsh_maskz_sae(self, op0, op1, op2);
}
/// `VMAXSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmaxsh_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmaxshSaeEmitter<A, B, C> {
<Self as VmaxshSaeEmitter<A, B, C>>::vmaxsh_sae(self, op0, op1, op2);
}
/// `VMINPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminphEmitter<A, B, C> {
<Self as VminphEmitter<A, B, C>>::vminph(self, op0, op1, op2);
}
/// `VMINPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminphMaskEmitter<A, B, C> {
<Self as VminphMaskEmitter<A, B, C>>::vminph_mask(self, op0, op1, op2);
}
/// `VMINPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminphMaskSaeEmitter<A, B, C> {
<Self as VminphMaskSaeEmitter<A, B, C>>::vminph_mask_sae(self, op0, op1, op2);
}
/// `VMINPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminphMaskzEmitter<A, B, C> {
<Self as VminphMaskzEmitter<A, B, C>>::vminph_maskz(self, op0, op1, op2);
}
/// `VMINPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminphMaskzSaeEmitter<A, B, C> {
<Self as VminphMaskzSaeEmitter<A, B, C>>::vminph_maskz_sae(self, op0, op1, op2);
}
/// `VMINPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminphSaeEmitter<A, B, C> {
<Self as VminphSaeEmitter<A, B, C>>::vminph_sae(self, op0, op1, op2);
}
/// `VMINSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminshEmitter<A, B, C> {
<Self as VminshEmitter<A, B, C>>::vminsh(self, op0, op1, op2);
}
/// `VMINSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminshMaskEmitter<A, B, C> {
<Self as VminshMaskEmitter<A, B, C>>::vminsh_mask(self, op0, op1, op2);
}
/// `VMINSH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminsh_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminshMaskSaeEmitter<A, B, C> {
<Self as VminshMaskSaeEmitter<A, B, C>>::vminsh_mask_sae(self, op0, op1, op2);
}
/// `VMINSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminshMaskzEmitter<A, B, C> {
<Self as VminshMaskzEmitter<A, B, C>>::vminsh_maskz(self, op0, op1, op2);
}
/// `VMINSH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminsh_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminshMaskzSaeEmitter<A, B, C> {
<Self as VminshMaskzSaeEmitter<A, B, C>>::vminsh_maskz_sae(self, op0, op1, op2);
}
/// `VMINSH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vminsh_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VminshSaeEmitter<A, B, C> {
<Self as VminshSaeEmitter<A, B, C>>::vminsh_sae(self, op0, op1, op2);
}
/// `VMOVSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Xmm |
/// | 2 | Xmm, Mem |
/// +---+----------+
/// ```
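///
/// Because `VMOVSH` has both two- and three-operand forms, they are exposed
/// as separate arity-suffixed methods. A hypothetical call site (operand
/// constructor names are illustrative, not verified against this crate):
///
/// ```ignore
/// asm.vmovsh_2(xmm0, ptr(rax, 0)); // two-operand form: Xmm, Mem
/// asm.vmovsh_3(xmm0, xmm1, xmm2);  // three-register form
/// ```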
#[inline]
pub fn vmovsh_2<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VmovshEmitter_2<A, B> {
<Self as VmovshEmitter_2<A, B>>::vmovsh_2(self, op0, op1);
}
/// `VMOVSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmovsh_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmovshEmitter_3<A, B, C> {
<Self as VmovshEmitter_3<A, B, C>>::vmovsh_3(self, op0, op1, op2);
}
/// `VMOVSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem, Xmm |
/// | 2 | Xmm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vmovsh_mask_2<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VmovshMaskEmitter_2<A, B> {
<Self as VmovshMaskEmitter_2<A, B>>::vmovsh_mask_2(self, op0, op1);
}
/// `VMOVSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmovsh_mask_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmovshMaskEmitter_3<A, B, C> {
<Self as VmovshMaskEmitter_3<A, B, C>>::vmovsh_mask_3(self, op0, op1, op2);
}
/// `VMOVSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vmovsh_maskz_2<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VmovshMaskzEmitter_2<A, B> {
<Self as VmovshMaskzEmitter_2<A, B>>::vmovsh_maskz_2(self, op0, op1);
}
/// `VMOVSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmovsh_maskz_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmovshMaskzEmitter_3<A, B, C> {
<Self as VmovshMaskzEmitter_3<A, B, C>>::vmovsh_maskz_3(self, op0, op1, op2);
}
/// `VMOVW_G2X`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Gpd |
/// | 2 | Xmm, Mem |
/// +---+----------+
/// ```
#[inline]
pub fn vmovw_g2x<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VmovwG2xEmitter<A, B> {
<Self as VmovwG2xEmitter<A, B>>::vmovw_g2x(self, op0, op1);
}
/// `VMOVW_X2G`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Gpd, Xmm |
/// | 2 | Mem, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vmovw_x2g<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VmovwX2gEmitter<A, B> {
<Self as VmovwX2gEmitter<A, B>>::vmovw_x2g(self, op0, op1);
}
/// `VMULPH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulphEmitter<A, B, C> {
<Self as VmulphEmitter<A, B, C>>::vmulph(self, op0, op1, op2);
}
/// `VMULPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulphErEmitter<A, B, C> {
<Self as VmulphErEmitter<A, B, C>>::vmulph_er(self, op0, op1, op2);
}
/// `VMULPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulphMaskEmitter<A, B, C> {
<Self as VmulphMaskEmitter<A, B, C>>::vmulph_mask(self, op0, op1, op2);
}
/// `VMULPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulphMaskErEmitter<A, B, C> {
<Self as VmulphMaskErEmitter<A, B, C>>::vmulph_mask_er(self, op0, op1, op2);
}
/// `VMULPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulphMaskzEmitter<A, B, C> {
<Self as VmulphMaskzEmitter<A, B, C>>::vmulph_maskz(self, op0, op1, op2);
}
/// `VMULPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulphMaskzErEmitter<A, B, C> {
<Self as VmulphMaskzErEmitter<A, B, C>>::vmulph_maskz_er(self, op0, op1, op2);
}
/// `VMULSH`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulshEmitter<A, B, C> {
<Self as VmulshEmitter<A, B, C>>::vmulsh(self, op0, op1, op2);
}
/// `VMULSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulshErEmitter<A, B, C> {
<Self as VmulshErEmitter<A, B, C>>::vmulsh_er(self, op0, op1, op2);
}
/// `VMULSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulshMaskEmitter<A, B, C> {
<Self as VmulshMaskEmitter<A, B, C>>::vmulsh_mask(self, op0, op1, op2);
}
/// `VMULSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulshMaskErEmitter<A, B, C> {
<Self as VmulshMaskErEmitter<A, B, C>>::vmulsh_mask_er(self, op0, op1, op2);
}
/// `VMULSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulshMaskzEmitter<A, B, C> {
<Self as VmulshMaskzEmitter<A, B, C>>::vmulsh_maskz(self, op0, op1, op2);
}
/// `VMULSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vmulsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VmulshMaskzErEmitter<A, B, C> {
<Self as VmulshMaskzErEmitter<A, B, C>>::vmulsh_maskz_er(self, op0, op1, op2);
}
/// `VPCLMULQDQ` (VPCLMULQDQ).
/// Performs a carry-less multiplication of two quadwords, selected from the first source and second source operand according to the value of the immediate byte. Bits 4 and 0 are used to select which 64-bit half of each operand to use according to Table 4-13 of the Intel manual; the other bits of the immediate byte are ignored.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PCLMULQDQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// | 3 | Ymm, Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Ymm, Imm |
/// | 5 | Zmm, Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Zmm, Imm |
/// +---+--------------------+
/// ```
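///
/// A sketch of how the immediate selects the quadword halves. The `asm`
/// value and register names are placeholders, and the immediate is shown
/// as a plain integer; wrap it in this crate's immediate operand type as
/// required:
///
/// ```text
/// // imm bit 0 selects the half of the first source, bit 4 the half of
/// // the second source.
/// asm.vpclmulqdq(xmm0, xmm1, xmm2, 0x00); // low  qword * low  qword
/// asm.vpclmulqdq(xmm0, xmm1, xmm2, 0x11); // high qword * high qword
/// ```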
#[inline]
pub fn vpclmulqdq<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VpclmulqdqEmitter<A, B, C, D> {
<Self as VpclmulqdqEmitter<A, B, C, D>>::vpclmulqdq(self, op0, op1, op2, op3);
}
/// `VPDPBSSD`.
/// Multiplies corresponding signed bytes of the two source operands, sums each group of four products, and accumulates the sums into the doubleword lanes of the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
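///
/// A sketch of the accumulation performed, with placeholder register
/// names:
///
/// ```text
/// // For each doubleword lane i of xmm0:
/// //   xmm0[i] += sum(j = 0..4) of
/// //     signed(xmm1.byte[4*i + j]) * signed(xmm2.byte[4*i + j])
/// asm.vpdpbssd(xmm0, xmm1, xmm2);
/// ```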
#[inline]
pub fn vpdpbssd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VpdpbssdEmitter<A, B, C> {
<Self as VpdpbssdEmitter<A, B, C>>::vpdpbssd(self, op0, op1, op2);
}
/// `VPDPBSSDS`.
/// Like `VPDPBSSD` (signed bytes times signed bytes), but accumulates with signed saturation.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
#[inline]
pub fn vpdpbssds<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VpdpbssdsEmitter<A, B, C> {
<Self as VpdpbssdsEmitter<A, B, C>>::vpdpbssds(self, op0, op1, op2);
}
/// `VPDPBSUD`.
/// Multiplies signed bytes of the first source operand by unsigned bytes of the second, sums each group of four products, and accumulates the sums into the doubleword lanes of the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
#[inline]
pub fn vpdpbsud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VpdpbsudEmitter<A, B, C> {
<Self as VpdpbsudEmitter<A, B, C>>::vpdpbsud(self, op0, op1, op2);
}
/// `VPDPBSUDS`.
/// Like `VPDPBSUD` (signed bytes times unsigned bytes), but accumulates with signed saturation.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
#[inline]
pub fn vpdpbsuds<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VpdpbsudsEmitter<A, B, C> {
<Self as VpdpbsudsEmitter<A, B, C>>::vpdpbsuds(self, op0, op1, op2);
}
/// `VPDPBUUD`.
/// Multiplies corresponding unsigned bytes of the two source operands, sums each group of four products, and accumulates the sums into the doubleword lanes of the destination.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
#[inline]
pub fn vpdpbuud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VpdpbuudEmitter<A, B, C> {
<Self as VpdpbuudEmitter<A, B, C>>::vpdpbuud(self, op0, op1, op2);
}
/// `VPDPBUUDS`.
/// Like `VPDPBUUD` (unsigned bytes times unsigned bytes), but accumulates with unsigned saturation.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// +---+---------------+
/// ```
#[inline]
pub fn vpdpbuuds<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VpdpbuudsEmitter<A, B, C> {
<Self as VpdpbuudsEmitter<A, B, C>>::vpdpbuuds(self, op0, op1, op2);
}
/// `VRCPPH`.
/// Computes approximate reciprocals of packed half-precision (FP16) floating-point values.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vrcpph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VrcpphEmitter<A, B> {
<Self as VrcpphEmitter<A, B>>::vrcpph(self, op0, op1);
}
/// `VRCPPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vrcpph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VrcpphMaskEmitter<A, B> {
<Self as VrcpphMaskEmitter<A, B>>::vrcpph_mask(self, op0, op1);
}
/// `VRCPPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vrcpph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VrcpphMaskzEmitter<A, B> {
<Self as VrcpphMaskzEmitter<A, B>>::vrcpph_maskz(self, op0, op1);
}
/// `VRCPSH`.
/// Computes the approximate reciprocal of the lowest half-precision (FP16) element of the second source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrcpsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrcpshEmitter<A, B, C> {
<Self as VrcpshEmitter<A, B, C>>::vrcpsh(self, op0, op1, op2);
}
/// `VRCPSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrcpsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrcpshMaskEmitter<A, B, C> {
<Self as VrcpshMaskEmitter<A, B, C>>::vrcpsh_mask(self, op0, op1, op2);
}
/// `VRCPSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrcpsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrcpshMaskzEmitter<A, B, C> {
<Self as VrcpshMaskzEmitter<A, B, C>>::vrcpsh_maskz(self, op0, op1, op2);
}
/// `VREDUCEPH`.
/// Performs a reduction transformation on packed half-precision (FP16) values, leaving in each element the fraction that remains after rounding to the precision selected by the immediate byte.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vreduceph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VreducephEmitter<A, B, C> {
<Self as VreducephEmitter<A, B, C>>::vreduceph(self, op0, op1, op2);
}
/// `VREDUCEPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vreduceph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VreducephMaskEmitter<A, B, C> {
<Self as VreducephMaskEmitter<A, B, C>>::vreduceph_mask(self, op0, op1, op2);
}
/// `VREDUCEPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vreduceph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VreducephMaskSaeEmitter<A, B, C> {
<Self as VreducephMaskSaeEmitter<A, B, C>>::vreduceph_mask_sae(self, op0, op1, op2);
}
/// `VREDUCEPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vreduceph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VreducephMaskzEmitter<A, B, C> {
<Self as VreducephMaskzEmitter<A, B, C>>::vreduceph_maskz(self, op0, op1, op2);
}
/// `VREDUCEPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vreduceph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VreducephMaskzSaeEmitter<A, B, C> {
<Self as VreducephMaskzSaeEmitter<A, B, C>>::vreduceph_maskz_sae(self, op0, op1, op2);
}
/// `VREDUCEPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vreduceph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VreducephSaeEmitter<A, B, C> {
<Self as VreducephSaeEmitter<A, B, C>>::vreduceph_sae(self, op0, op1, op2);
}
/// `VREDUCESH`.
/// Scalar form of `VREDUCEPH`, operating on the lowest half-precision (FP16) element.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vreducesh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VreduceshEmitter<A, B, C, D> {
<Self as VreduceshEmitter<A, B, C, D>>::vreducesh(self, op0, op1, op2, op3);
}
/// `VREDUCESH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vreducesh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VreduceshMaskEmitter<A, B, C, D> {
<Self as VreduceshMaskEmitter<A, B, C, D>>::vreducesh_mask(self, op0, op1, op2, op3);
}
/// `VREDUCESH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vreducesh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VreduceshMaskSaeEmitter<A, B, C, D> {
<Self as VreduceshMaskSaeEmitter<A, B, C, D>>::vreducesh_mask_sae(self, op0, op1, op2, op3);
}
/// `VREDUCESH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vreducesh_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VreduceshMaskzEmitter<A, B, C, D> {
<Self as VreduceshMaskzEmitter<A, B, C, D>>::vreducesh_maskz(self, op0, op1, op2, op3);
}
/// `VREDUCESH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vreducesh_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VreduceshMaskzSaeEmitter<A, B, C, D> {
<Self as VreduceshMaskzSaeEmitter<A, B, C, D>>::vreducesh_maskz_sae(self, op0, op1, op2, op3);
}
/// `VREDUCESH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vreducesh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VreduceshSaeEmitter<A, B, C, D> {
<Self as VreduceshSaeEmitter<A, B, C, D>>::vreducesh_sae(self, op0, op1, op2, op3);
}
/// `VRNDSCALEPH`.
/// Rounds packed half-precision (FP16) values to the number of fraction bits specified by the immediate byte.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrndscaleph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrndscalephEmitter<A, B, C> {
<Self as VrndscalephEmitter<A, B, C>>::vrndscaleph(self, op0, op1, op2);
}
/// `VRNDSCALEPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrndscaleph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrndscalephMaskEmitter<A, B, C> {
<Self as VrndscalephMaskEmitter<A, B, C>>::vrndscaleph_mask(self, op0, op1, op2);
}
/// `VRNDSCALEPH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrndscaleph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrndscalephMaskSaeEmitter<A, B, C> {
<Self as VrndscalephMaskSaeEmitter<A, B, C>>::vrndscaleph_mask_sae(self, op0, op1, op2);
}
/// `VRNDSCALEPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Imm |
/// | 3 | Ymm, Mem, Imm |
/// | 4 | Ymm, Ymm, Imm |
/// | 5 | Zmm, Mem, Imm |
/// | 6 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrndscaleph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrndscalephMaskzEmitter<A, B, C> {
<Self as VrndscalephMaskzEmitter<A, B, C>>::vrndscaleph_maskz(self, op0, op1, op2);
}
/// `VRNDSCALEPH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrndscaleph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrndscalephMaskzSaeEmitter<A, B, C> {
<Self as VrndscalephMaskzSaeEmitter<A, B, C>>::vrndscaleph_maskz_sae(self, op0, op1, op2);
}
/// `VRNDSCALEPH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Imm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrndscaleph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrndscalephSaeEmitter<A, B, C> {
<Self as VrndscalephSaeEmitter<A, B, C>>::vrndscaleph_sae(self, op0, op1, op2);
}
/// `VRNDSCALESH`.
/// Scalar form of `VRNDSCALEPH`, rounding the lowest half-precision (FP16) element.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vrndscalesh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VrndscaleshEmitter<A, B, C, D> {
<Self as VrndscaleshEmitter<A, B, C, D>>::vrndscalesh(self, op0, op1, op2, op3);
}
/// `VRNDSCALESH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vrndscalesh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VrndscaleshMaskEmitter<A, B, C, D> {
<Self as VrndscaleshMaskEmitter<A, B, C, D>>::vrndscalesh_mask(self, op0, op1, op2, op3);
}
/// `VRNDSCALESH_MASK_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vrndscalesh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VrndscaleshMaskSaeEmitter<A, B, C, D> {
<Self as VrndscaleshMaskSaeEmitter<A, B, C, D>>::vrndscalesh_mask_sae(self, op0, op1, op2, op3);
}
/// `VRNDSCALESH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Mem, Imm |
/// | 2 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vrndscalesh_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VrndscaleshMaskzEmitter<A, B, C, D> {
<Self as VrndscaleshMaskzEmitter<A, B, C, D>>::vrndscalesh_maskz(self, op0, op1, op2, op3);
}
/// `VRNDSCALESH_MASKZ_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vrndscalesh_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VrndscaleshMaskzSaeEmitter<A, B, C, D> {
<Self as VrndscaleshMaskzSaeEmitter<A, B, C, D>>::vrndscalesh_maskz_sae(self, op0, op1, op2, op3);
}
/// `VRNDSCALESH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Xmm, Imm |
/// +---+--------------------+
/// ```
#[inline]
pub fn vrndscalesh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
where Assembler<'a>: VrndscaleshSaeEmitter<A, B, C, D> {
<Self as VrndscaleshSaeEmitter<A, B, C, D>>::vrndscalesh_sae(self, op0, op1, op2, op3);
}
/// `VRSQRTPH`.
/// Computes approximate reciprocal square roots of packed half-precision (FP16) floating-point values.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vrsqrtph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VrsqrtphEmitter<A, B> {
<Self as VrsqrtphEmitter<A, B>>::vrsqrtph(self, op0, op1);
}
/// `VRSQRTPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vrsqrtph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VrsqrtphMaskEmitter<A, B> {
<Self as VrsqrtphMaskEmitter<A, B>>::vrsqrtph_mask(self, op0, op1);
}
/// `VRSQRTPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vrsqrtph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VrsqrtphMaskzEmitter<A, B> {
<Self as VrsqrtphMaskzEmitter<A, B>>::vrsqrtph_maskz(self, op0, op1);
}
/// `VRSQRTSH`.
/// Computes the approximate reciprocal square root of the lowest half-precision (FP16) element of the second source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrsqrtsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrsqrtshEmitter<A, B, C> {
<Self as VrsqrtshEmitter<A, B, C>>::vrsqrtsh(self, op0, op1, op2);
}
/// `VRSQRTSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrsqrtsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrsqrtshMaskEmitter<A, B, C> {
<Self as VrsqrtshMaskEmitter<A, B, C>>::vrsqrtsh_mask(self, op0, op1, op2);
}
/// `VRSQRTSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vrsqrtsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VrsqrtshMaskzEmitter<A, B, C> {
<Self as VrsqrtshMaskzEmitter<A, B, C>>::vrsqrtsh_maskz(self, op0, op1, op2);
}
/// `VSCALEFPH`.
/// Scales packed half-precision (FP16) values of the first source operand by powers of two taken from the second source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
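///
/// A sketch of the scaling performed, with placeholder register names:
///
/// ```text
/// // For each FP16 lane i: zmm0[i] = zmm1[i] * 2^floor(zmm2[i])
/// asm.vscalefph(zmm0, zmm1, zmm2);
/// ```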
#[inline]
pub fn vscalefph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefphEmitter<A, B, C> {
<Self as VscalefphEmitter<A, B, C>>::vscalefph(self, op0, op1, op2);
}
/// `VSCALEFPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefphErEmitter<A, B, C> {
<Self as VscalefphErEmitter<A, B, C>>::vscalefph_er(self, op0, op1, op2);
}
/// `VSCALEFPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefphMaskEmitter<A, B, C> {
<Self as VscalefphMaskEmitter<A, B, C>>::vscalefph_mask(self, op0, op1, op2);
}
/// `VSCALEFPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefphMaskErEmitter<A, B, C> {
<Self as VscalefphMaskErEmitter<A, B, C>>::vscalefph_mask_er(self, op0, op1, op2);
}
/// `VSCALEFPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefphMaskzEmitter<A, B, C> {
<Self as VscalefphMaskzEmitter<A, B, C>>::vscalefph_maskz(self, op0, op1, op2);
}
/// `VSCALEFPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefphMaskzErEmitter<A, B, C> {
<Self as VscalefphMaskzErEmitter<A, B, C>>::vscalefph_maskz_er(self, op0, op1, op2);
}
/// `VSCALEFSH`.
/// Scalar form of `VSCALEFPH`, scaling the lowest half-precision (FP16) element.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefshEmitter<A, B, C> {
<Self as VscalefshEmitter<A, B, C>>::vscalefsh(self, op0, op1, op2);
}
/// `VSCALEFSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefshErEmitter<A, B, C> {
<Self as VscalefshErEmitter<A, B, C>>::vscalefsh_er(self, op0, op1, op2);
}
/// `VSCALEFSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefshMaskEmitter<A, B, C> {
<Self as VscalefshMaskEmitter<A, B, C>>::vscalefsh_mask(self, op0, op1, op2);
}
/// `VSCALEFSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefshMaskErEmitter<A, B, C> {
<Self as VscalefshMaskErEmitter<A, B, C>>::vscalefsh_mask_er(self, op0, op1, op2);
}
/// `VSCALEFSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefshMaskzEmitter<A, B, C> {
<Self as VscalefshMaskzEmitter<A, B, C>>::vscalefsh_maskz(self, op0, op1, op2);
}
/// `VSCALEFSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vscalefsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VscalefshMaskzErEmitter<A, B, C> {
<Self as VscalefshMaskzErEmitter<A, B, C>>::vscalefsh_maskz_er(self, op0, op1, op2);
}
/// `VSM4KEY4`.
/// Performs four rounds of SM4 key expansion.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsm4key4<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vsm4key4Emitter<A, B, C> {
<Self as Vsm4key4Emitter<A, B, C>>::vsm4key4(self, op0, op1, op2);
}
/// `VSM4RNDS4`.
/// Performs four rounds of SM4 encryption.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsm4rnds4<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: Vsm4rnds4Emitter<A, B, C> {
<Self as Vsm4rnds4Emitter<A, B, C>>::vsm4rnds4(self, op0, op1, op2);
}
/// `VSQRTPH`.
/// Computes square roots of packed half-precision (FP16) floating-point values.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vsqrtph<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VsqrtphEmitter<A, B> {
<Self as VsqrtphEmitter<A, B>>::vsqrtph(self, op0, op1);
}
/// `VSQRTPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vsqrtph_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VsqrtphErEmitter<A, B> {
<Self as VsqrtphErEmitter<A, B>>::vsqrtph_er(self, op0, op1);
}
/// `VSQRTPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vsqrtph_mask<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VsqrtphMaskEmitter<A, B> {
<Self as VsqrtphMaskEmitter<A, B>>::vsqrtph_mask(self, op0, op1);
}
/// `VSQRTPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vsqrtph_mask_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VsqrtphMaskErEmitter<A, B> {
<Self as VsqrtphMaskErEmitter<A, B>>::vsqrtph_mask_er(self, op0, op1);
}
/// `VSQRTPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// | 3 | Ymm, Mem |
/// | 4 | Ymm, Ymm |
/// | 5 | Zmm, Mem |
/// | 6 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vsqrtph_maskz<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VsqrtphMaskzEmitter<A, B> {
<Self as VsqrtphMaskzEmitter<A, B>>::vsqrtph_maskz(self, op0, op1);
}
/// `VSQRTPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Zmm, Zmm |
/// +---+----------+
/// ```
#[inline]
pub fn vsqrtph_maskz_er<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VsqrtphMaskzErEmitter<A, B> {
<Self as VsqrtphMaskzErEmitter<A, B>>::vsqrtph_maskz_er(self, op0, op1);
}
/// `VSQRTSH`.
/// Computes the square root of the lowest half-precision (FP16) element of the second source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsqrtsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsqrtshEmitter<A, B, C> {
<Self as VsqrtshEmitter<A, B, C>>::vsqrtsh(self, op0, op1, op2);
}
/// `VSQRTSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsqrtsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsqrtshErEmitter<A, B, C> {
<Self as VsqrtshErEmitter<A, B, C>>::vsqrtsh_er(self, op0, op1, op2);
}
/// `VSQRTSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsqrtsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsqrtshMaskEmitter<A, B, C> {
<Self as VsqrtshMaskEmitter<A, B, C>>::vsqrtsh_mask(self, op0, op1, op2);
}
/// `VSQRTSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsqrtsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsqrtshMaskErEmitter<A, B, C> {
<Self as VsqrtshMaskErEmitter<A, B, C>>::vsqrtsh_mask_er(self, op0, op1, op2);
}
/// `VSQRTSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsqrtsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsqrtshMaskzEmitter<A, B, C> {
<Self as VsqrtshMaskzEmitter<A, B, C>>::vsqrtsh_maskz(self, op0, op1, op2);
}
/// `VSQRTSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsqrtsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsqrtshMaskzErEmitter<A, B, C> {
<Self as VsqrtshMaskzErEmitter<A, B, C>>::vsqrtsh_maskz_er(self, op0, op1, op2);
}
/// `VSUBPH`.
/// Subtracts packed half-precision (FP16) values of the second source operand from those of the first source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubphEmitter<A, B, C> {
<Self as VsubphEmitter<A, B, C>>::vsubph(self, op0, op1, op2);
}
/// `VSUBPH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubphErEmitter<A, B, C> {
<Self as VsubphErEmitter<A, B, C>>::vsubph_er(self, op0, op1, op2);
}
/// `VSUBPH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubphMaskEmitter<A, B, C> {
<Self as VsubphMaskEmitter<A, B, C>>::vsubph_mask(self, op0, op1, op2);
}
/// `VSUBPH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubphMaskErEmitter<A, B, C> {
<Self as VsubphMaskErEmitter<A, B, C>>::vsubph_mask_er(self, op0, op1, op2);
}
/// `VSUBPH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubphMaskzEmitter<A, B, C> {
<Self as VsubphMaskzEmitter<A, B, C>>::vsubph_maskz(self, op0, op1, op2);
}
/// `VSUBPH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubphMaskzErEmitter<A, B, C> {
<Self as VsubphMaskzErEmitter<A, B, C>>::vsubph_maskz_er(self, op0, op1, op2);
}
/// `VSUBSH`.
/// Subtracts the lowest half-precision (FP16) element of the second source operand from that of the first source operand.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubshEmitter<A, B, C> {
<Self as VsubshEmitter<A, B, C>>::vsubsh(self, op0, op1, op2);
}
/// `VSUBSH_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubshErEmitter<A, B, C> {
<Self as VsubshErEmitter<A, B, C>>::vsubsh_er(self, op0, op1, op2);
}
/// `VSUBSH_MASK`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubshMaskEmitter<A, B, C> {
<Self as VsubshMaskEmitter<A, B, C>>::vsubsh_mask(self, op0, op1, op2);
}
/// `VSUBSH_MASK_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubshMaskErEmitter<A, B, C> {
<Self as VsubshMaskErEmitter<A, B, C>>::vsubsh_mask_er(self, op0, op1, op2);
}
/// `VSUBSH_MASKZ`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubshMaskzEmitter<A, B, C> {
<Self as VsubshMaskzEmitter<A, B, C>>::vsubsh_maskz(self, op0, op1, op2);
}
/// `VSUBSH_MASKZ_ER`.
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Xmm |
/// +---+---------------+
/// ```
#[inline]
pub fn vsubsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
where Assembler<'a>: VsubshMaskzErEmitter<A, B, C> {
<Self as VsubshMaskzErEmitter<A, B, C>>::vsubsh_maskz_er(self, op0, op1, op2);
}
/// `VUCOMISH`.
/// Performs an unordered compare of the lowest half-precision (FP16) elements and sets the ZF, PF, and CF flags in EFLAGS accordingly.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Mem |
/// | 2 | Xmm, Xmm |
/// +---+----------+
/// ```
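///
/// A sketch, with placeholder register names:
///
/// ```text
/// // Compares the lowest FP16 lanes and sets ZF, PF, and CF, so an
/// // ordinary conditional branch can key off the result.
/// asm.vucomish(xmm0, xmm1);
/// ```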
#[inline]
pub fn vucomish<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VucomishEmitter<A, B> {
<Self as VucomishEmitter<A, B>>::vucomish(self, op0, op1);
}
/// `VUCOMISH_SAE`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Xmm, Xmm |
/// +---+----------+
/// ```
#[inline]
pub fn vucomish_sae<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: VucomishSaeEmitter<A, B> {
<Self as VucomishSaeEmitter<A, B>>::vucomish_sae(self, op0, op1);
}
/// `XCHG` (XCHG).
/// Exchanges the contents of the destination (first) and source (second) operands. The operands can be two general-purpose registers or a register and a memory location. If a memory operand is referenced, the processor’s locking protocol is automatically implemented for the duration of the exchange operation, regardless of the presence or absence of the LOCK prefix or of the value of the IOPL. (See the LOCK prefix description in the Intel manual for more information on the locking protocol.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XCHG.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------+
/// | # | Operands |
/// +---+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | Gpd, Gpd |
/// | 3 | Gpq, Gpq |
/// | 4 | Gpw, Gpw |
/// | 5 | Mem, GpbLo |
/// | 6 | Mem, Gpd |
/// | 7 | Mem, Gpq |
/// | 8 | Mem, Gpw |
/// +---+--------------+
/// ```
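///
/// A minimal sketch. `asm`, the register names, and the memory operand
/// `mem` are placeholders for this crate's real operand values:
///
/// ```text
/// asm.xchg(eax, ecx); // swap two registers
/// asm.xchg(mem, eax); // swap with memory; the exchange is locked
///                     // implicitly, no LOCK prefix needed
/// ```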
#[inline]
pub fn xchg<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: XchgEmitter<A, B> {
<Self as XchgEmitter<A, B>>::xchg(self, op0, op1);
}
/// `XLATB` (XLATB).
/// Locates a byte entry in a table in memory, using the contents of the AL register as a table index, then copies the contents of the table entry back into the AL register. The index in the AL register is treated as an unsigned integer. The XLAT and XLATB instructions get the base address of the table in memory from either the DS:EBX or the DS:BX registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). (The DS segment may be overridden with a segment override prefix.)
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XLAT%3AXLATB.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none) |
/// +---+----------+
/// ```
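///
/// A sketch of the implicit-operand call (`asm` is a placeholder; the
/// required register setup is shown as comments):
///
/// ```text
/// // With DS:(R/E)BX pointing at a 256-byte table and AL holding the
/// // index:
/// asm.xlatb(); // AL <- table[AL]
/// ```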
#[inline]
pub fn xlatb(&mut self)
where Assembler<'a>: XlatbEmitter {
<Self as XlatbEmitter>::xlatb(self);
}
/// `XOR` (XOR).
/// Performs a bitwise exclusive OR (XOR) operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is 1 if the corresponding bits of the operands are different; each bit is 0 if the corresponding bits are the same.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XOR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | # | Operands |
/// +----+--------------+
/// | 1 | GpbLo, GpbLo |
/// | 2 | GpbLo, Imm |
/// | 3 | GpbLo, Mem |
/// | 4 | Gpd, Gpd |
/// | 5 | Gpd, Imm |
/// | 6 | Gpd, Mem |
/// | 7 | Gpq, Gpq |
/// | 8 | Gpq, Imm |
/// | 9 | Gpq, Mem |
/// | 10 | Gpw, Gpw |
/// | 11 | Gpw, Imm |
/// | 12 | Gpw, Mem |
/// | 13 | Mem, GpbLo |
/// | 14 | Mem, Gpd |
/// | 15 | Mem, Gpq |
/// | 16 | Mem, Gpw |
/// | 17 | Mem, Imm |
/// +----+--------------+
/// ```
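///
/// A sketch, including the common zeroing idiom, with placeholder
/// register names:
///
/// ```text
/// asm.xor(eax, eax); // canonical way to zero a register (also clears CF/OF)
/// asm.xor(edx, ecx); // edx ^= ecx
/// ```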
#[inline]
pub fn xor<A, B>(&mut self, op0: A, op1: B)
where Assembler<'a>: XorEmitter<A, B> {
<Self as XorEmitter<A, B>>::xor(self, op0, op1);
}
}