//! AVX512DQ feature: instruction-emitter traits and `Assembler` implementations
//! (mask-register ops such as KADDB/KANDB/KMOVB and packed logical ops such as
//! VANDNPD/VANDPS, including `_mask`/`_maskz` write-mask variants).
1use crate::x86::assembler::*;
2use crate::x86::operands::*;
3use super::super::opcodes::*;
4use crate::core::emitter::*;
5use crate::core::operand::*;
6
7/// A dummy operand that represents no register. Here just for simplicity.
8const NOREG: Operand = Operand::new();
9
10/// `KADDB` (KADDB). 
11/// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
12///
13///
14/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
15///
16/// Supported operand variants:
17///
18/// ```text
19/// +---+------------------+
20/// | # | Operands         |
21/// +---+------------------+
22/// | 1 | KReg, KReg, KReg |
23/// +---+------------------+
24/// ```
25pub trait KaddbEmitter<A, B, C> {
26    fn kaddb(&mut self, op0: A, op1: B, op2: C);
27}
28
29impl<'a> KaddbEmitter<KReg, KReg, KReg> for Assembler<'a> {
30    fn kaddb(&mut self, op0: KReg, op1: KReg, op2: KReg) {
31        self.emit(KADDBKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
32    }
33}
34
35/// `KADDW` (KADDW). 
36/// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
37///
38///
39/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
40///
41/// Supported operand variants:
42///
43/// ```text
44/// +---+------------------+
45/// | # | Operands         |
46/// +---+------------------+
47/// | 1 | KReg, KReg, KReg |
48/// +---+------------------+
49/// ```
50pub trait KaddwEmitter<A, B, C> {
51    fn kaddw(&mut self, op0: A, op1: B, op2: C);
52}
53
54impl<'a> KaddwEmitter<KReg, KReg, KReg> for Assembler<'a> {
55    fn kaddw(&mut self, op0: KReg, op1: KReg, op2: KReg) {
56        self.emit(KADDWKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
57    }
58}
59
60/// `KANDB` (KANDB). 
61/// Performs a bitwise AND between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
62///
63///
64/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDW%3AKANDB%3AKANDQ%3AKANDD.html).
65///
66/// Supported operand variants:
67///
68/// ```text
69/// +---+------------------+
70/// | # | Operands         |
71/// +---+------------------+
72/// | 1 | KReg, KReg, KReg |
73/// +---+------------------+
74/// ```
75pub trait KandbEmitter<A, B, C> {
76    fn kandb(&mut self, op0: A, op1: B, op2: C);
77}
78
79impl<'a> KandbEmitter<KReg, KReg, KReg> for Assembler<'a> {
80    fn kandb(&mut self, op0: KReg, op1: KReg, op2: KReg) {
81        self.emit(KANDBKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
82    }
83}
84
85/// `KANDNB` (KANDNB). 
86/// Performs a bitwise AND NOT between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
87///
88///
89/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDNW%3AKANDNB%3AKANDNQ%3AKANDND.html).
90///
91/// Supported operand variants:
92///
93/// ```text
94/// +---+------------------+
95/// | # | Operands         |
96/// +---+------------------+
97/// | 1 | KReg, KReg, KReg |
98/// +---+------------------+
99/// ```
100pub trait KandnbEmitter<A, B, C> {
101    fn kandnb(&mut self, op0: A, op1: B, op2: C);
102}
103
104impl<'a> KandnbEmitter<KReg, KReg, KReg> for Assembler<'a> {
105    fn kandnb(&mut self, op0: KReg, op1: KReg, op2: KReg) {
106        self.emit(KANDNBKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
107    }
108}
109
110/// `KMOVB` (KMOVB). 
111/// Copies values from the source operand (second operand) to the destination operand (first operand). The source and destination operands can be mask registers, memory location or general purpose. The instruction cannot be used to transfer data between general purpose registers and or memory locations.
112///
113///
114/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KMOVW%3AKMOVB%3AKMOVQ%3AKMOVD.html).
115///
116/// Supported operand variants:
117///
118/// ```text
119/// +---+------------+
120/// | # | Operands   |
121/// +---+------------+
122/// | 1 | Gpd, KReg  |
123/// | 2 | KReg, Gpd  |
124/// | 3 | KReg, KReg |
125/// | 4 | KReg, Mem  |
126/// | 5 | Mem, KReg  |
127/// +---+------------+
128/// ```
129pub trait KmovbEmitter<A, B> {
130    fn kmovb(&mut self, op0: A, op1: B);
131}
132
133impl<'a> KmovbEmitter<KReg, KReg> for Assembler<'a> {
134    fn kmovb(&mut self, op0: KReg, op1: KReg) {
135        self.emit(KMOVBKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
136    }
137}
138
139impl<'a> KmovbEmitter<KReg, Mem> for Assembler<'a> {
140    fn kmovb(&mut self, op0: KReg, op1: Mem) {
141        self.emit(KMOVBKM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
142    }
143}
144
145impl<'a> KmovbEmitter<Mem, KReg> for Assembler<'a> {
146    fn kmovb(&mut self, op0: Mem, op1: KReg) {
147        self.emit(KMOVBMK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
148    }
149}
150
151impl<'a> KmovbEmitter<KReg, Gpd> for Assembler<'a> {
152    fn kmovb(&mut self, op0: KReg, op1: Gpd) {
153        self.emit(KMOVBKR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
154    }
155}
156
157impl<'a> KmovbEmitter<Gpd, KReg> for Assembler<'a> {
158    fn kmovb(&mut self, op0: Gpd, op1: KReg) {
159        self.emit(KMOVBRK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
160    }
161}
162
163/// `KNOTB` (KNOTB). 
164/// Performs a bitwise NOT of vector mask k2 and writes the result into vector mask k1.
165///
166///
167/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KNOTW%3AKNOTB%3AKNOTQ%3AKNOTD.html).
168///
169/// Supported operand variants:
170///
171/// ```text
172/// +---+------------+
173/// | # | Operands   |
174/// +---+------------+
175/// | 1 | KReg, KReg |
176/// +---+------------+
177/// ```
178pub trait KnotbEmitter<A, B> {
179    fn knotb(&mut self, op0: A, op1: B);
180}
181
182impl<'a> KnotbEmitter<KReg, KReg> for Assembler<'a> {
183    fn knotb(&mut self, op0: KReg, op1: KReg) {
184        self.emit(KNOTBKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
185    }
186}
187
188/// `KORB` (KORB). 
189/// Performs a bitwise OR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
190///
191///
192/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORW%3AKORB%3AKORQ%3AKORD.html).
193///
194/// Supported operand variants:
195///
196/// ```text
197/// +---+------------------+
198/// | # | Operands         |
199/// +---+------------------+
200/// | 1 | KReg, KReg, KReg |
201/// +---+------------------+
202/// ```
203pub trait KorbEmitter<A, B, C> {
204    fn korb(&mut self, op0: A, op1: B, op2: C);
205}
206
207impl<'a> KorbEmitter<KReg, KReg, KReg> for Assembler<'a> {
208    fn korb(&mut self, op0: KReg, op1: KReg, op2: KReg) {
209        self.emit(KORBKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
210    }
211}
212
213/// `KORTESTB` (KORTESTB). 
214/// Performs a bitwise OR between the vector mask register k2, and the vector mask register k1, and sets CF and ZF based on the operation result.
215///
216///
217/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORTESTW%3AKORTESTB%3AKORTESTQ%3AKORTESTD.html).
218///
219/// Supported operand variants:
220///
221/// ```text
222/// +---+------------+
223/// | # | Operands   |
224/// +---+------------+
225/// | 1 | KReg, KReg |
226/// +---+------------+
227/// ```
228pub trait KortestbEmitter<A, B> {
229    fn kortestb(&mut self, op0: A, op1: B);
230}
231
232impl<'a> KortestbEmitter<KReg, KReg> for Assembler<'a> {
233    fn kortestb(&mut self, op0: KReg, op1: KReg) {
234        self.emit(KORTESTBKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
235    }
236}
237
238/// `KSHIFTLB` (KSHIFTLB). 
239/// Shifts 8/16/32/64 bits in the second operand (source operand) left by the count specified in immediate byte and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
240///
241///
242/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTLW%3AKSHIFTLB%3AKSHIFTLQ%3AKSHIFTLD.html).
243///
244/// Supported operand variants:
245///
246/// ```text
247/// +---+-----------------+
248/// | # | Operands        |
249/// +---+-----------------+
250/// | 1 | KReg, KReg, Imm |
251/// +---+-----------------+
252/// ```
253pub trait KshiftlbEmitter<A, B, C> {
254    fn kshiftlb(&mut self, op0: A, op1: B, op2: C);
255}
256
257impl<'a> KshiftlbEmitter<KReg, KReg, Imm> for Assembler<'a> {
258    fn kshiftlb(&mut self, op0: KReg, op1: KReg, op2: Imm) {
259        self.emit(KSHIFTLBKKI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
260    }
261}
262
263/// `KSHIFTRB` (KSHIFTRB). 
264/// Shifts 8/16/32/64 bits in the second operand (source operand) right by the count specified in immediate and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
265///
266///
267/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTRW%3AKSHIFTRB%3AKSHIFTRQ%3AKSHIFTRD.html).
268///
269/// Supported operand variants:
270///
271/// ```text
272/// +---+-----------------+
273/// | # | Operands        |
274/// +---+-----------------+
275/// | 1 | KReg, KReg, Imm |
276/// +---+-----------------+
277/// ```
278pub trait KshiftrbEmitter<A, B, C> {
279    fn kshiftrb(&mut self, op0: A, op1: B, op2: C);
280}
281
282impl<'a> KshiftrbEmitter<KReg, KReg, Imm> for Assembler<'a> {
283    fn kshiftrb(&mut self, op0: KReg, op1: KReg, op2: Imm) {
284        self.emit(KSHIFTRBKKI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
285    }
286}
287
288/// `KTESTB` (KTESTB). 
289/// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
290///
291///
292/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
293///
294/// Supported operand variants:
295///
296/// ```text
297/// +---+------------+
298/// | # | Operands   |
299/// +---+------------+
300/// | 1 | KReg, KReg |
301/// +---+------------+
302/// ```
303pub trait KtestbEmitter<A, B> {
304    fn ktestb(&mut self, op0: A, op1: B);
305}
306
307impl<'a> KtestbEmitter<KReg, KReg> for Assembler<'a> {
308    fn ktestb(&mut self, op0: KReg, op1: KReg) {
309        self.emit(KTESTBKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
310    }
311}
312
313/// `KTESTW` (KTESTW). 
314/// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
315///
316///
317/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
318///
319/// Supported operand variants:
320///
321/// ```text
322/// +---+------------+
323/// | # | Operands   |
324/// +---+------------+
325/// | 1 | KReg, KReg |
326/// +---+------------+
327/// ```
328pub trait KtestwEmitter<A, B> {
329    fn ktestw(&mut self, op0: A, op1: B);
330}
331
332impl<'a> KtestwEmitter<KReg, KReg> for Assembler<'a> {
333    fn ktestw(&mut self, op0: KReg, op1: KReg) {
334        self.emit(KTESTWKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
335    }
336}
337
338/// `KXNORB` (KXNORB). 
339/// Performs a bitwise XNOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
340///
341///
342/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXNORW%3AKXNORB%3AKXNORQ%3AKXNORD.html).
343///
344/// Supported operand variants:
345///
346/// ```text
347/// +---+------------------+
348/// | # | Operands         |
349/// +---+------------------+
350/// | 1 | KReg, KReg, KReg |
351/// +---+------------------+
352/// ```
353pub trait KxnorbEmitter<A, B, C> {
354    fn kxnorb(&mut self, op0: A, op1: B, op2: C);
355}
356
357impl<'a> KxnorbEmitter<KReg, KReg, KReg> for Assembler<'a> {
358    fn kxnorb(&mut self, op0: KReg, op1: KReg, op2: KReg) {
359        self.emit(KXNORBKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
360    }
361}
362
363/// `KXORB` (KXORB). 
364/// Performs a bitwise XOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
365///
366///
367/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXORW%3AKXORB%3AKXORQ%3AKXORD.html).
368///
369/// Supported operand variants:
370///
371/// ```text
372/// +---+------------------+
373/// | # | Operands         |
374/// +---+------------------+
375/// | 1 | KReg, KReg, KReg |
376/// +---+------------------+
377/// ```
378pub trait KxorbEmitter<A, B, C> {
379    fn kxorb(&mut self, op0: A, op1: B, op2: C);
380}
381
382impl<'a> KxorbEmitter<KReg, KReg, KReg> for Assembler<'a> {
383    fn kxorb(&mut self, op0: KReg, op1: KReg, op2: KReg) {
384        self.emit(KXORBKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
385    }
386}
387
388/// `VANDNPD` (VANDNPD). 
389/// Performs a bitwise logical AND NOT of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
390///
391///
392/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPD.html).
393///
394/// Supported operand variants:
395///
396/// ```text
397/// +---+---------------+
398/// | # | Operands      |
399/// +---+---------------+
400/// | 1 | Xmm, Xmm, Mem |
401/// | 2 | Xmm, Xmm, Xmm |
402/// | 3 | Ymm, Ymm, Mem |
403/// | 4 | Ymm, Ymm, Ymm |
404/// | 5 | Zmm, Zmm, Mem |
405/// | 6 | Zmm, Zmm, Zmm |
406/// +---+---------------+
407/// ```
408pub trait VandnpdEmitter<A, B, C> {
409    fn vandnpd(&mut self, op0: A, op1: B, op2: C);
410}
411
412impl<'a> VandnpdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
413    fn vandnpd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
414        self.emit(VANDNPD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
415    }
416}
417
418impl<'a> VandnpdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
419    fn vandnpd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
420        self.emit(VANDNPD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
421    }
422}
423
424impl<'a> VandnpdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
425    fn vandnpd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
426        self.emit(VANDNPD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
427    }
428}
429
430impl<'a> VandnpdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
431    fn vandnpd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
432        self.emit(VANDNPD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
433    }
434}
435
436impl<'a> VandnpdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
437    fn vandnpd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
438        self.emit(VANDNPD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
439    }
440}
441
442impl<'a> VandnpdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
443    fn vandnpd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
444        self.emit(VANDNPD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
445    }
446}
447
448/// `VANDNPD_MASK` (VANDNPD). 
449/// Performs a bitwise logical AND NOT of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
450///
451///
452/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPD.html).
453///
454/// Supported operand variants:
455///
456/// ```text
457/// +---+---------------+
458/// | # | Operands      |
459/// +---+---------------+
460/// | 1 | Xmm, Xmm, Mem |
461/// | 2 | Xmm, Xmm, Xmm |
462/// | 3 | Ymm, Ymm, Mem |
463/// | 4 | Ymm, Ymm, Ymm |
464/// | 5 | Zmm, Zmm, Mem |
465/// | 6 | Zmm, Zmm, Zmm |
466/// +---+---------------+
467/// ```
468pub trait VandnpdMaskEmitter<A, B, C> {
469    fn vandnpd_mask(&mut self, op0: A, op1: B, op2: C);
470}
471
472impl<'a> VandnpdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
473    fn vandnpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
474        self.emit(VANDNPD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
475    }
476}
477
478impl<'a> VandnpdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
479    fn vandnpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
480        self.emit(VANDNPD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
481    }
482}
483
484impl<'a> VandnpdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
485    fn vandnpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
486        self.emit(VANDNPD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
487    }
488}
489
490impl<'a> VandnpdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
491    fn vandnpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
492        self.emit(VANDNPD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
493    }
494}
495
496impl<'a> VandnpdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
497    fn vandnpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
498        self.emit(VANDNPD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
499    }
500}
501
502impl<'a> VandnpdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
503    fn vandnpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
504        self.emit(VANDNPD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
505    }
506}
507
508/// `VANDNPD_MASKZ` (VANDNPD). 
509/// Performs a bitwise logical AND NOT of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
510///
511///
512/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPD.html).
513///
514/// Supported operand variants:
515///
516/// ```text
517/// +---+---------------+
518/// | # | Operands      |
519/// +---+---------------+
520/// | 1 | Xmm, Xmm, Mem |
521/// | 2 | Xmm, Xmm, Xmm |
522/// | 3 | Ymm, Ymm, Mem |
523/// | 4 | Ymm, Ymm, Ymm |
524/// | 5 | Zmm, Zmm, Mem |
525/// | 6 | Zmm, Zmm, Zmm |
526/// +---+---------------+
527/// ```
528pub trait VandnpdMaskzEmitter<A, B, C> {
529    fn vandnpd_maskz(&mut self, op0: A, op1: B, op2: C);
530}
531
532impl<'a> VandnpdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
533    fn vandnpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
534        self.emit(VANDNPD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
535    }
536}
537
538impl<'a> VandnpdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
539    fn vandnpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
540        self.emit(VANDNPD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
541    }
542}
543
544impl<'a> VandnpdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
545    fn vandnpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
546        self.emit(VANDNPD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
547    }
548}
549
550impl<'a> VandnpdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
551    fn vandnpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
552        self.emit(VANDNPD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
553    }
554}
555
556impl<'a> VandnpdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
557    fn vandnpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
558        self.emit(VANDNPD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
559    }
560}
561
562impl<'a> VandnpdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
563    fn vandnpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
564        self.emit(VANDNPD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
565    }
566}
567
568/// `VANDNPS` (VANDNPS). 
569/// Performs a bitwise logical AND NOT of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
570///
571///
572/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPS.html).
573///
574/// Supported operand variants:
575///
576/// ```text
577/// +---+---------------+
578/// | # | Operands      |
579/// +---+---------------+
580/// | 1 | Xmm, Xmm, Mem |
581/// | 2 | Xmm, Xmm, Xmm |
582/// | 3 | Ymm, Ymm, Mem |
583/// | 4 | Ymm, Ymm, Ymm |
584/// | 5 | Zmm, Zmm, Mem |
585/// | 6 | Zmm, Zmm, Zmm |
586/// +---+---------------+
587/// ```
588pub trait VandnpsEmitter<A, B, C> {
589    fn vandnps(&mut self, op0: A, op1: B, op2: C);
590}
591
592impl<'a> VandnpsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
593    fn vandnps(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
594        self.emit(VANDNPS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
595    }
596}
597
598impl<'a> VandnpsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
599    fn vandnps(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
600        self.emit(VANDNPS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
601    }
602}
603
604impl<'a> VandnpsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
605    fn vandnps(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
606        self.emit(VANDNPS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
607    }
608}
609
610impl<'a> VandnpsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
611    fn vandnps(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
612        self.emit(VANDNPS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
613    }
614}
615
616impl<'a> VandnpsEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
617    fn vandnps(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
618        self.emit(VANDNPS512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
619    }
620}
621
622impl<'a> VandnpsEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
623    fn vandnps(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
624        self.emit(VANDNPS512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
625    }
626}
627
628/// `VANDNPS_MASK` (VANDNPS). 
629/// Performs a bitwise logical AND NOT of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
630///
631///
632/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPS.html).
633///
634/// Supported operand variants:
635///
636/// ```text
637/// +---+---------------+
638/// | # | Operands      |
639/// +---+---------------+
640/// | 1 | Xmm, Xmm, Mem |
641/// | 2 | Xmm, Xmm, Xmm |
642/// | 3 | Ymm, Ymm, Mem |
643/// | 4 | Ymm, Ymm, Ymm |
644/// | 5 | Zmm, Zmm, Mem |
645/// | 6 | Zmm, Zmm, Zmm |
646/// +---+---------------+
647/// ```
648pub trait VandnpsMaskEmitter<A, B, C> {
649    fn vandnps_mask(&mut self, op0: A, op1: B, op2: C);
650}
651
652impl<'a> VandnpsMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
653    fn vandnps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
654        self.emit(VANDNPS128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
655    }
656}
657
658impl<'a> VandnpsMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
659    fn vandnps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
660        self.emit(VANDNPS128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
661    }
662}
663
664impl<'a> VandnpsMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
665    fn vandnps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
666        self.emit(VANDNPS256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
667    }
668}
669
670impl<'a> VandnpsMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
671    fn vandnps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
672        self.emit(VANDNPS256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
673    }
674}
675
676impl<'a> VandnpsMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
677    fn vandnps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
678        self.emit(VANDNPS512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
679    }
680}
681
682impl<'a> VandnpsMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
683    fn vandnps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
684        self.emit(VANDNPS512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
685    }
686}
687
688/// `VANDNPS_MASKZ` (VANDNPS). 
689/// Performs a bitwise logical AND NOT of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
690///
691///
692/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPS.html).
693///
694/// Supported operand variants:
695///
696/// ```text
697/// +---+---------------+
698/// | # | Operands      |
699/// +---+---------------+
700/// | 1 | Xmm, Xmm, Mem |
701/// | 2 | Xmm, Xmm, Xmm |
702/// | 3 | Ymm, Ymm, Mem |
703/// | 4 | Ymm, Ymm, Ymm |
704/// | 5 | Zmm, Zmm, Mem |
705/// | 6 | Zmm, Zmm, Zmm |
706/// +---+---------------+
707/// ```
708pub trait VandnpsMaskzEmitter<A, B, C> {
709    fn vandnps_maskz(&mut self, op0: A, op1: B, op2: C);
710}
711
712impl<'a> VandnpsMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
713    fn vandnps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
714        self.emit(VANDNPS128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
715    }
716}
717
718impl<'a> VandnpsMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
719    fn vandnps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
720        self.emit(VANDNPS128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
721    }
722}
723
724impl<'a> VandnpsMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
725    fn vandnps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
726        self.emit(VANDNPS256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
727    }
728}
729
730impl<'a> VandnpsMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
731    fn vandnps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
732        self.emit(VANDNPS256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
733    }
734}
735
736impl<'a> VandnpsMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
737    fn vandnps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
738        self.emit(VANDNPS512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
739    }
740}
741
742impl<'a> VandnpsMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
743    fn vandnps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
744        self.emit(VANDNPS512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
745    }
746}
747
748/// `VANDPD` (VANDPD). 
749/// Performs a bitwise logical AND of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
750///
751///
752/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPD.html).
753///
754/// Supported operand variants:
755///
756/// ```text
757/// +---+---------------+
758/// | # | Operands      |
759/// +---+---------------+
760/// | 1 | Xmm, Xmm, Mem |
761/// | 2 | Xmm, Xmm, Xmm |
762/// | 3 | Ymm, Ymm, Mem |
763/// | 4 | Ymm, Ymm, Ymm |
764/// | 5 | Zmm, Zmm, Mem |
765/// | 6 | Zmm, Zmm, Zmm |
766/// +---+---------------+
767/// ```
768pub trait VandpdEmitter<A, B, C> {
769    fn vandpd(&mut self, op0: A, op1: B, op2: C);
770}
771
772impl<'a> VandpdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
773    fn vandpd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
774        self.emit(VANDPD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
775    }
776}
777
778impl<'a> VandpdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
779    fn vandpd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
780        self.emit(VANDPD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
781    }
782}
783
784impl<'a> VandpdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
785    fn vandpd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
786        self.emit(VANDPD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
787    }
788}
789
790impl<'a> VandpdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
791    fn vandpd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
792        self.emit(VANDPD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
793    }
794}
795
796impl<'a> VandpdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
797    fn vandpd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
798        self.emit(VANDPD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
799    }
800}
801
802impl<'a> VandpdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
803    fn vandpd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
804        self.emit(VANDPD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
805    }
806}
807
808/// `VANDPD_MASK` (VANDPD). 
809/// Performs a bitwise logical AND of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
810///
811///
812/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPD.html).
813///
814/// Supported operand variants:
815///
816/// ```text
817/// +---+---------------+
818/// | # | Operands      |
819/// +---+---------------+
820/// | 1 | Xmm, Xmm, Mem |
821/// | 2 | Xmm, Xmm, Xmm |
822/// | 3 | Ymm, Ymm, Mem |
823/// | 4 | Ymm, Ymm, Ymm |
824/// | 5 | Zmm, Zmm, Mem |
825/// | 6 | Zmm, Zmm, Zmm |
826/// +---+---------------+
827/// ```
828pub trait VandpdMaskEmitter<A, B, C> {
829    fn vandpd_mask(&mut self, op0: A, op1: B, op2: C);
830}
831
832impl<'a> VandpdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
833    fn vandpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
834        self.emit(VANDPD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
835    }
836}
837
838impl<'a> VandpdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
839    fn vandpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
840        self.emit(VANDPD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
841    }
842}
843
844impl<'a> VandpdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
845    fn vandpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
846        self.emit(VANDPD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
847    }
848}
849
850impl<'a> VandpdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
851    fn vandpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
852        self.emit(VANDPD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
853    }
854}
855
856impl<'a> VandpdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
857    fn vandpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
858        self.emit(VANDPD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
859    }
860}
861
862impl<'a> VandpdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
863    fn vandpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
864        self.emit(VANDPD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
865    }
866}
867
868/// `VANDPD_MASKZ` (VANDPD). 
869/// Performs a bitwise logical AND of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
870///
871///
872/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPD.html).
873///
874/// Supported operand variants:
875///
876/// ```text
877/// +---+---------------+
878/// | # | Operands      |
879/// +---+---------------+
880/// | 1 | Xmm, Xmm, Mem |
881/// | 2 | Xmm, Xmm, Xmm |
882/// | 3 | Ymm, Ymm, Mem |
883/// | 4 | Ymm, Ymm, Ymm |
884/// | 5 | Zmm, Zmm, Mem |
885/// | 6 | Zmm, Zmm, Zmm |
886/// +---+---------------+
887/// ```
888pub trait VandpdMaskzEmitter<A, B, C> {
889    fn vandpd_maskz(&mut self, op0: A, op1: B, op2: C);
890}
891
892impl<'a> VandpdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
893    fn vandpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
894        self.emit(VANDPD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
895    }
896}
897
898impl<'a> VandpdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
899    fn vandpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
900        self.emit(VANDPD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
901    }
902}
903
904impl<'a> VandpdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
905    fn vandpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
906        self.emit(VANDPD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
907    }
908}
909
910impl<'a> VandpdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
911    fn vandpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
912        self.emit(VANDPD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
913    }
914}
915
916impl<'a> VandpdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
917    fn vandpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
918        self.emit(VANDPD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
919    }
920}
921
922impl<'a> VandpdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
923    fn vandpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
924        self.emit(VANDPD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
925    }
926}
927
928/// `VANDPS` (VANDPS). 
929/// Performs a bitwise logical AND of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
930///
931///
932/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPS.html).
933///
934/// Supported operand variants:
935///
936/// ```text
937/// +---+---------------+
938/// | # | Operands      |
939/// +---+---------------+
940/// | 1 | Xmm, Xmm, Mem |
941/// | 2 | Xmm, Xmm, Xmm |
942/// | 3 | Ymm, Ymm, Mem |
943/// | 4 | Ymm, Ymm, Ymm |
944/// | 5 | Zmm, Zmm, Mem |
945/// | 6 | Zmm, Zmm, Zmm |
946/// +---+---------------+
947/// ```
948pub trait VandpsEmitter<A, B, C> {
949    fn vandps(&mut self, op0: A, op1: B, op2: C);
950}
951
952impl<'a> VandpsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
953    fn vandps(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
954        self.emit(VANDPS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
955    }
956}
957
958impl<'a> VandpsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
959    fn vandps(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
960        self.emit(VANDPS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
961    }
962}
963
964impl<'a> VandpsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
965    fn vandps(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
966        self.emit(VANDPS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
967    }
968}
969
970impl<'a> VandpsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
971    fn vandps(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
972        self.emit(VANDPS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
973    }
974}
975
976impl<'a> VandpsEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
977    fn vandps(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
978        self.emit(VANDPS512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
979    }
980}
981
982impl<'a> VandpsEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
983    fn vandps(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
984        self.emit(VANDPS512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
985    }
986}
987
988/// `VANDPS_MASK` (VANDPS). 
989/// Performs a bitwise logical AND of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
990///
991///
992/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPS.html).
993///
994/// Supported operand variants:
995///
996/// ```text
997/// +---+---------------+
998/// | # | Operands      |
999/// +---+---------------+
1000/// | 1 | Xmm, Xmm, Mem |
1001/// | 2 | Xmm, Xmm, Xmm |
1002/// | 3 | Ymm, Ymm, Mem |
1003/// | 4 | Ymm, Ymm, Ymm |
1004/// | 5 | Zmm, Zmm, Mem |
1005/// | 6 | Zmm, Zmm, Zmm |
1006/// +---+---------------+
1007/// ```
1008pub trait VandpsMaskEmitter<A, B, C> {
1009    fn vandps_mask(&mut self, op0: A, op1: B, op2: C);
1010}
1011
1012impl<'a> VandpsMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1013    fn vandps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1014        self.emit(VANDPS128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1015    }
1016}
1017
1018impl<'a> VandpsMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1019    fn vandps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1020        self.emit(VANDPS128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1021    }
1022}
1023
1024impl<'a> VandpsMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1025    fn vandps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1026        self.emit(VANDPS256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1027    }
1028}
1029
1030impl<'a> VandpsMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1031    fn vandps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1032        self.emit(VANDPS256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1033    }
1034}
1035
1036impl<'a> VandpsMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1037    fn vandps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1038        self.emit(VANDPS512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1039    }
1040}
1041
1042impl<'a> VandpsMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1043    fn vandps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1044        self.emit(VANDPS512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1045    }
1046}
1047
1048/// `VANDPS_MASKZ` (VANDPS). 
1049/// Performs a bitwise logical AND of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
1050///
1051///
1052/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPS.html).
1053///
1054/// Supported operand variants:
1055///
1056/// ```text
1057/// +---+---------------+
1058/// | # | Operands      |
1059/// +---+---------------+
1060/// | 1 | Xmm, Xmm, Mem |
1061/// | 2 | Xmm, Xmm, Xmm |
1062/// | 3 | Ymm, Ymm, Mem |
1063/// | 4 | Ymm, Ymm, Ymm |
1064/// | 5 | Zmm, Zmm, Mem |
1065/// | 6 | Zmm, Zmm, Zmm |
1066/// +---+---------------+
1067/// ```
1068pub trait VandpsMaskzEmitter<A, B, C> {
1069    fn vandps_maskz(&mut self, op0: A, op1: B, op2: C);
1070}
1071
1072impl<'a> VandpsMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1073    fn vandps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1074        self.emit(VANDPS128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1075    }
1076}
1077
1078impl<'a> VandpsMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1079    fn vandps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1080        self.emit(VANDPS128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1081    }
1082}
1083
1084impl<'a> VandpsMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1085    fn vandps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1086        self.emit(VANDPS256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1087    }
1088}
1089
1090impl<'a> VandpsMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1091    fn vandps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1092        self.emit(VANDPS256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1093    }
1094}
1095
1096impl<'a> VandpsMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1097    fn vandps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1098        self.emit(VANDPS512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1099    }
1100}
1101
1102impl<'a> VandpsMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1103    fn vandps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1104        self.emit(VANDPS512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1105    }
1106}
1107
1108/// `VBROADCASTF32X2` (VBROADCASTF32X2). 
1109/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1110///
1111///
1112/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1113///
1114/// Supported operand variants:
1115///
1116/// ```text
1117/// +---+----------+
1118/// | # | Operands |
1119/// +---+----------+
1120/// | 1 | Ymm, Mem |
1121/// | 2 | Ymm, Xmm |
1122/// | 3 | Zmm, Mem |
1123/// | 4 | Zmm, Xmm |
1124/// +---+----------+
1125/// ```
1126pub trait Vbroadcastf32x2Emitter<A, B> {
1127    fn vbroadcastf32x2(&mut self, op0: A, op1: B);
1128}
1129
1130impl<'a> Vbroadcastf32x2Emitter<Ymm, Xmm> for Assembler<'a> {
1131    fn vbroadcastf32x2(&mut self, op0: Ymm, op1: Xmm) {
1132        self.emit(VBROADCASTF32X2_256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1133    }
1134}
1135
1136impl<'a> Vbroadcastf32x2Emitter<Ymm, Mem> for Assembler<'a> {
1137    fn vbroadcastf32x2(&mut self, op0: Ymm, op1: Mem) {
1138        self.emit(VBROADCASTF32X2_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1139    }
1140}
1141
1142impl<'a> Vbroadcastf32x2Emitter<Zmm, Xmm> for Assembler<'a> {
1143    fn vbroadcastf32x2(&mut self, op0: Zmm, op1: Xmm) {
1144        self.emit(VBROADCASTF32X2_512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1145    }
1146}
1147
1148impl<'a> Vbroadcastf32x2Emitter<Zmm, Mem> for Assembler<'a> {
1149    fn vbroadcastf32x2(&mut self, op0: Zmm, op1: Mem) {
1150        self.emit(VBROADCASTF32X2_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1151    }
1152}
1153
1154/// `VBROADCASTF32X2_MASK` (VBROADCASTF32X2). 
1155/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1156///
1157///
1158/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1159///
1160/// Supported operand variants:
1161///
1162/// ```text
1163/// +---+----------+
1164/// | # | Operands |
1165/// +---+----------+
1166/// | 1 | Ymm, Mem |
1167/// | 2 | Ymm, Xmm |
1168/// | 3 | Zmm, Mem |
1169/// | 4 | Zmm, Xmm |
1170/// +---+----------+
1171/// ```
1172pub trait Vbroadcastf32x2MaskEmitter<A, B> {
1173    fn vbroadcastf32x2_mask(&mut self, op0: A, op1: B);
1174}
1175
1176impl<'a> Vbroadcastf32x2MaskEmitter<Ymm, Xmm> for Assembler<'a> {
1177    fn vbroadcastf32x2_mask(&mut self, op0: Ymm, op1: Xmm) {
1178        self.emit(VBROADCASTF32X2_256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1179    }
1180}
1181
1182impl<'a> Vbroadcastf32x2MaskEmitter<Ymm, Mem> for Assembler<'a> {
1183    fn vbroadcastf32x2_mask(&mut self, op0: Ymm, op1: Mem) {
1184        self.emit(VBROADCASTF32X2_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1185    }
1186}
1187
1188impl<'a> Vbroadcastf32x2MaskEmitter<Zmm, Xmm> for Assembler<'a> {
1189    fn vbroadcastf32x2_mask(&mut self, op0: Zmm, op1: Xmm) {
1190        self.emit(VBROADCASTF32X2_512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1191    }
1192}
1193
1194impl<'a> Vbroadcastf32x2MaskEmitter<Zmm, Mem> for Assembler<'a> {
1195    fn vbroadcastf32x2_mask(&mut self, op0: Zmm, op1: Mem) {
1196        self.emit(VBROADCASTF32X2_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1197    }
1198}
1199
1200/// `VBROADCASTF32X2_MASKZ` (VBROADCASTF32X2). 
1201/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1202///
1203///
1204/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1205///
1206/// Supported operand variants:
1207///
1208/// ```text
1209/// +---+----------+
1210/// | # | Operands |
1211/// +---+----------+
1212/// | 1 | Ymm, Mem |
1213/// | 2 | Ymm, Xmm |
1214/// | 3 | Zmm, Mem |
1215/// | 4 | Zmm, Xmm |
1216/// +---+----------+
1217/// ```
1218pub trait Vbroadcastf32x2MaskzEmitter<A, B> {
1219    fn vbroadcastf32x2_maskz(&mut self, op0: A, op1: B);
1220}
1221
1222impl<'a> Vbroadcastf32x2MaskzEmitter<Ymm, Xmm> for Assembler<'a> {
1223    fn vbroadcastf32x2_maskz(&mut self, op0: Ymm, op1: Xmm) {
1224        self.emit(VBROADCASTF32X2_256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1225    }
1226}
1227
1228impl<'a> Vbroadcastf32x2MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1229    fn vbroadcastf32x2_maskz(&mut self, op0: Ymm, op1: Mem) {
1230        self.emit(VBROADCASTF32X2_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1231    }
1232}
1233
1234impl<'a> Vbroadcastf32x2MaskzEmitter<Zmm, Xmm> for Assembler<'a> {
1235    fn vbroadcastf32x2_maskz(&mut self, op0: Zmm, op1: Xmm) {
1236        self.emit(VBROADCASTF32X2_512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1237    }
1238}
1239
1240impl<'a> Vbroadcastf32x2MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1241    fn vbroadcastf32x2_maskz(&mut self, op0: Zmm, op1: Mem) {
1242        self.emit(VBROADCASTF32X2_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1243    }
1244}
1245
1246/// `VBROADCASTF32X8` (VBROADCASTF32X8). 
1247/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1248///
1249///
1250/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1251///
1252/// Supported operand variants:
1253///
1254/// ```text
1255/// +---+----------+
1256/// | # | Operands |
1257/// +---+----------+
1258/// | 1 | Zmm, Mem |
1259/// +---+----------+
1260/// ```
1261pub trait Vbroadcastf32x8Emitter<A, B> {
1262    fn vbroadcastf32x8(&mut self, op0: A, op1: B);
1263}
1264
1265impl<'a> Vbroadcastf32x8Emitter<Zmm, Mem> for Assembler<'a> {
1266    fn vbroadcastf32x8(&mut self, op0: Zmm, op1: Mem) {
1267        self.emit(VBROADCASTF32X8_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1268    }
1269}
1270
1271/// `VBROADCASTF32X8_MASK` (VBROADCASTF32X8). 
1272/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1273///
1274///
1275/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1276///
1277/// Supported operand variants:
1278///
1279/// ```text
1280/// +---+----------+
1281/// | # | Operands |
1282/// +---+----------+
1283/// | 1 | Zmm, Mem |
1284/// +---+----------+
1285/// ```
1286pub trait Vbroadcastf32x8MaskEmitter<A, B> {
1287    fn vbroadcastf32x8_mask(&mut self, op0: A, op1: B);
1288}
1289
1290impl<'a> Vbroadcastf32x8MaskEmitter<Zmm, Mem> for Assembler<'a> {
1291    fn vbroadcastf32x8_mask(&mut self, op0: Zmm, op1: Mem) {
1292        self.emit(VBROADCASTF32X8_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1293    }
1294}
1295
1296/// `VBROADCASTF32X8_MASKZ` (VBROADCASTF32X8). 
1297/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1298///
1299///
1300/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1301///
1302/// Supported operand variants:
1303///
1304/// ```text
1305/// +---+----------+
1306/// | # | Operands |
1307/// +---+----------+
1308/// | 1 | Zmm, Mem |
1309/// +---+----------+
1310/// ```
1311pub trait Vbroadcastf32x8MaskzEmitter<A, B> {
1312    fn vbroadcastf32x8_maskz(&mut self, op0: A, op1: B);
1313}
1314
1315impl<'a> Vbroadcastf32x8MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1316    fn vbroadcastf32x8_maskz(&mut self, op0: Zmm, op1: Mem) {
1317        self.emit(VBROADCASTF32X8_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1318    }
1319}
1320
1321/// `VBROADCASTF64X2` (VBROADCASTF64X2). 
1322/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1323///
1324///
1325/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1326///
1327/// Supported operand variants:
1328///
1329/// ```text
1330/// +---+----------+
1331/// | # | Operands |
1332/// +---+----------+
1333/// | 1 | Ymm, Mem |
1334/// | 2 | Zmm, Mem |
1335/// +---+----------+
1336/// ```
1337pub trait Vbroadcastf64x2Emitter<A, B> {
1338    fn vbroadcastf64x2(&mut self, op0: A, op1: B);
1339}
1340
1341impl<'a> Vbroadcastf64x2Emitter<Ymm, Mem> for Assembler<'a> {
1342    fn vbroadcastf64x2(&mut self, op0: Ymm, op1: Mem) {
1343        self.emit(VBROADCASTF64X2_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1344    }
1345}
1346
1347impl<'a> Vbroadcastf64x2Emitter<Zmm, Mem> for Assembler<'a> {
1348    fn vbroadcastf64x2(&mut self, op0: Zmm, op1: Mem) {
1349        self.emit(VBROADCASTF64X2_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1350    }
1351}
1352
1353/// `VBROADCASTF64X2_MASK` (VBROADCASTF64X2). 
1354/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1355///
1356///
1357/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1358///
1359/// Supported operand variants:
1360///
1361/// ```text
1362/// +---+----------+
1363/// | # | Operands |
1364/// +---+----------+
1365/// | 1 | Ymm, Mem |
1366/// | 2 | Zmm, Mem |
1367/// +---+----------+
1368/// ```
1369pub trait Vbroadcastf64x2MaskEmitter<A, B> {
1370    fn vbroadcastf64x2_mask(&mut self, op0: A, op1: B);
1371}
1372
1373impl<'a> Vbroadcastf64x2MaskEmitter<Ymm, Mem> for Assembler<'a> {
1374    fn vbroadcastf64x2_mask(&mut self, op0: Ymm, op1: Mem) {
1375        self.emit(VBROADCASTF64X2_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1376    }
1377}
1378
1379impl<'a> Vbroadcastf64x2MaskEmitter<Zmm, Mem> for Assembler<'a> {
1380    fn vbroadcastf64x2_mask(&mut self, op0: Zmm, op1: Mem) {
1381        self.emit(VBROADCASTF64X2_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1382    }
1383}
1384
1385/// `VBROADCASTF64X2_MASKZ` (VBROADCASTF64X2). 
1386/// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
1387///
1388///
1389/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
1390///
1391/// Supported operand variants:
1392///
1393/// ```text
1394/// +---+----------+
1395/// | # | Operands |
1396/// +---+----------+
1397/// | 1 | Ymm, Mem |
1398/// | 2 | Zmm, Mem |
1399/// +---+----------+
1400/// ```
1401pub trait Vbroadcastf64x2MaskzEmitter<A, B> {
1402    fn vbroadcastf64x2_maskz(&mut self, op0: A, op1: B);
1403}
1404
1405impl<'a> Vbroadcastf64x2MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1406    fn vbroadcastf64x2_maskz(&mut self, op0: Ymm, op1: Mem) {
1407        self.emit(VBROADCASTF64X2_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1408    }
1409}
1410
1411impl<'a> Vbroadcastf64x2MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1412    fn vbroadcastf64x2_maskz(&mut self, op0: Zmm, op1: Mem) {
1413        self.emit(VBROADCASTF64X2_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1414    }
1415}
1416
1417/// `VBROADCASTI32X2`.
1418///
1419/// Supported operand variants:
1420///
1421/// ```text
1422/// +---+----------+
1423/// | # | Operands |
1424/// +---+----------+
1425/// | 1 | Xmm, Mem |
1426/// | 2 | Xmm, Xmm |
1427/// | 3 | Ymm, Mem |
1428/// | 4 | Ymm, Xmm |
1429/// | 5 | Zmm, Mem |
1430/// | 6 | Zmm, Xmm |
1431/// +---+----------+
1432/// ```
1433pub trait Vbroadcasti32x2Emitter<A, B> {
1434    fn vbroadcasti32x2(&mut self, op0: A, op1: B);
1435}
1436
1437impl<'a> Vbroadcasti32x2Emitter<Xmm, Xmm> for Assembler<'a> {
1438    fn vbroadcasti32x2(&mut self, op0: Xmm, op1: Xmm) {
1439        self.emit(VBROADCASTI32X2_128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1440    }
1441}
1442
1443impl<'a> Vbroadcasti32x2Emitter<Xmm, Mem> for Assembler<'a> {
1444    fn vbroadcasti32x2(&mut self, op0: Xmm, op1: Mem) {
1445        self.emit(VBROADCASTI32X2_128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1446    }
1447}
1448
1449impl<'a> Vbroadcasti32x2Emitter<Ymm, Xmm> for Assembler<'a> {
1450    fn vbroadcasti32x2(&mut self, op0: Ymm, op1: Xmm) {
1451        self.emit(VBROADCASTI32X2_256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1452    }
1453}
1454
1455impl<'a> Vbroadcasti32x2Emitter<Ymm, Mem> for Assembler<'a> {
1456    fn vbroadcasti32x2(&mut self, op0: Ymm, op1: Mem) {
1457        self.emit(VBROADCASTI32X2_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1458    }
1459}
1460
1461impl<'a> Vbroadcasti32x2Emitter<Zmm, Xmm> for Assembler<'a> {
1462    fn vbroadcasti32x2(&mut self, op0: Zmm, op1: Xmm) {
1463        self.emit(VBROADCASTI32X2_512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1464    }
1465}
1466
1467impl<'a> Vbroadcasti32x2Emitter<Zmm, Mem> for Assembler<'a> {
1468    fn vbroadcasti32x2(&mut self, op0: Zmm, op1: Mem) {
1469        self.emit(VBROADCASTI32X2_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1470    }
1471}
1472
1473/// `VBROADCASTI32X2_MASK`.
1474///
1475/// Supported operand variants:
1476///
1477/// ```text
1478/// +---+----------+
1479/// | # | Operands |
1480/// +---+----------+
1481/// | 1 | Xmm, Mem |
1482/// | 2 | Xmm, Xmm |
1483/// | 3 | Ymm, Mem |
1484/// | 4 | Ymm, Xmm |
1485/// | 5 | Zmm, Mem |
1486/// | 6 | Zmm, Xmm |
1487/// +---+----------+
1488/// ```
1489pub trait Vbroadcasti32x2MaskEmitter<A, B> {
1490    fn vbroadcasti32x2_mask(&mut self, op0: A, op1: B);
1491}
1492
1493impl<'a> Vbroadcasti32x2MaskEmitter<Xmm, Xmm> for Assembler<'a> {
1494    fn vbroadcasti32x2_mask(&mut self, op0: Xmm, op1: Xmm) {
1495        self.emit(VBROADCASTI32X2_128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1496    }
1497}
1498
1499impl<'a> Vbroadcasti32x2MaskEmitter<Xmm, Mem> for Assembler<'a> {
1500    fn vbroadcasti32x2_mask(&mut self, op0: Xmm, op1: Mem) {
1501        self.emit(VBROADCASTI32X2_128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1502    }
1503}
1504
1505impl<'a> Vbroadcasti32x2MaskEmitter<Ymm, Xmm> for Assembler<'a> {
1506    fn vbroadcasti32x2_mask(&mut self, op0: Ymm, op1: Xmm) {
1507        self.emit(VBROADCASTI32X2_256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1508    }
1509}
1510
1511impl<'a> Vbroadcasti32x2MaskEmitter<Ymm, Mem> for Assembler<'a> {
1512    fn vbroadcasti32x2_mask(&mut self, op0: Ymm, op1: Mem) {
1513        self.emit(VBROADCASTI32X2_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1514    }
1515}
1516
1517impl<'a> Vbroadcasti32x2MaskEmitter<Zmm, Xmm> for Assembler<'a> {
1518    fn vbroadcasti32x2_mask(&mut self, op0: Zmm, op1: Xmm) {
1519        self.emit(VBROADCASTI32X2_512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1520    }
1521}
1522
1523impl<'a> Vbroadcasti32x2MaskEmitter<Zmm, Mem> for Assembler<'a> {
1524    fn vbroadcasti32x2_mask(&mut self, op0: Zmm, op1: Mem) {
1525        self.emit(VBROADCASTI32X2_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1526    }
1527}
1528
1529/// `VBROADCASTI32X2_MASKZ`.
1530///
1531/// Supported operand variants:
1532///
1533/// ```text
1534/// +---+----------+
1535/// | # | Operands |
1536/// +---+----------+
1537/// | 1 | Xmm, Mem |
1538/// | 2 | Xmm, Xmm |
1539/// | 3 | Ymm, Mem |
1540/// | 4 | Ymm, Xmm |
1541/// | 5 | Zmm, Mem |
1542/// | 6 | Zmm, Xmm |
1543/// +---+----------+
1544/// ```
1545pub trait Vbroadcasti32x2MaskzEmitter<A, B> {
1546    fn vbroadcasti32x2_maskz(&mut self, op0: A, op1: B);
1547}
1548
1549impl<'a> Vbroadcasti32x2MaskzEmitter<Xmm, Xmm> for Assembler<'a> {
1550    fn vbroadcasti32x2_maskz(&mut self, op0: Xmm, op1: Xmm) {
1551        self.emit(VBROADCASTI32X2_128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1552    }
1553}
1554
1555impl<'a> Vbroadcasti32x2MaskzEmitter<Xmm, Mem> for Assembler<'a> {
1556    fn vbroadcasti32x2_maskz(&mut self, op0: Xmm, op1: Mem) {
1557        self.emit(VBROADCASTI32X2_128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1558    }
1559}
1560
1561impl<'a> Vbroadcasti32x2MaskzEmitter<Ymm, Xmm> for Assembler<'a> {
1562    fn vbroadcasti32x2_maskz(&mut self, op0: Ymm, op1: Xmm) {
1563        self.emit(VBROADCASTI32X2_256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1564    }
1565}
1566
1567impl<'a> Vbroadcasti32x2MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1568    fn vbroadcasti32x2_maskz(&mut self, op0: Ymm, op1: Mem) {
1569        self.emit(VBROADCASTI32X2_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1570    }
1571}
1572
1573impl<'a> Vbroadcasti32x2MaskzEmitter<Zmm, Xmm> for Assembler<'a> {
1574    fn vbroadcasti32x2_maskz(&mut self, op0: Zmm, op1: Xmm) {
1575        self.emit(VBROADCASTI32X2_512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1576    }
1577}
1578
1579impl<'a> Vbroadcasti32x2MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1580    fn vbroadcasti32x2_maskz(&mut self, op0: Zmm, op1: Mem) {
1581        self.emit(VBROADCASTI32X2_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1582    }
1583}
1584
1585/// `VBROADCASTI32X4` (VBROADCASTI32X4). 
1586/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1587///
1588///
1589/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1590///
1591/// Supported operand variants:
1592///
1593/// ```text
1594/// +---+----------+
1595/// | # | Operands |
1596/// +---+----------+
1597/// | 1 | Ymm, Mem |
1598/// | 2 | Zmm, Mem |
1599/// +---+----------+
1600/// ```
1601pub trait Vbroadcasti32x4Emitter<A, B> {
1602    fn vbroadcasti32x4(&mut self, op0: A, op1: B);
1603}
1604
1605impl<'a> Vbroadcasti32x4Emitter<Ymm, Mem> for Assembler<'a> {
1606    fn vbroadcasti32x4(&mut self, op0: Ymm, op1: Mem) {
1607        self.emit(VBROADCASTI32X4_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1608    }
1609}
1610
1611impl<'a> Vbroadcasti32x4Emitter<Zmm, Mem> for Assembler<'a> {
1612    fn vbroadcasti32x4(&mut self, op0: Zmm, op1: Mem) {
1613        self.emit(VBROADCASTI32X4_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1614    }
1615}
1616
1617/// `VBROADCASTI32X4_MASK` (VBROADCASTI32X4). 
1618/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1619///
1620///
1621/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1622///
1623/// Supported operand variants:
1624///
1625/// ```text
1626/// +---+----------+
1627/// | # | Operands |
1628/// +---+----------+
1629/// | 1 | Ymm, Mem |
1630/// | 2 | Zmm, Mem |
1631/// +---+----------+
1632/// ```
1633pub trait Vbroadcasti32x4MaskEmitter<A, B> {
1634    fn vbroadcasti32x4_mask(&mut self, op0: A, op1: B);
1635}
1636
1637impl<'a> Vbroadcasti32x4MaskEmitter<Ymm, Mem> for Assembler<'a> {
1638    fn vbroadcasti32x4_mask(&mut self, op0: Ymm, op1: Mem) {
1639        self.emit(VBROADCASTI32X4_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1640    }
1641}
1642
1643impl<'a> Vbroadcasti32x4MaskEmitter<Zmm, Mem> for Assembler<'a> {
1644    fn vbroadcasti32x4_mask(&mut self, op0: Zmm, op1: Mem) {
1645        self.emit(VBROADCASTI32X4_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1646    }
1647}
1648
1649/// `VBROADCASTI32X4_MASKZ` (VBROADCASTI32X4). 
1650/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1651///
1652///
1653/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1654///
1655/// Supported operand variants:
1656///
1657/// ```text
1658/// +---+----------+
1659/// | # | Operands |
1660/// +---+----------+
1661/// | 1 | Ymm, Mem |
1662/// | 2 | Zmm, Mem |
1663/// +---+----------+
1664/// ```
1665pub trait Vbroadcasti32x4MaskzEmitter<A, B> {
1666    fn vbroadcasti32x4_maskz(&mut self, op0: A, op1: B);
1667}
1668
1669impl<'a> Vbroadcasti32x4MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1670    fn vbroadcasti32x4_maskz(&mut self, op0: Ymm, op1: Mem) {
1671        self.emit(VBROADCASTI32X4_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1672    }
1673}
1674
1675impl<'a> Vbroadcasti32x4MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1676    fn vbroadcasti32x4_maskz(&mut self, op0: Zmm, op1: Mem) {
1677        self.emit(VBROADCASTI32X4_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1678    }
1679}
1680
1681/// `VBROADCASTI32X8` (VBROADCASTI32X8). 
1682/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1683///
1684///
1685/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1686///
1687/// Supported operand variants:
1688///
1689/// ```text
1690/// +---+----------+
1691/// | # | Operands |
1692/// +---+----------+
1693/// | 1 | Zmm, Mem |
1694/// +---+----------+
1695/// ```
1696pub trait Vbroadcasti32x8Emitter<A, B> {
1697    fn vbroadcasti32x8(&mut self, op0: A, op1: B);
1698}
1699
1700impl<'a> Vbroadcasti32x8Emitter<Zmm, Mem> for Assembler<'a> {
1701    fn vbroadcasti32x8(&mut self, op0: Zmm, op1: Mem) {
1702        self.emit(VBROADCASTI32X8_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1703    }
1704}
1705
1706/// `VBROADCASTI32X8_MASK` (VBROADCASTI32X8). 
1707/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1708///
1709///
1710/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1711///
1712/// Supported operand variants:
1713///
1714/// ```text
1715/// +---+----------+
1716/// | # | Operands |
1717/// +---+----------+
1718/// | 1 | Zmm, Mem |
1719/// +---+----------+
1720/// ```
1721pub trait Vbroadcasti32x8MaskEmitter<A, B> {
1722    fn vbroadcasti32x8_mask(&mut self, op0: A, op1: B);
1723}
1724
1725impl<'a> Vbroadcasti32x8MaskEmitter<Zmm, Mem> for Assembler<'a> {
1726    fn vbroadcasti32x8_mask(&mut self, op0: Zmm, op1: Mem) {
1727        self.emit(VBROADCASTI32X8_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1728    }
1729}
1730
1731/// `VBROADCASTI32X8_MASKZ` (VBROADCASTI32X8). 
1732/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1733///
1734///
1735/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1736///
1737/// Supported operand variants:
1738///
1739/// ```text
1740/// +---+----------+
1741/// | # | Operands |
1742/// +---+----------+
1743/// | 1 | Zmm, Mem |
1744/// +---+----------+
1745/// ```
1746pub trait Vbroadcasti32x8MaskzEmitter<A, B> {
1747    fn vbroadcasti32x8_maskz(&mut self, op0: A, op1: B);
1748}
1749
1750impl<'a> Vbroadcasti32x8MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1751    fn vbroadcasti32x8_maskz(&mut self, op0: Zmm, op1: Mem) {
1752        self.emit(VBROADCASTI32X8_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1753    }
1754}
1755
1756/// `VBROADCASTI64X2` (VBROADCASTI64X2). 
1757/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1758///
1759///
1760/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1761///
1762/// Supported operand variants:
1763///
1764/// ```text
1765/// +---+----------+
1766/// | # | Operands |
1767/// +---+----------+
1768/// | 1 | Ymm, Mem |
1769/// | 2 | Zmm, Mem |
1770/// +---+----------+
1771/// ```
1772pub trait Vbroadcasti64x2Emitter<A, B> {
1773    fn vbroadcasti64x2(&mut self, op0: A, op1: B);
1774}
1775
1776impl<'a> Vbroadcasti64x2Emitter<Ymm, Mem> for Assembler<'a> {
1777    fn vbroadcasti64x2(&mut self, op0: Ymm, op1: Mem) {
1778        self.emit(VBROADCASTI64X2_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1779    }
1780}
1781
1782impl<'a> Vbroadcasti64x2Emitter<Zmm, Mem> for Assembler<'a> {
1783    fn vbroadcasti64x2(&mut self, op0: Zmm, op1: Mem) {
1784        self.emit(VBROADCASTI64X2_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1785    }
1786}
1787
1788/// `VBROADCASTI64X2_MASK` (VBROADCASTI64X2). 
1789/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1790///
1791///
1792/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1793///
1794/// Supported operand variants:
1795///
1796/// ```text
1797/// +---+----------+
1798/// | # | Operands |
1799/// +---+----------+
1800/// | 1 | Ymm, Mem |
1801/// | 2 | Zmm, Mem |
1802/// +---+----------+
1803/// ```
1804pub trait Vbroadcasti64x2MaskEmitter<A, B> {
1805    fn vbroadcasti64x2_mask(&mut self, op0: A, op1: B);
1806}
1807
1808impl<'a> Vbroadcasti64x2MaskEmitter<Ymm, Mem> for Assembler<'a> {
1809    fn vbroadcasti64x2_mask(&mut self, op0: Ymm, op1: Mem) {
1810        self.emit(VBROADCASTI64X2_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1811    }
1812}
1813
1814impl<'a> Vbroadcasti64x2MaskEmitter<Zmm, Mem> for Assembler<'a> {
1815    fn vbroadcasti64x2_mask(&mut self, op0: Zmm, op1: Mem) {
1816        self.emit(VBROADCASTI64X2_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1817    }
1818}
1819
1820/// `VBROADCASTI64X2_MASKZ` (VBROADCASTI64X2). 
1821/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
1822///
1823///
1824/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
1825///
1826/// Supported operand variants:
1827///
1828/// ```text
1829/// +---+----------+
1830/// | # | Operands |
1831/// +---+----------+
1832/// | 1 | Ymm, Mem |
1833/// | 2 | Zmm, Mem |
1834/// +---+----------+
1835/// ```
1836pub trait Vbroadcasti64x2MaskzEmitter<A, B> {
1837    fn vbroadcasti64x2_maskz(&mut self, op0: A, op1: B);
1838}
1839
1840impl<'a> Vbroadcasti64x2MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1841    fn vbroadcasti64x2_maskz(&mut self, op0: Ymm, op1: Mem) {
1842        self.emit(VBROADCASTI64X2_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1843    }
1844}
1845
1846impl<'a> Vbroadcasti64x2MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1847    fn vbroadcasti64x2_maskz(&mut self, op0: Zmm, op1: Mem) {
1848        self.emit(VBROADCASTI64X2_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1849    }
1850}
1851
1852/// `VCVTPD2QQ` (VCVTPD2QQ). 
1853/// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
1854///
1855///
1856/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
1857///
1858/// Supported operand variants:
1859///
1860/// ```text
1861/// +---+----------+
1862/// | # | Operands |
1863/// +---+----------+
1864/// | 1 | Xmm, Mem |
1865/// | 2 | Xmm, Xmm |
1866/// | 3 | Ymm, Mem |
1867/// | 4 | Ymm, Ymm |
1868/// | 5 | Zmm, Mem |
1869/// | 6 | Zmm, Zmm |
1870/// +---+----------+
1871/// ```
1872pub trait Vcvtpd2qqEmitter<A, B> {
1873    fn vcvtpd2qq(&mut self, op0: A, op1: B);
1874}
1875
1876impl<'a> Vcvtpd2qqEmitter<Xmm, Xmm> for Assembler<'a> {
1877    fn vcvtpd2qq(&mut self, op0: Xmm, op1: Xmm) {
1878        self.emit(VCVTPD2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1879    }
1880}
1881
1882impl<'a> Vcvtpd2qqEmitter<Xmm, Mem> for Assembler<'a> {
1883    fn vcvtpd2qq(&mut self, op0: Xmm, op1: Mem) {
1884        self.emit(VCVTPD2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1885    }
1886}
1887
1888impl<'a> Vcvtpd2qqEmitter<Ymm, Ymm> for Assembler<'a> {
1889    fn vcvtpd2qq(&mut self, op0: Ymm, op1: Ymm) {
1890        self.emit(VCVTPD2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1891    }
1892}
1893
1894impl<'a> Vcvtpd2qqEmitter<Ymm, Mem> for Assembler<'a> {
1895    fn vcvtpd2qq(&mut self, op0: Ymm, op1: Mem) {
1896        self.emit(VCVTPD2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1897    }
1898}
1899
1900impl<'a> Vcvtpd2qqEmitter<Zmm, Zmm> for Assembler<'a> {
1901    fn vcvtpd2qq(&mut self, op0: Zmm, op1: Zmm) {
1902        self.emit(VCVTPD2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1903    }
1904}
1905
1906impl<'a> Vcvtpd2qqEmitter<Zmm, Mem> for Assembler<'a> {
1907    fn vcvtpd2qq(&mut self, op0: Zmm, op1: Mem) {
1908        self.emit(VCVTPD2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1909    }
1910}
1911
1912/// `VCVTPD2QQ_ER` (VCVTPD2QQ). 
1913/// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
1914///
1915///
1916/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
1917///
1918/// Supported operand variants:
1919///
1920/// ```text
1921/// +---+----------+
1922/// | # | Operands |
1923/// +---+----------+
1924/// | 1 | Zmm, Zmm |
1925/// +---+----------+
1926/// ```
1927pub trait Vcvtpd2qqErEmitter<A, B> {
1928    fn vcvtpd2qq_er(&mut self, op0: A, op1: B);
1929}
1930
1931impl<'a> Vcvtpd2qqErEmitter<Zmm, Zmm> for Assembler<'a> {
1932    fn vcvtpd2qq_er(&mut self, op0: Zmm, op1: Zmm) {
1933        self.emit(VCVTPD2QQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1934    }
1935}
1936
1937/// `VCVTPD2QQ_MASK` (VCVTPD2QQ). 
1938/// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
1939///
1940///
1941/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
1942///
1943/// Supported operand variants:
1944///
1945/// ```text
1946/// +---+----------+
1947/// | # | Operands |
1948/// +---+----------+
1949/// | 1 | Xmm, Mem |
1950/// | 2 | Xmm, Xmm |
1951/// | 3 | Ymm, Mem |
1952/// | 4 | Ymm, Ymm |
1953/// | 5 | Zmm, Mem |
1954/// | 6 | Zmm, Zmm |
1955/// +---+----------+
1956/// ```
1957pub trait Vcvtpd2qqMaskEmitter<A, B> {
1958    fn vcvtpd2qq_mask(&mut self, op0: A, op1: B);
1959}
1960
1961impl<'a> Vcvtpd2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
1962    fn vcvtpd2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
1963        self.emit(VCVTPD2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1964    }
1965}
1966
1967impl<'a> Vcvtpd2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
1968    fn vcvtpd2qq_mask(&mut self, op0: Xmm, op1: Mem) {
1969        self.emit(VCVTPD2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1970    }
1971}
1972
1973impl<'a> Vcvtpd2qqMaskEmitter<Ymm, Ymm> for Assembler<'a> {
1974    fn vcvtpd2qq_mask(&mut self, op0: Ymm, op1: Ymm) {
1975        self.emit(VCVTPD2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1976    }
1977}
1978
1979impl<'a> Vcvtpd2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
1980    fn vcvtpd2qq_mask(&mut self, op0: Ymm, op1: Mem) {
1981        self.emit(VCVTPD2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1982    }
1983}
1984
1985impl<'a> Vcvtpd2qqMaskEmitter<Zmm, Zmm> for Assembler<'a> {
1986    fn vcvtpd2qq_mask(&mut self, op0: Zmm, op1: Zmm) {
1987        self.emit(VCVTPD2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1988    }
1989}
1990
1991impl<'a> Vcvtpd2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
1992    fn vcvtpd2qq_mask(&mut self, op0: Zmm, op1: Mem) {
1993        self.emit(VCVTPD2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1994    }
1995}
1996
1997/// `VCVTPD2QQ_MASK_ER` (VCVTPD2QQ). 
1998/// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
1999///
2000///
2001/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
2002///
2003/// Supported operand variants:
2004///
2005/// ```text
2006/// +---+----------+
2007/// | # | Operands |
2008/// +---+----------+
2009/// | 1 | Zmm, Zmm |
2010/// +---+----------+
2011/// ```
2012pub trait Vcvtpd2qqMaskErEmitter<A, B> {
2013    fn vcvtpd2qq_mask_er(&mut self, op0: A, op1: B);
2014}
2015
2016impl<'a> Vcvtpd2qqMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
2017    fn vcvtpd2qq_mask_er(&mut self, op0: Zmm, op1: Zmm) {
2018        self.emit(VCVTPD2QQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2019    }
2020}
2021
2022/// `VCVTPD2QQ_MASKZ` (VCVTPD2QQ). 
2023/// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
2024///
2025///
2026/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
2027///
2028/// Supported operand variants:
2029///
2030/// ```text
2031/// +---+----------+
2032/// | # | Operands |
2033/// +---+----------+
2034/// | 1 | Xmm, Mem |
2035/// | 2 | Xmm, Xmm |
2036/// | 3 | Ymm, Mem |
2037/// | 4 | Ymm, Ymm |
2038/// | 5 | Zmm, Mem |
2039/// | 6 | Zmm, Zmm |
2040/// +---+----------+
2041/// ```
2042pub trait Vcvtpd2qqMaskzEmitter<A, B> {
2043    fn vcvtpd2qq_maskz(&mut self, op0: A, op1: B);
2044}
2045
2046impl<'a> Vcvtpd2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
2047    fn vcvtpd2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
2048        self.emit(VCVTPD2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2049    }
2050}
2051
2052impl<'a> Vcvtpd2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
2053    fn vcvtpd2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
2054        self.emit(VCVTPD2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2055    }
2056}
2057
2058impl<'a> Vcvtpd2qqMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
2059    fn vcvtpd2qq_maskz(&mut self, op0: Ymm, op1: Ymm) {
2060        self.emit(VCVTPD2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2061    }
2062}
2063
2064impl<'a> Vcvtpd2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
2065    fn vcvtpd2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
2066        self.emit(VCVTPD2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2067    }
2068}
2069
2070impl<'a> Vcvtpd2qqMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
2071    fn vcvtpd2qq_maskz(&mut self, op0: Zmm, op1: Zmm) {
2072        self.emit(VCVTPD2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2073    }
2074}
2075
2076impl<'a> Vcvtpd2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
2077    fn vcvtpd2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
2078        self.emit(VCVTPD2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2079    }
2080}
2081
2082/// `VCVTPD2QQ_MASKZ_ER` (VCVTPD2QQ). 
2083/// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
2084///
2085///
2086/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
2087///
2088/// Supported operand variants:
2089///
2090/// ```text
2091/// +---+----------+
2092/// | # | Operands |
2093/// +---+----------+
2094/// | 1 | Zmm, Zmm |
2095/// +---+----------+
2096/// ```
2097pub trait Vcvtpd2qqMaskzErEmitter<A, B> {
2098    fn vcvtpd2qq_maskz_er(&mut self, op0: A, op1: B);
2099}
2100
2101impl<'a> Vcvtpd2qqMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
2102    fn vcvtpd2qq_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
2103        self.emit(VCVTPD2QQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2104    }
2105}
2106
2107/// `VCVTPS2QQ` (VCVTPS2QQ). 
2108/// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
2109///
2110///
2111/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
2112///
2113/// Supported operand variants:
2114///
2115/// ```text
2116/// +---+----------+
2117/// | # | Operands |
2118/// +---+----------+
2119/// | 1 | Xmm, Mem |
2120/// | 2 | Xmm, Xmm |
2121/// | 3 | Ymm, Mem |
2122/// | 4 | Ymm, Xmm |
2123/// | 5 | Zmm, Mem |
2124/// | 6 | Zmm, Ymm |
2125/// +---+----------+
2126/// ```
2127pub trait Vcvtps2qqEmitter<A, B> {
2128    fn vcvtps2qq(&mut self, op0: A, op1: B);
2129}
2130
2131impl<'a> Vcvtps2qqEmitter<Xmm, Xmm> for Assembler<'a> {
2132    fn vcvtps2qq(&mut self, op0: Xmm, op1: Xmm) {
2133        self.emit(VCVTPS2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2134    }
2135}
2136
2137impl<'a> Vcvtps2qqEmitter<Xmm, Mem> for Assembler<'a> {
2138    fn vcvtps2qq(&mut self, op0: Xmm, op1: Mem) {
2139        self.emit(VCVTPS2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2140    }
2141}
2142
2143impl<'a> Vcvtps2qqEmitter<Ymm, Xmm> for Assembler<'a> {
2144    fn vcvtps2qq(&mut self, op0: Ymm, op1: Xmm) {
2145        self.emit(VCVTPS2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2146    }
2147}
2148
2149impl<'a> Vcvtps2qqEmitter<Ymm, Mem> for Assembler<'a> {
2150    fn vcvtps2qq(&mut self, op0: Ymm, op1: Mem) {
2151        self.emit(VCVTPS2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2152    }
2153}
2154
2155impl<'a> Vcvtps2qqEmitter<Zmm, Ymm> for Assembler<'a> {
2156    fn vcvtps2qq(&mut self, op0: Zmm, op1: Ymm) {
2157        self.emit(VCVTPS2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2158    }
2159}
2160
2161impl<'a> Vcvtps2qqEmitter<Zmm, Mem> for Assembler<'a> {
2162    fn vcvtps2qq(&mut self, op0: Zmm, op1: Mem) {
2163        self.emit(VCVTPS2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2164    }
2165}
2166
2167/// `VCVTPS2QQ_ER` (VCVTPS2QQ). 
2168/// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
2169///
2170///
2171/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
2172///
2173/// Supported operand variants:
2174///
2175/// ```text
2176/// +---+----------+
2177/// | # | Operands |
2178/// +---+----------+
2179/// | 1 | Zmm, Ymm |
2180/// +---+----------+
2181/// ```
2182pub trait Vcvtps2qqErEmitter<A, B> {
2183    fn vcvtps2qq_er(&mut self, op0: A, op1: B);
2184}
2185
2186impl<'a> Vcvtps2qqErEmitter<Zmm, Ymm> for Assembler<'a> {
2187    fn vcvtps2qq_er(&mut self, op0: Zmm, op1: Ymm) {
2188        self.emit(VCVTPS2QQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2189    }
2190}
2191
2192/// `VCVTPS2QQ_MASK` (VCVTPS2QQ). 
2193/// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
2194///
2195///
2196/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
2197///
2198/// Supported operand variants:
2199///
2200/// ```text
2201/// +---+----------+
2202/// | # | Operands |
2203/// +---+----------+
2204/// | 1 | Xmm, Mem |
2205/// | 2 | Xmm, Xmm |
2206/// | 3 | Ymm, Mem |
2207/// | 4 | Ymm, Xmm |
2208/// | 5 | Zmm, Mem |
2209/// | 6 | Zmm, Ymm |
2210/// +---+----------+
2211/// ```
2212pub trait Vcvtps2qqMaskEmitter<A, B> {
2213    fn vcvtps2qq_mask(&mut self, op0: A, op1: B);
2214}
2215
2216impl<'a> Vcvtps2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
2217    fn vcvtps2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
2218        self.emit(VCVTPS2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2219    }
2220}
2221
2222impl<'a> Vcvtps2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
2223    fn vcvtps2qq_mask(&mut self, op0: Xmm, op1: Mem) {
2224        self.emit(VCVTPS2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2225    }
2226}
2227
2228impl<'a> Vcvtps2qqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
2229    fn vcvtps2qq_mask(&mut self, op0: Ymm, op1: Xmm) {
2230        self.emit(VCVTPS2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2231    }
2232}
2233
2234impl<'a> Vcvtps2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
2235    fn vcvtps2qq_mask(&mut self, op0: Ymm, op1: Mem) {
2236        self.emit(VCVTPS2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2237    }
2238}
2239
2240impl<'a> Vcvtps2qqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
2241    fn vcvtps2qq_mask(&mut self, op0: Zmm, op1: Ymm) {
2242        self.emit(VCVTPS2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2243    }
2244}
2245
2246impl<'a> Vcvtps2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
2247    fn vcvtps2qq_mask(&mut self, op0: Zmm, op1: Mem) {
2248        self.emit(VCVTPS2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2249    }
2250}
2251
2252/// `VCVTPS2QQ_MASK_ER` (VCVTPS2QQ). 
2253/// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
2254///
2255///
2256/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
2257///
2258/// Supported operand variants:
2259///
2260/// ```text
2261/// +---+----------+
2262/// | # | Operands |
2263/// +---+----------+
2264/// | 1 | Zmm, Ymm |
2265/// +---+----------+
2266/// ```
2267pub trait Vcvtps2qqMaskErEmitter<A, B> {
2268    fn vcvtps2qq_mask_er(&mut self, op0: A, op1: B);
2269}
2270
2271impl<'a> Vcvtps2qqMaskErEmitter<Zmm, Ymm> for Assembler<'a> {
2272    fn vcvtps2qq_mask_er(&mut self, op0: Zmm, op1: Ymm) {
2273        self.emit(VCVTPS2QQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2274    }
2275}
2276
2277/// `VCVTPS2QQ_MASKZ` (VCVTPS2QQ). 
2278/// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
2279///
2280///
2281/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
2282///
2283/// Supported operand variants:
2284///
2285/// ```text
2286/// +---+----------+
2287/// | # | Operands |
2288/// +---+----------+
2289/// | 1 | Xmm, Mem |
2290/// | 2 | Xmm, Xmm |
2291/// | 3 | Ymm, Mem |
2292/// | 4 | Ymm, Xmm |
2293/// | 5 | Zmm, Mem |
2294/// | 6 | Zmm, Ymm |
2295/// +---+----------+
2296/// ```
2297pub trait Vcvtps2qqMaskzEmitter<A, B> {
2298    fn vcvtps2qq_maskz(&mut self, op0: A, op1: B);
2299}
2300
2301impl<'a> Vcvtps2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
2302    fn vcvtps2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
2303        self.emit(VCVTPS2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2304    }
2305}
2306
2307impl<'a> Vcvtps2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
2308    fn vcvtps2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
2309        self.emit(VCVTPS2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2310    }
2311}
2312
2313impl<'a> Vcvtps2qqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
2314    fn vcvtps2qq_maskz(&mut self, op0: Ymm, op1: Xmm) {
2315        self.emit(VCVTPS2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2316    }
2317}
2318
2319impl<'a> Vcvtps2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
2320    fn vcvtps2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
2321        self.emit(VCVTPS2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2322    }
2323}
2324
2325impl<'a> Vcvtps2qqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
2326    fn vcvtps2qq_maskz(&mut self, op0: Zmm, op1: Ymm) {
2327        self.emit(VCVTPS2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2328    }
2329}
2330
2331impl<'a> Vcvtps2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
2332    fn vcvtps2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
2333        self.emit(VCVTPS2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2334    }
2335}
2336
2337/// `VCVTPS2QQ_MASKZ_ER` (VCVTPS2QQ). 
2338/// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
2339///
2340///
2341/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
2342///
2343/// Supported operand variants:
2344///
2345/// ```text
2346/// +---+----------+
2347/// | # | Operands |
2348/// +---+----------+
2349/// | 1 | Zmm, Ymm |
2350/// +---+----------+
2351/// ```
2352pub trait Vcvtps2qqMaskzErEmitter<A, B> {
2353    fn vcvtps2qq_maskz_er(&mut self, op0: A, op1: B);
2354}
2355
2356impl<'a> Vcvtps2qqMaskzErEmitter<Zmm, Ymm> for Assembler<'a> {
2357    fn vcvtps2qq_maskz_er(&mut self, op0: Zmm, op1: Ymm) {
2358        self.emit(VCVTPS2QQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2359    }
2360}
2361
2362/// `VCVTQQ2PD` (VCVTQQ2PD). 
2363/// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
2364///
2365///
2366/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
2367///
2368/// Supported operand variants:
2369///
2370/// ```text
2371/// +---+----------+
2372/// | # | Operands |
2373/// +---+----------+
2374/// | 1 | Xmm, Mem |
2375/// | 2 | Xmm, Xmm |
2376/// | 3 | Ymm, Mem |
2377/// | 4 | Ymm, Ymm |
2378/// | 5 | Zmm, Mem |
2379/// | 6 | Zmm, Zmm |
2380/// +---+----------+
2381/// ```
2382pub trait Vcvtqq2pdEmitter<A, B> {
2383    fn vcvtqq2pd(&mut self, op0: A, op1: B);
2384}
2385
2386impl<'a> Vcvtqq2pdEmitter<Xmm, Xmm> for Assembler<'a> {
2387    fn vcvtqq2pd(&mut self, op0: Xmm, op1: Xmm) {
2388        self.emit(VCVTQQ2PD128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2389    }
2390}
2391
2392impl<'a> Vcvtqq2pdEmitter<Xmm, Mem> for Assembler<'a> {
2393    fn vcvtqq2pd(&mut self, op0: Xmm, op1: Mem) {
2394        self.emit(VCVTQQ2PD128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2395    }
2396}
2397
2398impl<'a> Vcvtqq2pdEmitter<Ymm, Ymm> for Assembler<'a> {
2399    fn vcvtqq2pd(&mut self, op0: Ymm, op1: Ymm) {
2400        self.emit(VCVTQQ2PD256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2401    }
2402}
2403
2404impl<'a> Vcvtqq2pdEmitter<Ymm, Mem> for Assembler<'a> {
2405    fn vcvtqq2pd(&mut self, op0: Ymm, op1: Mem) {
2406        self.emit(VCVTQQ2PD256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2407    }
2408}
2409
2410impl<'a> Vcvtqq2pdEmitter<Zmm, Zmm> for Assembler<'a> {
2411    fn vcvtqq2pd(&mut self, op0: Zmm, op1: Zmm) {
2412        self.emit(VCVTQQ2PD512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2413    }
2414}
2415
2416impl<'a> Vcvtqq2pdEmitter<Zmm, Mem> for Assembler<'a> {
2417    fn vcvtqq2pd(&mut self, op0: Zmm, op1: Mem) {
2418        self.emit(VCVTQQ2PD512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2419    }
2420}
2421
2422/// `VCVTQQ2PD_ER` (VCVTQQ2PD). 
2423/// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
2424///
2425///
2426/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
2427///
2428/// Supported operand variants:
2429///
2430/// ```text
2431/// +---+----------+
2432/// | # | Operands |
2433/// +---+----------+
2434/// | 1 | Zmm, Zmm |
2435/// +---+----------+
2436/// ```
2437pub trait Vcvtqq2pdErEmitter<A, B> {
2438    fn vcvtqq2pd_er(&mut self, op0: A, op1: B);
2439}
2440
2441impl<'a> Vcvtqq2pdErEmitter<Zmm, Zmm> for Assembler<'a> {
2442    fn vcvtqq2pd_er(&mut self, op0: Zmm, op1: Zmm) {
2443        self.emit(VCVTQQ2PD512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2444    }
2445}
2446
2447/// `VCVTQQ2PD_MASK` (VCVTQQ2PD). 
2448/// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
2449///
2450///
2451/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
2452///
2453/// Supported operand variants:
2454///
2455/// ```text
2456/// +---+----------+
2457/// | # | Operands |
2458/// +---+----------+
2459/// | 1 | Xmm, Mem |
2460/// | 2 | Xmm, Xmm |
2461/// | 3 | Ymm, Mem |
2462/// | 4 | Ymm, Ymm |
2463/// | 5 | Zmm, Mem |
2464/// | 6 | Zmm, Zmm |
2465/// +---+----------+
2466/// ```
2467pub trait Vcvtqq2pdMaskEmitter<A, B> {
2468    fn vcvtqq2pd_mask(&mut self, op0: A, op1: B);
2469}
2470
2471impl<'a> Vcvtqq2pdMaskEmitter<Xmm, Xmm> for Assembler<'a> {
2472    fn vcvtqq2pd_mask(&mut self, op0: Xmm, op1: Xmm) {
2473        self.emit(VCVTQQ2PD128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2474    }
2475}
2476
2477impl<'a> Vcvtqq2pdMaskEmitter<Xmm, Mem> for Assembler<'a> {
2478    fn vcvtqq2pd_mask(&mut self, op0: Xmm, op1: Mem) {
2479        self.emit(VCVTQQ2PD128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2480    }
2481}
2482
2483impl<'a> Vcvtqq2pdMaskEmitter<Ymm, Ymm> for Assembler<'a> {
2484    fn vcvtqq2pd_mask(&mut self, op0: Ymm, op1: Ymm) {
2485        self.emit(VCVTQQ2PD256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2486    }
2487}
2488
2489impl<'a> Vcvtqq2pdMaskEmitter<Ymm, Mem> for Assembler<'a> {
2490    fn vcvtqq2pd_mask(&mut self, op0: Ymm, op1: Mem) {
2491        self.emit(VCVTQQ2PD256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2492    }
2493}
2494
2495impl<'a> Vcvtqq2pdMaskEmitter<Zmm, Zmm> for Assembler<'a> {
2496    fn vcvtqq2pd_mask(&mut self, op0: Zmm, op1: Zmm) {
2497        self.emit(VCVTQQ2PD512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2498    }
2499}
2500
2501impl<'a> Vcvtqq2pdMaskEmitter<Zmm, Mem> for Assembler<'a> {
2502    fn vcvtqq2pd_mask(&mut self, op0: Zmm, op1: Mem) {
2503        self.emit(VCVTQQ2PD512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2504    }
2505}
2506
2507/// `VCVTQQ2PD_MASK_ER` (VCVTQQ2PD). 
2508/// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
2509///
2510///
2511/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
2512///
2513/// Supported operand variants:
2514///
2515/// ```text
2516/// +---+----------+
2517/// | # | Operands |
2518/// +---+----------+
2519/// | 1 | Zmm, Zmm |
2520/// +---+----------+
2521/// ```
2522pub trait Vcvtqq2pdMaskErEmitter<A, B> {
2523    fn vcvtqq2pd_mask_er(&mut self, op0: A, op1: B);
2524}
2525
2526impl<'a> Vcvtqq2pdMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
2527    fn vcvtqq2pd_mask_er(&mut self, op0: Zmm, op1: Zmm) {
2528        self.emit(VCVTQQ2PD512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2529    }
2530}
2531
2532/// `VCVTQQ2PD_MASKZ` (VCVTQQ2PD). 
2533/// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
2534///
2535///
2536/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
2537///
2538/// Supported operand variants:
2539///
2540/// ```text
2541/// +---+----------+
2542/// | # | Operands |
2543/// +---+----------+
2544/// | 1 | Xmm, Mem |
2545/// | 2 | Xmm, Xmm |
2546/// | 3 | Ymm, Mem |
2547/// | 4 | Ymm, Ymm |
2548/// | 5 | Zmm, Mem |
2549/// | 6 | Zmm, Zmm |
2550/// +---+----------+
2551/// ```
2552pub trait Vcvtqq2pdMaskzEmitter<A, B> {
2553    fn vcvtqq2pd_maskz(&mut self, op0: A, op1: B);
2554}
2555
2556impl<'a> Vcvtqq2pdMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
2557    fn vcvtqq2pd_maskz(&mut self, op0: Xmm, op1: Xmm) {
2558        self.emit(VCVTQQ2PD128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2559    }
2560}
2561
2562impl<'a> Vcvtqq2pdMaskzEmitter<Xmm, Mem> for Assembler<'a> {
2563    fn vcvtqq2pd_maskz(&mut self, op0: Xmm, op1: Mem) {
2564        self.emit(VCVTQQ2PD128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2565    }
2566}
2567
2568impl<'a> Vcvtqq2pdMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
2569    fn vcvtqq2pd_maskz(&mut self, op0: Ymm, op1: Ymm) {
2570        self.emit(VCVTQQ2PD256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2571    }
2572}
2573
2574impl<'a> Vcvtqq2pdMaskzEmitter<Ymm, Mem> for Assembler<'a> {
2575    fn vcvtqq2pd_maskz(&mut self, op0: Ymm, op1: Mem) {
2576        self.emit(VCVTQQ2PD256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2577    }
2578}
2579
2580impl<'a> Vcvtqq2pdMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
2581    fn vcvtqq2pd_maskz(&mut self, op0: Zmm, op1: Zmm) {
2582        self.emit(VCVTQQ2PD512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2583    }
2584}
2585
2586impl<'a> Vcvtqq2pdMaskzEmitter<Zmm, Mem> for Assembler<'a> {
2587    fn vcvtqq2pd_maskz(&mut self, op0: Zmm, op1: Mem) {
2588        self.emit(VCVTQQ2PD512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2589    }
2590}
2591
2592/// `VCVTQQ2PD_MASKZ_ER` (VCVTQQ2PD). 
2593/// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
2594///
2595///
2596/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
2597///
2598/// Supported operand variants:
2599///
2600/// ```text
2601/// +---+----------+
2602/// | # | Operands |
2603/// +---+----------+
2604/// | 1 | Zmm, Zmm |
2605/// +---+----------+
2606/// ```
2607pub trait Vcvtqq2pdMaskzErEmitter<A, B> {
2608    fn vcvtqq2pd_maskz_er(&mut self, op0: A, op1: B);
2609}
2610
2611impl<'a> Vcvtqq2pdMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
2612    fn vcvtqq2pd_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
2613        self.emit(VCVTQQ2PD512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2614    }
2615}
2616
2617/// `VCVTQQ2PS` (VCVTQQ2PS). 
2618/// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
2619///
2620///
2621/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
2622///
2623/// Supported operand variants:
2624///
2625/// ```text
2626/// +---+----------+
2627/// | # | Operands |
2628/// +---+----------+
2629/// | 1 | Xmm, Mem |
2630/// | 2 | Xmm, Xmm |
2631/// | 3 | Xmm, Ymm |
2632/// | 4 | Ymm, Mem |
2633/// | 5 | Ymm, Zmm |
2634/// +---+----------+
2635/// ```
2636pub trait Vcvtqq2psEmitter<A, B> {
2637    fn vcvtqq2ps(&mut self, op0: A, op1: B);
2638}
2639
2640impl<'a> Vcvtqq2psEmitter<Xmm, Xmm> for Assembler<'a> {
2641    fn vcvtqq2ps(&mut self, op0: Xmm, op1: Xmm) {
2642        self.emit(VCVTQQ2PS128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2643    }
2644}
2645
2646impl<'a> Vcvtqq2psEmitter<Xmm, Mem> for Assembler<'a> {
2647    fn vcvtqq2ps(&mut self, op0: Xmm, op1: Mem) {
2648        self.emit(VCVTQQ2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2649    }
2650}
2651
2652impl<'a> Vcvtqq2psEmitter<Xmm, Ymm> for Assembler<'a> {
2653    fn vcvtqq2ps(&mut self, op0: Xmm, op1: Ymm) {
2654        self.emit(VCVTQQ2PS256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2655    }
2656}
2657
2658impl<'a> Vcvtqq2psEmitter<Ymm, Zmm> for Assembler<'a> {
2659    fn vcvtqq2ps(&mut self, op0: Ymm, op1: Zmm) {
2660        self.emit(VCVTQQ2PS512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2661    }
2662}
2663
2664impl<'a> Vcvtqq2psEmitter<Ymm, Mem> for Assembler<'a> {
2665    fn vcvtqq2ps(&mut self, op0: Ymm, op1: Mem) {
2666        self.emit(VCVTQQ2PS512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2667    }
2668}
2669
2670/// `VCVTQQ2PS_ER` (VCVTQQ2PS). 
2671/// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
2672///
2673///
2674/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
2675///
2676/// Supported operand variants:
2677///
2678/// ```text
2679/// +---+----------+
2680/// | # | Operands |
2681/// +---+----------+
2682/// | 1 | Ymm, Zmm |
2683/// +---+----------+
2684/// ```
2685pub trait Vcvtqq2psErEmitter<A, B> {
2686    fn vcvtqq2ps_er(&mut self, op0: A, op1: B);
2687}
2688
2689impl<'a> Vcvtqq2psErEmitter<Ymm, Zmm> for Assembler<'a> {
2690    fn vcvtqq2ps_er(&mut self, op0: Ymm, op1: Zmm) {
2691        self.emit(VCVTQQ2PS512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2692    }
2693}
2694
2695/// `VCVTQQ2PS_MASK` (VCVTQQ2PS). 
2696/// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
2697///
2698///
2699/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
2700///
2701/// Supported operand variants:
2702///
2703/// ```text
2704/// +---+----------+
2705/// | # | Operands |
2706/// +---+----------+
2707/// | 1 | Xmm, Mem |
2708/// | 2 | Xmm, Xmm |
2709/// | 3 | Xmm, Ymm |
2710/// | 4 | Ymm, Mem |
2711/// | 5 | Ymm, Zmm |
2712/// +---+----------+
2713/// ```
2714pub trait Vcvtqq2psMaskEmitter<A, B> {
2715    fn vcvtqq2ps_mask(&mut self, op0: A, op1: B);
2716}
2717
2718impl<'a> Vcvtqq2psMaskEmitter<Xmm, Xmm> for Assembler<'a> {
2719    fn vcvtqq2ps_mask(&mut self, op0: Xmm, op1: Xmm) {
2720        self.emit(VCVTQQ2PS128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2721    }
2722}
2723
2724impl<'a> Vcvtqq2psMaskEmitter<Xmm, Mem> for Assembler<'a> {
2725    fn vcvtqq2ps_mask(&mut self, op0: Xmm, op1: Mem) {
2726        self.emit(VCVTQQ2PS128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2727    }
2728}
2729
2730impl<'a> Vcvtqq2psMaskEmitter<Xmm, Ymm> for Assembler<'a> {
2731    fn vcvtqq2ps_mask(&mut self, op0: Xmm, op1: Ymm) {
2732        self.emit(VCVTQQ2PS256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2733    }
2734}
2735
2736impl<'a> Vcvtqq2psMaskEmitter<Ymm, Zmm> for Assembler<'a> {
2737    fn vcvtqq2ps_mask(&mut self, op0: Ymm, op1: Zmm) {
2738        self.emit(VCVTQQ2PS512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2739    }
2740}
2741
2742impl<'a> Vcvtqq2psMaskEmitter<Ymm, Mem> for Assembler<'a> {
2743    fn vcvtqq2ps_mask(&mut self, op0: Ymm, op1: Mem) {
2744        self.emit(VCVTQQ2PS512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2745    }
2746}
2747
2748/// `VCVTQQ2PS_MASK_ER` (VCVTQQ2PS). 
2749/// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
2750///
2751///
2752/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
2753///
2754/// Supported operand variants:
2755///
2756/// ```text
2757/// +---+----------+
2758/// | # | Operands |
2759/// +---+----------+
2760/// | 1 | Ymm, Zmm |
2761/// +---+----------+
2762/// ```
2763pub trait Vcvtqq2psMaskErEmitter<A, B> {
2764    fn vcvtqq2ps_mask_er(&mut self, op0: A, op1: B);
2765}
2766
2767impl<'a> Vcvtqq2psMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
2768    fn vcvtqq2ps_mask_er(&mut self, op0: Ymm, op1: Zmm) {
2769        self.emit(VCVTQQ2PS512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2770    }
2771}
2772
2773/// `VCVTQQ2PS_MASKZ` (VCVTQQ2PS). 
2774/// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
2775///
2776///
2777/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
2778///
2779/// Supported operand variants:
2780///
2781/// ```text
2782/// +---+----------+
2783/// | # | Operands |
2784/// +---+----------+
2785/// | 1 | Xmm, Mem |
2786/// | 2 | Xmm, Xmm |
2787/// | 3 | Xmm, Ymm |
2788/// | 4 | Ymm, Mem |
2789/// | 5 | Ymm, Zmm |
2790/// +---+----------+
2791/// ```
2792pub trait Vcvtqq2psMaskzEmitter<A, B> {
2793    fn vcvtqq2ps_maskz(&mut self, op0: A, op1: B);
2794}
2795
2796impl<'a> Vcvtqq2psMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
2797    fn vcvtqq2ps_maskz(&mut self, op0: Xmm, op1: Xmm) {
2798        self.emit(VCVTQQ2PS128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2799    }
2800}
2801
2802impl<'a> Vcvtqq2psMaskzEmitter<Xmm, Mem> for Assembler<'a> {
2803    fn vcvtqq2ps_maskz(&mut self, op0: Xmm, op1: Mem) {
2804        self.emit(VCVTQQ2PS128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2805    }
2806}
2807
2808impl<'a> Vcvtqq2psMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
2809    fn vcvtqq2ps_maskz(&mut self, op0: Xmm, op1: Ymm) {
2810        self.emit(VCVTQQ2PS256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2811    }
2812}
2813
2814impl<'a> Vcvtqq2psMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
2815    fn vcvtqq2ps_maskz(&mut self, op0: Ymm, op1: Zmm) {
2816        self.emit(VCVTQQ2PS512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2817    }
2818}
2819
2820impl<'a> Vcvtqq2psMaskzEmitter<Ymm, Mem> for Assembler<'a> {
2821    fn vcvtqq2ps_maskz(&mut self, op0: Ymm, op1: Mem) {
2822        self.emit(VCVTQQ2PS512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2823    }
2824}
2825
2826/// `VCVTQQ2PS_MASKZ_ER` (VCVTQQ2PS). 
2827/// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
2828///
2829///
2830/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
2831///
2832/// Supported operand variants:
2833///
2834/// ```text
2835/// +---+----------+
2836/// | # | Operands |
2837/// +---+----------+
2838/// | 1 | Ymm, Zmm |
2839/// +---+----------+
2840/// ```
2841pub trait Vcvtqq2psMaskzErEmitter<A, B> {
2842    fn vcvtqq2ps_maskz_er(&mut self, op0: A, op1: B);
2843}
2844
2845impl<'a> Vcvtqq2psMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
2846    fn vcvtqq2ps_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
2847        self.emit(VCVTQQ2PS512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2848    }
2849}
2850
2851/// `VCVTTPD2QQ` (VCVTTPD2QQ). 
2852/// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
2853///
2854///
2855/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
2856///
2857/// Supported operand variants:
2858///
2859/// ```text
2860/// +---+----------+
2861/// | # | Operands |
2862/// +---+----------+
2863/// | 1 | Xmm, Mem |
2864/// | 2 | Xmm, Xmm |
2865/// | 3 | Ymm, Mem |
2866/// | 4 | Ymm, Ymm |
2867/// | 5 | Zmm, Mem |
2868/// | 6 | Zmm, Zmm |
2869/// +---+----------+
2870/// ```
2871pub trait Vcvttpd2qqEmitter<A, B> {
2872    fn vcvttpd2qq(&mut self, op0: A, op1: B);
2873}
2874
2875impl<'a> Vcvttpd2qqEmitter<Xmm, Xmm> for Assembler<'a> {
2876    fn vcvttpd2qq(&mut self, op0: Xmm, op1: Xmm) {
2877        self.emit(VCVTTPD2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2878    }
2879}
2880
2881impl<'a> Vcvttpd2qqEmitter<Xmm, Mem> for Assembler<'a> {
2882    fn vcvttpd2qq(&mut self, op0: Xmm, op1: Mem) {
2883        self.emit(VCVTTPD2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2884    }
2885}
2886
2887impl<'a> Vcvttpd2qqEmitter<Ymm, Ymm> for Assembler<'a> {
2888    fn vcvttpd2qq(&mut self, op0: Ymm, op1: Ymm) {
2889        self.emit(VCVTTPD2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2890    }
2891}
2892
2893impl<'a> Vcvttpd2qqEmitter<Ymm, Mem> for Assembler<'a> {
2894    fn vcvttpd2qq(&mut self, op0: Ymm, op1: Mem) {
2895        self.emit(VCVTTPD2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2896    }
2897}
2898
2899impl<'a> Vcvttpd2qqEmitter<Zmm, Zmm> for Assembler<'a> {
2900    fn vcvttpd2qq(&mut self, op0: Zmm, op1: Zmm) {
2901        self.emit(VCVTTPD2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2902    }
2903}
2904
2905impl<'a> Vcvttpd2qqEmitter<Zmm, Mem> for Assembler<'a> {
2906    fn vcvttpd2qq(&mut self, op0: Zmm, op1: Mem) {
2907        self.emit(VCVTTPD2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2908    }
2909}
2910
2911/// `VCVTTPD2QQ_MASK` (VCVTTPD2QQ). 
2912/// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
2913///
2914///
2915/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
2916///
2917/// Supported operand variants:
2918///
2919/// ```text
2920/// +---+----------+
2921/// | # | Operands |
2922/// +---+----------+
2923/// | 1 | Xmm, Mem |
2924/// | 2 | Xmm, Xmm |
2925/// | 3 | Ymm, Mem |
2926/// | 4 | Ymm, Ymm |
2927/// | 5 | Zmm, Mem |
2928/// | 6 | Zmm, Zmm |
2929/// +---+----------+
2930/// ```
2931pub trait Vcvttpd2qqMaskEmitter<A, B> {
2932    fn vcvttpd2qq_mask(&mut self, op0: A, op1: B);
2933}
2934
2935impl<'a> Vcvttpd2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
2936    fn vcvttpd2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
2937        self.emit(VCVTTPD2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2938    }
2939}
2940
2941impl<'a> Vcvttpd2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
2942    fn vcvttpd2qq_mask(&mut self, op0: Xmm, op1: Mem) {
2943        self.emit(VCVTTPD2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2944    }
2945}
2946
2947impl<'a> Vcvttpd2qqMaskEmitter<Ymm, Ymm> for Assembler<'a> {
2948    fn vcvttpd2qq_mask(&mut self, op0: Ymm, op1: Ymm) {
2949        self.emit(VCVTTPD2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2950    }
2951}
2952
2953impl<'a> Vcvttpd2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
2954    fn vcvttpd2qq_mask(&mut self, op0: Ymm, op1: Mem) {
2955        self.emit(VCVTTPD2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2956    }
2957}
2958
2959impl<'a> Vcvttpd2qqMaskEmitter<Zmm, Zmm> for Assembler<'a> {
2960    fn vcvttpd2qq_mask(&mut self, op0: Zmm, op1: Zmm) {
2961        self.emit(VCVTTPD2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2962    }
2963}
2964
2965impl<'a> Vcvttpd2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
2966    fn vcvttpd2qq_mask(&mut self, op0: Zmm, op1: Mem) {
2967        self.emit(VCVTTPD2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2968    }
2969}
2970
2971/// `VCVTTPD2QQ_MASK_SAE` (VCVTTPD2QQ). 
2972/// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
2973///
2974///
2975/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
2976///
2977/// Supported operand variants:
2978///
2979/// ```text
2980/// +---+----------+
2981/// | # | Operands |
2982/// +---+----------+
2983/// | 1 | Zmm, Zmm |
2984/// +---+----------+
2985/// ```
2986pub trait Vcvttpd2qqMaskSaeEmitter<A, B> {
2987    fn vcvttpd2qq_mask_sae(&mut self, op0: A, op1: B);
2988}
2989
2990impl<'a> Vcvttpd2qqMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
2991    fn vcvttpd2qq_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
2992        self.emit(VCVTTPD2QQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2993    }
2994}
2995
2996/// `VCVTTPD2QQ_MASKZ` (VCVTTPD2QQ). 
2997/// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
2998///
2999///
3000/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
3001///
3002/// Supported operand variants:
3003///
3004/// ```text
3005/// +---+----------+
3006/// | # | Operands |
3007/// +---+----------+
3008/// | 1 | Xmm, Mem |
3009/// | 2 | Xmm, Xmm |
3010/// | 3 | Ymm, Mem |
3011/// | 4 | Ymm, Ymm |
3012/// | 5 | Zmm, Mem |
3013/// | 6 | Zmm, Zmm |
3014/// +---+----------+
3015/// ```
3016pub trait Vcvttpd2qqMaskzEmitter<A, B> {
3017    fn vcvttpd2qq_maskz(&mut self, op0: A, op1: B);
3018}
3019
3020impl<'a> Vcvttpd2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
3021    fn vcvttpd2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
3022        self.emit(VCVTTPD2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3023    }
3024}
3025
3026impl<'a> Vcvttpd2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
3027    fn vcvttpd2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
3028        self.emit(VCVTTPD2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3029    }
3030}
3031
3032impl<'a> Vcvttpd2qqMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
3033    fn vcvttpd2qq_maskz(&mut self, op0: Ymm, op1: Ymm) {
3034        self.emit(VCVTTPD2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3035    }
3036}
3037
3038impl<'a> Vcvttpd2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
3039    fn vcvttpd2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
3040        self.emit(VCVTTPD2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3041    }
3042}
3043
3044impl<'a> Vcvttpd2qqMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
3045    fn vcvttpd2qq_maskz(&mut self, op0: Zmm, op1: Zmm) {
3046        self.emit(VCVTTPD2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3047    }
3048}
3049
3050impl<'a> Vcvttpd2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
3051    fn vcvttpd2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
3052        self.emit(VCVTTPD2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3053    }
3054}
3055
3056/// `VCVTTPD2QQ_MASKZ_SAE` (VCVTTPD2QQ). 
3057/// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
3058///
3059///
3060/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
3061///
3062/// Supported operand variants:
3063///
3064/// ```text
3065/// +---+----------+
3066/// | # | Operands |
3067/// +---+----------+
3068/// | 1 | Zmm, Zmm |
3069/// +---+----------+
3070/// ```
3071pub trait Vcvttpd2qqMaskzSaeEmitter<A, B> {
3072    fn vcvttpd2qq_maskz_sae(&mut self, op0: A, op1: B);
3073}
3074
3075impl<'a> Vcvttpd2qqMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
3076    fn vcvttpd2qq_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
3077        self.emit(VCVTTPD2QQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3078    }
3079}
3080
3081/// `VCVTTPD2QQ_SAE` (VCVTTPD2QQ). 
3082/// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
3083///
3084///
3085/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
3086///
3087/// Supported operand variants:
3088///
3089/// ```text
3090/// +---+----------+
3091/// | # | Operands |
3092/// +---+----------+
3093/// | 1 | Zmm, Zmm |
3094/// +---+----------+
3095/// ```
3096pub trait Vcvttpd2qqSaeEmitter<A, B> {
3097    fn vcvttpd2qq_sae(&mut self, op0: A, op1: B);
3098}
3099
3100impl<'a> Vcvttpd2qqSaeEmitter<Zmm, Zmm> for Assembler<'a> {
3101    fn vcvttpd2qq_sae(&mut self, op0: Zmm, op1: Zmm) {
3102        self.emit(VCVTTPD2QQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3103    }
3104}
3105
3106/// `VCVTTPS2QQ` (VCVTTPS2QQ). 
3107/// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
3108///
3109///
3110/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
3111///
3112/// Supported operand variants:
3113///
3114/// ```text
3115/// +---+----------+
3116/// | # | Operands |
3117/// +---+----------+
3118/// | 1 | Xmm, Mem |
3119/// | 2 | Xmm, Xmm |
3120/// | 3 | Ymm, Mem |
3121/// | 4 | Ymm, Xmm |
3122/// | 5 | Zmm, Mem |
3123/// | 6 | Zmm, Ymm |
3124/// +---+----------+
3125/// ```
3126pub trait Vcvttps2qqEmitter<A, B> {
3127    fn vcvttps2qq(&mut self, op0: A, op1: B);
3128}
3129
3130impl<'a> Vcvttps2qqEmitter<Xmm, Xmm> for Assembler<'a> {
3131    fn vcvttps2qq(&mut self, op0: Xmm, op1: Xmm) {
3132        self.emit(VCVTTPS2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3133    }
3134}
3135
3136impl<'a> Vcvttps2qqEmitter<Xmm, Mem> for Assembler<'a> {
3137    fn vcvttps2qq(&mut self, op0: Xmm, op1: Mem) {
3138        self.emit(VCVTTPS2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3139    }
3140}
3141
3142impl<'a> Vcvttps2qqEmitter<Ymm, Xmm> for Assembler<'a> {
3143    fn vcvttps2qq(&mut self, op0: Ymm, op1: Xmm) {
3144        self.emit(VCVTTPS2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3145    }
3146}
3147
3148impl<'a> Vcvttps2qqEmitter<Ymm, Mem> for Assembler<'a> {
3149    fn vcvttps2qq(&mut self, op0: Ymm, op1: Mem) {
3150        self.emit(VCVTTPS2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3151    }
3152}
3153
3154impl<'a> Vcvttps2qqEmitter<Zmm, Ymm> for Assembler<'a> {
3155    fn vcvttps2qq(&mut self, op0: Zmm, op1: Ymm) {
3156        self.emit(VCVTTPS2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3157    }
3158}
3159
3160impl<'a> Vcvttps2qqEmitter<Zmm, Mem> for Assembler<'a> {
3161    fn vcvttps2qq(&mut self, op0: Zmm, op1: Mem) {
3162        self.emit(VCVTTPS2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3163    }
3164}
3165
3166/// `VCVTTPS2QQ_MASK` (VCVTTPS2QQ). 
3167/// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
3168///
3169///
3170/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
3171///
3172/// Supported operand variants:
3173///
3174/// ```text
3175/// +---+----------+
3176/// | # | Operands |
3177/// +---+----------+
3178/// | 1 | Xmm, Mem |
3179/// | 2 | Xmm, Xmm |
3180/// | 3 | Ymm, Mem |
3181/// | 4 | Ymm, Xmm |
3182/// | 5 | Zmm, Mem |
3183/// | 6 | Zmm, Ymm |
3184/// +---+----------+
3185/// ```
3186pub trait Vcvttps2qqMaskEmitter<A, B> {
3187    fn vcvttps2qq_mask(&mut self, op0: A, op1: B);
3188}
3189
3190impl<'a> Vcvttps2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
3191    fn vcvttps2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
3192        self.emit(VCVTTPS2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3193    }
3194}
3195
3196impl<'a> Vcvttps2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
3197    fn vcvttps2qq_mask(&mut self, op0: Xmm, op1: Mem) {
3198        self.emit(VCVTTPS2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3199    }
3200}
3201
3202impl<'a> Vcvttps2qqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
3203    fn vcvttps2qq_mask(&mut self, op0: Ymm, op1: Xmm) {
3204        self.emit(VCVTTPS2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3205    }
3206}
3207
3208impl<'a> Vcvttps2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
3209    fn vcvttps2qq_mask(&mut self, op0: Ymm, op1: Mem) {
3210        self.emit(VCVTTPS2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3211    }
3212}
3213
3214impl<'a> Vcvttps2qqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
3215    fn vcvttps2qq_mask(&mut self, op0: Zmm, op1: Ymm) {
3216        self.emit(VCVTTPS2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3217    }
3218}
3219
3220impl<'a> Vcvttps2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
3221    fn vcvttps2qq_mask(&mut self, op0: Zmm, op1: Mem) {
3222        self.emit(VCVTTPS2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3223    }
3224}
3225
3226/// `VCVTTPS2QQ_MASK_SAE` (VCVTTPS2QQ). 
3227/// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
3228///
3229///
3230/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
3231///
3232/// Supported operand variants:
3233///
3234/// ```text
3235/// +---+----------+
3236/// | # | Operands |
3237/// +---+----------+
3238/// | 1 | Zmm, Ymm |
3239/// +---+----------+
3240/// ```
3241pub trait Vcvttps2qqMaskSaeEmitter<A, B> {
3242    fn vcvttps2qq_mask_sae(&mut self, op0: A, op1: B);
3243}
3244
3245impl<'a> Vcvttps2qqMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
3246    fn vcvttps2qq_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
3247        self.emit(VCVTTPS2QQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3248    }
3249}
3250
3251/// `VCVTTPS2QQ_MASKZ` (VCVTTPS2QQ). 
3252/// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
3253///
3254///
3255/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
3256///
3257/// Supported operand variants:
3258///
3259/// ```text
3260/// +---+----------+
3261/// | # | Operands |
3262/// +---+----------+
3263/// | 1 | Xmm, Mem |
3264/// | 2 | Xmm, Xmm |
3265/// | 3 | Ymm, Mem |
3266/// | 4 | Ymm, Xmm |
3267/// | 5 | Zmm, Mem |
3268/// | 6 | Zmm, Ymm |
3269/// +---+----------+
3270/// ```
3271pub trait Vcvttps2qqMaskzEmitter<A, B> {
3272    fn vcvttps2qq_maskz(&mut self, op0: A, op1: B);
3273}
3274
3275impl<'a> Vcvttps2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
3276    fn vcvttps2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
3277        self.emit(VCVTTPS2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3278    }
3279}
3280
3281impl<'a> Vcvttps2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
3282    fn vcvttps2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
3283        self.emit(VCVTTPS2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3284    }
3285}
3286
3287impl<'a> Vcvttps2qqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
3288    fn vcvttps2qq_maskz(&mut self, op0: Ymm, op1: Xmm) {
3289        self.emit(VCVTTPS2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3290    }
3291}
3292
3293impl<'a> Vcvttps2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
3294    fn vcvttps2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
3295        self.emit(VCVTTPS2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3296    }
3297}
3298
3299impl<'a> Vcvttps2qqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
3300    fn vcvttps2qq_maskz(&mut self, op0: Zmm, op1: Ymm) {
3301        self.emit(VCVTTPS2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3302    }
3303}
3304
3305impl<'a> Vcvttps2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
3306    fn vcvttps2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
3307        self.emit(VCVTTPS2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3308    }
3309}
3310
3311/// `VCVTTPS2QQ_MASKZ_SAE` (VCVTTPS2QQ). 
3312/// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
3313///
3314///
3315/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
3316///
3317/// Supported operand variants:
3318///
3319/// ```text
3320/// +---+----------+
3321/// | # | Operands |
3322/// +---+----------+
3323/// | 1 | Zmm, Ymm |
3324/// +---+----------+
3325/// ```
3326pub trait Vcvttps2qqMaskzSaeEmitter<A, B> {
3327    fn vcvttps2qq_maskz_sae(&mut self, op0: A, op1: B);
3328}
3329
3330impl<'a> Vcvttps2qqMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
3331    fn vcvttps2qq_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
3332        self.emit(VCVTTPS2QQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3333    }
3334}
3335
3336/// `VCVTTPS2QQ_SAE` (VCVTTPS2QQ). 
3337/// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
3338///
3339///
3340/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
3341///
3342/// Supported operand variants:
3343///
3344/// ```text
3345/// +---+----------+
3346/// | # | Operands |
3347/// +---+----------+
3348/// | 1 | Zmm, Ymm |
3349/// +---+----------+
3350/// ```
3351pub trait Vcvttps2qqSaeEmitter<A, B> {
3352    fn vcvttps2qq_sae(&mut self, op0: A, op1: B);
3353}
3354
3355impl<'a> Vcvttps2qqSaeEmitter<Zmm, Ymm> for Assembler<'a> {
3356    fn vcvttps2qq_sae(&mut self, op0: Zmm, op1: Ymm) {
3357        self.emit(VCVTTPS2QQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3358    }
3359}
3360
3361/// `VFPCLASSPD` (VFPCLASSPD). 
3362/// The FPCLASSPD instruction checks the packed double precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:8/4/2] of the destination are cleared.
3363///
3364///
3365/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPD.html).
3366///
3367/// Supported operand variants:
3368///
3369/// ```text
3370/// +---+----------------+
3371/// | # | Operands       |
3372/// +---+----------------+
3373/// | 1 | KReg, Mem, Imm |
3374/// | 2 | KReg, Xmm, Imm |
3375/// | 3 | KReg, Ymm, Imm |
3376/// | 4 | KReg, Zmm, Imm |
3377/// +---+----------------+
3378/// ```
3379pub trait VfpclasspdEmitter<A, B, C> {
3380    fn vfpclasspd(&mut self, op0: A, op1: B, op2: C);
3381}
3382
3383impl<'a> VfpclasspdEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3384    fn vfpclasspd(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3385        self.emit(VFPCLASSPD128KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3386    }
3387}
3388
3389impl<'a> VfpclasspdEmitter<KReg, Mem, Imm> for Assembler<'a> {
3390    fn vfpclasspd(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3391        self.emit(VFPCLASSPD128KMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3392    }
3393}
3394
3395impl<'a> VfpclasspdEmitter<KReg, Ymm, Imm> for Assembler<'a> {
3396    fn vfpclasspd(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
3397        self.emit(VFPCLASSPD256KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3398    }
3399}
3400
3401impl<'a> VfpclasspdEmitter<KReg, Zmm, Imm> for Assembler<'a> {
3402    fn vfpclasspd(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
3403        self.emit(VFPCLASSPD512KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3404    }
3405}
3406
3407/// `VFPCLASSPD_MASK` (VFPCLASSPD). 
3408/// The FPCLASSPD instruction checks the packed double precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:8/4/2] of the destination are cleared.
3409///
3410///
3411/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPD.html).
3412///
3413/// Supported operand variants:
3414///
3415/// ```text
3416/// +---+----------------+
3417/// | # | Operands       |
3418/// +---+----------------+
3419/// | 1 | KReg, Mem, Imm |
3420/// | 2 | KReg, Xmm, Imm |
3421/// | 3 | KReg, Ymm, Imm |
3422/// | 4 | KReg, Zmm, Imm |
3423/// +---+----------------+
3424/// ```
3425pub trait VfpclasspdMaskEmitter<A, B, C> {
3426    fn vfpclasspd_mask(&mut self, op0: A, op1: B, op2: C);
3427}
3428
3429impl<'a> VfpclasspdMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3430    fn vfpclasspd_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3431        self.emit(VFPCLASSPD128KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3432    }
3433}
3434
3435impl<'a> VfpclasspdMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
3436    fn vfpclasspd_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3437        self.emit(VFPCLASSPD128KMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3438    }
3439}
3440
3441impl<'a> VfpclasspdMaskEmitter<KReg, Ymm, Imm> for Assembler<'a> {
3442    fn vfpclasspd_mask(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
3443        self.emit(VFPCLASSPD256KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3444    }
3445}
3446
3447impl<'a> VfpclasspdMaskEmitter<KReg, Zmm, Imm> for Assembler<'a> {
3448    fn vfpclasspd_mask(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
3449        self.emit(VFPCLASSPD512KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3450    }
3451}
3452
3453/// `VFPCLASSPS` (VFPCLASSPS). 
3454/// The FPCLASSPS instruction checks the packed single-precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:16/8/4] of the destination are cleared.
3455///
3456///
3457/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPS.html).
3458///
3459/// Supported operand variants:
3460///
3461/// ```text
3462/// +---+----------------+
3463/// | # | Operands       |
3464/// +---+----------------+
3465/// | 1 | KReg, Mem, Imm |
3466/// | 2 | KReg, Xmm, Imm |
3467/// | 3 | KReg, Ymm, Imm |
3468/// | 4 | KReg, Zmm, Imm |
3469/// +---+----------------+
3470/// ```
3471pub trait VfpclasspsEmitter<A, B, C> {
3472    fn vfpclassps(&mut self, op0: A, op1: B, op2: C);
3473}
3474
3475impl<'a> VfpclasspsEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3476    fn vfpclassps(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3477        self.emit(VFPCLASSPS128KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3478    }
3479}
3480
3481impl<'a> VfpclasspsEmitter<KReg, Mem, Imm> for Assembler<'a> {
3482    fn vfpclassps(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3483        self.emit(VFPCLASSPS128KMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3484    }
3485}
3486
3487impl<'a> VfpclasspsEmitter<KReg, Ymm, Imm> for Assembler<'a> {
3488    fn vfpclassps(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
3489        self.emit(VFPCLASSPS256KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3490    }
3491}
3492
3493impl<'a> VfpclasspsEmitter<KReg, Zmm, Imm> for Assembler<'a> {
3494    fn vfpclassps(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
3495        self.emit(VFPCLASSPS512KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3496    }
3497}
3498
3499/// `VFPCLASSPS_MASK` (VFPCLASSPS). 
3500/// The FPCLASSPS instruction checks the packed single-precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:16/8/4] of the destination are cleared.
3501///
3502///
3503/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPS.html).
3504///
3505/// Supported operand variants:
3506///
3507/// ```text
3508/// +---+----------------+
3509/// | # | Operands       |
3510/// +---+----------------+
3511/// | 1 | KReg, Mem, Imm |
3512/// | 2 | KReg, Xmm, Imm |
3513/// | 3 | KReg, Ymm, Imm |
3514/// | 4 | KReg, Zmm, Imm |
3515/// +---+----------------+
3516/// ```
3517pub trait VfpclasspsMaskEmitter<A, B, C> {
3518    fn vfpclassps_mask(&mut self, op0: A, op1: B, op2: C);
3519}
3520
3521impl<'a> VfpclasspsMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3522    fn vfpclassps_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3523        self.emit(VFPCLASSPS128KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3524    }
3525}
3526
3527impl<'a> VfpclasspsMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
3528    fn vfpclassps_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3529        self.emit(VFPCLASSPS128KMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3530    }
3531}
3532
3533impl<'a> VfpclasspsMaskEmitter<KReg, Ymm, Imm> for Assembler<'a> {
3534    fn vfpclassps_mask(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
3535        self.emit(VFPCLASSPS256KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3536    }
3537}
3538
3539impl<'a> VfpclasspsMaskEmitter<KReg, Zmm, Imm> for Assembler<'a> {
3540    fn vfpclassps_mask(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
3541        self.emit(VFPCLASSPS512KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3542    }
3543}
3544
3545/// `VFPCLASSSD` (VFPCLASSSD). 
3546/// The FPCLASSSD instruction checks the low double precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
3547///
3548///
3549/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSD.html).
3550///
3551/// Supported operand variants:
3552///
3553/// ```text
3554/// +---+----------------+
3555/// | # | Operands       |
3556/// +---+----------------+
3557/// | 1 | KReg, Mem, Imm |
3558/// | 2 | KReg, Xmm, Imm |
3559/// +---+----------------+
3560/// ```
3561pub trait VfpclasssdEmitter<A, B, C> {
3562    fn vfpclasssd(&mut self, op0: A, op1: B, op2: C);
3563}
3564
3565impl<'a> VfpclasssdEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3566    fn vfpclasssd(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3567        self.emit(VFPCLASSSDKRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3568    }
3569}
3570
3571impl<'a> VfpclasssdEmitter<KReg, Mem, Imm> for Assembler<'a> {
3572    fn vfpclasssd(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3573        self.emit(VFPCLASSSDKMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3574    }
3575}
3576
3577/// `VFPCLASSSD_MASK` (VFPCLASSSD). 
3578/// The FPCLASSSD instruction checks the low double precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
3579///
3580///
3581/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSD.html).
3582///
3583/// Supported operand variants:
3584///
3585/// ```text
3586/// +---+----------------+
3587/// | # | Operands       |
3588/// +---+----------------+
3589/// | 1 | KReg, Mem, Imm |
3590/// | 2 | KReg, Xmm, Imm |
3591/// +---+----------------+
3592/// ```
3593pub trait VfpclasssdMaskEmitter<A, B, C> {
3594    fn vfpclasssd_mask(&mut self, op0: A, op1: B, op2: C);
3595}
3596
3597impl<'a> VfpclasssdMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3598    fn vfpclasssd_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3599        self.emit(VFPCLASSSDKRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3600    }
3601}
3602
3603impl<'a> VfpclasssdMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
3604    fn vfpclasssd_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3605        self.emit(VFPCLASSSDKMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3606    }
3607}
3608
3609/// `VFPCLASSSS` (VFPCLASSSS). 
3610/// The FPCLASSSS instruction checks the low single-precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
3611///
3612///
3613/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSS.html).
3614///
3615/// Supported operand variants:
3616///
3617/// ```text
3618/// +---+----------------+
3619/// | # | Operands       |
3620/// +---+----------------+
3621/// | 1 | KReg, Mem, Imm |
3622/// | 2 | KReg, Xmm, Imm |
3623/// +---+----------------+
3624/// ```
3625pub trait VfpclassssEmitter<A, B, C> {
3626    fn vfpclassss(&mut self, op0: A, op1: B, op2: C);
3627}
3628
3629impl<'a> VfpclassssEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3630    fn vfpclassss(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3631        self.emit(VFPCLASSSSKRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3632    }
3633}
3634
3635impl<'a> VfpclassssEmitter<KReg, Mem, Imm> for Assembler<'a> {
3636    fn vfpclassss(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3637        self.emit(VFPCLASSSSKMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3638    }
3639}
3640
3641/// `VFPCLASSSS_MASK` (VFPCLASSSS). 
3642/// The FPCLASSSS instruction checks the low single-precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
3643///
3644///
3645/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSS.html).
3646///
3647/// Supported operand variants:
3648///
3649/// ```text
3650/// +---+----------------+
3651/// | # | Operands       |
3652/// +---+----------------+
3653/// | 1 | KReg, Mem, Imm |
3654/// | 2 | KReg, Xmm, Imm |
3655/// +---+----------------+
3656/// ```
3657pub trait VfpclassssMaskEmitter<A, B, C> {
3658    fn vfpclassss_mask(&mut self, op0: A, op1: B, op2: C);
3659}
3660
3661impl<'a> VfpclassssMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
3662    fn vfpclassss_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
3663        self.emit(VFPCLASSSSKRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3664    }
3665}
3666
3667impl<'a> VfpclassssMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
3668    fn vfpclassss_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
3669        self.emit(VFPCLASSSSKMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3670    }
3671}
3672
3673/// `VINSERTF32X8` (VINSERTF32X8). 
3674/// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
3675///
3676///
3677/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
3678///
3679/// Supported operand variants:
3680///
3681/// ```text
3682/// +---+--------------------+
3683/// | # | Operands           |
3684/// +---+--------------------+
3685/// | 1 | Zmm, Zmm, Mem, Imm |
3686/// | 2 | Zmm, Zmm, Ymm, Imm |
3687/// +---+--------------------+
3688/// ```
3689pub trait Vinsertf32x8Emitter<A, B, C, D> {
3690    fn vinsertf32x8(&mut self, op0: A, op1: B, op2: C, op3: D);
3691}
3692
3693impl<'a> Vinsertf32x8Emitter<Zmm, Zmm, Ymm, Imm> for Assembler<'a> {
3694    fn vinsertf32x8(&mut self, op0: Zmm, op1: Zmm, op2: Ymm, op3: Imm) {
3695        self.emit(VINSERTF32X8_512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3696    }
3697}
3698
3699impl<'a> Vinsertf32x8Emitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3700    fn vinsertf32x8(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3701        self.emit(VINSERTF32X8_512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3702    }
3703}
3704
3705/// `VINSERTF32X8_MASK` (VINSERTF32X8). 
3706/// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
3707///
3708///
3709/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
3710///
3711/// Supported operand variants:
3712///
3713/// ```text
3714/// +---+--------------------+
3715/// | # | Operands           |
3716/// +---+--------------------+
3717/// | 1 | Zmm, Zmm, Mem, Imm |
3718/// | 2 | Zmm, Zmm, Ymm, Imm |
3719/// +---+--------------------+
3720/// ```
3721pub trait Vinsertf32x8MaskEmitter<A, B, C, D> {
3722    fn vinsertf32x8_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
3723}
3724
3725impl<'a> Vinsertf32x8MaskEmitter<Zmm, Zmm, Ymm, Imm> for Assembler<'a> {
3726    fn vinsertf32x8_mask(&mut self, op0: Zmm, op1: Zmm, op2: Ymm, op3: Imm) {
3727        self.emit(VINSERTF32X8_512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3728    }
3729}
3730
3731impl<'a> Vinsertf32x8MaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3732    fn vinsertf32x8_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3733        self.emit(VINSERTF32X8_512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3734    }
3735}
3736
3737/// `VINSERTF32X8_MASKZ` (VINSERTF32X8). 
3738/// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
3739///
3740///
3741/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
3742///
3743/// Supported operand variants:
3744///
3745/// ```text
3746/// +---+--------------------+
3747/// | # | Operands           |
3748/// +---+--------------------+
3749/// | 1 | Zmm, Zmm, Mem, Imm |
3750/// | 2 | Zmm, Zmm, Ymm, Imm |
3751/// +---+--------------------+
3752/// ```
3753pub trait Vinsertf32x8MaskzEmitter<A, B, C, D> {
3754    fn vinsertf32x8_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
3755}
3756
3757impl<'a> Vinsertf32x8MaskzEmitter<Zmm, Zmm, Ymm, Imm> for Assembler<'a> {
3758    fn vinsertf32x8_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Ymm, op3: Imm) {
3759        self.emit(VINSERTF32X8_512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3760    }
3761}
3762
3763impl<'a> Vinsertf32x8MaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3764    fn vinsertf32x8_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3765        self.emit(VINSERTF32X8_512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3766    }
3767}
3768
3769/// `VINSERTF64X2` (VINSERTF64X2). 
3770/// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
3771///
3772///
3773/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
3774///
3775/// Supported operand variants:
3776///
3777/// ```text
3778/// +---+--------------------+
3779/// | # | Operands           |
3780/// +---+--------------------+
3781/// | 1 | Ymm, Ymm, Mem, Imm |
3782/// | 2 | Ymm, Ymm, Xmm, Imm |
3783/// | 3 | Zmm, Zmm, Mem, Imm |
3784/// | 4 | Zmm, Zmm, Xmm, Imm |
3785/// +---+--------------------+
3786/// ```
3787pub trait Vinsertf64x2Emitter<A, B, C, D> {
3788    fn vinsertf64x2(&mut self, op0: A, op1: B, op2: C, op3: D);
3789}
3790
3791impl<'a> Vinsertf64x2Emitter<Ymm, Ymm, Xmm, Imm> for Assembler<'a> {
3792    fn vinsertf64x2(&mut self, op0: Ymm, op1: Ymm, op2: Xmm, op3: Imm) {
3793        self.emit(VINSERTF64X2_256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3794    }
3795}
3796
3797impl<'a> Vinsertf64x2Emitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
3798    fn vinsertf64x2(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
3799        self.emit(VINSERTF64X2_256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3800    }
3801}
3802
3803impl<'a> Vinsertf64x2Emitter<Zmm, Zmm, Xmm, Imm> for Assembler<'a> {
3804    fn vinsertf64x2(&mut self, op0: Zmm, op1: Zmm, op2: Xmm, op3: Imm) {
3805        self.emit(VINSERTF64X2_512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3806    }
3807}
3808
3809impl<'a> Vinsertf64x2Emitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3810    fn vinsertf64x2(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3811        self.emit(VINSERTF64X2_512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3812    }
3813}
3814
3815/// `VINSERTF64X2_MASK` (VINSERTF64X2). 
3816/// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
3817///
3818///
3819/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
3820///
3821/// Supported operand variants:
3822///
3823/// ```text
3824/// +---+--------------------+
3825/// | # | Operands           |
3826/// +---+--------------------+
3827/// | 1 | Ymm, Ymm, Mem, Imm |
3828/// | 2 | Ymm, Ymm, Xmm, Imm |
3829/// | 3 | Zmm, Zmm, Mem, Imm |
3830/// | 4 | Zmm, Zmm, Xmm, Imm |
3831/// +---+--------------------+
3832/// ```
3833pub trait Vinsertf64x2MaskEmitter<A, B, C, D> {
3834    fn vinsertf64x2_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
3835}
3836
3837impl<'a> Vinsertf64x2MaskEmitter<Ymm, Ymm, Xmm, Imm> for Assembler<'a> {
3838    fn vinsertf64x2_mask(&mut self, op0: Ymm, op1: Ymm, op2: Xmm, op3: Imm) {
3839        self.emit(VINSERTF64X2_256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3840    }
3841}
3842
3843impl<'a> Vinsertf64x2MaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
3844    fn vinsertf64x2_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
3845        self.emit(VINSERTF64X2_256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3846    }
3847}
3848
3849impl<'a> Vinsertf64x2MaskEmitter<Zmm, Zmm, Xmm, Imm> for Assembler<'a> {
3850    fn vinsertf64x2_mask(&mut self, op0: Zmm, op1: Zmm, op2: Xmm, op3: Imm) {
3851        self.emit(VINSERTF64X2_512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3852    }
3853}
3854
3855impl<'a> Vinsertf64x2MaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3856    fn vinsertf64x2_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3857        self.emit(VINSERTF64X2_512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3858    }
3859}
3860
3861/// `VINSERTF64X2_MASKZ` (VINSERTF64X2). 
3862/// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
3863///
3864///
3865/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
3866///
3867/// Supported operand variants:
3868///
3869/// ```text
3870/// +---+--------------------+
3871/// | # | Operands           |
3872/// +---+--------------------+
3873/// | 1 | Ymm, Ymm, Mem, Imm |
3874/// | 2 | Ymm, Ymm, Xmm, Imm |
3875/// | 3 | Zmm, Zmm, Mem, Imm |
3876/// | 4 | Zmm, Zmm, Xmm, Imm |
3877/// +---+--------------------+
3878/// ```
3879pub trait Vinsertf64x2MaskzEmitter<A, B, C, D> {
3880    fn vinsertf64x2_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
3881}
3882
3883impl<'a> Vinsertf64x2MaskzEmitter<Ymm, Ymm, Xmm, Imm> for Assembler<'a> {
3884    fn vinsertf64x2_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Xmm, op3: Imm) {
3885        self.emit(VINSERTF64X2_256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3886    }
3887}
3888
3889impl<'a> Vinsertf64x2MaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
3890    fn vinsertf64x2_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
3891        self.emit(VINSERTF64X2_256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3892    }
3893}
3894
3895impl<'a> Vinsertf64x2MaskzEmitter<Zmm, Zmm, Xmm, Imm> for Assembler<'a> {
3896    fn vinsertf64x2_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Xmm, op3: Imm) {
3897        self.emit(VINSERTF64X2_512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3898    }
3899}
3900
3901impl<'a> Vinsertf64x2MaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3902    fn vinsertf64x2_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3903        self.emit(VINSERTF64X2_512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3904    }
3905}
3906
3907/// `VINSERTI32X8` (VINSERTI32X8). 
3908/// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
3909///
3910///
3911/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
3912///
3913/// Supported operand variants:
3914///
3915/// ```text
3916/// +---+--------------------+
3917/// | # | Operands           |
3918/// +---+--------------------+
3919/// | 1 | Zmm, Zmm, Mem, Imm |
3920/// | 2 | Zmm, Zmm, Ymm, Imm |
3921/// +---+--------------------+
3922/// ```
3923pub trait Vinserti32x8Emitter<A, B, C, D> {
3924    fn vinserti32x8(&mut self, op0: A, op1: B, op2: C, op3: D);
3925}
3926
3927impl<'a> Vinserti32x8Emitter<Zmm, Zmm, Ymm, Imm> for Assembler<'a> {
3928    fn vinserti32x8(&mut self, op0: Zmm, op1: Zmm, op2: Ymm, op3: Imm) {
3929        self.emit(VINSERTI32X8_512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3930    }
3931}
3932
3933impl<'a> Vinserti32x8Emitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3934    fn vinserti32x8(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3935        self.emit(VINSERTI32X8_512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3936    }
3937}
3938
3939/// `VINSERTI32X8_MASK` (VINSERTI32X8). 
3940/// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
3941///
3942///
3943/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
3944///
3945/// Supported operand variants:
3946///
3947/// ```text
3948/// +---+--------------------+
3949/// | # | Operands           |
3950/// +---+--------------------+
3951/// | 1 | Zmm, Zmm, Mem, Imm |
3952/// | 2 | Zmm, Zmm, Ymm, Imm |
3953/// +---+--------------------+
3954/// ```
3955pub trait Vinserti32x8MaskEmitter<A, B, C, D> {
3956    fn vinserti32x8_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
3957}
3958
3959impl<'a> Vinserti32x8MaskEmitter<Zmm, Zmm, Ymm, Imm> for Assembler<'a> {
3960    fn vinserti32x8_mask(&mut self, op0: Zmm, op1: Zmm, op2: Ymm, op3: Imm) {
3961        self.emit(VINSERTI32X8_512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3962    }
3963}
3964
3965impl<'a> Vinserti32x8MaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3966    fn vinserti32x8_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3967        self.emit(VINSERTI32X8_512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3968    }
3969}
3970
3971/// `VINSERTI32X8_MASKZ` (VINSERTI32X8). 
3972/// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
3973///
3974///
3975/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
3976///
3977/// Supported operand variants:
3978///
3979/// ```text
3980/// +---+--------------------+
3981/// | # | Operands           |
3982/// +---+--------------------+
3983/// | 1 | Zmm, Zmm, Mem, Imm |
3984/// | 2 | Zmm, Zmm, Ymm, Imm |
3985/// +---+--------------------+
3986/// ```
3987pub trait Vinserti32x8MaskzEmitter<A, B, C, D> {
3988    fn vinserti32x8_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
3989}
3990
3991impl<'a> Vinserti32x8MaskzEmitter<Zmm, Zmm, Ymm, Imm> for Assembler<'a> {
3992    fn vinserti32x8_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Ymm, op3: Imm) {
3993        self.emit(VINSERTI32X8_512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3994    }
3995}
3996
3997impl<'a> Vinserti32x8MaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3998    fn vinserti32x8_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3999        self.emit(VINSERTI32X8_512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4000    }
4001}
4002
4003/// `VINSERTI64X2` (VINSERTI64X2). 
4004/// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
4005///
4006///
4007/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
4008///
4009/// Supported operand variants:
4010///
4011/// ```text
4012/// +---+--------------------+
4013/// | # | Operands           |
4014/// +---+--------------------+
4015/// | 1 | Ymm, Ymm, Mem, Imm |
4016/// | 2 | Ymm, Ymm, Xmm, Imm |
4017/// | 3 | Zmm, Zmm, Mem, Imm |
4018/// | 4 | Zmm, Zmm, Xmm, Imm |
4019/// +---+--------------------+
4020/// ```
4021pub trait Vinserti64x2Emitter<A, B, C, D> {
4022    fn vinserti64x2(&mut self, op0: A, op1: B, op2: C, op3: D);
4023}
4024
4025impl<'a> Vinserti64x2Emitter<Ymm, Ymm, Xmm, Imm> for Assembler<'a> {
4026    fn vinserti64x2(&mut self, op0: Ymm, op1: Ymm, op2: Xmm, op3: Imm) {
4027        self.emit(VINSERTI64X2_256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4028    }
4029}
4030
4031impl<'a> Vinserti64x2Emitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
4032    fn vinserti64x2(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
4033        self.emit(VINSERTI64X2_256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4034    }
4035}
4036
4037impl<'a> Vinserti64x2Emitter<Zmm, Zmm, Xmm, Imm> for Assembler<'a> {
4038    fn vinserti64x2(&mut self, op0: Zmm, op1: Zmm, op2: Xmm, op3: Imm) {
4039        self.emit(VINSERTI64X2_512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4040    }
4041}
4042
4043impl<'a> Vinserti64x2Emitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
4044    fn vinserti64x2(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
4045        self.emit(VINSERTI64X2_512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4046    }
4047}
4048
4049/// `VINSERTI64X2_MASK` (VINSERTI64X2). 
4050/// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
4051///
4052///
4053/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
4054///
4055/// Supported operand variants:
4056///
4057/// ```text
4058/// +---+--------------------+
4059/// | # | Operands           |
4060/// +---+--------------------+
4061/// | 1 | Ymm, Ymm, Mem, Imm |
4062/// | 2 | Ymm, Ymm, Xmm, Imm |
4063/// | 3 | Zmm, Zmm, Mem, Imm |
4064/// | 4 | Zmm, Zmm, Xmm, Imm |
4065/// +---+--------------------+
4066/// ```
4067pub trait Vinserti64x2MaskEmitter<A, B, C, D> {
4068    fn vinserti64x2_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
4069}
4070
4071impl<'a> Vinserti64x2MaskEmitter<Ymm, Ymm, Xmm, Imm> for Assembler<'a> {
4072    fn vinserti64x2_mask(&mut self, op0: Ymm, op1: Ymm, op2: Xmm, op3: Imm) {
4073        self.emit(VINSERTI64X2_256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4074    }
4075}
4076
4077impl<'a> Vinserti64x2MaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
4078    fn vinserti64x2_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
4079        self.emit(VINSERTI64X2_256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4080    }
4081}
4082
4083impl<'a> Vinserti64x2MaskEmitter<Zmm, Zmm, Xmm, Imm> for Assembler<'a> {
4084    fn vinserti64x2_mask(&mut self, op0: Zmm, op1: Zmm, op2: Xmm, op3: Imm) {
4085        self.emit(VINSERTI64X2_512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4086    }
4087}
4088
4089impl<'a> Vinserti64x2MaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
4090    fn vinserti64x2_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
4091        self.emit(VINSERTI64X2_512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4092    }
4093}
4094
4095/// `VINSERTI64X2_MASKZ` (VINSERTI64X2). 
4096/// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
4097///
4098///
4099/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
4100///
4101/// Supported operand variants:
4102///
4103/// ```text
4104/// +---+--------------------+
4105/// | # | Operands           |
4106/// +---+--------------------+
4107/// | 1 | Ymm, Ymm, Mem, Imm |
4108/// | 2 | Ymm, Ymm, Xmm, Imm |
4109/// | 3 | Zmm, Zmm, Mem, Imm |
4110/// | 4 | Zmm, Zmm, Xmm, Imm |
4111/// +---+--------------------+
4112/// ```
4113pub trait Vinserti64x2MaskzEmitter<A, B, C, D> {
4114    fn vinserti64x2_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
4115}
4116
4117impl<'a> Vinserti64x2MaskzEmitter<Ymm, Ymm, Xmm, Imm> for Assembler<'a> {
4118    fn vinserti64x2_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Xmm, op3: Imm) {
4119        self.emit(VINSERTI64X2_256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4120    }
4121}
4122
4123impl<'a> Vinserti64x2MaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
4124    fn vinserti64x2_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
4125        self.emit(VINSERTI64X2_256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4126    }
4127}
4128
4129impl<'a> Vinserti64x2MaskzEmitter<Zmm, Zmm, Xmm, Imm> for Assembler<'a> {
4130    fn vinserti64x2_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Xmm, op3: Imm) {
4131        self.emit(VINSERTI64X2_512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4132    }
4133}
4134
4135impl<'a> Vinserti64x2MaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
4136    fn vinserti64x2_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
4137        self.emit(VINSERTI64X2_512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4138    }
4139}
4140
4141/// `VORPD` (VORPD). 
4142/// Performs a bitwise logical OR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
4143///
4144///
4145/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPD.html).
4146///
4147/// Supported operand variants:
4148///
4149/// ```text
4150/// +---+---------------+
4151/// | # | Operands      |
4152/// +---+---------------+
4153/// | 1 | Xmm, Xmm, Mem |
4154/// | 2 | Xmm, Xmm, Xmm |
4155/// | 3 | Ymm, Ymm, Mem |
4156/// | 4 | Ymm, Ymm, Ymm |
4157/// | 5 | Zmm, Zmm, Mem |
4158/// | 6 | Zmm, Zmm, Zmm |
4159/// +---+---------------+
4160/// ```
4161pub trait VorpdEmitter<A, B, C> {
4162    fn vorpd(&mut self, op0: A, op1: B, op2: C);
4163}
4164
4165impl<'a> VorpdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4166    fn vorpd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4167        self.emit(VORPD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4168    }
4169}
4170
4171impl<'a> VorpdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4172    fn vorpd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4173        self.emit(VORPD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4174    }
4175}
4176
4177impl<'a> VorpdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4178    fn vorpd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4179        self.emit(VORPD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4180    }
4181}
4182
4183impl<'a> VorpdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4184    fn vorpd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4185        self.emit(VORPD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4186    }
4187}
4188
4189impl<'a> VorpdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4190    fn vorpd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4191        self.emit(VORPD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4192    }
4193}
4194
4195impl<'a> VorpdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4196    fn vorpd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4197        self.emit(VORPD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4198    }
4199}
4200
4201/// `VORPD_MASK` (VORPD). 
4202/// Performs a bitwise logical OR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
4203///
4204///
4205/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPD.html).
4206///
4207/// Supported operand variants:
4208///
4209/// ```text
4210/// +---+---------------+
4211/// | # | Operands      |
4212/// +---+---------------+
4213/// | 1 | Xmm, Xmm, Mem |
4214/// | 2 | Xmm, Xmm, Xmm |
4215/// | 3 | Ymm, Ymm, Mem |
4216/// | 4 | Ymm, Ymm, Ymm |
4217/// | 5 | Zmm, Zmm, Mem |
4218/// | 6 | Zmm, Zmm, Zmm |
4219/// +---+---------------+
4220/// ```
4221pub trait VorpdMaskEmitter<A, B, C> {
4222    fn vorpd_mask(&mut self, op0: A, op1: B, op2: C);
4223}
4224
4225impl<'a> VorpdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4226    fn vorpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4227        self.emit(VORPD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4228    }
4229}
4230
4231impl<'a> VorpdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4232    fn vorpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4233        self.emit(VORPD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4234    }
4235}
4236
4237impl<'a> VorpdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4238    fn vorpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4239        self.emit(VORPD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4240    }
4241}
4242
4243impl<'a> VorpdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4244    fn vorpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4245        self.emit(VORPD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4246    }
4247}
4248
4249impl<'a> VorpdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4250    fn vorpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4251        self.emit(VORPD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4252    }
4253}
4254
4255impl<'a> VorpdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4256    fn vorpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4257        self.emit(VORPD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4258    }
4259}
4260
4261/// `VORPD_MASKZ` (VORPD). 
4262/// Performs a bitwise logical OR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
4263///
4264///
4265/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPD.html).
4266///
4267/// Supported operand variants:
4268///
4269/// ```text
4270/// +---+---------------+
4271/// | # | Operands      |
4272/// +---+---------------+
4273/// | 1 | Xmm, Xmm, Mem |
4274/// | 2 | Xmm, Xmm, Xmm |
4275/// | 3 | Ymm, Ymm, Mem |
4276/// | 4 | Ymm, Ymm, Ymm |
4277/// | 5 | Zmm, Zmm, Mem |
4278/// | 6 | Zmm, Zmm, Zmm |
4279/// +---+---------------+
4280/// ```
4281pub trait VorpdMaskzEmitter<A, B, C> {
4282    fn vorpd_maskz(&mut self, op0: A, op1: B, op2: C);
4283}
4284
4285impl<'a> VorpdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4286    fn vorpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4287        self.emit(VORPD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4288    }
4289}
4290
4291impl<'a> VorpdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4292    fn vorpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4293        self.emit(VORPD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4294    }
4295}
4296
4297impl<'a> VorpdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4298    fn vorpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4299        self.emit(VORPD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4300    }
4301}
4302
4303impl<'a> VorpdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4304    fn vorpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4305        self.emit(VORPD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4306    }
4307}
4308
4309impl<'a> VorpdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4310    fn vorpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4311        self.emit(VORPD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4312    }
4313}
4314
4315impl<'a> VorpdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4316    fn vorpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4317        self.emit(VORPD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4318    }
4319}
4320
4321/// `VORPS` (VORPS). 
4322/// Performs a bitwise logical OR of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
4323///
4324///
4325/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPS.html).
4326///
4327/// Supported operand variants:
4328///
4329/// ```text
4330/// +---+---------------+
4331/// | # | Operands      |
4332/// +---+---------------+
4333/// | 1 | Xmm, Xmm, Mem |
4334/// | 2 | Xmm, Xmm, Xmm |
4335/// | 3 | Ymm, Ymm, Mem |
4336/// | 4 | Ymm, Ymm, Ymm |
4337/// | 5 | Zmm, Zmm, Mem |
4338/// | 6 | Zmm, Zmm, Zmm |
4339/// +---+---------------+
4340/// ```
4341pub trait VorpsEmitter<A, B, C> {
4342    fn vorps(&mut self, op0: A, op1: B, op2: C);
4343}
4344
4345impl<'a> VorpsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4346    fn vorps(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4347        self.emit(VORPS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4348    }
4349}
4350
4351impl<'a> VorpsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4352    fn vorps(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4353        self.emit(VORPS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4354    }
4355}
4356
4357impl<'a> VorpsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4358    fn vorps(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4359        self.emit(VORPS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4360    }
4361}
4362
4363impl<'a> VorpsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4364    fn vorps(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4365        self.emit(VORPS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4366    }
4367}
4368
4369impl<'a> VorpsEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4370    fn vorps(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4371        self.emit(VORPS512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4372    }
4373}
4374
4375impl<'a> VorpsEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4376    fn vorps(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4377        self.emit(VORPS512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4378    }
4379}
4380
4381/// `VORPS_MASK` (VORPS). 
4382/// Performs a bitwise logical OR of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
4383///
4384///
4385/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPS.html).
4386///
4387/// Supported operand variants:
4388///
4389/// ```text
4390/// +---+---------------+
4391/// | # | Operands      |
4392/// +---+---------------+
4393/// | 1 | Xmm, Xmm, Mem |
4394/// | 2 | Xmm, Xmm, Xmm |
4395/// | 3 | Ymm, Ymm, Mem |
4396/// | 4 | Ymm, Ymm, Ymm |
4397/// | 5 | Zmm, Zmm, Mem |
4398/// | 6 | Zmm, Zmm, Zmm |
4399/// +---+---------------+
4400/// ```
4401pub trait VorpsMaskEmitter<A, B, C> {
4402    fn vorps_mask(&mut self, op0: A, op1: B, op2: C);
4403}
4404
4405impl<'a> VorpsMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4406    fn vorps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4407        self.emit(VORPS128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4408    }
4409}
4410
4411impl<'a> VorpsMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4412    fn vorps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4413        self.emit(VORPS128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4414    }
4415}
4416
4417impl<'a> VorpsMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4418    fn vorps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4419        self.emit(VORPS256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4420    }
4421}
4422
4423impl<'a> VorpsMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4424    fn vorps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4425        self.emit(VORPS256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4426    }
4427}
4428
4429impl<'a> VorpsMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4430    fn vorps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4431        self.emit(VORPS512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4432    }
4433}
4434
4435impl<'a> VorpsMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4436    fn vorps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4437        self.emit(VORPS512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4438    }
4439}
4440
4441/// `VORPS_MASKZ` (VORPS). 
4442/// Performs a bitwise logical OR of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
4443///
4444///
4445/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPS.html).
4446///
4447/// Supported operand variants:
4448///
4449/// ```text
4450/// +---+---------------+
4451/// | # | Operands      |
4452/// +---+---------------+
4453/// | 1 | Xmm, Xmm, Mem |
4454/// | 2 | Xmm, Xmm, Xmm |
4455/// | 3 | Ymm, Ymm, Mem |
4456/// | 4 | Ymm, Ymm, Ymm |
4457/// | 5 | Zmm, Zmm, Mem |
4458/// | 6 | Zmm, Zmm, Zmm |
4459/// +---+---------------+
4460/// ```
4461pub trait VorpsMaskzEmitter<A, B, C> {
4462    fn vorps_maskz(&mut self, op0: A, op1: B, op2: C);
4463}
4464
4465impl<'a> VorpsMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4466    fn vorps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4467        self.emit(VORPS128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4468    }
4469}
4470
4471impl<'a> VorpsMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4472    fn vorps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4473        self.emit(VORPS128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4474    }
4475}
4476
4477impl<'a> VorpsMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4478    fn vorps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4479        self.emit(VORPS256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4480    }
4481}
4482
4483impl<'a> VorpsMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4484    fn vorps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4485        self.emit(VORPS256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4486    }
4487}
4488
4489impl<'a> VorpsMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4490    fn vorps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4491        self.emit(VORPS512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4492    }
4493}
4494
4495impl<'a> VorpsMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4496    fn vorps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4497        self.emit(VORPS512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4498    }
4499}
4500
4501/// `VPMOVD2M` (VPMOVD2M). 
4502/// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
4503///
4504///
4505/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
4506///
4507/// Supported operand variants:
4508///
4509/// ```text
4510/// +---+-----------+
4511/// | # | Operands  |
4512/// +---+-----------+
4513/// | 1 | KReg, Xmm |
4514/// | 2 | KReg, Ymm |
4515/// | 3 | KReg, Zmm |
4516/// +---+-----------+
4517/// ```
4518pub trait Vpmovd2mEmitter<A, B> {
4519    fn vpmovd2m(&mut self, op0: A, op1: B);
4520}
4521
4522impl<'a> Vpmovd2mEmitter<KReg, Xmm> for Assembler<'a> {
4523    fn vpmovd2m(&mut self, op0: KReg, op1: Xmm) {
4524        self.emit(VPMOVD2M128KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4525    }
4526}
4527
4528impl<'a> Vpmovd2mEmitter<KReg, Ymm> for Assembler<'a> {
4529    fn vpmovd2m(&mut self, op0: KReg, op1: Ymm) {
4530        self.emit(VPMOVD2M256KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4531    }
4532}
4533
4534impl<'a> Vpmovd2mEmitter<KReg, Zmm> for Assembler<'a> {
4535    fn vpmovd2m(&mut self, op0: KReg, op1: Zmm) {
4536        self.emit(VPMOVD2M512KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4537    }
4538}
4539
4540/// `VPMOVM2D` (VPMOVM2D). 
4541/// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
4542///
4543///
4544/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
4545///
4546/// Supported operand variants:
4547///
4548/// ```text
4549/// +---+-----------+
4550/// | # | Operands  |
4551/// +---+-----------+
4552/// | 1 | Xmm, KReg |
4553/// | 2 | Ymm, KReg |
4554/// | 3 | Zmm, KReg |
4555/// +---+-----------+
4556/// ```
4557pub trait Vpmovm2dEmitter<A, B> {
4558    fn vpmovm2d(&mut self, op0: A, op1: B);
4559}
4560
4561impl<'a> Vpmovm2dEmitter<Xmm, KReg> for Assembler<'a> {
4562    fn vpmovm2d(&mut self, op0: Xmm, op1: KReg) {
4563        self.emit(VPMOVM2D128RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4564    }
4565}
4566
4567impl<'a> Vpmovm2dEmitter<Ymm, KReg> for Assembler<'a> {
4568    fn vpmovm2d(&mut self, op0: Ymm, op1: KReg) {
4569        self.emit(VPMOVM2D256RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4570    }
4571}
4572
4573impl<'a> Vpmovm2dEmitter<Zmm, KReg> for Assembler<'a> {
4574    fn vpmovm2d(&mut self, op0: Zmm, op1: KReg) {
4575        self.emit(VPMOVM2D512RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4576    }
4577}
4578
4579/// `VPMOVM2Q` (VPMOVM2Q). 
4580/// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
4581///
4582///
4583/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
4584///
4585/// Supported operand variants:
4586///
4587/// ```text
4588/// +---+-----------+
4589/// | # | Operands  |
4590/// +---+-----------+
4591/// | 1 | Xmm, KReg |
4592/// | 2 | Ymm, KReg |
4593/// | 3 | Zmm, KReg |
4594/// +---+-----------+
4595/// ```
4596pub trait Vpmovm2qEmitter<A, B> {
4597    fn vpmovm2q(&mut self, op0: A, op1: B);
4598}
4599
4600impl<'a> Vpmovm2qEmitter<Xmm, KReg> for Assembler<'a> {
4601    fn vpmovm2q(&mut self, op0: Xmm, op1: KReg) {
4602        self.emit(VPMOVM2Q128RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4603    }
4604}
4605
4606impl<'a> Vpmovm2qEmitter<Ymm, KReg> for Assembler<'a> {
4607    fn vpmovm2q(&mut self, op0: Ymm, op1: KReg) {
4608        self.emit(VPMOVM2Q256RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4609    }
4610}
4611
4612impl<'a> Vpmovm2qEmitter<Zmm, KReg> for Assembler<'a> {
4613    fn vpmovm2q(&mut self, op0: Zmm, op1: KReg) {
4614        self.emit(VPMOVM2Q512RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4615    }
4616}
4617
4618/// `VPMOVQ2M` (VPMOVQ2M). 
4619/// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
4620///
4621///
4622/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
4623///
4624/// Supported operand variants:
4625///
4626/// ```text
4627/// +---+-----------+
4628/// | # | Operands  |
4629/// +---+-----------+
4630/// | 1 | KReg, Xmm |
4631/// | 2 | KReg, Ymm |
4632/// | 3 | KReg, Zmm |
4633/// +---+-----------+
4634/// ```
4635pub trait Vpmovq2mEmitter<A, B> {
4636    fn vpmovq2m(&mut self, op0: A, op1: B);
4637}
4638
4639impl<'a> Vpmovq2mEmitter<KReg, Xmm> for Assembler<'a> {
4640    fn vpmovq2m(&mut self, op0: KReg, op1: Xmm) {
4641        self.emit(VPMOVQ2M128KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4642    }
4643}
4644
4645impl<'a> Vpmovq2mEmitter<KReg, Ymm> for Assembler<'a> {
4646    fn vpmovq2m(&mut self, op0: KReg, op1: Ymm) {
4647        self.emit(VPMOVQ2M256KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4648    }
4649}
4650
4651impl<'a> Vpmovq2mEmitter<KReg, Zmm> for Assembler<'a> {
4652    fn vpmovq2m(&mut self, op0: KReg, op1: Zmm) {
4653        self.emit(VPMOVQ2M512KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4654    }
4655}
4656
4657/// `VPMULLD` (VPMULLD). 
4658/// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
4659///
4660///
4661/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
4662///
4663/// Supported operand variants:
4664///
4665/// ```text
4666/// +---+---------------+
4667/// | # | Operands      |
4668/// +---+---------------+
4669/// | 1 | Xmm, Xmm, Mem |
4670/// | 2 | Xmm, Xmm, Xmm |
4671/// | 3 | Ymm, Ymm, Mem |
4672/// | 4 | Ymm, Ymm, Ymm |
4673/// | 5 | Zmm, Zmm, Mem |
4674/// | 6 | Zmm, Zmm, Zmm |
4675/// +---+---------------+
4676/// ```
4677pub trait VpmulldEmitter<A, B, C> {
4678    fn vpmulld(&mut self, op0: A, op1: B, op2: C);
4679}
4680
4681impl<'a> VpmulldEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4682    fn vpmulld(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4683        self.emit(VPMULLD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4684    }
4685}
4686
4687impl<'a> VpmulldEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4688    fn vpmulld(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4689        self.emit(VPMULLD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4690    }
4691}
4692
4693impl<'a> VpmulldEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4694    fn vpmulld(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4695        self.emit(VPMULLD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4696    }
4697}
4698
4699impl<'a> VpmulldEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4700    fn vpmulld(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4701        self.emit(VPMULLD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4702    }
4703}
4704
4705impl<'a> VpmulldEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4706    fn vpmulld(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4707        self.emit(VPMULLD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4708    }
4709}
4710
4711impl<'a> VpmulldEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4712    fn vpmulld(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4713        self.emit(VPMULLD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4714    }
4715}
4716
4717/// `VPMULLD_MASK` (VPMULLD). 
4718/// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
4719///
4720///
4721/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
4722///
4723/// Supported operand variants:
4724///
4725/// ```text
4726/// +---+---------------+
4727/// | # | Operands      |
4728/// +---+---------------+
4729/// | 1 | Xmm, Xmm, Mem |
4730/// | 2 | Xmm, Xmm, Xmm |
4731/// | 3 | Ymm, Ymm, Mem |
4732/// | 4 | Ymm, Ymm, Ymm |
4733/// | 5 | Zmm, Zmm, Mem |
4734/// | 6 | Zmm, Zmm, Zmm |
4735/// +---+---------------+
4736/// ```
4737pub trait VpmulldMaskEmitter<A, B, C> {
4738    fn vpmulld_mask(&mut self, op0: A, op1: B, op2: C);
4739}
4740
4741impl<'a> VpmulldMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4742    fn vpmulld_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4743        self.emit(VPMULLD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4744    }
4745}
4746
4747impl<'a> VpmulldMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4748    fn vpmulld_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4749        self.emit(VPMULLD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4750    }
4751}
4752
4753impl<'a> VpmulldMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4754    fn vpmulld_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4755        self.emit(VPMULLD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4756    }
4757}
4758
4759impl<'a> VpmulldMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4760    fn vpmulld_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4761        self.emit(VPMULLD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4762    }
4763}
4764
4765impl<'a> VpmulldMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4766    fn vpmulld_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4767        self.emit(VPMULLD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4768    }
4769}
4770
4771impl<'a> VpmulldMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4772    fn vpmulld_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4773        self.emit(VPMULLD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4774    }
4775}
4776
4777/// `VPMULLD_MASKZ` (VPMULLD). 
4778/// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
4779///
4780///
4781/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
4782///
4783/// Supported operand variants:
4784///
4785/// ```text
4786/// +---+---------------+
4787/// | # | Operands      |
4788/// +---+---------------+
4789/// | 1 | Xmm, Xmm, Mem |
4790/// | 2 | Xmm, Xmm, Xmm |
4791/// | 3 | Ymm, Ymm, Mem |
4792/// | 4 | Ymm, Ymm, Ymm |
4793/// | 5 | Zmm, Zmm, Mem |
4794/// | 6 | Zmm, Zmm, Zmm |
4795/// +---+---------------+
4796/// ```
4797pub trait VpmulldMaskzEmitter<A, B, C> {
4798    fn vpmulld_maskz(&mut self, op0: A, op1: B, op2: C);
4799}
4800
4801impl<'a> VpmulldMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4802    fn vpmulld_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4803        self.emit(VPMULLD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4804    }
4805}
4806
4807impl<'a> VpmulldMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4808    fn vpmulld_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4809        self.emit(VPMULLD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4810    }
4811}
4812
4813impl<'a> VpmulldMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4814    fn vpmulld_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4815        self.emit(VPMULLD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4816    }
4817}
4818
4819impl<'a> VpmulldMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4820    fn vpmulld_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4821        self.emit(VPMULLD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4822    }
4823}
4824
4825impl<'a> VpmulldMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4826    fn vpmulld_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4827        self.emit(VPMULLD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4828    }
4829}
4830
4831impl<'a> VpmulldMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4832    fn vpmulld_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4833        self.emit(VPMULLD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4834    }
4835}
4836
4837/// `VPMULLQ` (VPMULLQ). 
4838/// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
4839///
4840///
4841/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
4842///
4843/// Supported operand variants:
4844///
4845/// ```text
4846/// +---+---------------+
4847/// | # | Operands      |
4848/// +---+---------------+
4849/// | 1 | Xmm, Xmm, Mem |
4850/// | 2 | Xmm, Xmm, Xmm |
4851/// | 3 | Ymm, Ymm, Mem |
4852/// | 4 | Ymm, Ymm, Ymm |
4853/// | 5 | Zmm, Zmm, Mem |
4854/// | 6 | Zmm, Zmm, Zmm |
4855/// +---+---------------+
4856/// ```
4857pub trait VpmullqEmitter<A, B, C> {
4858    fn vpmullq(&mut self, op0: A, op1: B, op2: C);
4859}
4860
4861impl<'a> VpmullqEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4862    fn vpmullq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4863        self.emit(VPMULLQ128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4864    }
4865}
4866
4867impl<'a> VpmullqEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4868    fn vpmullq(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4869        self.emit(VPMULLQ128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4870    }
4871}
4872
4873impl<'a> VpmullqEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4874    fn vpmullq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4875        self.emit(VPMULLQ256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4876    }
4877}
4878
4879impl<'a> VpmullqEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4880    fn vpmullq(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4881        self.emit(VPMULLQ256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4882    }
4883}
4884
4885impl<'a> VpmullqEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4886    fn vpmullq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4887        self.emit(VPMULLQ512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4888    }
4889}
4890
4891impl<'a> VpmullqEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4892    fn vpmullq(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4893        self.emit(VPMULLQ512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4894    }
4895}
4896
4897/// `VPMULLQ_MASK` (VPMULLQ). 
4898/// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
4899///
4900///
4901/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
4902///
4903/// Supported operand variants:
4904///
4905/// ```text
4906/// +---+---------------+
4907/// | # | Operands      |
4908/// +---+---------------+
4909/// | 1 | Xmm, Xmm, Mem |
4910/// | 2 | Xmm, Xmm, Xmm |
4911/// | 3 | Ymm, Ymm, Mem |
4912/// | 4 | Ymm, Ymm, Ymm |
4913/// | 5 | Zmm, Zmm, Mem |
4914/// | 6 | Zmm, Zmm, Zmm |
4915/// +---+---------------+
4916/// ```
4917pub trait VpmullqMaskEmitter<A, B, C> {
4918    fn vpmullq_mask(&mut self, op0: A, op1: B, op2: C);
4919}
4920
4921impl<'a> VpmullqMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4922    fn vpmullq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4923        self.emit(VPMULLQ128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4924    }
4925}
4926
4927impl<'a> VpmullqMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4928    fn vpmullq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4929        self.emit(VPMULLQ128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4930    }
4931}
4932
4933impl<'a> VpmullqMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4934    fn vpmullq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4935        self.emit(VPMULLQ256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4936    }
4937}
4938
4939impl<'a> VpmullqMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4940    fn vpmullq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4941        self.emit(VPMULLQ256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4942    }
4943}
4944
4945impl<'a> VpmullqMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4946    fn vpmullq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4947        self.emit(VPMULLQ512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4948    }
4949}
4950
4951impl<'a> VpmullqMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4952    fn vpmullq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4953        self.emit(VPMULLQ512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4954    }
4955}
4956
4957/// `VPMULLQ_MASKZ` (VPMULLQ). 
4958/// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
4959///
4960///
4961/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
4962///
4963/// Supported operand variants:
4964///
4965/// ```text
4966/// +---+---------------+
4967/// | # | Operands      |
4968/// +---+---------------+
4969/// | 1 | Xmm, Xmm, Mem |
4970/// | 2 | Xmm, Xmm, Xmm |
4971/// | 3 | Ymm, Ymm, Mem |
4972/// | 4 | Ymm, Ymm, Ymm |
4973/// | 5 | Zmm, Zmm, Mem |
4974/// | 6 | Zmm, Zmm, Zmm |
4975/// +---+---------------+
4976/// ```
4977pub trait VpmullqMaskzEmitter<A, B, C> {
4978    fn vpmullq_maskz(&mut self, op0: A, op1: B, op2: C);
4979}
4980
4981impl<'a> VpmullqMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4982    fn vpmullq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4983        self.emit(VPMULLQ128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4984    }
4985}
4986
4987impl<'a> VpmullqMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4988    fn vpmullq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4989        self.emit(VPMULLQ128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4990    }
4991}
4992
4993impl<'a> VpmullqMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4994    fn vpmullq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4995        self.emit(VPMULLQ256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4996    }
4997}
4998
4999impl<'a> VpmullqMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5000    fn vpmullq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5001        self.emit(VPMULLQ256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5002    }
5003}
5004
5005impl<'a> VpmullqMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5006    fn vpmullq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5007        self.emit(VPMULLQ512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5008    }
5009}
5010
5011impl<'a> VpmullqMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5012    fn vpmullq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5013        self.emit(VPMULLQ512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5014    }
5015}
5016
5017/// `VRANGEPD` (VRANGEPD). 
5018/// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5019///
5020///
5021/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
5022///
5023/// Supported operand variants:
5024///
5025/// ```text
5026/// +---+--------------------+
5027/// | # | Operands           |
5028/// +---+--------------------+
5029/// | 1 | Xmm, Xmm, Mem, Imm |
5030/// | 2 | Xmm, Xmm, Xmm, Imm |
5031/// | 3 | Ymm, Ymm, Mem, Imm |
5032/// | 4 | Ymm, Ymm, Ymm, Imm |
5033/// | 5 | Zmm, Zmm, Mem, Imm |
5034/// | 6 | Zmm, Zmm, Zmm, Imm |
5035/// +---+--------------------+
5036/// ```
5037pub trait VrangepdEmitter<A, B, C, D> {
5038    fn vrangepd(&mut self, op0: A, op1: B, op2: C, op3: D);
5039}
5040
5041impl<'a> VrangepdEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5042    fn vrangepd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5043        self.emit(VRANGEPD128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5044    }
5045}
5046
5047impl<'a> VrangepdEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5048    fn vrangepd(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5049        self.emit(VRANGEPD128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5050    }
5051}
5052
5053impl<'a> VrangepdEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
5054    fn vrangepd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
5055        self.emit(VRANGEPD256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5056    }
5057}
5058
5059impl<'a> VrangepdEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
5060    fn vrangepd(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
5061        self.emit(VRANGEPD256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5062    }
5063}
5064
5065impl<'a> VrangepdEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5066    fn vrangepd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5067        self.emit(VRANGEPD512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5068    }
5069}
5070
5071impl<'a> VrangepdEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
5072    fn vrangepd(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
5073        self.emit(VRANGEPD512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5074    }
5075}
5076
5077/// `VRANGEPD_MASK` (VRANGEPD). 
5078/// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5079///
5080///
5081/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
5082///
5083/// Supported operand variants:
5084///
5085/// ```text
5086/// +---+--------------------+
5087/// | # | Operands           |
5088/// +---+--------------------+
5089/// | 1 | Xmm, Xmm, Mem, Imm |
5090/// | 2 | Xmm, Xmm, Xmm, Imm |
5091/// | 3 | Ymm, Ymm, Mem, Imm |
5092/// | 4 | Ymm, Ymm, Ymm, Imm |
5093/// | 5 | Zmm, Zmm, Mem, Imm |
5094/// | 6 | Zmm, Zmm, Zmm, Imm |
5095/// +---+--------------------+
5096/// ```
5097pub trait VrangepdMaskEmitter<A, B, C, D> {
5098    fn vrangepd_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5099}
5100
5101impl<'a> VrangepdMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5102    fn vrangepd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5103        self.emit(VRANGEPD128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5104    }
5105}
5106
5107impl<'a> VrangepdMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5108    fn vrangepd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5109        self.emit(VRANGEPD128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5110    }
5111}
5112
5113impl<'a> VrangepdMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
5114    fn vrangepd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
5115        self.emit(VRANGEPD256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5116    }
5117}
5118
5119impl<'a> VrangepdMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
5120    fn vrangepd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
5121        self.emit(VRANGEPD256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5122    }
5123}
5124
5125impl<'a> VrangepdMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5126    fn vrangepd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5127        self.emit(VRANGEPD512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5128    }
5129}
5130
5131impl<'a> VrangepdMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
5132    fn vrangepd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
5133        self.emit(VRANGEPD512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5134    }
5135}
5136
5137/// `VRANGEPD_MASK_SAE` (VRANGEPD). 
5138/// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5139///
5140///
5141/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
5142///
5143/// Supported operand variants:
5144///
5145/// ```text
5146/// +---+--------------------+
5147/// | # | Operands           |
5148/// +---+--------------------+
5149/// | 1 | Zmm, Zmm, Zmm, Imm |
5150/// +---+--------------------+
5151/// ```
5152pub trait VrangepdMaskSaeEmitter<A, B, C, D> {
5153    fn vrangepd_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5154}
5155
5156impl<'a> VrangepdMaskSaeEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5157    fn vrangepd_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5158        self.emit(VRANGEPD512RRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5159    }
5160}
5161
5162/// `VRANGEPD_MASKZ` (VRANGEPD). 
5163/// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5164///
5165///
5166/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
5167///
5168/// Supported operand variants:
5169///
5170/// ```text
5171/// +---+--------------------+
5172/// | # | Operands           |
5173/// +---+--------------------+
5174/// | 1 | Xmm, Xmm, Mem, Imm |
5175/// | 2 | Xmm, Xmm, Xmm, Imm |
5176/// | 3 | Ymm, Ymm, Mem, Imm |
5177/// | 4 | Ymm, Ymm, Ymm, Imm |
5178/// | 5 | Zmm, Zmm, Mem, Imm |
5179/// | 6 | Zmm, Zmm, Zmm, Imm |
5180/// +---+--------------------+
5181/// ```
5182pub trait VrangepdMaskzEmitter<A, B, C, D> {
5183    fn vrangepd_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
5184}
5185
5186impl<'a> VrangepdMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5187    fn vrangepd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5188        self.emit(VRANGEPD128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5189    }
5190}
5191
5192impl<'a> VrangepdMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5193    fn vrangepd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5194        self.emit(VRANGEPD128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5195    }
5196}
5197
5198impl<'a> VrangepdMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
5199    fn vrangepd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
5200        self.emit(VRANGEPD256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5201    }
5202}
5203
5204impl<'a> VrangepdMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
5205    fn vrangepd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
5206        self.emit(VRANGEPD256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5207    }
5208}
5209
5210impl<'a> VrangepdMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5211    fn vrangepd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5212        self.emit(VRANGEPD512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5213    }
5214}
5215
5216impl<'a> VrangepdMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
5217    fn vrangepd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
5218        self.emit(VRANGEPD512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5219    }
5220}
5221
5222/// `VRANGEPD_MASKZ_SAE` (VRANGEPD). 
5223/// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5224///
5225///
5226/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
5227///
5228/// Supported operand variants:
5229///
5230/// ```text
5231/// +---+--------------------+
5232/// | # | Operands           |
5233/// +---+--------------------+
5234/// | 1 | Zmm, Zmm, Zmm, Imm |
5235/// +---+--------------------+
5236/// ```
5237pub trait VrangepdMaskzSaeEmitter<A, B, C, D> {
5238    fn vrangepd_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5239}
5240
5241impl<'a> VrangepdMaskzSaeEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5242    fn vrangepd_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5243        self.emit(VRANGEPD512RRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5244    }
5245}
5246
5247/// `VRANGEPD_SAE` (VRANGEPD). 
5248/// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5249///
5250///
5251/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
5252///
5253/// Supported operand variants:
5254///
5255/// ```text
5256/// +---+--------------------+
5257/// | # | Operands           |
5258/// +---+--------------------+
5259/// | 1 | Zmm, Zmm, Zmm, Imm |
5260/// +---+--------------------+
5261/// ```
5262pub trait VrangepdSaeEmitter<A, B, C, D> {
5263    fn vrangepd_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5264}
5265
5266impl<'a> VrangepdSaeEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5267    fn vrangepd_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5268        self.emit(VRANGEPD512RRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5269    }
5270}
5271
5272/// `VRANGEPS` (VRANGEPS). 
5273/// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5274///
5275///
5276/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
5277///
5278/// Supported operand variants:
5279///
5280/// ```text
5281/// +---+--------------------+
5282/// | # | Operands           |
5283/// +---+--------------------+
5284/// | 1 | Xmm, Xmm, Mem, Imm |
5285/// | 2 | Xmm, Xmm, Xmm, Imm |
5286/// | 3 | Ymm, Ymm, Mem, Imm |
5287/// | 4 | Ymm, Ymm, Ymm, Imm |
5288/// | 5 | Zmm, Zmm, Mem, Imm |
5289/// | 6 | Zmm, Zmm, Zmm, Imm |
5290/// +---+--------------------+
5291/// ```
5292pub trait VrangepsEmitter<A, B, C, D> {
5293    fn vrangeps(&mut self, op0: A, op1: B, op2: C, op3: D);
5294}
5295
5296impl<'a> VrangepsEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5297    fn vrangeps(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5298        self.emit(VRANGEPS128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5299    }
5300}
5301
5302impl<'a> VrangepsEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5303    fn vrangeps(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5304        self.emit(VRANGEPS128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5305    }
5306}
5307
5308impl<'a> VrangepsEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
5309    fn vrangeps(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
5310        self.emit(VRANGEPS256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5311    }
5312}
5313
5314impl<'a> VrangepsEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
5315    fn vrangeps(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
5316        self.emit(VRANGEPS256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5317    }
5318}
5319
5320impl<'a> VrangepsEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5321    fn vrangeps(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5322        self.emit(VRANGEPS512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5323    }
5324}
5325
5326impl<'a> VrangepsEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
5327    fn vrangeps(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
5328        self.emit(VRANGEPS512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5329    }
5330}
5331
5332/// `VRANGEPS_MASK` (VRANGEPS). 
5333/// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5334///
5335///
5336/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
5337///
5338/// Supported operand variants:
5339///
5340/// ```text
5341/// +---+--------------------+
5342/// | # | Operands           |
5343/// +---+--------------------+
5344/// | 1 | Xmm, Xmm, Mem, Imm |
5345/// | 2 | Xmm, Xmm, Xmm, Imm |
5346/// | 3 | Ymm, Ymm, Mem, Imm |
5347/// | 4 | Ymm, Ymm, Ymm, Imm |
5348/// | 5 | Zmm, Zmm, Mem, Imm |
5349/// | 6 | Zmm, Zmm, Zmm, Imm |
5350/// +---+--------------------+
5351/// ```
5352pub trait VrangepsMaskEmitter<A, B, C, D> {
5353    fn vrangeps_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5354}
5355
5356impl<'a> VrangepsMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5357    fn vrangeps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5358        self.emit(VRANGEPS128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5359    }
5360}
5361
5362impl<'a> VrangepsMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5363    fn vrangeps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5364        self.emit(VRANGEPS128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5365    }
5366}
5367
5368impl<'a> VrangepsMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
5369    fn vrangeps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
5370        self.emit(VRANGEPS256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5371    }
5372}
5373
5374impl<'a> VrangepsMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
5375    fn vrangeps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
5376        self.emit(VRANGEPS256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5377    }
5378}
5379
5380impl<'a> VrangepsMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5381    fn vrangeps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5382        self.emit(VRANGEPS512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5383    }
5384}
5385
5386impl<'a> VrangepsMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
5387    fn vrangeps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
5388        self.emit(VRANGEPS512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5389    }
5390}
5391
5392/// `VRANGEPS_MASK_SAE` (VRANGEPS). 
5393/// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5394///
5395///
5396/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
5397///
5398/// Supported operand variants:
5399///
5400/// ```text
5401/// +---+--------------------+
5402/// | # | Operands           |
5403/// +---+--------------------+
5404/// | 1 | Zmm, Zmm, Zmm, Imm |
5405/// +---+--------------------+
5406/// ```
5407pub trait VrangepsMaskSaeEmitter<A, B, C, D> {
5408    fn vrangeps_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5409}
5410
5411impl<'a> VrangepsMaskSaeEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5412    fn vrangeps_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5413        self.emit(VRANGEPS512RRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5414    }
5415}
5416
5417/// `VRANGEPS_MASKZ` (VRANGEPS). 
5418/// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5419///
5420///
5421/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
5422///
5423/// Supported operand variants:
5424///
5425/// ```text
5426/// +---+--------------------+
5427/// | # | Operands           |
5428/// +---+--------------------+
5429/// | 1 | Xmm, Xmm, Mem, Imm |
5430/// | 2 | Xmm, Xmm, Xmm, Imm |
5431/// | 3 | Ymm, Ymm, Mem, Imm |
5432/// | 4 | Ymm, Ymm, Ymm, Imm |
5433/// | 5 | Zmm, Zmm, Mem, Imm |
5434/// | 6 | Zmm, Zmm, Zmm, Imm |
5435/// +---+--------------------+
5436/// ```
5437pub trait VrangepsMaskzEmitter<A, B, C, D> {
5438    fn vrangeps_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
5439}
5440
5441impl<'a> VrangepsMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5442    fn vrangeps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5443        self.emit(VRANGEPS128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5444    }
5445}
5446
5447impl<'a> VrangepsMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5448    fn vrangeps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5449        self.emit(VRANGEPS128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5450    }
5451}
5452
5453impl<'a> VrangepsMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
5454    fn vrangeps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
5455        self.emit(VRANGEPS256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5456    }
5457}
5458
5459impl<'a> VrangepsMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
5460    fn vrangeps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
5461        self.emit(VRANGEPS256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5462    }
5463}
5464
5465impl<'a> VrangepsMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5466    fn vrangeps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5467        self.emit(VRANGEPS512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5468    }
5469}
5470
5471impl<'a> VrangepsMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
5472    fn vrangeps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
5473        self.emit(VRANGEPS512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5474    }
5475}
5476
5477/// `VRANGEPS_MASKZ_SAE` (VRANGEPS). 
5478/// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5479///
5480///
5481/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
5482///
5483/// Supported operand variants:
5484///
5485/// ```text
5486/// +---+--------------------+
5487/// | # | Operands           |
5488/// +---+--------------------+
5489/// | 1 | Zmm, Zmm, Zmm, Imm |
5490/// +---+--------------------+
5491/// ```
5492pub trait VrangepsMaskzSaeEmitter<A, B, C, D> {
5493    fn vrangeps_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5494}
5495
5496impl<'a> VrangepsMaskzSaeEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5497    fn vrangeps_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5498        self.emit(VRANGEPS512RRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5499    }
5500}
5501
5502/// `VRANGEPS_SAE` (VRANGEPS). 
5503/// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
5504///
5505///
5506/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
5507///
5508/// Supported operand variants:
5509///
5510/// ```text
5511/// +---+--------------------+
5512/// | # | Operands           |
5513/// +---+--------------------+
5514/// | 1 | Zmm, Zmm, Zmm, Imm |
5515/// +---+--------------------+
5516/// ```
5517pub trait VrangepsSaeEmitter<A, B, C, D> {
5518    fn vrangeps_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5519}
5520
5521impl<'a> VrangepsSaeEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
5522    fn vrangeps_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
5523        self.emit(VRANGEPS512RRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5524    }
5525}
5526
5527/// `VRANGESD` (VRANGESD). 
5528/// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
5529///
5530///
5531/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
5532///
5533/// Supported operand variants:
5534///
5535/// ```text
5536/// +---+--------------------+
5537/// | # | Operands           |
5538/// +---+--------------------+
5539/// | 1 | Xmm, Xmm, Mem, Imm |
5540/// | 2 | Xmm, Xmm, Xmm, Imm |
5541/// +---+--------------------+
5542/// ```
5543pub trait VrangesdEmitter<A, B, C, D> {
5544    fn vrangesd(&mut self, op0: A, op1: B, op2: C, op3: D);
5545}
5546
5547impl<'a> VrangesdEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5548    fn vrangesd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5549        self.emit(VRANGESDRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5550    }
5551}
5552
5553impl<'a> VrangesdEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5554    fn vrangesd(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5555        self.emit(VRANGESDRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5556    }
5557}
5558
5559/// `VRANGESD_MASK` (VRANGESD). 
5560/// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
5561///
5562///
5563/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
5564///
5565/// Supported operand variants:
5566///
5567/// ```text
5568/// +---+--------------------+
5569/// | # | Operands           |
5570/// +---+--------------------+
5571/// | 1 | Xmm, Xmm, Mem, Imm |
5572/// | 2 | Xmm, Xmm, Xmm, Imm |
5573/// +---+--------------------+
5574/// ```
5575pub trait VrangesdMaskEmitter<A, B, C, D> {
5576    fn vrangesd_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5577}
5578
5579impl<'a> VrangesdMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5580    fn vrangesd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5581        self.emit(VRANGESDRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5582    }
5583}
5584
5585impl<'a> VrangesdMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5586    fn vrangesd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5587        self.emit(VRANGESDRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5588    }
5589}
5590
5591/// `VRANGESD_MASK_SAE` (VRANGESD). 
5592/// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
5593///
5594///
5595/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
5596///
5597/// Supported operand variants:
5598///
5599/// ```text
5600/// +---+--------------------+
5601/// | # | Operands           |
5602/// +---+--------------------+
5603/// | 1 | Xmm, Xmm, Xmm, Imm |
5604/// +---+--------------------+
5605/// ```
5606pub trait VrangesdMaskSaeEmitter<A, B, C, D> {
5607    fn vrangesd_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5608}
5609
5610impl<'a> VrangesdMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5611    fn vrangesd_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5612        self.emit(VRANGESDRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5613    }
5614}
5615
5616/// `VRANGESD_MASKZ` (VRANGESD). 
5617/// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
5618///
5619///
5620/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
5621///
5622/// Supported operand variants:
5623///
5624/// ```text
5625/// +---+--------------------+
5626/// | # | Operands           |
5627/// +---+--------------------+
5628/// | 1 | Xmm, Xmm, Mem, Imm |
5629/// | 2 | Xmm, Xmm, Xmm, Imm |
5630/// +---+--------------------+
5631/// ```
5632pub trait VrangesdMaskzEmitter<A, B, C, D> {
5633    fn vrangesd_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
5634}
5635
5636impl<'a> VrangesdMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5637    fn vrangesd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5638        self.emit(VRANGESDRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5639    }
5640}
5641
5642impl<'a> VrangesdMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5643    fn vrangesd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5644        self.emit(VRANGESDRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5645    }
5646}
5647
5648/// `VRANGESD_MASKZ_SAE` (VRANGESD). 
5649/// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
5650///
5651///
5652/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
5653///
5654/// Supported operand variants:
5655///
5656/// ```text
5657/// +---+--------------------+
5658/// | # | Operands           |
5659/// +---+--------------------+
5660/// | 1 | Xmm, Xmm, Xmm, Imm |
5661/// +---+--------------------+
5662/// ```
5663pub trait VrangesdMaskzSaeEmitter<A, B, C, D> {
5664    fn vrangesd_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5665}
5666
5667impl<'a> VrangesdMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5668    fn vrangesd_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5669        self.emit(VRANGESDRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5670    }
5671}
5672
5673/// `VRANGESD_SAE` (VRANGESD). 
5674/// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
5675///
5676///
5677/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
5678///
5679/// Supported operand variants:
5680///
5681/// ```text
5682/// +---+--------------------+
5683/// | # | Operands           |
5684/// +---+--------------------+
5685/// | 1 | Xmm, Xmm, Xmm, Imm |
5686/// +---+--------------------+
5687/// ```
5688pub trait VrangesdSaeEmitter<A, B, C, D> {
5689    fn vrangesd_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5690}
5691
5692impl<'a> VrangesdSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5693    fn vrangesd_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5694        self.emit(VRANGESDRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5695    }
5696}
5697
5698/// `VRANGESS` (VRANGESS). 
5699/// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
5700///
5701///
5702/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
5703///
5704/// Supported operand variants:
5705///
5706/// ```text
5707/// +---+--------------------+
5708/// | # | Operands           |
5709/// +---+--------------------+
5710/// | 1 | Xmm, Xmm, Mem, Imm |
5711/// | 2 | Xmm, Xmm, Xmm, Imm |
5712/// +---+--------------------+
5713/// ```
5714pub trait VrangessEmitter<A, B, C, D> {
5715    fn vrangess(&mut self, op0: A, op1: B, op2: C, op3: D);
5716}
5717
5718impl<'a> VrangessEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5719    fn vrangess(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5720        self.emit(VRANGESSRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5721    }
5722}
5723
5724impl<'a> VrangessEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5725    fn vrangess(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5726        self.emit(VRANGESSRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5727    }
5728}
5729
5730/// `VRANGESS_MASK` (VRANGESS). 
5731/// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
5732///
5733///
5734/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
5735///
5736/// Supported operand variants:
5737///
5738/// ```text
5739/// +---+--------------------+
5740/// | # | Operands           |
5741/// +---+--------------------+
5742/// | 1 | Xmm, Xmm, Mem, Imm |
5743/// | 2 | Xmm, Xmm, Xmm, Imm |
5744/// +---+--------------------+
5745/// ```
5746pub trait VrangessMaskEmitter<A, B, C, D> {
5747    fn vrangess_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5748}
5749
5750impl<'a> VrangessMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5751    fn vrangess_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5752        self.emit(VRANGESSRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5753    }
5754}
5755
5756impl<'a> VrangessMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5757    fn vrangess_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5758        self.emit(VRANGESSRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5759    }
5760}
5761
5762/// `VRANGESS_MASK_SAE` (VRANGESS). 
5763/// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
5764///
5765///
5766/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
5767///
5768/// Supported operand variants:
5769///
5770/// ```text
5771/// +---+--------------------+
5772/// | # | Operands           |
5773/// +---+--------------------+
5774/// | 1 | Xmm, Xmm, Xmm, Imm |
5775/// +---+--------------------+
5776/// ```
5777pub trait VrangessMaskSaeEmitter<A, B, C, D> {
5778    fn vrangess_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5779}
5780
5781impl<'a> VrangessMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5782    fn vrangess_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5783        self.emit(VRANGESSRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5784    }
5785}
5786
5787/// `VRANGESS_MASKZ` (VRANGESS). 
5788/// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
5789///
5790///
5791/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
5792///
5793/// Supported operand variants:
5794///
5795/// ```text
5796/// +---+--------------------+
5797/// | # | Operands           |
5798/// +---+--------------------+
5799/// | 1 | Xmm, Xmm, Mem, Imm |
5800/// | 2 | Xmm, Xmm, Xmm, Imm |
5801/// +---+--------------------+
5802/// ```
5803pub trait VrangessMaskzEmitter<A, B, C, D> {
5804    fn vrangess_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
5805}
5806
5807impl<'a> VrangessMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5808    fn vrangess_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5809        self.emit(VRANGESSRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5810    }
5811}
5812
5813impl<'a> VrangessMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
5814    fn vrangess_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
5815        self.emit(VRANGESSRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5816    }
5817}
5818
5819/// `VRANGESS_MASKZ_SAE` (VRANGESS). 
5820/// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
5821///
5822///
5823/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
5824///
5825/// Supported operand variants:
5826///
5827/// ```text
5828/// +---+--------------------+
5829/// | # | Operands           |
5830/// +---+--------------------+
5831/// | 1 | Xmm, Xmm, Xmm, Imm |
5832/// +---+--------------------+
5833/// ```
5834pub trait VrangessMaskzSaeEmitter<A, B, C, D> {
5835    fn vrangess_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5836}
5837
5838impl<'a> VrangessMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5839    fn vrangess_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5840        self.emit(VRANGESSRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5841    }
5842}
5843
5844/// `VRANGESS_SAE` (VRANGESS). 
5845/// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
5846///
5847///
5848/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
5849///
5850/// Supported operand variants:
5851///
5852/// ```text
5853/// +---+--------------------+
5854/// | # | Operands           |
5855/// +---+--------------------+
5856/// | 1 | Xmm, Xmm, Xmm, Imm |
5857/// +---+--------------------+
5858/// ```
5859pub trait VrangessSaeEmitter<A, B, C, D> {
5860    fn vrangess_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
5861}
5862
5863impl<'a> VrangessSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
5864    fn vrangess_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
5865        self.emit(VRANGESSRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5866    }
5867}
5868
5869/// `VREDUCEPD` (VREDUCEPD). 
5870/// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
5871///
5872///
5873/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
5874///
5875/// Supported operand variants:
5876///
5877/// ```text
5878/// +---+---------------+
5879/// | # | Operands      |
5880/// +---+---------------+
5881/// | 1 | Xmm, Mem, Imm |
5882/// | 2 | Xmm, Xmm, Imm |
5883/// | 3 | Ymm, Mem, Imm |
5884/// | 4 | Ymm, Ymm, Imm |
5885/// | 5 | Zmm, Mem, Imm |
5886/// | 6 | Zmm, Zmm, Imm |
5887/// +---+---------------+
5888/// ```
5889pub trait VreducepdEmitter<A, B, C> {
5890    fn vreducepd(&mut self, op0: A, op1: B, op2: C);
5891}
5892
5893impl<'a> VreducepdEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
5894    fn vreducepd(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
5895        self.emit(VREDUCEPD128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5896    }
5897}
5898
5899impl<'a> VreducepdEmitter<Xmm, Mem, Imm> for Assembler<'a> {
5900    fn vreducepd(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
5901        self.emit(VREDUCEPD128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5902    }
5903}
5904
5905impl<'a> VreducepdEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
5906    fn vreducepd(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
5907        self.emit(VREDUCEPD256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5908    }
5909}
5910
5911impl<'a> VreducepdEmitter<Ymm, Mem, Imm> for Assembler<'a> {
5912    fn vreducepd(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
5913        self.emit(VREDUCEPD256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5914    }
5915}
5916
5917impl<'a> VreducepdEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
5918    fn vreducepd(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
5919        self.emit(VREDUCEPD512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5920    }
5921}
5922
5923impl<'a> VreducepdEmitter<Zmm, Mem, Imm> for Assembler<'a> {
5924    fn vreducepd(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
5925        self.emit(VREDUCEPD512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5926    }
5927}
5928
5929/// `VREDUCEPD_MASK` (VREDUCEPD). 
5930/// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
5931///
5932///
5933/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
5934///
5935/// Supported operand variants:
5936///
5937/// ```text
5938/// +---+---------------+
5939/// | # | Operands      |
5940/// +---+---------------+
5941/// | 1 | Xmm, Mem, Imm |
5942/// | 2 | Xmm, Xmm, Imm |
5943/// | 3 | Ymm, Mem, Imm |
5944/// | 4 | Ymm, Ymm, Imm |
5945/// | 5 | Zmm, Mem, Imm |
5946/// | 6 | Zmm, Zmm, Imm |
5947/// +---+---------------+
5948/// ```
5949pub trait VreducepdMaskEmitter<A, B, C> {
5950    fn vreducepd_mask(&mut self, op0: A, op1: B, op2: C);
5951}
5952
5953impl<'a> VreducepdMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
5954    fn vreducepd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
5955        self.emit(VREDUCEPD128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5956    }
5957}
5958
5959impl<'a> VreducepdMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
5960    fn vreducepd_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
5961        self.emit(VREDUCEPD128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5962    }
5963}
5964
5965impl<'a> VreducepdMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
5966    fn vreducepd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
5967        self.emit(VREDUCEPD256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5968    }
5969}
5970
5971impl<'a> VreducepdMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
5972    fn vreducepd_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
5973        self.emit(VREDUCEPD256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5974    }
5975}
5976
5977impl<'a> VreducepdMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
5978    fn vreducepd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
5979        self.emit(VREDUCEPD512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5980    }
5981}
5982
5983impl<'a> VreducepdMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
5984    fn vreducepd_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
5985        self.emit(VREDUCEPD512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5986    }
5987}
5988
5989/// `VREDUCEPD_MASK_SAE` (VREDUCEPD). 
5990/// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
5991///
5992///
5993/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
5994///
5995/// Supported operand variants:
5996///
5997/// ```text
5998/// +---+---------------+
5999/// | # | Operands      |
6000/// +---+---------------+
6001/// | 1 | Zmm, Zmm, Imm |
6002/// +---+---------------+
6003/// ```
6004pub trait VreducepdMaskSaeEmitter<A, B, C> {
6005    fn vreducepd_mask_sae(&mut self, op0: A, op1: B, op2: C);
6006}
6007
6008impl<'a> VreducepdMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6009    fn vreducepd_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6010        self.emit(VREDUCEPD512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6011    }
6012}
6013
6014/// `VREDUCEPD_MASKZ` (VREDUCEPD). 
6015/// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6016///
6017///
6018/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
6019///
6020/// Supported operand variants:
6021///
6022/// ```text
6023/// +---+---------------+
6024/// | # | Operands      |
6025/// +---+---------------+
6026/// | 1 | Xmm, Mem, Imm |
6027/// | 2 | Xmm, Xmm, Imm |
6028/// | 3 | Ymm, Mem, Imm |
6029/// | 4 | Ymm, Ymm, Imm |
6030/// | 5 | Zmm, Mem, Imm |
6031/// | 6 | Zmm, Zmm, Imm |
6032/// +---+---------------+
6033/// ```
6034pub trait VreducepdMaskzEmitter<A, B, C> {
6035    fn vreducepd_maskz(&mut self, op0: A, op1: B, op2: C);
6036}
6037
6038impl<'a> VreducepdMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
6039    fn vreducepd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
6040        self.emit(VREDUCEPD128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6041    }
6042}
6043
6044impl<'a> VreducepdMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
6045    fn vreducepd_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
6046        self.emit(VREDUCEPD128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6047    }
6048}
6049
6050impl<'a> VreducepdMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
6051    fn vreducepd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
6052        self.emit(VREDUCEPD256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6053    }
6054}
6055
6056impl<'a> VreducepdMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
6057    fn vreducepd_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
6058        self.emit(VREDUCEPD256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6059    }
6060}
6061
6062impl<'a> VreducepdMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6063    fn vreducepd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6064        self.emit(VREDUCEPD512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6065    }
6066}
6067
6068impl<'a> VreducepdMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
6069    fn vreducepd_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
6070        self.emit(VREDUCEPD512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6071    }
6072}
6073
6074/// `VREDUCEPD_MASKZ_SAE` (VREDUCEPD). 
6075/// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6076///
6077///
6078/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
6079///
6080/// Supported operand variants:
6081///
6082/// ```text
6083/// +---+---------------+
6084/// | # | Operands      |
6085/// +---+---------------+
6086/// | 1 | Zmm, Zmm, Imm |
6087/// +---+---------------+
6088/// ```
6089pub trait VreducepdMaskzSaeEmitter<A, B, C> {
6090    fn vreducepd_maskz_sae(&mut self, op0: A, op1: B, op2: C);
6091}
6092
6093impl<'a> VreducepdMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6094    fn vreducepd_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6095        self.emit(VREDUCEPD512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6096    }
6097}
6098
6099/// `VREDUCEPD_SAE` (VREDUCEPD). 
6100/// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6101///
6102///
6103/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
6104///
6105/// Supported operand variants:
6106///
6107/// ```text
6108/// +---+---------------+
6109/// | # | Operands      |
6110/// +---+---------------+
6111/// | 1 | Zmm, Zmm, Imm |
6112/// +---+---------------+
6113/// ```
6114pub trait VreducepdSaeEmitter<A, B, C> {
6115    fn vreducepd_sae(&mut self, op0: A, op1: B, op2: C);
6116}
6117
6118impl<'a> VreducepdSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6119    fn vreducepd_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6120        self.emit(VREDUCEPD512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6121    }
6122}
6123
6124/// `VREDUCEPS` (VREDUCEPS). 
6125/// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6126///
6127///
6128/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
6129///
6130/// Supported operand variants:
6131///
6132/// ```text
6133/// +---+---------------+
6134/// | # | Operands      |
6135/// +---+---------------+
6136/// | 1 | Xmm, Mem, Imm |
6137/// | 2 | Xmm, Xmm, Imm |
6138/// | 3 | Ymm, Mem, Imm |
6139/// | 4 | Ymm, Ymm, Imm |
6140/// | 5 | Zmm, Mem, Imm |
6141/// | 6 | Zmm, Zmm, Imm |
6142/// +---+---------------+
6143/// ```
6144pub trait VreducepsEmitter<A, B, C> {
6145    fn vreduceps(&mut self, op0: A, op1: B, op2: C);
6146}
6147
6148impl<'a> VreducepsEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
6149    fn vreduceps(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
6150        self.emit(VREDUCEPS128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6151    }
6152}
6153
6154impl<'a> VreducepsEmitter<Xmm, Mem, Imm> for Assembler<'a> {
6155    fn vreduceps(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
6156        self.emit(VREDUCEPS128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6157    }
6158}
6159
6160impl<'a> VreducepsEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
6161    fn vreduceps(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
6162        self.emit(VREDUCEPS256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6163    }
6164}
6165
6166impl<'a> VreducepsEmitter<Ymm, Mem, Imm> for Assembler<'a> {
6167    fn vreduceps(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
6168        self.emit(VREDUCEPS256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6169    }
6170}
6171
6172impl<'a> VreducepsEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6173    fn vreduceps(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6174        self.emit(VREDUCEPS512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6175    }
6176}
6177
6178impl<'a> VreducepsEmitter<Zmm, Mem, Imm> for Assembler<'a> {
6179    fn vreduceps(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
6180        self.emit(VREDUCEPS512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6181    }
6182}
6183
6184/// `VREDUCEPS_MASK` (VREDUCEPS). 
6185/// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6186///
6187///
6188/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
6189///
6190/// Supported operand variants:
6191///
6192/// ```text
6193/// +---+---------------+
6194/// | # | Operands      |
6195/// +---+---------------+
6196/// | 1 | Xmm, Mem, Imm |
6197/// | 2 | Xmm, Xmm, Imm |
6198/// | 3 | Ymm, Mem, Imm |
6199/// | 4 | Ymm, Ymm, Imm |
6200/// | 5 | Zmm, Mem, Imm |
6201/// | 6 | Zmm, Zmm, Imm |
6202/// +---+---------------+
6203/// ```
6204pub trait VreducepsMaskEmitter<A, B, C> {
6205    fn vreduceps_mask(&mut self, op0: A, op1: B, op2: C);
6206}
6207
6208impl<'a> VreducepsMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
6209    fn vreduceps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
6210        self.emit(VREDUCEPS128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6211    }
6212}
6213
6214impl<'a> VreducepsMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
6215    fn vreduceps_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
6216        self.emit(VREDUCEPS128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6217    }
6218}
6219
6220impl<'a> VreducepsMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
6221    fn vreduceps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
6222        self.emit(VREDUCEPS256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6223    }
6224}
6225
6226impl<'a> VreducepsMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
6227    fn vreduceps_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
6228        self.emit(VREDUCEPS256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6229    }
6230}
6231
6232impl<'a> VreducepsMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6233    fn vreduceps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6234        self.emit(VREDUCEPS512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6235    }
6236}
6237
6238impl<'a> VreducepsMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
6239    fn vreduceps_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
6240        self.emit(VREDUCEPS512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6241    }
6242}
6243
6244/// `VREDUCEPS_MASK_SAE` (VREDUCEPS). 
6245/// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6246///
6247///
6248/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
6249///
6250/// Supported operand variants:
6251///
6252/// ```text
6253/// +---+---------------+
6254/// | # | Operands      |
6255/// +---+---------------+
6256/// | 1 | Zmm, Zmm, Imm |
6257/// +---+---------------+
6258/// ```
6259pub trait VreducepsMaskSaeEmitter<A, B, C> {
6260    fn vreduceps_mask_sae(&mut self, op0: A, op1: B, op2: C);
6261}
6262
6263impl<'a> VreducepsMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6264    fn vreduceps_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6265        self.emit(VREDUCEPS512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6266    }
6267}
6268
6269/// `VREDUCEPS_MASKZ` (VREDUCEPS). 
6270/// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6271///
6272///
6273/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
6274///
6275/// Supported operand variants:
6276///
6277/// ```text
6278/// +---+---------------+
6279/// | # | Operands      |
6280/// +---+---------------+
6281/// | 1 | Xmm, Mem, Imm |
6282/// | 2 | Xmm, Xmm, Imm |
6283/// | 3 | Ymm, Mem, Imm |
6284/// | 4 | Ymm, Ymm, Imm |
6285/// | 5 | Zmm, Mem, Imm |
6286/// | 6 | Zmm, Zmm, Imm |
6287/// +---+---------------+
6288/// ```
6289pub trait VreducepsMaskzEmitter<A, B, C> {
6290    fn vreduceps_maskz(&mut self, op0: A, op1: B, op2: C);
6291}
6292
6293impl<'a> VreducepsMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
6294    fn vreduceps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
6295        self.emit(VREDUCEPS128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6296    }
6297}
6298
6299impl<'a> VreducepsMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
6300    fn vreduceps_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
6301        self.emit(VREDUCEPS128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6302    }
6303}
6304
6305impl<'a> VreducepsMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
6306    fn vreduceps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
6307        self.emit(VREDUCEPS256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6308    }
6309}
6310
6311impl<'a> VreducepsMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
6312    fn vreduceps_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
6313        self.emit(VREDUCEPS256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6314    }
6315}
6316
6317impl<'a> VreducepsMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6318    fn vreduceps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6319        self.emit(VREDUCEPS512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6320    }
6321}
6322
6323impl<'a> VreducepsMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
6324    fn vreduceps_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
6325        self.emit(VREDUCEPS512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6326    }
6327}
6328
6329/// `VREDUCEPS_MASKZ_SAE` (VREDUCEPS). 
6330/// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6331///
6332///
6333/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
6334///
6335/// Supported operand variants:
6336///
6337/// ```text
6338/// +---+---------------+
6339/// | # | Operands      |
6340/// +---+---------------+
6341/// | 1 | Zmm, Zmm, Imm |
6342/// +---+---------------+
6343/// ```
6344pub trait VreducepsMaskzSaeEmitter<A, B, C> {
6345    fn vreduceps_maskz_sae(&mut self, op0: A, op1: B, op2: C);
6346}
6347
6348impl<'a> VreducepsMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6349    fn vreduceps_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6350        self.emit(VREDUCEPS512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6351    }
6352}
6353
6354/// `VREDUCEPS_SAE` (VREDUCEPS). 
6355/// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
6356///
6357///
6358/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
6359///
6360/// Supported operand variants:
6361///
6362/// ```text
6363/// +---+---------------+
6364/// | # | Operands      |
6365/// +---+---------------+
6366/// | 1 | Zmm, Zmm, Imm |
6367/// +---+---------------+
6368/// ```
6369pub trait VreducepsSaeEmitter<A, B, C> {
6370    fn vreduceps_sae(&mut self, op0: A, op1: B, op2: C);
6371}
6372
6373impl<'a> VreducepsSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
6374    fn vreduceps_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
6375        self.emit(VREDUCEPS512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6376    }
6377}
6378
6379/// `VREDUCESD` (VREDUCESD). 
6380/// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
6381///
6382///
6383/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
6384///
6385/// Supported operand variants:
6386///
6387/// ```text
6388/// +---+--------------------+
6389/// | # | Operands           |
6390/// +---+--------------------+
6391/// | 1 | Xmm, Xmm, Mem, Imm |
6392/// | 2 | Xmm, Xmm, Xmm, Imm |
6393/// +---+--------------------+
6394/// ```
6395pub trait VreducesdEmitter<A, B, C, D> {
6396    fn vreducesd(&mut self, op0: A, op1: B, op2: C, op3: D);
6397}
6398
6399impl<'a> VreducesdEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6400    fn vreducesd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6401        self.emit(VREDUCESDRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6402    }
6403}
6404
6405impl<'a> VreducesdEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
6406    fn vreducesd(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
6407        self.emit(VREDUCESDRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6408    }
6409}
6410
6411/// `VREDUCESD_MASK` (VREDUCESD). 
6412/// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
6413///
6414///
6415/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
6416///
6417/// Supported operand variants:
6418///
6419/// ```text
6420/// +---+--------------------+
6421/// | # | Operands           |
6422/// +---+--------------------+
6423/// | 1 | Xmm, Xmm, Mem, Imm |
6424/// | 2 | Xmm, Xmm, Xmm, Imm |
6425/// +---+--------------------+
6426/// ```
6427pub trait VreducesdMaskEmitter<A, B, C, D> {
6428    fn vreducesd_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
6429}
6430
6431impl<'a> VreducesdMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6432    fn vreducesd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6433        self.emit(VREDUCESDRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6434    }
6435}
6436
6437impl<'a> VreducesdMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
6438    fn vreducesd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
6439        self.emit(VREDUCESDRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6440    }
6441}
6442
6443/// `VREDUCESD_MASK_SAE` (VREDUCESD). 
6444/// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
6445///
6446///
6447/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
6448///
6449/// Supported operand variants:
6450///
6451/// ```text
6452/// +---+--------------------+
6453/// | # | Operands           |
6454/// +---+--------------------+
6455/// | 1 | Xmm, Xmm, Xmm, Imm |
6456/// +---+--------------------+
6457/// ```
6458pub trait VreducesdMaskSaeEmitter<A, B, C, D> {
6459    fn vreducesd_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
6460}
6461
6462impl<'a> VreducesdMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6463    fn vreducesd_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6464        self.emit(VREDUCESDRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6465    }
6466}
6467
6468/// `VREDUCESD_MASKZ` (VREDUCESD). 
6469/// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
6470///
6471///
6472/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
6473///
6474/// Supported operand variants:
6475///
6476/// ```text
6477/// +---+--------------------+
6478/// | # | Operands           |
6479/// +---+--------------------+
6480/// | 1 | Xmm, Xmm, Mem, Imm |
6481/// | 2 | Xmm, Xmm, Xmm, Imm |
6482/// +---+--------------------+
6483/// ```
6484pub trait VreducesdMaskzEmitter<A, B, C, D> {
6485    fn vreducesd_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
6486}
6487
6488impl<'a> VreducesdMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6489    fn vreducesd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6490        self.emit(VREDUCESDRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6491    }
6492}
6493
6494impl<'a> VreducesdMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
6495    fn vreducesd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
6496        self.emit(VREDUCESDRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6497    }
6498}
6499
6500/// `VREDUCESD_MASKZ_SAE` (VREDUCESD). 
6501/// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
6502///
6503///
6504/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
6505///
6506/// Supported operand variants:
6507///
6508/// ```text
6509/// +---+--------------------+
6510/// | # | Operands           |
6511/// +---+--------------------+
6512/// | 1 | Xmm, Xmm, Xmm, Imm |
6513/// +---+--------------------+
6514/// ```
6515pub trait VreducesdMaskzSaeEmitter<A, B, C, D> {
6516    fn vreducesd_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
6517}
6518
6519impl<'a> VreducesdMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6520    fn vreducesd_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6521        self.emit(VREDUCESDRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6522    }
6523}
6524
6525/// `VREDUCESD_SAE` (VREDUCESD). 
6526/// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
6527///
6528///
6529/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
6530///
6531/// Supported operand variants:
6532///
6533/// ```text
6534/// +---+--------------------+
6535/// | # | Operands           |
6536/// +---+--------------------+
6537/// | 1 | Xmm, Xmm, Xmm, Imm |
6538/// +---+--------------------+
6539/// ```
6540pub trait VreducesdSaeEmitter<A, B, C, D> {
6541    fn vreducesd_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
6542}
6543
6544impl<'a> VreducesdSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6545    fn vreducesd_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6546        self.emit(VREDUCESDRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6547    }
6548}
6549
6550/// `VREDUCESS` (VREDUCESS). 
6551/// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
6552///
6553///
6554/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
6555///
6556/// Supported operand variants:
6557///
6558/// ```text
6559/// +---+--------------------+
6560/// | # | Operands           |
6561/// +---+--------------------+
6562/// | 1 | Xmm, Xmm, Mem, Imm |
6563/// | 2 | Xmm, Xmm, Xmm, Imm |
6564/// +---+--------------------+
6565/// ```
6566pub trait VreducessEmitter<A, B, C, D> {
6567    fn vreducess(&mut self, op0: A, op1: B, op2: C, op3: D);
6568}
6569
6570impl<'a> VreducessEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6571    fn vreducess(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6572        self.emit(VREDUCESSRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6573    }
6574}
6575
6576impl<'a> VreducessEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
6577    fn vreducess(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
6578        self.emit(VREDUCESSRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6579    }
6580}
6581
6582/// `VREDUCESS_MASK` (VREDUCESS). 
6583/// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
6584///
6585///
6586/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
6587///
6588/// Supported operand variants:
6589///
6590/// ```text
6591/// +---+--------------------+
6592/// | # | Operands           |
6593/// +---+--------------------+
6594/// | 1 | Xmm, Xmm, Mem, Imm |
6595/// | 2 | Xmm, Xmm, Xmm, Imm |
6596/// +---+--------------------+
6597/// ```
6598pub trait VreducessMaskEmitter<A, B, C, D> {
6599    fn vreducess_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
6600}
6601
6602impl<'a> VreducessMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6603    fn vreducess_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6604        self.emit(VREDUCESSRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6605    }
6606}
6607
6608impl<'a> VreducessMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
6609    fn vreducess_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
6610        self.emit(VREDUCESSRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6611    }
6612}
6613
6614/// `VREDUCESS_MASK_SAE` (VREDUCESS). 
6615/// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
6616///
6617///
6618/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
6619///
6620/// Supported operand variants:
6621///
6622/// ```text
6623/// +---+--------------------+
6624/// | # | Operands           |
6625/// +---+--------------------+
6626/// | 1 | Xmm, Xmm, Xmm, Imm |
6627/// +---+--------------------+
6628/// ```
6629pub trait VreducessMaskSaeEmitter<A, B, C, D> {
6630    fn vreducess_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
6631}
6632
6633impl<'a> VreducessMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6634    fn vreducess_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6635        self.emit(VREDUCESSRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6636    }
6637}
6638
6639/// `VREDUCESS_MASKZ` (VREDUCESS). 
6640/// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
6641///
6642///
6643/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
6644///
6645/// Supported operand variants:
6646///
6647/// ```text
6648/// +---+--------------------+
6649/// | # | Operands           |
6650/// +---+--------------------+
6651/// | 1 | Xmm, Xmm, Mem, Imm |
6652/// | 2 | Xmm, Xmm, Xmm, Imm |
6653/// +---+--------------------+
6654/// ```
6655pub trait VreducessMaskzEmitter<A, B, C, D> {
6656    fn vreducess_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
6657}
6658
6659impl<'a> VreducessMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6660    fn vreducess_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6661        self.emit(VREDUCESSRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6662    }
6663}
6664
6665impl<'a> VreducessMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
6666    fn vreducess_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
6667        self.emit(VREDUCESSRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6668    }
6669}
6670
6671/// `VREDUCESS_MASKZ_SAE` (VREDUCESS). 
6672/// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
6673///
6674///
6675/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
6676///
6677/// Supported operand variants:
6678///
6679/// ```text
6680/// +---+--------------------+
6681/// | # | Operands           |
6682/// +---+--------------------+
6683/// | 1 | Xmm, Xmm, Xmm, Imm |
6684/// +---+--------------------+
6685/// ```
6686pub trait VreducessMaskzSaeEmitter<A, B, C, D> {
6687    fn vreducess_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
6688}
6689
6690impl<'a> VreducessMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6691    fn vreducess_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6692        self.emit(VREDUCESSRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6693    }
6694}
6695
6696/// `VREDUCESS_SAE` (VREDUCESS). 
6697/// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
6698///
6699///
6700/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
6701///
6702/// Supported operand variants:
6703///
6704/// ```text
6705/// +---+--------------------+
6706/// | # | Operands           |
6707/// +---+--------------------+
6708/// | 1 | Xmm, Xmm, Xmm, Imm |
6709/// +---+--------------------+
6710/// ```
6711pub trait VreducessSaeEmitter<A, B, C, D> {
6712    fn vreducess_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
6713}
6714
6715impl<'a> VreducessSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
6716    fn vreducess_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
6717        self.emit(VREDUCESSRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
6718    }
6719}
6720
6721/// `VXORPD` (VXORPD). 
6722/// Performs a bitwise logical XOR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
6723///
6724///
6725/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPD.html).
6726///
6727/// Supported operand variants:
6728///
6729/// ```text
6730/// +---+---------------+
6731/// | # | Operands      |
6732/// +---+---------------+
6733/// | 1 | Xmm, Xmm, Mem |
6734/// | 2 | Xmm, Xmm, Xmm |
6735/// | 3 | Ymm, Ymm, Mem |
6736/// | 4 | Ymm, Ymm, Ymm |
6737/// | 5 | Zmm, Zmm, Mem |
6738/// | 6 | Zmm, Zmm, Zmm |
6739/// +---+---------------+
6740/// ```
6741pub trait VxorpdEmitter<A, B, C> {
6742    fn vxorpd(&mut self, op0: A, op1: B, op2: C);
6743}
6744
6745impl<'a> VxorpdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6746    fn vxorpd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6747        self.emit(VXORPD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6748    }
6749}
6750
6751impl<'a> VxorpdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6752    fn vxorpd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6753        self.emit(VXORPD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6754    }
6755}
6756
6757impl<'a> VxorpdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6758    fn vxorpd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6759        self.emit(VXORPD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6760    }
6761}
6762
6763impl<'a> VxorpdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6764    fn vxorpd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6765        self.emit(VXORPD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6766    }
6767}
6768
6769impl<'a> VxorpdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6770    fn vxorpd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6771        self.emit(VXORPD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6772    }
6773}
6774
6775impl<'a> VxorpdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6776    fn vxorpd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6777        self.emit(VXORPD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6778    }
6779}
6780
6781/// `VXORPD_MASK` (VXORPD). 
6782/// Performs a bitwise logical XOR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
6783///
6784///
6785/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPD.html).
6786///
6787/// Supported operand variants:
6788///
6789/// ```text
6790/// +---+---------------+
6791/// | # | Operands      |
6792/// +---+---------------+
6793/// | 1 | Xmm, Xmm, Mem |
6794/// | 2 | Xmm, Xmm, Xmm |
6795/// | 3 | Ymm, Ymm, Mem |
6796/// | 4 | Ymm, Ymm, Ymm |
6797/// | 5 | Zmm, Zmm, Mem |
6798/// | 6 | Zmm, Zmm, Zmm |
6799/// +---+---------------+
6800/// ```
6801pub trait VxorpdMaskEmitter<A, B, C> {
6802    fn vxorpd_mask(&mut self, op0: A, op1: B, op2: C);
6803}
6804
6805impl<'a> VxorpdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6806    fn vxorpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6807        self.emit(VXORPD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6808    }
6809}
6810
6811impl<'a> VxorpdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6812    fn vxorpd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6813        self.emit(VXORPD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6814    }
6815}
6816
6817impl<'a> VxorpdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6818    fn vxorpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6819        self.emit(VXORPD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6820    }
6821}
6822
6823impl<'a> VxorpdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6824    fn vxorpd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6825        self.emit(VXORPD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6826    }
6827}
6828
6829impl<'a> VxorpdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6830    fn vxorpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6831        self.emit(VXORPD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6832    }
6833}
6834
6835impl<'a> VxorpdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6836    fn vxorpd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6837        self.emit(VXORPD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6838    }
6839}
6840
6841/// `VXORPD_MASKZ` (VXORPD). 
6842/// Performs a bitwise logical XOR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
6843///
6844///
6845/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPD.html).
6846///
6847/// Supported operand variants:
6848///
6849/// ```text
6850/// +---+---------------+
6851/// | # | Operands      |
6852/// +---+---------------+
6853/// | 1 | Xmm, Xmm, Mem |
6854/// | 2 | Xmm, Xmm, Xmm |
6855/// | 3 | Ymm, Ymm, Mem |
6856/// | 4 | Ymm, Ymm, Ymm |
6857/// | 5 | Zmm, Zmm, Mem |
6858/// | 6 | Zmm, Zmm, Zmm |
6859/// +---+---------------+
6860/// ```
6861pub trait VxorpdMaskzEmitter<A, B, C> {
6862    fn vxorpd_maskz(&mut self, op0: A, op1: B, op2: C);
6863}
6864
6865impl<'a> VxorpdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6866    fn vxorpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6867        self.emit(VXORPD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6868    }
6869}
6870
6871impl<'a> VxorpdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6872    fn vxorpd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6873        self.emit(VXORPD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6874    }
6875}
6876
6877impl<'a> VxorpdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6878    fn vxorpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6879        self.emit(VXORPD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6880    }
6881}
6882
6883impl<'a> VxorpdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6884    fn vxorpd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6885        self.emit(VXORPD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6886    }
6887}
6888
6889impl<'a> VxorpdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6890    fn vxorpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6891        self.emit(VXORPD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6892    }
6893}
6894
6895impl<'a> VxorpdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6896    fn vxorpd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6897        self.emit(VXORPD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6898    }
6899}
6900
6901/// `VXORPS` (VXORPS). 
6902/// Performs a bitwise logical XOR of the four, eight or sixteen packed single-precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
6903///
6904///
6905/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPS.html).
6906///
6907/// Supported operand variants:
6908///
6909/// ```text
6910/// +---+---------------+
6911/// | # | Operands      |
6912/// +---+---------------+
6913/// | 1 | Xmm, Xmm, Mem |
6914/// | 2 | Xmm, Xmm, Xmm |
6915/// | 3 | Ymm, Ymm, Mem |
6916/// | 4 | Ymm, Ymm, Ymm |
6917/// | 5 | Zmm, Zmm, Mem |
6918/// | 6 | Zmm, Zmm, Zmm |
6919/// +---+---------------+
6920/// ```
6921pub trait VxorpsEmitter<A, B, C> {
6922    fn vxorps(&mut self, op0: A, op1: B, op2: C);
6923}
6924
6925impl<'a> VxorpsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6926    fn vxorps(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6927        self.emit(VXORPS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6928    }
6929}
6930
6931impl<'a> VxorpsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6932    fn vxorps(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6933        self.emit(VXORPS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6934    }
6935}
6936
6937impl<'a> VxorpsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6938    fn vxorps(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6939        self.emit(VXORPS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6940    }
6941}
6942
6943impl<'a> VxorpsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6944    fn vxorps(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6945        self.emit(VXORPS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6946    }
6947}
6948
6949impl<'a> VxorpsEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6950    fn vxorps(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6951        self.emit(VXORPS512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6952    }
6953}
6954
6955impl<'a> VxorpsEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6956    fn vxorps(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6957        self.emit(VXORPS512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6958    }
6959}
6960
/// `VXORPS_MASK` (VXORPS).
/// Performs a bitwise logical XOR of the four, eight or sixteen packed single-precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
///
/// Masked variant of `VxorpsEmitter`: the impls select the `*_MASK` opcode
/// encodings. NOTE(review): per AVX-512 naming this presumably applies a
/// `{k}` write-mask with merge-masking — confirm against the opcode table.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands      |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VxorpsMaskEmitter<A, B, C> {
    /// Emits the masked `VXORPS` for this operand combination.
    fn vxorps_mask(&mut self, op0: A, op1: B, op2: C);
}
6984
6985impl<'a> VxorpsMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6986    fn vxorps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6987        self.emit(VXORPS128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6988    }
6989}
6990
6991impl<'a> VxorpsMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6992    fn vxorps_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6993        self.emit(VXORPS128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6994    }
6995}
6996
6997impl<'a> VxorpsMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6998    fn vxorps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6999        self.emit(VXORPS256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7000    }
7001}
7002
7003impl<'a> VxorpsMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7004    fn vxorps_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7005        self.emit(VXORPS256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7006    }
7007}
7008
7009impl<'a> VxorpsMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7010    fn vxorps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7011        self.emit(VXORPS512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7012    }
7013}
7014
7015impl<'a> VxorpsMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7016    fn vxorps_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7017        self.emit(VXORPS512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7018    }
7019}
7020
/// `VXORPS_MASKZ` (VXORPS).
/// Performs a bitwise logical XOR of the four, eight or sixteen packed single-precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
///
/// Masked variant of `VxorpsEmitter`: the impls select the `*_MASKZ` opcode
/// encodings. NOTE(review): per AVX-512 naming this presumably applies a
/// `{k}{z}` write-mask with zero-masking — confirm against the opcode table.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPS.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+---------------+
/// | # | Operands      |
/// +---+---------------+
/// | 1 | Xmm, Xmm, Mem |
/// | 2 | Xmm, Xmm, Xmm |
/// | 3 | Ymm, Ymm, Mem |
/// | 4 | Ymm, Ymm, Ymm |
/// | 5 | Zmm, Zmm, Mem |
/// | 6 | Zmm, Zmm, Zmm |
/// +---+---------------+
/// ```
pub trait VxorpsMaskzEmitter<A, B, C> {
    /// Emits the zero-masked `VXORPS` for this operand combination.
    fn vxorps_maskz(&mut self, op0: A, op1: B, op2: C);
}
7044
7045impl<'a> VxorpsMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7046    fn vxorps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7047        self.emit(VXORPS128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7048    }
7049}
7050
7051impl<'a> VxorpsMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7052    fn vxorps_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7053        self.emit(VXORPS128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7054    }
7055}
7056
7057impl<'a> VxorpsMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7058    fn vxorps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7059        self.emit(VXORPS256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7060    }
7061}
7062
7063impl<'a> VxorpsMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7064    fn vxorps_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7065        self.emit(VXORPS256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7066    }
7067}
7068
7069impl<'a> VxorpsMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7070    fn vxorps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7071        self.emit(VXORPS512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7072    }
7073}
7074
7075impl<'a> VxorpsMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7076    fn vxorps_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7077        self.emit(VXORPS512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7078    }
7079}
7080
7081
7082impl<'a> Assembler<'a> {
7083    /// `KADDB` (KADDB). 
7084    /// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
7085    ///
7086    ///
7087    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
7088    ///
7089    /// Supported operand variants:
7090    ///
7091    /// ```text
7092    /// +---+------------------+
7093    /// | # | Operands         |
7094    /// +---+------------------+
7095    /// | 1 | KReg, KReg, KReg |
7096    /// +---+------------------+
7097    /// ```
7098    #[inline]
7099    pub fn kaddb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7100    where Assembler<'a>: KaddbEmitter<A, B, C> {
7101        <Self as KaddbEmitter<A, B, C>>::kaddb(self, op0, op1, op2);
7102    }
7103    /// `KADDW` (KADDW). 
7104    /// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
7105    ///
7106    ///
7107    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
7108    ///
7109    /// Supported operand variants:
7110    ///
7111    /// ```text
7112    /// +---+------------------+
7113    /// | # | Operands         |
7114    /// +---+------------------+
7115    /// | 1 | KReg, KReg, KReg |
7116    /// +---+------------------+
7117    /// ```
7118    #[inline]
7119    pub fn kaddw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7120    where Assembler<'a>: KaddwEmitter<A, B, C> {
7121        <Self as KaddwEmitter<A, B, C>>::kaddw(self, op0, op1, op2);
7122    }
7123    /// `KANDB` (KANDB). 
7124    /// Performs a bitwise AND between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
7125    ///
7126    ///
7127    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDW%3AKANDB%3AKANDQ%3AKANDD.html).
7128    ///
7129    /// Supported operand variants:
7130    ///
7131    /// ```text
7132    /// +---+------------------+
7133    /// | # | Operands         |
7134    /// +---+------------------+
7135    /// | 1 | KReg, KReg, KReg |
7136    /// +---+------------------+
7137    /// ```
7138    #[inline]
7139    pub fn kandb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7140    where Assembler<'a>: KandbEmitter<A, B, C> {
7141        <Self as KandbEmitter<A, B, C>>::kandb(self, op0, op1, op2);
7142    }
7143    /// `KANDNB` (KANDNB). 
7144    /// Performs a bitwise AND NOT between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
7145    ///
7146    ///
7147    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDNW%3AKANDNB%3AKANDNQ%3AKANDND.html).
7148    ///
7149    /// Supported operand variants:
7150    ///
7151    /// ```text
7152    /// +---+------------------+
7153    /// | # | Operands         |
7154    /// +---+------------------+
7155    /// | 1 | KReg, KReg, KReg |
7156    /// +---+------------------+
7157    /// ```
7158    #[inline]
7159    pub fn kandnb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7160    where Assembler<'a>: KandnbEmitter<A, B, C> {
7161        <Self as KandnbEmitter<A, B, C>>::kandnb(self, op0, op1, op2);
7162    }
7163    /// `KMOVB` (KMOVB). 
7164    /// Copies values from the source operand (second operand) to the destination operand (first operand). The source and destination operands can be mask registers, memory location or general purpose. The instruction cannot be used to transfer data between general purpose registers and or memory locations.
7165    ///
7166    ///
7167    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KMOVW%3AKMOVB%3AKMOVQ%3AKMOVD.html).
7168    ///
7169    /// Supported operand variants:
7170    ///
7171    /// ```text
7172    /// +---+------------+
7173    /// | # | Operands   |
7174    /// +---+------------+
7175    /// | 1 | Gpd, KReg  |
7176    /// | 2 | KReg, Gpd  |
7177    /// | 3 | KReg, KReg |
7178    /// | 4 | KReg, Mem  |
7179    /// | 5 | Mem, KReg  |
7180    /// +---+------------+
7181    /// ```
7182    #[inline]
7183    pub fn kmovb<A, B>(&mut self, op0: A, op1: B)
7184    where Assembler<'a>: KmovbEmitter<A, B> {
7185        <Self as KmovbEmitter<A, B>>::kmovb(self, op0, op1);
7186    }
7187    /// `KNOTB` (KNOTB). 
7188    /// Performs a bitwise NOT of vector mask k2 and writes the result into vector mask k1.
7189    ///
7190    ///
7191    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KNOTW%3AKNOTB%3AKNOTQ%3AKNOTD.html).
7192    ///
7193    /// Supported operand variants:
7194    ///
7195    /// ```text
7196    /// +---+------------+
7197    /// | # | Operands   |
7198    /// +---+------------+
7199    /// | 1 | KReg, KReg |
7200    /// +---+------------+
7201    /// ```
7202    #[inline]
7203    pub fn knotb<A, B>(&mut self, op0: A, op1: B)
7204    where Assembler<'a>: KnotbEmitter<A, B> {
7205        <Self as KnotbEmitter<A, B>>::knotb(self, op0, op1);
7206    }
7207    /// `KORB` (KORB). 
7208    /// Performs a bitwise OR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
7209    ///
7210    ///
7211    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORW%3AKORB%3AKORQ%3AKORD.html).
7212    ///
7213    /// Supported operand variants:
7214    ///
7215    /// ```text
7216    /// +---+------------------+
7217    /// | # | Operands         |
7218    /// +---+------------------+
7219    /// | 1 | KReg, KReg, KReg |
7220    /// +---+------------------+
7221    /// ```
7222    #[inline]
7223    pub fn korb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7224    where Assembler<'a>: KorbEmitter<A, B, C> {
7225        <Self as KorbEmitter<A, B, C>>::korb(self, op0, op1, op2);
7226    }
7227    /// `KORTESTB` (KORTESTB). 
7228    /// Performs a bitwise OR between the vector mask register k2, and the vector mask register k1, and sets CF and ZF based on the operation result.
7229    ///
7230    ///
7231    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORTESTW%3AKORTESTB%3AKORTESTQ%3AKORTESTD.html).
7232    ///
7233    /// Supported operand variants:
7234    ///
7235    /// ```text
7236    /// +---+------------+
7237    /// | # | Operands   |
7238    /// +---+------------+
7239    /// | 1 | KReg, KReg |
7240    /// +---+------------+
7241    /// ```
7242    #[inline]
7243    pub fn kortestb<A, B>(&mut self, op0: A, op1: B)
7244    where Assembler<'a>: KortestbEmitter<A, B> {
7245        <Self as KortestbEmitter<A, B>>::kortestb(self, op0, op1);
7246    }
7247    /// `KSHIFTLB` (KSHIFTLB). 
7248    /// Shifts 8/16/32/64 bits in the second operand (source operand) left by the count specified in immediate byte and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
7249    ///
7250    ///
7251    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTLW%3AKSHIFTLB%3AKSHIFTLQ%3AKSHIFTLD.html).
7252    ///
7253    /// Supported operand variants:
7254    ///
7255    /// ```text
7256    /// +---+-----------------+
7257    /// | # | Operands        |
7258    /// +---+-----------------+
7259    /// | 1 | KReg, KReg, Imm |
7260    /// +---+-----------------+
7261    /// ```
7262    #[inline]
7263    pub fn kshiftlb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7264    where Assembler<'a>: KshiftlbEmitter<A, B, C> {
7265        <Self as KshiftlbEmitter<A, B, C>>::kshiftlb(self, op0, op1, op2);
7266    }
7267    /// `KSHIFTRB` (KSHIFTRB). 
7268    /// Shifts 8/16/32/64 bits in the second operand (source operand) right by the count specified in immediate and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
7269    ///
7270    ///
7271    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTRW%3AKSHIFTRB%3AKSHIFTRQ%3AKSHIFTRD.html).
7272    ///
7273    /// Supported operand variants:
7274    ///
7275    /// ```text
7276    /// +---+-----------------+
7277    /// | # | Operands        |
7278    /// +---+-----------------+
7279    /// | 1 | KReg, KReg, Imm |
7280    /// +---+-----------------+
7281    /// ```
7282    #[inline]
7283    pub fn kshiftrb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7284    where Assembler<'a>: KshiftrbEmitter<A, B, C> {
7285        <Self as KshiftrbEmitter<A, B, C>>::kshiftrb(self, op0, op1, op2);
7286    }
7287    /// `KTESTB` (KTESTB). 
7288    /// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
7289    ///
7290    ///
7291    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
7292    ///
7293    /// Supported operand variants:
7294    ///
7295    /// ```text
7296    /// +---+------------+
7297    /// | # | Operands   |
7298    /// +---+------------+
7299    /// | 1 | KReg, KReg |
7300    /// +---+------------+
7301    /// ```
7302    #[inline]
7303    pub fn ktestb<A, B>(&mut self, op0: A, op1: B)
7304    where Assembler<'a>: KtestbEmitter<A, B> {
7305        <Self as KtestbEmitter<A, B>>::ktestb(self, op0, op1);
7306    }
7307    /// `KTESTW` (KTESTW). 
7308    /// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
7309    ///
7310    ///
7311    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
7312    ///
7313    /// Supported operand variants:
7314    ///
7315    /// ```text
7316    /// +---+------------+
7317    /// | # | Operands   |
7318    /// +---+------------+
7319    /// | 1 | KReg, KReg |
7320    /// +---+------------+
7321    /// ```
7322    #[inline]
7323    pub fn ktestw<A, B>(&mut self, op0: A, op1: B)
7324    where Assembler<'a>: KtestwEmitter<A, B> {
7325        <Self as KtestwEmitter<A, B>>::ktestw(self, op0, op1);
7326    }
7327    /// `KXNORB` (KXNORB). 
7328    /// Performs a bitwise XNOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
7329    ///
7330    ///
7331    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXNORW%3AKXNORB%3AKXNORQ%3AKXNORD.html).
7332    ///
7333    /// Supported operand variants:
7334    ///
7335    /// ```text
7336    /// +---+------------------+
7337    /// | # | Operands         |
7338    /// +---+------------------+
7339    /// | 1 | KReg, KReg, KReg |
7340    /// +---+------------------+
7341    /// ```
7342    #[inline]
7343    pub fn kxnorb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7344    where Assembler<'a>: KxnorbEmitter<A, B, C> {
7345        <Self as KxnorbEmitter<A, B, C>>::kxnorb(self, op0, op1, op2);
7346    }
7347    /// `KXORB` (KXORB). 
7348    /// Performs a bitwise XOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
7349    ///
7350    ///
7351    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXORW%3AKXORB%3AKXORQ%3AKXORD.html).
7352    ///
7353    /// Supported operand variants:
7354    ///
7355    /// ```text
7356    /// +---+------------------+
7357    /// | # | Operands         |
7358    /// +---+------------------+
7359    /// | 1 | KReg, KReg, KReg |
7360    /// +---+------------------+
7361    /// ```
7362    #[inline]
7363    pub fn kxorb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7364    where Assembler<'a>: KxorbEmitter<A, B, C> {
7365        <Self as KxorbEmitter<A, B, C>>::kxorb(self, op0, op1, op2);
7366    }
7367    /// `VANDNPD` (VANDNPD). 
7368    /// Performs a bitwise logical AND NOT of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7369    ///
7370    ///
7371    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPD.html).
7372    ///
7373    /// Supported operand variants:
7374    ///
7375    /// ```text
7376    /// +---+---------------+
7377    /// | # | Operands      |
7378    /// +---+---------------+
7379    /// | 1 | Xmm, Xmm, Mem |
7380    /// | 2 | Xmm, Xmm, Xmm |
7381    /// | 3 | Ymm, Ymm, Mem |
7382    /// | 4 | Ymm, Ymm, Ymm |
7383    /// | 5 | Zmm, Zmm, Mem |
7384    /// | 6 | Zmm, Zmm, Zmm |
7385    /// +---+---------------+
7386    /// ```
7387    #[inline]
7388    pub fn vandnpd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7389    where Assembler<'a>: VandnpdEmitter<A, B, C> {
7390        <Self as VandnpdEmitter<A, B, C>>::vandnpd(self, op0, op1, op2);
7391    }
7392    /// `VANDNPD_MASK` (VANDNPD). 
7393    /// Performs a bitwise logical AND NOT of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7394    ///
7395    ///
7396    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPD.html).
7397    ///
7398    /// Supported operand variants:
7399    ///
7400    /// ```text
7401    /// +---+---------------+
7402    /// | # | Operands      |
7403    /// +---+---------------+
7404    /// | 1 | Xmm, Xmm, Mem |
7405    /// | 2 | Xmm, Xmm, Xmm |
7406    /// | 3 | Ymm, Ymm, Mem |
7407    /// | 4 | Ymm, Ymm, Ymm |
7408    /// | 5 | Zmm, Zmm, Mem |
7409    /// | 6 | Zmm, Zmm, Zmm |
7410    /// +---+---------------+
7411    /// ```
7412    #[inline]
7413    pub fn vandnpd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7414    where Assembler<'a>: VandnpdMaskEmitter<A, B, C> {
7415        <Self as VandnpdMaskEmitter<A, B, C>>::vandnpd_mask(self, op0, op1, op2);
7416    }
7417    /// `VANDNPD_MASKZ` (VANDNPD). 
7418    /// Performs a bitwise logical AND NOT of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7419    ///
7420    ///
7421    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPD.html).
7422    ///
7423    /// Supported operand variants:
7424    ///
7425    /// ```text
7426    /// +---+---------------+
7427    /// | # | Operands      |
7428    /// +---+---------------+
7429    /// | 1 | Xmm, Xmm, Mem |
7430    /// | 2 | Xmm, Xmm, Xmm |
7431    /// | 3 | Ymm, Ymm, Mem |
7432    /// | 4 | Ymm, Ymm, Ymm |
7433    /// | 5 | Zmm, Zmm, Mem |
7434    /// | 6 | Zmm, Zmm, Zmm |
7435    /// +---+---------------+
7436    /// ```
7437    #[inline]
7438    pub fn vandnpd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7439    where Assembler<'a>: VandnpdMaskzEmitter<A, B, C> {
7440        <Self as VandnpdMaskzEmitter<A, B, C>>::vandnpd_maskz(self, op0, op1, op2);
7441    }
7442    /// `VANDNPS` (VANDNPS). 
7443    /// Performs a bitwise logical AND NOT of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7444    ///
7445    ///
7446    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPS.html).
7447    ///
7448    /// Supported operand variants:
7449    ///
7450    /// ```text
7451    /// +---+---------------+
7452    /// | # | Operands      |
7453    /// +---+---------------+
7454    /// | 1 | Xmm, Xmm, Mem |
7455    /// | 2 | Xmm, Xmm, Xmm |
7456    /// | 3 | Ymm, Ymm, Mem |
7457    /// | 4 | Ymm, Ymm, Ymm |
7458    /// | 5 | Zmm, Zmm, Mem |
7459    /// | 6 | Zmm, Zmm, Zmm |
7460    /// +---+---------------+
7461    /// ```
7462    #[inline]
7463    pub fn vandnps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7464    where Assembler<'a>: VandnpsEmitter<A, B, C> {
7465        <Self as VandnpsEmitter<A, B, C>>::vandnps(self, op0, op1, op2);
7466    }
7467    /// `VANDNPS_MASK` (VANDNPS). 
7468    /// Performs a bitwise logical AND NOT of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7469    ///
7470    ///
7471    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPS.html).
7472    ///
7473    /// Supported operand variants:
7474    ///
7475    /// ```text
7476    /// +---+---------------+
7477    /// | # | Operands      |
7478    /// +---+---------------+
7479    /// | 1 | Xmm, Xmm, Mem |
7480    /// | 2 | Xmm, Xmm, Xmm |
7481    /// | 3 | Ymm, Ymm, Mem |
7482    /// | 4 | Ymm, Ymm, Ymm |
7483    /// | 5 | Zmm, Zmm, Mem |
7484    /// | 6 | Zmm, Zmm, Zmm |
7485    /// +---+---------------+
7486    /// ```
7487    #[inline]
7488    pub fn vandnps_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7489    where Assembler<'a>: VandnpsMaskEmitter<A, B, C> {
7490        <Self as VandnpsMaskEmitter<A, B, C>>::vandnps_mask(self, op0, op1, op2);
7491    }
7492    /// `VANDNPS_MASKZ` (VANDNPS). 
7493    /// Performs a bitwise logical AND NOT of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7494    ///
7495    ///
7496    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDNPS.html).
7497    ///
7498    /// Supported operand variants:
7499    ///
7500    /// ```text
7501    /// +---+---------------+
7502    /// | # | Operands      |
7503    /// +---+---------------+
7504    /// | 1 | Xmm, Xmm, Mem |
7505    /// | 2 | Xmm, Xmm, Xmm |
7506    /// | 3 | Ymm, Ymm, Mem |
7507    /// | 4 | Ymm, Ymm, Ymm |
7508    /// | 5 | Zmm, Zmm, Mem |
7509    /// | 6 | Zmm, Zmm, Zmm |
7510    /// +---+---------------+
7511    /// ```
7512    #[inline]
7513    pub fn vandnps_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7514    where Assembler<'a>: VandnpsMaskzEmitter<A, B, C> {
7515        <Self as VandnpsMaskzEmitter<A, B, C>>::vandnps_maskz(self, op0, op1, op2);
7516    }
7517    /// `VANDPD` (VANDPD). 
7518    /// Performs a bitwise logical AND of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7519    ///
7520    ///
7521    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPD.html).
7522    ///
7523    /// Supported operand variants:
7524    ///
7525    /// ```text
7526    /// +---+---------------+
7527    /// | # | Operands      |
7528    /// +---+---------------+
7529    /// | 1 | Xmm, Xmm, Mem |
7530    /// | 2 | Xmm, Xmm, Xmm |
7531    /// | 3 | Ymm, Ymm, Mem |
7532    /// | 4 | Ymm, Ymm, Ymm |
7533    /// | 5 | Zmm, Zmm, Mem |
7534    /// | 6 | Zmm, Zmm, Zmm |
7535    /// +---+---------------+
7536    /// ```
7537    #[inline]
7538    pub fn vandpd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7539    where Assembler<'a>: VandpdEmitter<A, B, C> {
7540        <Self as VandpdEmitter<A, B, C>>::vandpd(self, op0, op1, op2);
7541    }
7542    /// `VANDPD_MASK` (VANDPD). 
7543    /// Performs a bitwise logical AND of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7544    ///
7545    ///
7546    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPD.html).
7547    ///
7548    /// Supported operand variants:
7549    ///
7550    /// ```text
7551    /// +---+---------------+
7552    /// | # | Operands      |
7553    /// +---+---------------+
7554    /// | 1 | Xmm, Xmm, Mem |
7555    /// | 2 | Xmm, Xmm, Xmm |
7556    /// | 3 | Ymm, Ymm, Mem |
7557    /// | 4 | Ymm, Ymm, Ymm |
7558    /// | 5 | Zmm, Zmm, Mem |
7559    /// | 6 | Zmm, Zmm, Zmm |
7560    /// +---+---------------+
7561    /// ```
7562    #[inline]
7563    pub fn vandpd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7564    where Assembler<'a>: VandpdMaskEmitter<A, B, C> {
7565        <Self as VandpdMaskEmitter<A, B, C>>::vandpd_mask(self, op0, op1, op2);
7566    }
7567    /// `VANDPD_MASKZ` (VANDPD). 
7568    /// Performs a bitwise logical AND of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7569    ///
7570    ///
7571    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPD.html).
7572    ///
7573    /// Supported operand variants:
7574    ///
7575    /// ```text
7576    /// +---+---------------+
7577    /// | # | Operands      |
7578    /// +---+---------------+
7579    /// | 1 | Xmm, Xmm, Mem |
7580    /// | 2 | Xmm, Xmm, Xmm |
7581    /// | 3 | Ymm, Ymm, Mem |
7582    /// | 4 | Ymm, Ymm, Ymm |
7583    /// | 5 | Zmm, Zmm, Mem |
7584    /// | 6 | Zmm, Zmm, Zmm |
7585    /// +---+---------------+
7586    /// ```
7587    #[inline]
7588    pub fn vandpd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7589    where Assembler<'a>: VandpdMaskzEmitter<A, B, C> {
7590        <Self as VandpdMaskzEmitter<A, B, C>>::vandpd_maskz(self, op0, op1, op2);
7591    }
7592    /// `VANDPS` (VANDPS). 
7593    /// Performs a bitwise logical AND of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7594    ///
7595    ///
7596    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPS.html).
7597    ///
7598    /// Supported operand variants:
7599    ///
7600    /// ```text
7601    /// +---+---------------+
7602    /// | # | Operands      |
7603    /// +---+---------------+
7604    /// | 1 | Xmm, Xmm, Mem |
7605    /// | 2 | Xmm, Xmm, Xmm |
7606    /// | 3 | Ymm, Ymm, Mem |
7607    /// | 4 | Ymm, Ymm, Ymm |
7608    /// | 5 | Zmm, Zmm, Mem |
7609    /// | 6 | Zmm, Zmm, Zmm |
7610    /// +---+---------------+
7611    /// ```
7612    #[inline]
7613    pub fn vandps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7614    where Assembler<'a>: VandpsEmitter<A, B, C> {
7615        <Self as VandpsEmitter<A, B, C>>::vandps(self, op0, op1, op2);
7616    }
7617    /// `VANDPS_MASK` (VANDPS). 
7618    /// Performs a bitwise logical AND of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7619    ///
7620    ///
7621    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPS.html).
7622    ///
7623    /// Supported operand variants:
7624    ///
7625    /// ```text
7626    /// +---+---------------+
7627    /// | # | Operands      |
7628    /// +---+---------------+
7629    /// | 1 | Xmm, Xmm, Mem |
7630    /// | 2 | Xmm, Xmm, Xmm |
7631    /// | 3 | Ymm, Ymm, Mem |
7632    /// | 4 | Ymm, Ymm, Ymm |
7633    /// | 5 | Zmm, Zmm, Mem |
7634    /// | 6 | Zmm, Zmm, Zmm |
7635    /// +---+---------------+
7636    /// ```
7637    #[inline]
7638    pub fn vandps_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7639    where Assembler<'a>: VandpsMaskEmitter<A, B, C> {
7640        <Self as VandpsMaskEmitter<A, B, C>>::vandps_mask(self, op0, op1, op2);
7641    }
7642    /// `VANDPS_MASKZ` (VANDPS). 
7643    /// Performs a bitwise logical AND of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
7644    ///
7645    ///
7646    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ANDPS.html).
7647    ///
7648    /// Supported operand variants:
7649    ///
7650    /// ```text
7651    /// +---+---------------+
7652    /// | # | Operands      |
7653    /// +---+---------------+
7654    /// | 1 | Xmm, Xmm, Mem |
7655    /// | 2 | Xmm, Xmm, Xmm |
7656    /// | 3 | Ymm, Ymm, Mem |
7657    /// | 4 | Ymm, Ymm, Ymm |
7658    /// | 5 | Zmm, Zmm, Mem |
7659    /// | 6 | Zmm, Zmm, Zmm |
7660    /// +---+---------------+
7661    /// ```
7662    #[inline]
7663    pub fn vandps_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
7664    where Assembler<'a>: VandpsMaskzEmitter<A, B, C> {
7665        <Self as VandpsMaskzEmitter<A, B, C>>::vandps_maskz(self, op0, op1, op2);
7666    }
7667    /// `VBROADCASTF32X2` (VBROADCASTF32X2). 
7668    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7669    ///
7670    ///
7671    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7672    ///
7673    /// Supported operand variants:
7674    ///
7675    /// ```text
7676    /// +---+----------+
7677    /// | # | Operands |
7678    /// +---+----------+
7679    /// | 1 | Ymm, Mem |
7680    /// | 2 | Ymm, Xmm |
7681    /// | 3 | Zmm, Mem |
7682    /// | 4 | Zmm, Xmm |
7683    /// +---+----------+
7684    /// ```
7685    #[inline]
7686    pub fn vbroadcastf32x2<A, B>(&mut self, op0: A, op1: B)
7687    where Assembler<'a>: Vbroadcastf32x2Emitter<A, B> {
7688        <Self as Vbroadcastf32x2Emitter<A, B>>::vbroadcastf32x2(self, op0, op1);
7689    }
7690    /// `VBROADCASTF32X2_MASK` (VBROADCASTF32X2). 
7691    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7692    ///
7693    ///
7694    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7695    ///
7696    /// Supported operand variants:
7697    ///
7698    /// ```text
7699    /// +---+----------+
7700    /// | # | Operands |
7701    /// +---+----------+
7702    /// | 1 | Ymm, Mem |
7703    /// | 2 | Ymm, Xmm |
7704    /// | 3 | Zmm, Mem |
7705    /// | 4 | Zmm, Xmm |
7706    /// +---+----------+
7707    /// ```
7708    #[inline]
7709    pub fn vbroadcastf32x2_mask<A, B>(&mut self, op0: A, op1: B)
7710    where Assembler<'a>: Vbroadcastf32x2MaskEmitter<A, B> {
7711        <Self as Vbroadcastf32x2MaskEmitter<A, B>>::vbroadcastf32x2_mask(self, op0, op1);
7712    }
7713    /// `VBROADCASTF32X2_MASKZ` (VBROADCASTF32X2). 
7714    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7715    ///
7716    ///
7717    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7718    ///
7719    /// Supported operand variants:
7720    ///
7721    /// ```text
7722    /// +---+----------+
7723    /// | # | Operands |
7724    /// +---+----------+
7725    /// | 1 | Ymm, Mem |
7726    /// | 2 | Ymm, Xmm |
7727    /// | 3 | Zmm, Mem |
7728    /// | 4 | Zmm, Xmm |
7729    /// +---+----------+
7730    /// ```
7731    #[inline]
7732    pub fn vbroadcastf32x2_maskz<A, B>(&mut self, op0: A, op1: B)
7733    where Assembler<'a>: Vbroadcastf32x2MaskzEmitter<A, B> {
7734        <Self as Vbroadcastf32x2MaskzEmitter<A, B>>::vbroadcastf32x2_maskz(self, op0, op1);
7735    }
7736    /// `VBROADCASTF32X8` (VBROADCASTF32X8). 
7737    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7738    ///
7739    ///
7740    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7741    ///
7742    /// Supported operand variants:
7743    ///
7744    /// ```text
7745    /// +---+----------+
7746    /// | # | Operands |
7747    /// +---+----------+
7748    /// | 1 | Zmm, Mem |
7749    /// +---+----------+
7750    /// ```
7751    #[inline]
7752    pub fn vbroadcastf32x8<A, B>(&mut self, op0: A, op1: B)
7753    where Assembler<'a>: Vbroadcastf32x8Emitter<A, B> {
7754        <Self as Vbroadcastf32x8Emitter<A, B>>::vbroadcastf32x8(self, op0, op1);
7755    }
7756    /// `VBROADCASTF32X8_MASK` (VBROADCASTF32X8). 
7757    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7758    ///
7759    ///
7760    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7761    ///
7762    /// Supported operand variants:
7763    ///
7764    /// ```text
7765    /// +---+----------+
7766    /// | # | Operands |
7767    /// +---+----------+
7768    /// | 1 | Zmm, Mem |
7769    /// +---+----------+
7770    /// ```
7771    #[inline]
7772    pub fn vbroadcastf32x8_mask<A, B>(&mut self, op0: A, op1: B)
7773    where Assembler<'a>: Vbroadcastf32x8MaskEmitter<A, B> {
7774        <Self as Vbroadcastf32x8MaskEmitter<A, B>>::vbroadcastf32x8_mask(self, op0, op1);
7775    }
7776    /// `VBROADCASTF32X8_MASKZ` (VBROADCASTF32X8). 
7777    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7778    ///
7779    ///
7780    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7781    ///
7782    /// Supported operand variants:
7783    ///
7784    /// ```text
7785    /// +---+----------+
7786    /// | # | Operands |
7787    /// +---+----------+
7788    /// | 1 | Zmm, Mem |
7789    /// +---+----------+
7790    /// ```
7791    #[inline]
7792    pub fn vbroadcastf32x8_maskz<A, B>(&mut self, op0: A, op1: B)
7793    where Assembler<'a>: Vbroadcastf32x8MaskzEmitter<A, B> {
7794        <Self as Vbroadcastf32x8MaskzEmitter<A, B>>::vbroadcastf32x8_maskz(self, op0, op1);
7795    }
7796    /// `VBROADCASTF64X2` (VBROADCASTF64X2). 
7797    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7798    ///
7799    ///
7800    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7801    ///
7802    /// Supported operand variants:
7803    ///
7804    /// ```text
7805    /// +---+----------+
7806    /// | # | Operands |
7807    /// +---+----------+
7808    /// | 1 | Ymm, Mem |
7809    /// | 2 | Zmm, Mem |
7810    /// +---+----------+
7811    /// ```
7812    #[inline]
7813    pub fn vbroadcastf64x2<A, B>(&mut self, op0: A, op1: B)
7814    where Assembler<'a>: Vbroadcastf64x2Emitter<A, B> {
7815        <Self as Vbroadcastf64x2Emitter<A, B>>::vbroadcastf64x2(self, op0, op1);
7816    }
7817    /// `VBROADCASTF64X2_MASK` (VBROADCASTF64X2). 
7818    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7819    ///
7820    ///
7821    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7822    ///
7823    /// Supported operand variants:
7824    ///
7825    /// ```text
7826    /// +---+----------+
7827    /// | # | Operands |
7828    /// +---+----------+
7829    /// | 1 | Ymm, Mem |
7830    /// | 2 | Zmm, Mem |
7831    /// +---+----------+
7832    /// ```
7833    #[inline]
7834    pub fn vbroadcastf64x2_mask<A, B>(&mut self, op0: A, op1: B)
7835    where Assembler<'a>: Vbroadcastf64x2MaskEmitter<A, B> {
7836        <Self as Vbroadcastf64x2MaskEmitter<A, B>>::vbroadcastf64x2_mask(self, op0, op1);
7837    }
7838    /// `VBROADCASTF64X2_MASKZ` (VBROADCASTF64X2). 
7839    /// VBROADCASTSD/VBROADCASTSS/VBROADCASTF128 load floating-point values as one tuple from the source operand (second operand) in memory and broadcast to all elements of the destination operand (first operand).
7840    ///
7841    ///
7842    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VBROADCAST.html).
7843    ///
7844    /// Supported operand variants:
7845    ///
7846    /// ```text
7847    /// +---+----------+
7848    /// | # | Operands |
7849    /// +---+----------+
7850    /// | 1 | Ymm, Mem |
7851    /// | 2 | Zmm, Mem |
7852    /// +---+----------+
7853    /// ```
7854    #[inline]
7855    pub fn vbroadcastf64x2_maskz<A, B>(&mut self, op0: A, op1: B)
7856    where Assembler<'a>: Vbroadcastf64x2MaskzEmitter<A, B> {
7857        <Self as Vbroadcastf64x2MaskzEmitter<A, B>>::vbroadcastf64x2_maskz(self, op0, op1);
7858    }
7859    /// `VBROADCASTI32X2`.
7860    ///
7861    /// Supported operand variants:
7862    ///
7863    /// ```text
7864    /// +---+----------+
7865    /// | # | Operands |
7866    /// +---+----------+
7867    /// | 1 | Xmm, Mem |
7868    /// | 2 | Xmm, Xmm |
7869    /// | 3 | Ymm, Mem |
7870    /// | 4 | Ymm, Xmm |
7871    /// | 5 | Zmm, Mem |
7872    /// | 6 | Zmm, Xmm |
7873    /// +---+----------+
7874    /// ```
7875    #[inline]
7876    pub fn vbroadcasti32x2<A, B>(&mut self, op0: A, op1: B)
7877    where Assembler<'a>: Vbroadcasti32x2Emitter<A, B> {
7878        <Self as Vbroadcasti32x2Emitter<A, B>>::vbroadcasti32x2(self, op0, op1);
7879    }
7880    /// `VBROADCASTI32X2_MASK`.
7881    ///
7882    /// Supported operand variants:
7883    ///
7884    /// ```text
7885    /// +---+----------+
7886    /// | # | Operands |
7887    /// +---+----------+
7888    /// | 1 | Xmm, Mem |
7889    /// | 2 | Xmm, Xmm |
7890    /// | 3 | Ymm, Mem |
7891    /// | 4 | Ymm, Xmm |
7892    /// | 5 | Zmm, Mem |
7893    /// | 6 | Zmm, Xmm |
7894    /// +---+----------+
7895    /// ```
7896    #[inline]
7897    pub fn vbroadcasti32x2_mask<A, B>(&mut self, op0: A, op1: B)
7898    where Assembler<'a>: Vbroadcasti32x2MaskEmitter<A, B> {
7899        <Self as Vbroadcasti32x2MaskEmitter<A, B>>::vbroadcasti32x2_mask(self, op0, op1);
7900    }
7901    /// `VBROADCASTI32X2_MASKZ`.
7902    ///
7903    /// Supported operand variants:
7904    ///
7905    /// ```text
7906    /// +---+----------+
7907    /// | # | Operands |
7908    /// +---+----------+
7909    /// | 1 | Xmm, Mem |
7910    /// | 2 | Xmm, Xmm |
7911    /// | 3 | Ymm, Mem |
7912    /// | 4 | Ymm, Xmm |
7913    /// | 5 | Zmm, Mem |
7914    /// | 6 | Zmm, Xmm |
7915    /// +---+----------+
7916    /// ```
7917    #[inline]
7918    pub fn vbroadcasti32x2_maskz<A, B>(&mut self, op0: A, op1: B)
7919    where Assembler<'a>: Vbroadcasti32x2MaskzEmitter<A, B> {
7920        <Self as Vbroadcasti32x2MaskzEmitter<A, B>>::vbroadcasti32x2_maskz(self, op0, op1);
7921    }
7922    /// `VBROADCASTI32X4` (VBROADCASTI32X4). 
7923    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
7924    ///
7925    ///
7926    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
7927    ///
7928    /// Supported operand variants:
7929    ///
7930    /// ```text
7931    /// +---+----------+
7932    /// | # | Operands |
7933    /// +---+----------+
7934    /// | 1 | Ymm, Mem |
7935    /// | 2 | Zmm, Mem |
7936    /// +---+----------+
7937    /// ```
7938    #[inline]
7939    pub fn vbroadcasti32x4<A, B>(&mut self, op0: A, op1: B)
7940    where Assembler<'a>: Vbroadcasti32x4Emitter<A, B> {
7941        <Self as Vbroadcasti32x4Emitter<A, B>>::vbroadcasti32x4(self, op0, op1);
7942    }
7943    /// `VBROADCASTI32X4_MASK` (VBROADCASTI32X4). 
7944    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
7945    ///
7946    ///
7947    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
7948    ///
7949    /// Supported operand variants:
7950    ///
7951    /// ```text
7952    /// +---+----------+
7953    /// | # | Operands |
7954    /// +---+----------+
7955    /// | 1 | Ymm, Mem |
7956    /// | 2 | Zmm, Mem |
7957    /// +---+----------+
7958    /// ```
7959    #[inline]
7960    pub fn vbroadcasti32x4_mask<A, B>(&mut self, op0: A, op1: B)
7961    where Assembler<'a>: Vbroadcasti32x4MaskEmitter<A, B> {
7962        <Self as Vbroadcasti32x4MaskEmitter<A, B>>::vbroadcasti32x4_mask(self, op0, op1);
7963    }
7964    /// `VBROADCASTI32X4_MASKZ` (VBROADCASTI32X4). 
7965    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
7966    ///
7967    ///
7968    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
7969    ///
7970    /// Supported operand variants:
7971    ///
7972    /// ```text
7973    /// +---+----------+
7974    /// | # | Operands |
7975    /// +---+----------+
7976    /// | 1 | Ymm, Mem |
7977    /// | 2 | Zmm, Mem |
7978    /// +---+----------+
7979    /// ```
7980    #[inline]
7981    pub fn vbroadcasti32x4_maskz<A, B>(&mut self, op0: A, op1: B)
7982    where Assembler<'a>: Vbroadcasti32x4MaskzEmitter<A, B> {
7983        <Self as Vbroadcasti32x4MaskzEmitter<A, B>>::vbroadcasti32x4_maskz(self, op0, op1);
7984    }
7985    /// `VBROADCASTI32X8` (VBROADCASTI32X8). 
7986    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
7987    ///
7988    ///
7989    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
7990    ///
7991    /// Supported operand variants:
7992    ///
7993    /// ```text
7994    /// +---+----------+
7995    /// | # | Operands |
7996    /// +---+----------+
7997    /// | 1 | Zmm, Mem |
7998    /// +---+----------+
7999    /// ```
8000    #[inline]
8001    pub fn vbroadcasti32x8<A, B>(&mut self, op0: A, op1: B)
8002    where Assembler<'a>: Vbroadcasti32x8Emitter<A, B> {
8003        <Self as Vbroadcasti32x8Emitter<A, B>>::vbroadcasti32x8(self, op0, op1);
8004    }
8005    /// `VBROADCASTI32X8_MASK` (VBROADCASTI32X8). 
8006    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
8007    ///
8008    ///
8009    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
8010    ///
8011    /// Supported operand variants:
8012    ///
8013    /// ```text
8014    /// +---+----------+
8015    /// | # | Operands |
8016    /// +---+----------+
8017    /// | 1 | Zmm, Mem |
8018    /// +---+----------+
8019    /// ```
8020    #[inline]
8021    pub fn vbroadcasti32x8_mask<A, B>(&mut self, op0: A, op1: B)
8022    where Assembler<'a>: Vbroadcasti32x8MaskEmitter<A, B> {
8023        <Self as Vbroadcasti32x8MaskEmitter<A, B>>::vbroadcasti32x8_mask(self, op0, op1);
8024    }
8025    /// `VBROADCASTI32X8_MASKZ` (VBROADCASTI32X8). 
8026    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
8027    ///
8028    ///
8029    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
8030    ///
8031    /// Supported operand variants:
8032    ///
8033    /// ```text
8034    /// +---+----------+
8035    /// | # | Operands |
8036    /// +---+----------+
8037    /// | 1 | Zmm, Mem |
8038    /// +---+----------+
8039    /// ```
8040    #[inline]
8041    pub fn vbroadcasti32x8_maskz<A, B>(&mut self, op0: A, op1: B)
8042    where Assembler<'a>: Vbroadcasti32x8MaskzEmitter<A, B> {
8043        <Self as Vbroadcasti32x8MaskzEmitter<A, B>>::vbroadcasti32x8_maskz(self, op0, op1);
8044    }
8045    /// `VBROADCASTI64X2` (VBROADCASTI64X2). 
8046    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
8047    ///
8048    ///
8049    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
8050    ///
8051    /// Supported operand variants:
8052    ///
8053    /// ```text
8054    /// +---+----------+
8055    /// | # | Operands |
8056    /// +---+----------+
8057    /// | 1 | Ymm, Mem |
8058    /// | 2 | Zmm, Mem |
8059    /// +---+----------+
8060    /// ```
8061    #[inline]
8062    pub fn vbroadcasti64x2<A, B>(&mut self, op0: A, op1: B)
8063    where Assembler<'a>: Vbroadcasti64x2Emitter<A, B> {
8064        <Self as Vbroadcasti64x2Emitter<A, B>>::vbroadcasti64x2(self, op0, op1);
8065    }
8066    /// `VBROADCASTI64X2_MASK` (VBROADCASTI64X2). 
8067    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
8068    ///
8069    ///
8070    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
8071    ///
8072    /// Supported operand variants:
8073    ///
8074    /// ```text
8075    /// +---+----------+
8076    /// | # | Operands |
8077    /// +---+----------+
8078    /// | 1 | Ymm, Mem |
8079    /// | 2 | Zmm, Mem |
8080    /// +---+----------+
8081    /// ```
8082    #[inline]
8083    pub fn vbroadcasti64x2_mask<A, B>(&mut self, op0: A, op1: B)
8084    where Assembler<'a>: Vbroadcasti64x2MaskEmitter<A, B> {
8085        <Self as Vbroadcasti64x2MaskEmitter<A, B>>::vbroadcasti64x2_mask(self, op0, op1);
8086    }
8087    /// `VBROADCASTI64X2_MASKZ` (VBROADCASTI64X2). 
8088    /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
8089    ///
8090    ///
8091    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
8092    ///
8093    /// Supported operand variants:
8094    ///
8095    /// ```text
8096    /// +---+----------+
8097    /// | # | Operands |
8098    /// +---+----------+
8099    /// | 1 | Ymm, Mem |
8100    /// | 2 | Zmm, Mem |
8101    /// +---+----------+
8102    /// ```
8103    #[inline]
8104    pub fn vbroadcasti64x2_maskz<A, B>(&mut self, op0: A, op1: B)
8105    where Assembler<'a>: Vbroadcasti64x2MaskzEmitter<A, B> {
8106        <Self as Vbroadcasti64x2MaskzEmitter<A, B>>::vbroadcasti64x2_maskz(self, op0, op1);
8107    }
8108    /// `VCVTPD2QQ` (VCVTPD2QQ). 
8109    /// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8110    ///
8111    ///
8112    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
8113    ///
8114    /// Supported operand variants:
8115    ///
8116    /// ```text
8117    /// +---+----------+
8118    /// | # | Operands |
8119    /// +---+----------+
8120    /// | 1 | Xmm, Mem |
8121    /// | 2 | Xmm, Xmm |
8122    /// | 3 | Ymm, Mem |
8123    /// | 4 | Ymm, Ymm |
8124    /// | 5 | Zmm, Mem |
8125    /// | 6 | Zmm, Zmm |
8126    /// +---+----------+
8127    /// ```
8128    #[inline]
8129    pub fn vcvtpd2qq<A, B>(&mut self, op0: A, op1: B)
8130    where Assembler<'a>: Vcvtpd2qqEmitter<A, B> {
8131        <Self as Vcvtpd2qqEmitter<A, B>>::vcvtpd2qq(self, op0, op1);
8132    }
8133    /// `VCVTPD2QQ_ER` (VCVTPD2QQ). 
8134    /// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8135    ///
8136    ///
8137    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
8138    ///
8139    /// Supported operand variants:
8140    ///
8141    /// ```text
8142    /// +---+----------+
8143    /// | # | Operands |
8144    /// +---+----------+
8145    /// | 1 | Zmm, Zmm |
8146    /// +---+----------+
8147    /// ```
8148    #[inline]
8149    pub fn vcvtpd2qq_er<A, B>(&mut self, op0: A, op1: B)
8150    where Assembler<'a>: Vcvtpd2qqErEmitter<A, B> {
8151        <Self as Vcvtpd2qqErEmitter<A, B>>::vcvtpd2qq_er(self, op0, op1);
8152    }
8153    /// `VCVTPD2QQ_MASK` (VCVTPD2QQ). 
8154    /// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8155    ///
8156    ///
8157    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
8158    ///
8159    /// Supported operand variants:
8160    ///
8161    /// ```text
8162    /// +---+----------+
8163    /// | # | Operands |
8164    /// +---+----------+
8165    /// | 1 | Xmm, Mem |
8166    /// | 2 | Xmm, Xmm |
8167    /// | 3 | Ymm, Mem |
8168    /// | 4 | Ymm, Ymm |
8169    /// | 5 | Zmm, Mem |
8170    /// | 6 | Zmm, Zmm |
8171    /// +---+----------+
8172    /// ```
8173    #[inline]
8174    pub fn vcvtpd2qq_mask<A, B>(&mut self, op0: A, op1: B)
8175    where Assembler<'a>: Vcvtpd2qqMaskEmitter<A, B> {
8176        <Self as Vcvtpd2qqMaskEmitter<A, B>>::vcvtpd2qq_mask(self, op0, op1);
8177    }
8178    /// `VCVTPD2QQ_MASK_ER` (VCVTPD2QQ). 
8179    /// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8180    ///
8181    ///
8182    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
8183    ///
8184    /// Supported operand variants:
8185    ///
8186    /// ```text
8187    /// +---+----------+
8188    /// | # | Operands |
8189    /// +---+----------+
8190    /// | 1 | Zmm, Zmm |
8191    /// +---+----------+
8192    /// ```
8193    #[inline]
8194    pub fn vcvtpd2qq_mask_er<A, B>(&mut self, op0: A, op1: B)
8195    where Assembler<'a>: Vcvtpd2qqMaskErEmitter<A, B> {
8196        <Self as Vcvtpd2qqMaskErEmitter<A, B>>::vcvtpd2qq_mask_er(self, op0, op1);
8197    }
8198    /// `VCVTPD2QQ_MASKZ` (VCVTPD2QQ). 
8199    /// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8200    ///
8201    ///
8202    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
8203    ///
8204    /// Supported operand variants:
8205    ///
8206    /// ```text
8207    /// +---+----------+
8208    /// | # | Operands |
8209    /// +---+----------+
8210    /// | 1 | Xmm, Mem |
8211    /// | 2 | Xmm, Xmm |
8212    /// | 3 | Ymm, Mem |
8213    /// | 4 | Ymm, Ymm |
8214    /// | 5 | Zmm, Mem |
8215    /// | 6 | Zmm, Zmm |
8216    /// +---+----------+
8217    /// ```
8218    #[inline]
8219    pub fn vcvtpd2qq_maskz<A, B>(&mut self, op0: A, op1: B)
8220    where Assembler<'a>: Vcvtpd2qqMaskzEmitter<A, B> {
8221        <Self as Vcvtpd2qqMaskzEmitter<A, B>>::vcvtpd2qq_maskz(self, op0, op1);
8222    }
8223    /// `VCVTPD2QQ_MASKZ_ER` (VCVTPD2QQ). 
8224    /// Converts packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8225    ///
8226    ///
8227    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPD2QQ.html).
8228    ///
8229    /// Supported operand variants:
8230    ///
8231    /// ```text
8232    /// +---+----------+
8233    /// | # | Operands |
8234    /// +---+----------+
8235    /// | 1 | Zmm, Zmm |
8236    /// +---+----------+
8237    /// ```
8238    #[inline]
8239    pub fn vcvtpd2qq_maskz_er<A, B>(&mut self, op0: A, op1: B)
8240    where Assembler<'a>: Vcvtpd2qqMaskzErEmitter<A, B> {
8241        <Self as Vcvtpd2qqMaskzErEmitter<A, B>>::vcvtpd2qq_maskz_er(self, op0, op1);
8242    }
8243    /// `VCVTPS2QQ` (VCVTPS2QQ). 
8244    /// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8245    ///
8246    ///
8247    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
8248    ///
8249    /// Supported operand variants:
8250    ///
8251    /// ```text
8252    /// +---+----------+
8253    /// | # | Operands |
8254    /// +---+----------+
8255    /// | 1 | Xmm, Mem |
8256    /// | 2 | Xmm, Xmm |
8257    /// | 3 | Ymm, Mem |
8258    /// | 4 | Ymm, Xmm |
8259    /// | 5 | Zmm, Mem |
8260    /// | 6 | Zmm, Ymm |
8261    /// +---+----------+
8262    /// ```
8263    #[inline]
8264    pub fn vcvtps2qq<A, B>(&mut self, op0: A, op1: B)
8265    where Assembler<'a>: Vcvtps2qqEmitter<A, B> {
8266        <Self as Vcvtps2qqEmitter<A, B>>::vcvtps2qq(self, op0, op1);
8267    }
8268    /// `VCVTPS2QQ_ER` (VCVTPS2QQ). 
8269    /// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8270    ///
8271    ///
8272    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
8273    ///
8274    /// Supported operand variants:
8275    ///
8276    /// ```text
8277    /// +---+----------+
8278    /// | # | Operands |
8279    /// +---+----------+
8280    /// | 1 | Zmm, Ymm |
8281    /// +---+----------+
8282    /// ```
8283    #[inline]
8284    pub fn vcvtps2qq_er<A, B>(&mut self, op0: A, op1: B)
8285    where Assembler<'a>: Vcvtps2qqErEmitter<A, B> {
8286        <Self as Vcvtps2qqErEmitter<A, B>>::vcvtps2qq_er(self, op0, op1);
8287    }
8288    /// `VCVTPS2QQ_MASK` (VCVTPS2QQ). 
8289    /// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8290    ///
8291    ///
8292    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
8293    ///
8294    /// Supported operand variants:
8295    ///
8296    /// ```text
8297    /// +---+----------+
8298    /// | # | Operands |
8299    /// +---+----------+
8300    /// | 1 | Xmm, Mem |
8301    /// | 2 | Xmm, Xmm |
8302    /// | 3 | Ymm, Mem |
8303    /// | 4 | Ymm, Xmm |
8304    /// | 5 | Zmm, Mem |
8305    /// | 6 | Zmm, Ymm |
8306    /// +---+----------+
8307    /// ```
8308    #[inline]
8309    pub fn vcvtps2qq_mask<A, B>(&mut self, op0: A, op1: B)
8310    where Assembler<'a>: Vcvtps2qqMaskEmitter<A, B> {
8311        <Self as Vcvtps2qqMaskEmitter<A, B>>::vcvtps2qq_mask(self, op0, op1);
8312    }
8313    /// `VCVTPS2QQ_MASK_ER` (VCVTPS2QQ). 
8314    /// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8315    ///
8316    ///
8317    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
8318    ///
8319    /// Supported operand variants:
8320    ///
8321    /// ```text
8322    /// +---+----------+
8323    /// | # | Operands |
8324    /// +---+----------+
8325    /// | 1 | Zmm, Ymm |
8326    /// +---+----------+
8327    /// ```
8328    #[inline]
8329    pub fn vcvtps2qq_mask_er<A, B>(&mut self, op0: A, op1: B)
8330    where Assembler<'a>: Vcvtps2qqMaskErEmitter<A, B> {
8331        <Self as Vcvtps2qqMaskErEmitter<A, B>>::vcvtps2qq_mask_er(self, op0, op1);
8332    }
8333    /// `VCVTPS2QQ_MASKZ` (VCVTPS2QQ). 
8334    /// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8335    ///
8336    ///
8337    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
8338    ///
8339    /// Supported operand variants:
8340    ///
8341    /// ```text
8342    /// +---+----------+
8343    /// | # | Operands |
8344    /// +---+----------+
8345    /// | 1 | Xmm, Mem |
8346    /// | 2 | Xmm, Xmm |
8347    /// | 3 | Ymm, Mem |
8348    /// | 4 | Ymm, Xmm |
8349    /// | 5 | Zmm, Mem |
8350    /// | 6 | Zmm, Ymm |
8351    /// +---+----------+
8352    /// ```
8353    #[inline]
8354    pub fn vcvtps2qq_maskz<A, B>(&mut self, op0: A, op1: B)
8355    where Assembler<'a>: Vcvtps2qqMaskzEmitter<A, B> {
8356        <Self as Vcvtps2qqMaskzEmitter<A, B>>::vcvtps2qq_maskz(self, op0, op1);
8357    }
8358    /// `VCVTPS2QQ_MASKZ_ER` (VCVTPS2QQ). 
8359    /// Converts eight packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8360    ///
8361    ///
8362    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2QQ.html).
8363    ///
8364    /// Supported operand variants:
8365    ///
8366    /// ```text
8367    /// +---+----------+
8368    /// | # | Operands |
8369    /// +---+----------+
8370    /// | 1 | Zmm, Ymm |
8371    /// +---+----------+
8372    /// ```
8373    #[inline]
8374    pub fn vcvtps2qq_maskz_er<A, B>(&mut self, op0: A, op1: B)
8375    where Assembler<'a>: Vcvtps2qqMaskzErEmitter<A, B> {
8376        <Self as Vcvtps2qqMaskzErEmitter<A, B>>::vcvtps2qq_maskz_er(self, op0, op1);
8377    }
8378    /// `VCVTQQ2PD` (VCVTQQ2PD). 
8379    /// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
8380    ///
8381    ///
8382    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
8383    ///
8384    /// Supported operand variants:
8385    ///
8386    /// ```text
8387    /// +---+----------+
8388    /// | # | Operands |
8389    /// +---+----------+
8390    /// | 1 | Xmm, Mem |
8391    /// | 2 | Xmm, Xmm |
8392    /// | 3 | Ymm, Mem |
8393    /// | 4 | Ymm, Ymm |
8394    /// | 5 | Zmm, Mem |
8395    /// | 6 | Zmm, Zmm |
8396    /// +---+----------+
8397    /// ```
8398    #[inline]
8399    pub fn vcvtqq2pd<A, B>(&mut self, op0: A, op1: B)
8400    where Assembler<'a>: Vcvtqq2pdEmitter<A, B> {
8401        <Self as Vcvtqq2pdEmitter<A, B>>::vcvtqq2pd(self, op0, op1);
8402    }
8403    /// `VCVTQQ2PD_ER` (VCVTQQ2PD). 
8404    /// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
8405    ///
8406    ///
8407    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
8408    ///
8409    /// Supported operand variants:
8410    ///
8411    /// ```text
8412    /// +---+----------+
8413    /// | # | Operands |
8414    /// +---+----------+
8415    /// | 1 | Zmm, Zmm |
8416    /// +---+----------+
8417    /// ```
8418    #[inline]
8419    pub fn vcvtqq2pd_er<A, B>(&mut self, op0: A, op1: B)
8420    where Assembler<'a>: Vcvtqq2pdErEmitter<A, B> {
8421        <Self as Vcvtqq2pdErEmitter<A, B>>::vcvtqq2pd_er(self, op0, op1);
8422    }
8423    /// `VCVTQQ2PD_MASK` (VCVTQQ2PD). 
8424    /// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
8425    ///
8426    ///
8427    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
8428    ///
8429    /// Supported operand variants:
8430    ///
8431    /// ```text
8432    /// +---+----------+
8433    /// | # | Operands |
8434    /// +---+----------+
8435    /// | 1 | Xmm, Mem |
8436    /// | 2 | Xmm, Xmm |
8437    /// | 3 | Ymm, Mem |
8438    /// | 4 | Ymm, Ymm |
8439    /// | 5 | Zmm, Mem |
8440    /// | 6 | Zmm, Zmm |
8441    /// +---+----------+
8442    /// ```
8443    #[inline]
8444    pub fn vcvtqq2pd_mask<A, B>(&mut self, op0: A, op1: B)
8445    where Assembler<'a>: Vcvtqq2pdMaskEmitter<A, B> {
8446        <Self as Vcvtqq2pdMaskEmitter<A, B>>::vcvtqq2pd_mask(self, op0, op1);
8447    }
8448    /// `VCVTQQ2PD_MASK_ER` (VCVTQQ2PD). 
8449    /// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
8450    ///
8451    ///
8452    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
8453    ///
8454    /// Supported operand variants:
8455    ///
8456    /// ```text
8457    /// +---+----------+
8458    /// | # | Operands |
8459    /// +---+----------+
8460    /// | 1 | Zmm, Zmm |
8461    /// +---+----------+
8462    /// ```
8463    #[inline]
8464    pub fn vcvtqq2pd_mask_er<A, B>(&mut self, op0: A, op1: B)
8465    where Assembler<'a>: Vcvtqq2pdMaskErEmitter<A, B> {
8466        <Self as Vcvtqq2pdMaskErEmitter<A, B>>::vcvtqq2pd_mask_er(self, op0, op1);
8467    }
8468    /// `VCVTQQ2PD_MASKZ` (VCVTQQ2PD). 
8469    /// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
8470    ///
8471    ///
8472    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
8473    ///
8474    /// Supported operand variants:
8475    ///
8476    /// ```text
8477    /// +---+----------+
8478    /// | # | Operands |
8479    /// +---+----------+
8480    /// | 1 | Xmm, Mem |
8481    /// | 2 | Xmm, Xmm |
8482    /// | 3 | Ymm, Mem |
8483    /// | 4 | Ymm, Ymm |
8484    /// | 5 | Zmm, Mem |
8485    /// | 6 | Zmm, Zmm |
8486    /// +---+----------+
8487    /// ```
8488    #[inline]
8489    pub fn vcvtqq2pd_maskz<A, B>(&mut self, op0: A, op1: B)
8490    where Assembler<'a>: Vcvtqq2pdMaskzEmitter<A, B> {
8491        <Self as Vcvtqq2pdMaskzEmitter<A, B>>::vcvtqq2pd_maskz(self, op0, op1);
8492    }
8493    /// `VCVTQQ2PD_MASKZ_ER` (VCVTQQ2PD). 
8494    /// Converts packed quadword integers in the source operand (second operand) to packed double precision floating-point values in the destination operand (first operand).
8495    ///
8496    ///
8497    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PD.html).
8498    ///
8499    /// Supported operand variants:
8500    ///
8501    /// ```text
8502    /// +---+----------+
8503    /// | # | Operands |
8504    /// +---+----------+
8505    /// | 1 | Zmm, Zmm |
8506    /// +---+----------+
8507    /// ```
8508    #[inline]
8509    pub fn vcvtqq2pd_maskz_er<A, B>(&mut self, op0: A, op1: B)
8510    where Assembler<'a>: Vcvtqq2pdMaskzErEmitter<A, B> {
8511        <Self as Vcvtqq2pdMaskzErEmitter<A, B>>::vcvtqq2pd_maskz_er(self, op0, op1);
8512    }
8513    /// `VCVTQQ2PS` (VCVTQQ2PS). 
8514    /// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
8515    ///
8516    ///
8517    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
8518    ///
8519    /// Supported operand variants:
8520    ///
8521    /// ```text
8522    /// +---+----------+
8523    /// | # | Operands |
8524    /// +---+----------+
8525    /// | 1 | Xmm, Mem |
8526    /// | 2 | Xmm, Xmm |
8527    /// | 3 | Xmm, Ymm |
8528    /// | 4 | Ymm, Mem |
8529    /// | 5 | Ymm, Zmm |
8530    /// +---+----------+
8531    /// ```
8532    #[inline]
8533    pub fn vcvtqq2ps<A, B>(&mut self, op0: A, op1: B)
8534    where Assembler<'a>: Vcvtqq2psEmitter<A, B> {
8535        <Self as Vcvtqq2psEmitter<A, B>>::vcvtqq2ps(self, op0, op1);
8536    }
8537    /// `VCVTQQ2PS_ER` (VCVTQQ2PS). 
8538    /// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
8539    ///
8540    ///
8541    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
8542    ///
8543    /// Supported operand variants:
8544    ///
8545    /// ```text
8546    /// +---+----------+
8547    /// | # | Operands |
8548    /// +---+----------+
8549    /// | 1 | Ymm, Zmm |
8550    /// +---+----------+
8551    /// ```
8552    #[inline]
8553    pub fn vcvtqq2ps_er<A, B>(&mut self, op0: A, op1: B)
8554    where Assembler<'a>: Vcvtqq2psErEmitter<A, B> {
8555        <Self as Vcvtqq2psErEmitter<A, B>>::vcvtqq2ps_er(self, op0, op1);
8556    }
8557    /// `VCVTQQ2PS_MASK` (VCVTQQ2PS). 
8558    /// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
8559    ///
8560    ///
8561    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
8562    ///
8563    /// Supported operand variants:
8564    ///
8565    /// ```text
8566    /// +---+----------+
8567    /// | # | Operands |
8568    /// +---+----------+
8569    /// | 1 | Xmm, Mem |
8570    /// | 2 | Xmm, Xmm |
8571    /// | 3 | Xmm, Ymm |
8572    /// | 4 | Ymm, Mem |
8573    /// | 5 | Ymm, Zmm |
8574    /// +---+----------+
8575    /// ```
8576    #[inline]
8577    pub fn vcvtqq2ps_mask<A, B>(&mut self, op0: A, op1: B)
8578    where Assembler<'a>: Vcvtqq2psMaskEmitter<A, B> {
8579        <Self as Vcvtqq2psMaskEmitter<A, B>>::vcvtqq2ps_mask(self, op0, op1);
8580    }
8581    /// `VCVTQQ2PS_MASK_ER` (VCVTQQ2PS). 
8582    /// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
8583    ///
8584    ///
8585    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
8586    ///
8587    /// Supported operand variants:
8588    ///
8589    /// ```text
8590    /// +---+----------+
8591    /// | # | Operands |
8592    /// +---+----------+
8593    /// | 1 | Ymm, Zmm |
8594    /// +---+----------+
8595    /// ```
8596    #[inline]
8597    pub fn vcvtqq2ps_mask_er<A, B>(&mut self, op0: A, op1: B)
8598    where Assembler<'a>: Vcvtqq2psMaskErEmitter<A, B> {
8599        <Self as Vcvtqq2psMaskErEmitter<A, B>>::vcvtqq2ps_mask_er(self, op0, op1);
8600    }
8601    /// `VCVTQQ2PS_MASKZ` (VCVTQQ2PS). 
8602    /// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
8603    ///
8604    ///
8605    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
8606    ///
8607    /// Supported operand variants:
8608    ///
8609    /// ```text
8610    /// +---+----------+
8611    /// | # | Operands |
8612    /// +---+----------+
8613    /// | 1 | Xmm, Mem |
8614    /// | 2 | Xmm, Xmm |
8615    /// | 3 | Xmm, Ymm |
8616    /// | 4 | Ymm, Mem |
8617    /// | 5 | Ymm, Zmm |
8618    /// +---+----------+
8619    /// ```
8620    #[inline]
8621    pub fn vcvtqq2ps_maskz<A, B>(&mut self, op0: A, op1: B)
8622    where Assembler<'a>: Vcvtqq2psMaskzEmitter<A, B> {
8623        <Self as Vcvtqq2psMaskzEmitter<A, B>>::vcvtqq2ps_maskz(self, op0, op1);
8624    }
8625    /// `VCVTQQ2PS_MASKZ_ER` (VCVTQQ2PS). 
8626    /// Converts packed quadword integers in the source operand (second operand) to packed single precision floating-point values in the destination operand (first operand).
8627    ///
8628    ///
8629    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTQQ2PS.html).
8630    ///
8631    /// Supported operand variants:
8632    ///
8633    /// ```text
8634    /// +---+----------+
8635    /// | # | Operands |
8636    /// +---+----------+
8637    /// | 1 | Ymm, Zmm |
8638    /// +---+----------+
8639    /// ```
8640    #[inline]
8641    pub fn vcvtqq2ps_maskz_er<A, B>(&mut self, op0: A, op1: B)
8642    where Assembler<'a>: Vcvtqq2psMaskzErEmitter<A, B> {
8643        <Self as Vcvtqq2psMaskzErEmitter<A, B>>::vcvtqq2ps_maskz_er(self, op0, op1);
8644    }
8645    /// `VCVTTPD2QQ` (VCVTTPD2QQ). 
8646    /// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8647    ///
8648    ///
8649    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
8650    ///
8651    /// Supported operand variants:
8652    ///
8653    /// ```text
8654    /// +---+----------+
8655    /// | # | Operands |
8656    /// +---+----------+
8657    /// | 1 | Xmm, Mem |
8658    /// | 2 | Xmm, Xmm |
8659    /// | 3 | Ymm, Mem |
8660    /// | 4 | Ymm, Ymm |
8661    /// | 5 | Zmm, Mem |
8662    /// | 6 | Zmm, Zmm |
8663    /// +---+----------+
8664    /// ```
8665    #[inline]
8666    pub fn vcvttpd2qq<A, B>(&mut self, op0: A, op1: B)
8667    where Assembler<'a>: Vcvttpd2qqEmitter<A, B> {
8668        <Self as Vcvttpd2qqEmitter<A, B>>::vcvttpd2qq(self, op0, op1);
8669    }
8670    /// `VCVTTPD2QQ_MASK` (VCVTTPD2QQ). 
8671    /// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8672    ///
8673    ///
8674    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
8675    ///
8676    /// Supported operand variants:
8677    ///
8678    /// ```text
8679    /// +---+----------+
8680    /// | # | Operands |
8681    /// +---+----------+
8682    /// | 1 | Xmm, Mem |
8683    /// | 2 | Xmm, Xmm |
8684    /// | 3 | Ymm, Mem |
8685    /// | 4 | Ymm, Ymm |
8686    /// | 5 | Zmm, Mem |
8687    /// | 6 | Zmm, Zmm |
8688    /// +---+----------+
8689    /// ```
8690    #[inline]
8691    pub fn vcvttpd2qq_mask<A, B>(&mut self, op0: A, op1: B)
8692    where Assembler<'a>: Vcvttpd2qqMaskEmitter<A, B> {
8693        <Self as Vcvttpd2qqMaskEmitter<A, B>>::vcvttpd2qq_mask(self, op0, op1);
8694    }
8695    /// `VCVTTPD2QQ_MASK_SAE` (VCVTTPD2QQ). 
8696    /// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8697    ///
8698    ///
8699    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
8700    ///
8701    /// Supported operand variants:
8702    ///
8703    /// ```text
8704    /// +---+----------+
8705    /// | # | Operands |
8706    /// +---+----------+
8707    /// | 1 | Zmm, Zmm |
8708    /// +---+----------+
8709    /// ```
8710    #[inline]
8711    pub fn vcvttpd2qq_mask_sae<A, B>(&mut self, op0: A, op1: B)
8712    where Assembler<'a>: Vcvttpd2qqMaskSaeEmitter<A, B> {
8713        <Self as Vcvttpd2qqMaskSaeEmitter<A, B>>::vcvttpd2qq_mask_sae(self, op0, op1);
8714    }
8715    /// `VCVTTPD2QQ_MASKZ` (VCVTTPD2QQ). 
8716    /// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8717    ///
8718    ///
8719    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
8720    ///
8721    /// Supported operand variants:
8722    ///
8723    /// ```text
8724    /// +---+----------+
8725    /// | # | Operands |
8726    /// +---+----------+
8727    /// | 1 | Xmm, Mem |
8728    /// | 2 | Xmm, Xmm |
8729    /// | 3 | Ymm, Mem |
8730    /// | 4 | Ymm, Ymm |
8731    /// | 5 | Zmm, Mem |
8732    /// | 6 | Zmm, Zmm |
8733    /// +---+----------+
8734    /// ```
8735    #[inline]
8736    pub fn vcvttpd2qq_maskz<A, B>(&mut self, op0: A, op1: B)
8737    where Assembler<'a>: Vcvttpd2qqMaskzEmitter<A, B> {
8738        <Self as Vcvttpd2qqMaskzEmitter<A, B>>::vcvttpd2qq_maskz(self, op0, op1);
8739    }
8740    /// `VCVTTPD2QQ_MASKZ_SAE` (VCVTTPD2QQ). 
8741    /// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8742    ///
8743    ///
8744    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
8745    ///
8746    /// Supported operand variants:
8747    ///
8748    /// ```text
8749    /// +---+----------+
8750    /// | # | Operands |
8751    /// +---+----------+
8752    /// | 1 | Zmm, Zmm |
8753    /// +---+----------+
8754    /// ```
8755    #[inline]
8756    pub fn vcvttpd2qq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
8757    where Assembler<'a>: Vcvttpd2qqMaskzSaeEmitter<A, B> {
8758        <Self as Vcvttpd2qqMaskzSaeEmitter<A, B>>::vcvttpd2qq_maskz_sae(self, op0, op1);
8759    }
8760    /// `VCVTTPD2QQ_SAE` (VCVTTPD2QQ). 
8761    /// Converts with truncation packed double precision floating-point values in the source operand (second operand) to packed quadword integers in the destination operand (first operand).
8762    ///
8763    ///
8764    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPD2QQ.html).
8765    ///
8766    /// Supported operand variants:
8767    ///
8768    /// ```text
8769    /// +---+----------+
8770    /// | # | Operands |
8771    /// +---+----------+
8772    /// | 1 | Zmm, Zmm |
8773    /// +---+----------+
8774    /// ```
8775    #[inline]
8776    pub fn vcvttpd2qq_sae<A, B>(&mut self, op0: A, op1: B)
8777    where Assembler<'a>: Vcvttpd2qqSaeEmitter<A, B> {
8778        <Self as Vcvttpd2qqSaeEmitter<A, B>>::vcvttpd2qq_sae(self, op0, op1);
8779    }
8780    /// `VCVTTPS2QQ` (VCVTTPS2QQ). 
8781    /// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8782    ///
8783    ///
8784    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
8785    ///
8786    /// Supported operand variants:
8787    ///
8788    /// ```text
8789    /// +---+----------+
8790    /// | # | Operands |
8791    /// +---+----------+
8792    /// | 1 | Xmm, Mem |
8793    /// | 2 | Xmm, Xmm |
8794    /// | 3 | Ymm, Mem |
8795    /// | 4 | Ymm, Xmm |
8796    /// | 5 | Zmm, Mem |
8797    /// | 6 | Zmm, Ymm |
8798    /// +---+----------+
8799    /// ```
8800    #[inline]
8801    pub fn vcvttps2qq<A, B>(&mut self, op0: A, op1: B)
8802    where Assembler<'a>: Vcvttps2qqEmitter<A, B> {
8803        <Self as Vcvttps2qqEmitter<A, B>>::vcvttps2qq(self, op0, op1);
8804    }
8805    /// `VCVTTPS2QQ_MASK` (VCVTTPS2QQ). 
8806    /// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8807    ///
8808    ///
8809    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
8810    ///
8811    /// Supported operand variants:
8812    ///
8813    /// ```text
8814    /// +---+----------+
8815    /// | # | Operands |
8816    /// +---+----------+
8817    /// | 1 | Xmm, Mem |
8818    /// | 2 | Xmm, Xmm |
8819    /// | 3 | Ymm, Mem |
8820    /// | 4 | Ymm, Xmm |
8821    /// | 5 | Zmm, Mem |
8822    /// | 6 | Zmm, Ymm |
8823    /// +---+----------+
8824    /// ```
8825    #[inline]
8826    pub fn vcvttps2qq_mask<A, B>(&mut self, op0: A, op1: B)
8827    where Assembler<'a>: Vcvttps2qqMaskEmitter<A, B> {
8828        <Self as Vcvttps2qqMaskEmitter<A, B>>::vcvttps2qq_mask(self, op0, op1);
8829    }
8830    /// `VCVTTPS2QQ_MASK_SAE` (VCVTTPS2QQ). 
8831    /// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8832    ///
8833    ///
8834    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
8835    ///
8836    /// Supported operand variants:
8837    ///
8838    /// ```text
8839    /// +---+----------+
8840    /// | # | Operands |
8841    /// +---+----------+
8842    /// | 1 | Zmm, Ymm |
8843    /// +---+----------+
8844    /// ```
8845    #[inline]
8846    pub fn vcvttps2qq_mask_sae<A, B>(&mut self, op0: A, op1: B)
8847    where Assembler<'a>: Vcvttps2qqMaskSaeEmitter<A, B> {
8848        <Self as Vcvttps2qqMaskSaeEmitter<A, B>>::vcvttps2qq_mask_sae(self, op0, op1);
8849    }
8850    /// `VCVTTPS2QQ_MASKZ` (VCVTTPS2QQ). 
8851    /// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8852    ///
8853    ///
8854    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
8855    ///
8856    /// Supported operand variants:
8857    ///
8858    /// ```text
8859    /// +---+----------+
8860    /// | # | Operands |
8861    /// +---+----------+
8862    /// | 1 | Xmm, Mem |
8863    /// | 2 | Xmm, Xmm |
8864    /// | 3 | Ymm, Mem |
8865    /// | 4 | Ymm, Xmm |
8866    /// | 5 | Zmm, Mem |
8867    /// | 6 | Zmm, Ymm |
8868    /// +---+----------+
8869    /// ```
8870    #[inline]
8871    pub fn vcvttps2qq_maskz<A, B>(&mut self, op0: A, op1: B)
8872    where Assembler<'a>: Vcvttps2qqMaskzEmitter<A, B> {
8873        <Self as Vcvttps2qqMaskzEmitter<A, B>>::vcvttps2qq_maskz(self, op0, op1);
8874    }
8875    /// `VCVTTPS2QQ_MASKZ_SAE` (VCVTTPS2QQ). 
8876    /// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8877    ///
8878    ///
8879    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
8880    ///
8881    /// Supported operand variants:
8882    ///
8883    /// ```text
8884    /// +---+----------+
8885    /// | # | Operands |
8886    /// +---+----------+
8887    /// | 1 | Zmm, Ymm |
8888    /// +---+----------+
8889    /// ```
8890    #[inline]
8891    pub fn vcvttps2qq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
8892    where Assembler<'a>: Vcvttps2qqMaskzSaeEmitter<A, B> {
8893        <Self as Vcvttps2qqMaskzSaeEmitter<A, B>>::vcvttps2qq_maskz_sae(self, op0, op1);
8894    }
8895    /// `VCVTTPS2QQ_SAE` (VCVTTPS2QQ). 
8896    /// Converts with truncation packed single precision floating-point values in the source operand to eight signed quadword integers in the destination operand.
8897    ///
8898    ///
8899    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTTPS2QQ.html).
8900    ///
8901    /// Supported operand variants:
8902    ///
8903    /// ```text
8904    /// +---+----------+
8905    /// | # | Operands |
8906    /// +---+----------+
8907    /// | 1 | Zmm, Ymm |
8908    /// +---+----------+
8909    /// ```
8910    #[inline]
8911    pub fn vcvttps2qq_sae<A, B>(&mut self, op0: A, op1: B)
8912    where Assembler<'a>: Vcvttps2qqSaeEmitter<A, B> {
8913        <Self as Vcvttps2qqSaeEmitter<A, B>>::vcvttps2qq_sae(self, op0, op1);
8914    }
8915    /// `VFPCLASSPD` (VFPCLASSPD). 
8916    /// The FPCLASSPD instruction checks the packed double precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:8/4/2] of the destination are cleared.
8917    ///
8918    ///
8919    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPD.html).
8920    ///
8921    /// Supported operand variants:
8922    ///
8923    /// ```text
8924    /// +---+----------------+
8925    /// | # | Operands       |
8926    /// +---+----------------+
8927    /// | 1 | KReg, Mem, Imm |
8928    /// | 2 | KReg, Xmm, Imm |
8929    /// | 3 | KReg, Ymm, Imm |
8930    /// | 4 | KReg, Zmm, Imm |
8931    /// +---+----------------+
8932    /// ```
8933    #[inline]
8934    pub fn vfpclasspd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
8935    where Assembler<'a>: VfpclasspdEmitter<A, B, C> {
8936        <Self as VfpclasspdEmitter<A, B, C>>::vfpclasspd(self, op0, op1, op2);
8937    }
8938    /// `VFPCLASSPD_MASK` (VFPCLASSPD). 
8939    /// The FPCLASSPD instruction checks the packed double precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:8/4/2] of the destination are cleared.
8940    ///
8941    ///
8942    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPD.html).
8943    ///
8944    /// Supported operand variants:
8945    ///
8946    /// ```text
8947    /// +---+----------------+
8948    /// | # | Operands       |
8949    /// +---+----------------+
8950    /// | 1 | KReg, Mem, Imm |
8951    /// | 2 | KReg, Xmm, Imm |
8952    /// | 3 | KReg, Ymm, Imm |
8953    /// | 4 | KReg, Zmm, Imm |
8954    /// +---+----------------+
8955    /// ```
8956    #[inline]
8957    pub fn vfpclasspd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
8958    where Assembler<'a>: VfpclasspdMaskEmitter<A, B, C> {
8959        <Self as VfpclasspdMaskEmitter<A, B, C>>::vfpclasspd_mask(self, op0, op1, op2);
8960    }
8961    /// `VFPCLASSPS` (VFPCLASSPS). 
8962    /// The FPCLASSPS instruction checks the packed single-precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:16/8/4] of the destination are cleared.
8963    ///
8964    ///
8965    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPS.html).
8966    ///
8967    /// Supported operand variants:
8968    ///
8969    /// ```text
8970    /// +---+----------------+
8971    /// | # | Operands       |
8972    /// +---+----------------+
8973    /// | 1 | KReg, Mem, Imm |
8974    /// | 2 | KReg, Xmm, Imm |
8975    /// | 3 | KReg, Ymm, Imm |
8976    /// | 4 | KReg, Zmm, Imm |
8977    /// +---+----------------+
8978    /// ```
8979    #[inline]
8980    pub fn vfpclassps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
8981    where Assembler<'a>: VfpclasspsEmitter<A, B, C> {
8982        <Self as VfpclasspsEmitter<A, B, C>>::vfpclassps(self, op0, op1, op2);
8983    }
8984    /// `VFPCLASSPS_MASK` (VFPCLASSPS). 
8985    /// The FPCLASSPS instruction checks the packed single-precision floating-point values for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result of each element is written to the corresponding bit in a mask register k2 according to the writemask k1. Bits [MAX_KL-1:16/8/4] of the destination are cleared.
8986    ///
8987    ///
8988    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSPS.html).
8989    ///
8990    /// Supported operand variants:
8991    ///
8992    /// ```text
8993    /// +---+----------------+
8994    /// | # | Operands       |
8995    /// +---+----------------+
8996    /// | 1 | KReg, Mem, Imm |
8997    /// | 2 | KReg, Xmm, Imm |
8998    /// | 3 | KReg, Ymm, Imm |
8999    /// | 4 | KReg, Zmm, Imm |
9000    /// +---+----------------+
9001    /// ```
9002    #[inline]
9003    pub fn vfpclassps_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9004    where Assembler<'a>: VfpclasspsMaskEmitter<A, B, C> {
9005        <Self as VfpclasspsMaskEmitter<A, B, C>>::vfpclassps_mask(self, op0, op1, op2);
9006    }
9007    /// `VFPCLASSSD` (VFPCLASSSD). 
9008    /// The FPCLASSSD instruction checks the low double precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
9009    ///
9010    ///
9011    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSD.html).
9012    ///
9013    /// Supported operand variants:
9014    ///
9015    /// ```text
9016    /// +---+----------------+
9017    /// | # | Operands       |
9018    /// +---+----------------+
9019    /// | 1 | KReg, Mem, Imm |
9020    /// | 2 | KReg, Xmm, Imm |
9021    /// +---+----------------+
9022    /// ```
9023    #[inline]
9024    pub fn vfpclasssd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9025    where Assembler<'a>: VfpclasssdEmitter<A, B, C> {
9026        <Self as VfpclasssdEmitter<A, B, C>>::vfpclasssd(self, op0, op1, op2);
9027    }
9028    /// `VFPCLASSSD_MASK` (VFPCLASSSD). 
9029    /// The FPCLASSSD instruction checks the low double precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
9030    ///
9031    ///
9032    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSD.html).
9033    ///
9034    /// Supported operand variants:
9035    ///
9036    /// ```text
9037    /// +---+----------------+
9038    /// | # | Operands       |
9039    /// +---+----------------+
9040    /// | 1 | KReg, Mem, Imm |
9041    /// | 2 | KReg, Xmm, Imm |
9042    /// +---+----------------+
9043    /// ```
9044    #[inline]
9045    pub fn vfpclasssd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9046    where Assembler<'a>: VfpclasssdMaskEmitter<A, B, C> {
9047        <Self as VfpclasssdMaskEmitter<A, B, C>>::vfpclasssd_mask(self, op0, op1, op2);
9048    }
9049    /// `VFPCLASSSS` (VFPCLASSSS). 
9050    /// The FPCLASSSS instruction checks the low single-precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
9051    ///
9052    ///
9053    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSS.html).
9054    ///
9055    /// Supported operand variants:
9056    ///
9057    /// ```text
9058    /// +---+----------------+
9059    /// | # | Operands       |
9060    /// +---+----------------+
9061    /// | 1 | KReg, Mem, Imm |
9062    /// | 2 | KReg, Xmm, Imm |
9063    /// +---+----------------+
9064    /// ```
9065    #[inline]
9066    pub fn vfpclassss<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9067    where Assembler<'a>: VfpclassssEmitter<A, B, C> {
9068        <Self as VfpclassssEmitter<A, B, C>>::vfpclassss(self, op0, op1, op2);
9069    }
9070    /// `VFPCLASSSS_MASK` (VFPCLASSSS). 
9071    /// The FPCLASSSS instruction checks the low single-precision floating-point value in the source operand for special categories, specified by the set bits in the imm8 byte. Each set bit in imm8 specifies a category of floating-point values that the input data element is classified against. The classified results of all specified categories of an input value are ORed together to form the final boolean result for the input element. The result is written to the low bit in a mask register k2 according to the writemask k1. Bits MAX_KL-1: 1 of the destination are cleared.
9072    ///
9073    ///
9074    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VFPCLASSSS.html).
9075    ///
9076    /// Supported operand variants:
9077    ///
9078    /// ```text
9079    /// +---+----------------+
9080    /// | # | Operands       |
9081    /// +---+----------------+
9082    /// | 1 | KReg, Mem, Imm |
9083    /// | 2 | KReg, Xmm, Imm |
9084    /// +---+----------------+
9085    /// ```
9086    #[inline]
9087    pub fn vfpclassss_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9088    where Assembler<'a>: VfpclassssMaskEmitter<A, B, C> {
9089        <Self as VfpclassssMaskEmitter<A, B, C>>::vfpclassss_mask(self, op0, op1, op2);
9090    }
9091    /// `VINSERTF32X8` (VINSERTF32X8). 
9092    /// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
9093    ///
9094    ///
9095    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
9096    ///
9097    /// Supported operand variants:
9098    ///
9099    /// ```text
9100    /// +---+--------------------+
9101    /// | # | Operands           |
9102    /// +---+--------------------+
9103    /// | 1 | Zmm, Zmm, Mem, Imm |
9104    /// | 2 | Zmm, Zmm, Ymm, Imm |
9105    /// +---+--------------------+
9106    /// ```
9107    #[inline]
9108    pub fn vinsertf32x8<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9109    where Assembler<'a>: Vinsertf32x8Emitter<A, B, C, D> {
9110        <Self as Vinsertf32x8Emitter<A, B, C, D>>::vinsertf32x8(self, op0, op1, op2, op3);
9111    }
9112    /// `VINSERTF32X8_MASK` (VINSERTF32X8). 
9113    /// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
9114    ///
9115    ///
9116    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
9117    ///
9118    /// Supported operand variants:
9119    ///
9120    /// ```text
9121    /// +---+--------------------+
9122    /// | # | Operands           |
9123    /// +---+--------------------+
9124    /// | 1 | Zmm, Zmm, Mem, Imm |
9125    /// | 2 | Zmm, Zmm, Ymm, Imm |
9126    /// +---+--------------------+
9127    /// ```
9128    #[inline]
9129    pub fn vinsertf32x8_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9130    where Assembler<'a>: Vinsertf32x8MaskEmitter<A, B, C, D> {
9131        <Self as Vinsertf32x8MaskEmitter<A, B, C, D>>::vinsertf32x8_mask(self, op0, op1, op2, op3);
9132    }
9133    /// `VINSERTF32X8_MASKZ` (VINSERTF32X8). 
9134    /// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
9135    ///
9136    ///
9137    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
9138    ///
9139    /// Supported operand variants:
9140    ///
9141    /// ```text
9142    /// +---+--------------------+
9143    /// | # | Operands           |
9144    /// +---+--------------------+
9145    /// | 1 | Zmm, Zmm, Mem, Imm |
9146    /// | 2 | Zmm, Zmm, Ymm, Imm |
9147    /// +---+--------------------+
9148    /// ```
9149    #[inline]
9150    pub fn vinsertf32x8_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9151    where Assembler<'a>: Vinsertf32x8MaskzEmitter<A, B, C, D> {
9152        <Self as Vinsertf32x8MaskzEmitter<A, B, C, D>>::vinsertf32x8_maskz(self, op0, op1, op2, op3);
9153    }
9154    /// `VINSERTF64X2` (VINSERTF64X2). 
9155    /// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
9156    ///
9157    ///
9158    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
9159    ///
9160    /// Supported operand variants:
9161    ///
9162    /// ```text
9163    /// +---+--------------------+
9164    /// | # | Operands           |
9165    /// +---+--------------------+
9166    /// | 1 | Ymm, Ymm, Mem, Imm |
9167    /// | 2 | Ymm, Ymm, Xmm, Imm |
9168    /// | 3 | Zmm, Zmm, Mem, Imm |
9169    /// | 4 | Zmm, Zmm, Xmm, Imm |
9170    /// +---+--------------------+
9171    /// ```
9172    #[inline]
9173    pub fn vinsertf64x2<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9174    where Assembler<'a>: Vinsertf64x2Emitter<A, B, C, D> {
9175        <Self as Vinsertf64x2Emitter<A, B, C, D>>::vinsertf64x2(self, op0, op1, op2, op3);
9176    }
9177    /// `VINSERTF64X2_MASK` (VINSERTF64X2). 
9178    /// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
9179    ///
9180    ///
9181    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
9182    ///
9183    /// Supported operand variants:
9184    ///
9185    /// ```text
9186    /// +---+--------------------+
9187    /// | # | Operands           |
9188    /// +---+--------------------+
9189    /// | 1 | Ymm, Ymm, Mem, Imm |
9190    /// | 2 | Ymm, Ymm, Xmm, Imm |
9191    /// | 3 | Zmm, Zmm, Mem, Imm |
9192    /// | 4 | Zmm, Zmm, Xmm, Imm |
9193    /// +---+--------------------+
9194    /// ```
9195    #[inline]
9196    pub fn vinsertf64x2_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9197    where Assembler<'a>: Vinsertf64x2MaskEmitter<A, B, C, D> {
9198        <Self as Vinsertf64x2MaskEmitter<A, B, C, D>>::vinsertf64x2_mask(self, op0, op1, op2, op3);
9199    }
9200    /// `VINSERTF64X2_MASKZ` (VINSERTF64X2). 
9201    /// VINSERTF128/VINSERTF32x4 and VINSERTF64x2 insert 128-bits of packed floating-point values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granularity offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination operand are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The destination and first source operands are vector registers.
9202    ///
9203    ///
9204    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTF128%3AVINSERTF32x4%3AVINSERTF64x2%3AVINSERTF32x8%3AVINSERTF64x4.html).
9205    ///
9206    /// Supported operand variants:
9207    ///
9208    /// ```text
9209    /// +---+--------------------+
9210    /// | # | Operands           |
9211    /// +---+--------------------+
9212    /// | 1 | Ymm, Ymm, Mem, Imm |
9213    /// | 2 | Ymm, Ymm, Xmm, Imm |
9214    /// | 3 | Zmm, Zmm, Mem, Imm |
9215    /// | 4 | Zmm, Zmm, Xmm, Imm |
9216    /// +---+--------------------+
9217    /// ```
9218    #[inline]
9219    pub fn vinsertf64x2_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9220    where Assembler<'a>: Vinsertf64x2MaskzEmitter<A, B, C, D> {
9221        <Self as Vinsertf64x2MaskzEmitter<A, B, C, D>>::vinsertf64x2_maskz(self, op0, op1, op2, op3);
9222    }
9223    /// `VINSERTI32X8` (VINSERTI32X8). 
9224    /// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
9225    ///
9226    ///
9227    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
9228    ///
9229    /// Supported operand variants:
9230    ///
9231    /// ```text
9232    /// +---+--------------------+
9233    /// | # | Operands           |
9234    /// +---+--------------------+
9235    /// | 1 | Zmm, Zmm, Mem, Imm |
9236    /// | 2 | Zmm, Zmm, Ymm, Imm |
9237    /// +---+--------------------+
9238    /// ```
9239    #[inline]
9240    pub fn vinserti32x8<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9241    where Assembler<'a>: Vinserti32x8Emitter<A, B, C, D> {
9242        <Self as Vinserti32x8Emitter<A, B, C, D>>::vinserti32x8(self, op0, op1, op2, op3);
9243    }
9244    /// `VINSERTI32X8_MASK` (VINSERTI32X8). 
9245    /// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
9246    ///
9247    ///
9248    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
9249    ///
9250    /// Supported operand variants:
9251    ///
9252    /// ```text
9253    /// +---+--------------------+
9254    /// | # | Operands           |
9255    /// +---+--------------------+
9256    /// | 1 | Zmm, Zmm, Mem, Imm |
9257    /// | 2 | Zmm, Zmm, Ymm, Imm |
9258    /// +---+--------------------+
9259    /// ```
9260    #[inline]
9261    pub fn vinserti32x8_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9262    where Assembler<'a>: Vinserti32x8MaskEmitter<A, B, C, D> {
9263        <Self as Vinserti32x8MaskEmitter<A, B, C, D>>::vinserti32x8_mask(self, op0, op1, op2, op3);
9264    }
9265    /// `VINSERTI32X8_MASKZ` (VINSERTI32X8). 
9266    /// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
9267    ///
9268    ///
9269    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
9270    ///
9271    /// Supported operand variants:
9272    ///
9273    /// ```text
9274    /// +---+--------------------+
9275    /// | # | Operands           |
9276    /// +---+--------------------+
9277    /// | 1 | Zmm, Zmm, Mem, Imm |
9278    /// | 2 | Zmm, Zmm, Ymm, Imm |
9279    /// +---+--------------------+
9280    /// ```
9281    #[inline]
9282    pub fn vinserti32x8_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9283    where Assembler<'a>: Vinserti32x8MaskzEmitter<A, B, C, D> {
9284        <Self as Vinserti32x8MaskzEmitter<A, B, C, D>>::vinserti32x8_maskz(self, op0, op1, op2, op3);
9285    }
9286    /// `VINSERTI64X2` (VINSERTI64X2). 
9287    /// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
9288    ///
9289    ///
9290    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
9291    ///
9292    /// Supported operand variants:
9293    ///
9294    /// ```text
9295    /// +---+--------------------+
9296    /// | # | Operands           |
9297    /// +---+--------------------+
9298    /// | 1 | Ymm, Ymm, Mem, Imm |
9299    /// | 2 | Ymm, Ymm, Xmm, Imm |
9300    /// | 3 | Zmm, Zmm, Mem, Imm |
9301    /// | 4 | Zmm, Zmm, Xmm, Imm |
9302    /// +---+--------------------+
9303    /// ```
9304    #[inline]
9305    pub fn vinserti64x2<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9306    where Assembler<'a>: Vinserti64x2Emitter<A, B, C, D> {
9307        <Self as Vinserti64x2Emitter<A, B, C, D>>::vinserti64x2(self, op0, op1, op2, op3);
9308    }
9309    /// `VINSERTI64X2_MASK` (VINSERTI64X2). 
9310    /// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
9311    ///
9312    ///
9313    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
9314    ///
9315    /// Supported operand variants:
9316    ///
9317    /// ```text
9318    /// +---+--------------------+
9319    /// | # | Operands           |
9320    /// +---+--------------------+
9321    /// | 1 | Ymm, Ymm, Mem, Imm |
9322    /// | 2 | Ymm, Ymm, Xmm, Imm |
9323    /// | 3 | Zmm, Zmm, Mem, Imm |
9324    /// | 4 | Zmm, Zmm, Xmm, Imm |
9325    /// +---+--------------------+
9326    /// ```
9327    #[inline]
9328    pub fn vinserti64x2_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9329    where Assembler<'a>: Vinserti64x2MaskEmitter<A, B, C, D> {
9330        <Self as Vinserti64x2MaskEmitter<A, B, C, D>>::vinserti64x2_mask(self, op0, op1, op2, op3);
9331    }
9332    /// `VINSERTI64X2_MASKZ` (VINSERTI64X2). 
9333    /// VINSERTI32x4 and VINSERTI64x2 inserts 128-bits of packed integer values from the second source operand (the third operand) into the destination operand (the first operand) at an 128-bit granular offset multiplied by imm8[0] (256-bit) or imm8[1:0]. The remaining portions of the destination are copied from the corresponding fields of the first source operand (the second operand). The second source operand can be either an XMM register or a 128-bit memory location. The high 6/7bits of the immediate are ignored. The destination operand is a ZMM/YMM register and updated at 32 and 64-bit granularity according to the writemask.
9334    ///
9335    ///
9336    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VINSERTI128%3AVINSERTI32x4%3AVINSERTI64x2%3AVINSERTI32x8%3AVINSERTI64x4.html).
9337    ///
9338    /// Supported operand variants:
9339    ///
9340    /// ```text
9341    /// +---+--------------------+
9342    /// | # | Operands           |
9343    /// +---+--------------------+
9344    /// | 1 | Ymm, Ymm, Mem, Imm |
9345    /// | 2 | Ymm, Ymm, Xmm, Imm |
9346    /// | 3 | Zmm, Zmm, Mem, Imm |
9347    /// | 4 | Zmm, Zmm, Xmm, Imm |
9348    /// +---+--------------------+
9349    /// ```
9350    #[inline]
9351    pub fn vinserti64x2_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9352    where Assembler<'a>: Vinserti64x2MaskzEmitter<A, B, C, D> {
9353        <Self as Vinserti64x2MaskzEmitter<A, B, C, D>>::vinserti64x2_maskz(self, op0, op1, op2, op3);
9354    }
9355    /// `VORPD` (VORPD). 
9356    /// Performs a bitwise logical OR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
9357    ///
9358    ///
9359    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPD.html).
9360    ///
9361    /// Supported operand variants:
9362    ///
9363    /// ```text
9364    /// +---+---------------+
9365    /// | # | Operands      |
9366    /// +---+---------------+
9367    /// | 1 | Xmm, Xmm, Mem |
9368    /// | 2 | Xmm, Xmm, Xmm |
9369    /// | 3 | Ymm, Ymm, Mem |
9370    /// | 4 | Ymm, Ymm, Ymm |
9371    /// | 5 | Zmm, Zmm, Mem |
9372    /// | 6 | Zmm, Zmm, Zmm |
9373    /// +---+---------------+
9374    /// ```
9375    #[inline]
9376    pub fn vorpd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9377    where Assembler<'a>: VorpdEmitter<A, B, C> {
9378        <Self as VorpdEmitter<A, B, C>>::vorpd(self, op0, op1, op2);
9379    }
9380    /// `VORPD_MASK` (VORPD). 
9381    /// Performs a bitwise logical OR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
9382    ///
9383    ///
9384    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPD.html).
9385    ///
9386    /// Supported operand variants:
9387    ///
9388    /// ```text
9389    /// +---+---------------+
9390    /// | # | Operands      |
9391    /// +---+---------------+
9392    /// | 1 | Xmm, Xmm, Mem |
9393    /// | 2 | Xmm, Xmm, Xmm |
9394    /// | 3 | Ymm, Ymm, Mem |
9395    /// | 4 | Ymm, Ymm, Ymm |
9396    /// | 5 | Zmm, Zmm, Mem |
9397    /// | 6 | Zmm, Zmm, Zmm |
9398    /// +---+---------------+
9399    /// ```
9400    #[inline]
9401    pub fn vorpd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9402    where Assembler<'a>: VorpdMaskEmitter<A, B, C> {
9403        <Self as VorpdMaskEmitter<A, B, C>>::vorpd_mask(self, op0, op1, op2);
9404    }
9405    /// `VORPD_MASKZ` (VORPD). 
9406    /// Performs a bitwise logical OR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
9407    ///
9408    ///
9409    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPD.html).
9410    ///
9411    /// Supported operand variants:
9412    ///
9413    /// ```text
9414    /// +---+---------------+
9415    /// | # | Operands      |
9416    /// +---+---------------+
9417    /// | 1 | Xmm, Xmm, Mem |
9418    /// | 2 | Xmm, Xmm, Xmm |
9419    /// | 3 | Ymm, Ymm, Mem |
9420    /// | 4 | Ymm, Ymm, Ymm |
9421    /// | 5 | Zmm, Zmm, Mem |
9422    /// | 6 | Zmm, Zmm, Zmm |
9423    /// +---+---------------+
9424    /// ```
9425    #[inline]
9426    pub fn vorpd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9427    where Assembler<'a>: VorpdMaskzEmitter<A, B, C> {
9428        <Self as VorpdMaskzEmitter<A, B, C>>::vorpd_maskz(self, op0, op1, op2);
9429    }
9430    /// `VORPS` (VORPS). 
9431    /// Performs a bitwise logical OR of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
9432    ///
9433    ///
9434    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPS.html).
9435    ///
9436    /// Supported operand variants:
9437    ///
9438    /// ```text
9439    /// +---+---------------+
9440    /// | # | Operands      |
9441    /// +---+---------------+
9442    /// | 1 | Xmm, Xmm, Mem |
9443    /// | 2 | Xmm, Xmm, Xmm |
9444    /// | 3 | Ymm, Ymm, Mem |
9445    /// | 4 | Ymm, Ymm, Ymm |
9446    /// | 5 | Zmm, Zmm, Mem |
9447    /// | 6 | Zmm, Zmm, Zmm |
9448    /// +---+---------------+
9449    /// ```
9450    #[inline]
9451    pub fn vorps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9452    where Assembler<'a>: VorpsEmitter<A, B, C> {
9453        <Self as VorpsEmitter<A, B, C>>::vorps(self, op0, op1, op2);
9454    }
9455    /// `VORPS_MASK` (VORPS). 
9456    /// Performs a bitwise logical OR of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
9457    ///
9458    ///
9459    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPS.html).
9460    ///
9461    /// Supported operand variants:
9462    ///
9463    /// ```text
9464    /// +---+---------------+
9465    /// | # | Operands      |
9466    /// +---+---------------+
9467    /// | 1 | Xmm, Xmm, Mem |
9468    /// | 2 | Xmm, Xmm, Xmm |
9469    /// | 3 | Ymm, Ymm, Mem |
9470    /// | 4 | Ymm, Ymm, Ymm |
9471    /// | 5 | Zmm, Zmm, Mem |
9472    /// | 6 | Zmm, Zmm, Zmm |
9473    /// +---+---------------+
9474    /// ```
9475    #[inline]
9476    pub fn vorps_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9477    where Assembler<'a>: VorpsMaskEmitter<A, B, C> {
9478        <Self as VorpsMaskEmitter<A, B, C>>::vorps_mask(self, op0, op1, op2);
9479    }
9480    /// `VORPS_MASKZ` (VORPS). 
9481    /// Performs a bitwise logical OR of the four, eight or sixteen packed single precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
9482    ///
9483    ///
9484    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ORPS.html).
9485    ///
9486    /// Supported operand variants:
9487    ///
9488    /// ```text
9489    /// +---+---------------+
9490    /// | # | Operands      |
9491    /// +---+---------------+
9492    /// | 1 | Xmm, Xmm, Mem |
9493    /// | 2 | Xmm, Xmm, Xmm |
9494    /// | 3 | Ymm, Ymm, Mem |
9495    /// | 4 | Ymm, Ymm, Ymm |
9496    /// | 5 | Zmm, Zmm, Mem |
9497    /// | 6 | Zmm, Zmm, Zmm |
9498    /// +---+---------------+
9499    /// ```
9500    #[inline]
9501    pub fn vorps_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9502    where Assembler<'a>: VorpsMaskzEmitter<A, B, C> {
9503        <Self as VorpsMaskzEmitter<A, B, C>>::vorps_maskz(self, op0, op1, op2);
9504    }
9505    /// `VPMOVD2M` (VPMOVD2M). 
9506    /// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
9507    ///
9508    ///
9509    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
9510    ///
9511    /// Supported operand variants:
9512    ///
9513    /// ```text
9514    /// +---+-----------+
9515    /// | # | Operands  |
9516    /// +---+-----------+
9517    /// | 1 | KReg, Xmm |
9518    /// | 2 | KReg, Ymm |
9519    /// | 3 | KReg, Zmm |
9520    /// +---+-----------+
9521    /// ```
9522    #[inline]
9523    pub fn vpmovd2m<A, B>(&mut self, op0: A, op1: B)
9524    where Assembler<'a>: Vpmovd2mEmitter<A, B> {
9525        <Self as Vpmovd2mEmitter<A, B>>::vpmovd2m(self, op0, op1);
9526    }
9527    /// `VPMOVM2D` (VPMOVM2D). 
9528    /// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
9529    ///
9530    ///
9531    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
9532    ///
9533    /// Supported operand variants:
9534    ///
9535    /// ```text
9536    /// +---+-----------+
9537    /// | # | Operands  |
9538    /// +---+-----------+
9539    /// | 1 | Xmm, KReg |
9540    /// | 2 | Ymm, KReg |
9541    /// | 3 | Zmm, KReg |
9542    /// +---+-----------+
9543    /// ```
9544    #[inline]
9545    pub fn vpmovm2d<A, B>(&mut self, op0: A, op1: B)
9546    where Assembler<'a>: Vpmovm2dEmitter<A, B> {
9547        <Self as Vpmovm2dEmitter<A, B>>::vpmovm2d(self, op0, op1);
9548    }
9549    /// `VPMOVM2Q` (VPMOVM2Q). 
9550    /// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
9551    ///
9552    ///
9553    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
9554    ///
9555    /// Supported operand variants:
9556    ///
9557    /// ```text
9558    /// +---+-----------+
9559    /// | # | Operands  |
9560    /// +---+-----------+
9561    /// | 1 | Xmm, KReg |
9562    /// | 2 | Ymm, KReg |
9563    /// | 3 | Zmm, KReg |
9564    /// +---+-----------+
9565    /// ```
9566    #[inline]
9567    pub fn vpmovm2q<A, B>(&mut self, op0: A, op1: B)
9568    where Assembler<'a>: Vpmovm2qEmitter<A, B> {
9569        <Self as Vpmovm2qEmitter<A, B>>::vpmovm2q(self, op0, op1);
9570    }
9571    /// `VPMOVQ2M` (VPMOVQ2M). 
9572    /// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
9573    ///
9574    ///
9575    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
9576    ///
9577    /// Supported operand variants:
9578    ///
9579    /// ```text
9580    /// +---+-----------+
9581    /// | # | Operands  |
9582    /// +---+-----------+
9583    /// | 1 | KReg, Xmm |
9584    /// | 2 | KReg, Ymm |
9585    /// | 3 | KReg, Zmm |
9586    /// +---+-----------+
9587    /// ```
9588    #[inline]
9589    pub fn vpmovq2m<A, B>(&mut self, op0: A, op1: B)
9590    where Assembler<'a>: Vpmovq2mEmitter<A, B> {
9591        <Self as Vpmovq2mEmitter<A, B>>::vpmovq2m(self, op0, op1);
9592    }
9593    /// `VPMULLD` (VPMULLD). 
9594    /// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
9595    ///
9596    ///
9597    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
9598    ///
9599    /// Supported operand variants:
9600    ///
9601    /// ```text
9602    /// +---+---------------+
9603    /// | # | Operands      |
9604    /// +---+---------------+
9605    /// | 1 | Xmm, Xmm, Mem |
9606    /// | 2 | Xmm, Xmm, Xmm |
9607    /// | 3 | Ymm, Ymm, Mem |
9608    /// | 4 | Ymm, Ymm, Ymm |
9609    /// | 5 | Zmm, Zmm, Mem |
9610    /// | 6 | Zmm, Zmm, Zmm |
9611    /// +---+---------------+
9612    /// ```
9613    #[inline]
9614    pub fn vpmulld<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9615    where Assembler<'a>: VpmulldEmitter<A, B, C> {
9616        <Self as VpmulldEmitter<A, B, C>>::vpmulld(self, op0, op1, op2);
9617    }
9618    /// `VPMULLD_MASK` (VPMULLD). 
9619    /// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
9620    ///
9621    ///
9622    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
9623    ///
9624    /// Supported operand variants:
9625    ///
9626    /// ```text
9627    /// +---+---------------+
9628    /// | # | Operands      |
9629    /// +---+---------------+
9630    /// | 1 | Xmm, Xmm, Mem |
9631    /// | 2 | Xmm, Xmm, Xmm |
9632    /// | 3 | Ymm, Ymm, Mem |
9633    /// | 4 | Ymm, Ymm, Ymm |
9634    /// | 5 | Zmm, Zmm, Mem |
9635    /// | 6 | Zmm, Zmm, Zmm |
9636    /// +---+---------------+
9637    /// ```
9638    #[inline]
9639    pub fn vpmulld_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9640    where Assembler<'a>: VpmulldMaskEmitter<A, B, C> {
9641        <Self as VpmulldMaskEmitter<A, B, C>>::vpmulld_mask(self, op0, op1, op2);
9642    }
9643    /// `VPMULLD_MASKZ` (VPMULLD). 
9644    /// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
9645    ///
9646    ///
9647    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
9648    ///
9649    /// Supported operand variants:
9650    ///
9651    /// ```text
9652    /// +---+---------------+
9653    /// | # | Operands      |
9654    /// +---+---------------+
9655    /// | 1 | Xmm, Xmm, Mem |
9656    /// | 2 | Xmm, Xmm, Xmm |
9657    /// | 3 | Ymm, Ymm, Mem |
9658    /// | 4 | Ymm, Ymm, Ymm |
9659    /// | 5 | Zmm, Zmm, Mem |
9660    /// | 6 | Zmm, Zmm, Zmm |
9661    /// +---+---------------+
9662    /// ```
9663    #[inline]
9664    pub fn vpmulld_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9665    where Assembler<'a>: VpmulldMaskzEmitter<A, B, C> {
9666        <Self as VpmulldMaskzEmitter<A, B, C>>::vpmulld_maskz(self, op0, op1, op2);
9667    }
9668    /// `VPMULLQ` (VPMULLQ). 
9669    /// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
9670    ///
9671    ///
9672    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
9673    ///
9674    /// Supported operand variants:
9675    ///
9676    /// ```text
9677    /// +---+---------------+
9678    /// | # | Operands      |
9679    /// +---+---------------+
9680    /// | 1 | Xmm, Xmm, Mem |
9681    /// | 2 | Xmm, Xmm, Xmm |
9682    /// | 3 | Ymm, Ymm, Mem |
9683    /// | 4 | Ymm, Ymm, Ymm |
9684    /// | 5 | Zmm, Zmm, Mem |
9685    /// | 6 | Zmm, Zmm, Zmm |
9686    /// +---+---------------+
9687    /// ```
9688    #[inline]
9689    pub fn vpmullq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9690    where Assembler<'a>: VpmullqEmitter<A, B, C> {
9691        <Self as VpmullqEmitter<A, B, C>>::vpmullq(self, op0, op1, op2);
9692    }
9693    /// `VPMULLQ_MASK` (VPMULLQ). 
9694    /// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
9695    ///
9696    ///
9697    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
9698    ///
9699    /// Supported operand variants:
9700    ///
9701    /// ```text
9702    /// +---+---------------+
9703    /// | # | Operands      |
9704    /// +---+---------------+
9705    /// | 1 | Xmm, Xmm, Mem |
9706    /// | 2 | Xmm, Xmm, Xmm |
9707    /// | 3 | Ymm, Ymm, Mem |
9708    /// | 4 | Ymm, Ymm, Ymm |
9709    /// | 5 | Zmm, Zmm, Mem |
9710    /// | 6 | Zmm, Zmm, Zmm |
9711    /// +---+---------------+
9712    /// ```
9713    #[inline]
9714    pub fn vpmullq_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9715    where Assembler<'a>: VpmullqMaskEmitter<A, B, C> {
9716        <Self as VpmullqMaskEmitter<A, B, C>>::vpmullq_mask(self, op0, op1, op2);
9717    }
9718    /// `VPMULLQ_MASKZ` (VPMULLQ). 
9719    /// Performs a SIMD signed multiply of the packed signed dword/qword integers from each element of the first source operand with the corresponding element in the second source operand. The low 32/64 bits of each 64/128-bit intermediate results are stored to the destination operand.
9720    ///
9721    ///
9722    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLD%3APMULLQ.html).
9723    ///
9724    /// Supported operand variants:
9725    ///
9726    /// ```text
9727    /// +---+---------------+
9728    /// | # | Operands      |
9729    /// +---+---------------+
9730    /// | 1 | Xmm, Xmm, Mem |
9731    /// | 2 | Xmm, Xmm, Xmm |
9732    /// | 3 | Ymm, Ymm, Mem |
9733    /// | 4 | Ymm, Ymm, Ymm |
9734    /// | 5 | Zmm, Zmm, Mem |
9735    /// | 6 | Zmm, Zmm, Zmm |
9736    /// +---+---------------+
9737    /// ```
9738    #[inline]
9739    pub fn vpmullq_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
9740    where Assembler<'a>: VpmullqMaskzEmitter<A, B, C> {
9741        <Self as VpmullqMaskzEmitter<A, B, C>>::vpmullq_maskz(self, op0, op1, op2);
9742    }
9743    /// `VRANGEPD` (VRANGEPD). 
9744    /// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9745    ///
9746    ///
9747    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
9748    ///
9749    /// Supported operand variants:
9750    ///
9751    /// ```text
9752    /// +---+--------------------+
9753    /// | # | Operands           |
9754    /// +---+--------------------+
9755    /// | 1 | Xmm, Xmm, Mem, Imm |
9756    /// | 2 | Xmm, Xmm, Xmm, Imm |
9757    /// | 3 | Ymm, Ymm, Mem, Imm |
9758    /// | 4 | Ymm, Ymm, Ymm, Imm |
9759    /// | 5 | Zmm, Zmm, Mem, Imm |
9760    /// | 6 | Zmm, Zmm, Zmm, Imm |
9761    /// +---+--------------------+
9762    /// ```
9763    #[inline]
9764    pub fn vrangepd<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9765    where Assembler<'a>: VrangepdEmitter<A, B, C, D> {
9766        <Self as VrangepdEmitter<A, B, C, D>>::vrangepd(self, op0, op1, op2, op3);
9767    }
9768    /// `VRANGEPD_MASK` (VRANGEPD). 
9769    /// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9770    ///
9771    ///
9772    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
9773    ///
9774    /// Supported operand variants:
9775    ///
9776    /// ```text
9777    /// +---+--------------------+
9778    /// | # | Operands           |
9779    /// +---+--------------------+
9780    /// | 1 | Xmm, Xmm, Mem, Imm |
9781    /// | 2 | Xmm, Xmm, Xmm, Imm |
9782    /// | 3 | Ymm, Ymm, Mem, Imm |
9783    /// | 4 | Ymm, Ymm, Ymm, Imm |
9784    /// | 5 | Zmm, Zmm, Mem, Imm |
9785    /// | 6 | Zmm, Zmm, Zmm, Imm |
9786    /// +---+--------------------+
9787    /// ```
9788    #[inline]
9789    pub fn vrangepd_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9790    where Assembler<'a>: VrangepdMaskEmitter<A, B, C, D> {
9791        <Self as VrangepdMaskEmitter<A, B, C, D>>::vrangepd_mask(self, op0, op1, op2, op3);
9792    }
9793    /// `VRANGEPD_MASK_SAE` (VRANGEPD). 
9794    /// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9795    ///
9796    ///
9797    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
9798    ///
9799    /// Supported operand variants:
9800    ///
9801    /// ```text
9802    /// +---+--------------------+
9803    /// | # | Operands           |
9804    /// +---+--------------------+
9805    /// | 1 | Zmm, Zmm, Zmm, Imm |
9806    /// +---+--------------------+
9807    /// ```
9808    #[inline]
9809    pub fn vrangepd_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9810    where Assembler<'a>: VrangepdMaskSaeEmitter<A, B, C, D> {
9811        <Self as VrangepdMaskSaeEmitter<A, B, C, D>>::vrangepd_mask_sae(self, op0, op1, op2, op3);
9812    }
9813    /// `VRANGEPD_MASKZ` (VRANGEPD). 
9814    /// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9815    ///
9816    ///
9817    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
9818    ///
9819    /// Supported operand variants:
9820    ///
9821    /// ```text
9822    /// +---+--------------------+
9823    /// | # | Operands           |
9824    /// +---+--------------------+
9825    /// | 1 | Xmm, Xmm, Mem, Imm |
9826    /// | 2 | Xmm, Xmm, Xmm, Imm |
9827    /// | 3 | Ymm, Ymm, Mem, Imm |
9828    /// | 4 | Ymm, Ymm, Ymm, Imm |
9829    /// | 5 | Zmm, Zmm, Mem, Imm |
9830    /// | 6 | Zmm, Zmm, Zmm, Imm |
9831    /// +---+--------------------+
9832    /// ```
9833    #[inline]
9834    pub fn vrangepd_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9835    where Assembler<'a>: VrangepdMaskzEmitter<A, B, C, D> {
9836        <Self as VrangepdMaskzEmitter<A, B, C, D>>::vrangepd_maskz(self, op0, op1, op2, op3);
9837    }
9838    /// `VRANGEPD_MASKZ_SAE` (VRANGEPD). 
9839    /// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9840    ///
9841    ///
9842    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
9843    ///
9844    /// Supported operand variants:
9845    ///
9846    /// ```text
9847    /// +---+--------------------+
9848    /// | # | Operands           |
9849    /// +---+--------------------+
9850    /// | 1 | Zmm, Zmm, Zmm, Imm |
9851    /// +---+--------------------+
9852    /// ```
9853    #[inline]
9854    pub fn vrangepd_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9855    where Assembler<'a>: VrangepdMaskzSaeEmitter<A, B, C, D> {
9856        <Self as VrangepdMaskzSaeEmitter<A, B, C, D>>::vrangepd_maskz_sae(self, op0, op1, op2, op3);
9857    }
9858    /// `VRANGEPD_SAE` (VRANGEPD). 
9859    /// This instruction calculates 2/4/8 range operation outputs from two sets of packed input double precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9860    ///
9861    ///
9862    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPD.html).
9863    ///
9864    /// Supported operand variants:
9865    ///
9866    /// ```text
9867    /// +---+--------------------+
9868    /// | # | Operands           |
9869    /// +---+--------------------+
9870    /// | 1 | Zmm, Zmm, Zmm, Imm |
9871    /// +---+--------------------+
9872    /// ```
9873    #[inline]
9874    pub fn vrangepd_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9875    where Assembler<'a>: VrangepdSaeEmitter<A, B, C, D> {
9876        <Self as VrangepdSaeEmitter<A, B, C, D>>::vrangepd_sae(self, op0, op1, op2, op3);
9877    }
9878    /// `VRANGEPS` (VRANGEPS). 
9879    /// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9880    ///
9881    ///
9882    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
9883    ///
9884    /// Supported operand variants:
9885    ///
9886    /// ```text
9887    /// +---+--------------------+
9888    /// | # | Operands           |
9889    /// +---+--------------------+
9890    /// | 1 | Xmm, Xmm, Mem, Imm |
9891    /// | 2 | Xmm, Xmm, Xmm, Imm |
9892    /// | 3 | Ymm, Ymm, Mem, Imm |
9893    /// | 4 | Ymm, Ymm, Ymm, Imm |
9894    /// | 5 | Zmm, Zmm, Mem, Imm |
9895    /// | 6 | Zmm, Zmm, Zmm, Imm |
9896    /// +---+--------------------+
9897    /// ```
9898    #[inline]
9899    pub fn vrangeps<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9900    where Assembler<'a>: VrangepsEmitter<A, B, C, D> {
9901        <Self as VrangepsEmitter<A, B, C, D>>::vrangeps(self, op0, op1, op2, op3);
9902    }
9903    /// `VRANGEPS_MASK` (VRANGEPS). 
9904    /// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9905    ///
9906    ///
9907    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
9908    ///
9909    /// Supported operand variants:
9910    ///
9911    /// ```text
9912    /// +---+--------------------+
9913    /// | # | Operands           |
9914    /// +---+--------------------+
9915    /// | 1 | Xmm, Xmm, Mem, Imm |
9916    /// | 2 | Xmm, Xmm, Xmm, Imm |
9917    /// | 3 | Ymm, Ymm, Mem, Imm |
9918    /// | 4 | Ymm, Ymm, Ymm, Imm |
9919    /// | 5 | Zmm, Zmm, Mem, Imm |
9920    /// | 6 | Zmm, Zmm, Zmm, Imm |
9921    /// +---+--------------------+
9922    /// ```
9923    #[inline]
9924    pub fn vrangeps_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9925    where Assembler<'a>: VrangepsMaskEmitter<A, B, C, D> {
9926        <Self as VrangepsMaskEmitter<A, B, C, D>>::vrangeps_mask(self, op0, op1, op2, op3);
9927    }
9928    /// `VRANGEPS_MASK_SAE` (VRANGEPS). 
9929    /// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9930    ///
9931    ///
9932    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
9933    ///
9934    /// Supported operand variants:
9935    ///
9936    /// ```text
9937    /// +---+--------------------+
9938    /// | # | Operands           |
9939    /// +---+--------------------+
9940    /// | 1 | Zmm, Zmm, Zmm, Imm |
9941    /// +---+--------------------+
9942    /// ```
9943    #[inline]
9944    pub fn vrangeps_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9945    where Assembler<'a>: VrangepsMaskSaeEmitter<A, B, C, D> {
9946        <Self as VrangepsMaskSaeEmitter<A, B, C, D>>::vrangeps_mask_sae(self, op0, op1, op2, op3);
9947    }
9948    /// `VRANGEPS_MASKZ` (VRANGEPS). 
9949    /// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9950    ///
9951    ///
9952    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
9953    ///
9954    /// Supported operand variants:
9955    ///
9956    /// ```text
9957    /// +---+--------------------+
9958    /// | # | Operands           |
9959    /// +---+--------------------+
9960    /// | 1 | Xmm, Xmm, Mem, Imm |
9961    /// | 2 | Xmm, Xmm, Xmm, Imm |
9962    /// | 3 | Ymm, Ymm, Mem, Imm |
9963    /// | 4 | Ymm, Ymm, Ymm, Imm |
9964    /// | 5 | Zmm, Zmm, Mem, Imm |
9965    /// | 6 | Zmm, Zmm, Zmm, Imm |
9966    /// +---+--------------------+
9967    /// ```
9968    #[inline]
9969    pub fn vrangeps_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9970    where Assembler<'a>: VrangepsMaskzEmitter<A, B, C, D> {
9971        <Self as VrangepsMaskzEmitter<A, B, C, D>>::vrangeps_maskz(self, op0, op1, op2, op3);
9972    }
9973    /// `VRANGEPS_MASKZ_SAE` (VRANGEPS). 
9974    /// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9975    ///
9976    ///
9977    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
9978    ///
9979    /// Supported operand variants:
9980    ///
9981    /// ```text
9982    /// +---+--------------------+
9983    /// | # | Operands           |
9984    /// +---+--------------------+
9985    /// | 1 | Zmm, Zmm, Zmm, Imm |
9986    /// +---+--------------------+
9987    /// ```
9988    #[inline]
9989    pub fn vrangeps_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
9990    where Assembler<'a>: VrangepsMaskzSaeEmitter<A, B, C, D> {
9991        <Self as VrangepsMaskzSaeEmitter<A, B, C, D>>::vrangeps_maskz_sae(self, op0, op1, op2, op3);
9992    }
9993    /// `VRANGEPS_SAE` (VRANGEPS). 
9994    /// This instruction calculates 4/8/16 range operation outputs from two sets of packed input single-precision floating-point values in the first source operand (the second operand) and the second source operand (the third operand). The range outputs are written to the destination operand (the first operand) under the writemask k1.
9995    ///
9996    ///
9997    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGEPS.html).
9998    ///
9999    /// Supported operand variants:
10000    ///
10001    /// ```text
10002    /// +---+--------------------+
10003    /// | # | Operands           |
10004    /// +---+--------------------+
10005    /// | 1 | Zmm, Zmm, Zmm, Imm |
10006    /// +---+--------------------+
10007    /// ```
10008    #[inline]
10009    pub fn vrangeps_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10010    where Assembler<'a>: VrangepsSaeEmitter<A, B, C, D> {
10011        <Self as VrangepsSaeEmitter<A, B, C, D>>::vrangeps_sae(self, op0, op1, op2, op3);
10012    }
10013    /// `VRANGESD` (VRANGESD). 
10014    /// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
10015    ///
10016    ///
10017    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
10018    ///
10019    /// Supported operand variants:
10020    ///
10021    /// ```text
10022    /// +---+--------------------+
10023    /// | # | Operands           |
10024    /// +---+--------------------+
10025    /// | 1 | Xmm, Xmm, Mem, Imm |
10026    /// | 2 | Xmm, Xmm, Xmm, Imm |
10027    /// +---+--------------------+
10028    /// ```
10029    #[inline]
10030    pub fn vrangesd<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10031    where Assembler<'a>: VrangesdEmitter<A, B, C, D> {
10032        <Self as VrangesdEmitter<A, B, C, D>>::vrangesd(self, op0, op1, op2, op3);
10033    }
10034    /// `VRANGESD_MASK` (VRANGESD). 
10035    /// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
10036    ///
10037    ///
10038    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
10039    ///
10040    /// Supported operand variants:
10041    ///
10042    /// ```text
10043    /// +---+--------------------+
10044    /// | # | Operands           |
10045    /// +---+--------------------+
10046    /// | 1 | Xmm, Xmm, Mem, Imm |
10047    /// | 2 | Xmm, Xmm, Xmm, Imm |
10048    /// +---+--------------------+
10049    /// ```
10050    #[inline]
10051    pub fn vrangesd_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10052    where Assembler<'a>: VrangesdMaskEmitter<A, B, C, D> {
10053        <Self as VrangesdMaskEmitter<A, B, C, D>>::vrangesd_mask(self, op0, op1, op2, op3);
10054    }
10055    /// `VRANGESD_MASK_SAE` (VRANGESD). 
10056    /// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
10057    ///
10058    ///
10059    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
10060    ///
10061    /// Supported operand variants:
10062    ///
10063    /// ```text
10064    /// +---+--------------------+
10065    /// | # | Operands           |
10066    /// +---+--------------------+
10067    /// | 1 | Xmm, Xmm, Xmm, Imm |
10068    /// +---+--------------------+
10069    /// ```
10070    #[inline]
10071    pub fn vrangesd_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10072    where Assembler<'a>: VrangesdMaskSaeEmitter<A, B, C, D> {
10073        <Self as VrangesdMaskSaeEmitter<A, B, C, D>>::vrangesd_mask_sae(self, op0, op1, op2, op3);
10074    }
10075    /// `VRANGESD_MASKZ` (VRANGESD). 
10076    /// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
10077    ///
10078    ///
10079    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
10080    ///
10081    /// Supported operand variants:
10082    ///
10083    /// ```text
10084    /// +---+--------------------+
10085    /// | # | Operands           |
10086    /// +---+--------------------+
10087    /// | 1 | Xmm, Xmm, Mem, Imm |
10088    /// | 2 | Xmm, Xmm, Xmm, Imm |
10089    /// +---+--------------------+
10090    /// ```
10091    #[inline]
10092    pub fn vrangesd_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10093    where Assembler<'a>: VrangesdMaskzEmitter<A, B, C, D> {
10094        <Self as VrangesdMaskzEmitter<A, B, C, D>>::vrangesd_maskz(self, op0, op1, op2, op3);
10095    }
10096    /// `VRANGESD_MASKZ_SAE` (VRANGESD). 
10097    /// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
10098    ///
10099    ///
10100    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
10101    ///
10102    /// Supported operand variants:
10103    ///
10104    /// ```text
10105    /// +---+--------------------+
10106    /// | # | Operands           |
10107    /// +---+--------------------+
10108    /// | 1 | Xmm, Xmm, Xmm, Imm |
10109    /// +---+--------------------+
10110    /// ```
10111    #[inline]
10112    pub fn vrangesd_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10113    where Assembler<'a>: VrangesdMaskzSaeEmitter<A, B, C, D> {
10114        <Self as VrangesdMaskzSaeEmitter<A, B, C, D>>::vrangesd_maskz_sae(self, op0, op1, op2, op3);
10115    }
10116    /// `VRANGESD_SAE` (VRANGESD). 
10117    /// This instruction calculates a range operation output from two input double precision floating-point values in the low qword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low qword element of the destination operand (the first operand) under the writemask k1.
10118    ///
10119    ///
10120    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESD.html).
10121    ///
10122    /// Supported operand variants:
10123    ///
10124    /// ```text
10125    /// +---+--------------------+
10126    /// | # | Operands           |
10127    /// +---+--------------------+
10128    /// | 1 | Xmm, Xmm, Xmm, Imm |
10129    /// +---+--------------------+
10130    /// ```
10131    #[inline]
10132    pub fn vrangesd_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10133    where Assembler<'a>: VrangesdSaeEmitter<A, B, C, D> {
10134        <Self as VrangesdSaeEmitter<A, B, C, D>>::vrangesd_sae(self, op0, op1, op2, op3);
10135    }
10136    /// `VRANGESS` (VRANGESS). 
10137    /// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
10138    ///
10139    ///
10140    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
10141    ///
10142    /// Supported operand variants:
10143    ///
10144    /// ```text
10145    /// +---+--------------------+
10146    /// | # | Operands           |
10147    /// +---+--------------------+
10148    /// | 1 | Xmm, Xmm, Mem, Imm |
10149    /// | 2 | Xmm, Xmm, Xmm, Imm |
10150    /// +---+--------------------+
10151    /// ```
10152    #[inline]
10153    pub fn vrangess<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10154    where Assembler<'a>: VrangessEmitter<A, B, C, D> {
10155        <Self as VrangessEmitter<A, B, C, D>>::vrangess(self, op0, op1, op2, op3);
10156    }
10157    /// `VRANGESS_MASK` (VRANGESS). 
10158    /// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
10159    ///
10160    ///
10161    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
10162    ///
10163    /// Supported operand variants:
10164    ///
10165    /// ```text
10166    /// +---+--------------------+
10167    /// | # | Operands           |
10168    /// +---+--------------------+
10169    /// | 1 | Xmm, Xmm, Mem, Imm |
10170    /// | 2 | Xmm, Xmm, Xmm, Imm |
10171    /// +---+--------------------+
10172    /// ```
10173    #[inline]
10174    pub fn vrangess_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10175    where Assembler<'a>: VrangessMaskEmitter<A, B, C, D> {
10176        <Self as VrangessMaskEmitter<A, B, C, D>>::vrangess_mask(self, op0, op1, op2, op3);
10177    }
10178    /// `VRANGESS_MASK_SAE` (VRANGESS). 
10179    /// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
10180    ///
10181    ///
10182    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
10183    ///
10184    /// Supported operand variants:
10185    ///
10186    /// ```text
10187    /// +---+--------------------+
10188    /// | # | Operands           |
10189    /// +---+--------------------+
10190    /// | 1 | Xmm, Xmm, Xmm, Imm |
10191    /// +---+--------------------+
10192    /// ```
10193    #[inline]
10194    pub fn vrangess_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10195    where Assembler<'a>: VrangessMaskSaeEmitter<A, B, C, D> {
10196        <Self as VrangessMaskSaeEmitter<A, B, C, D>>::vrangess_mask_sae(self, op0, op1, op2, op3);
10197    }
10198    /// `VRANGESS_MASKZ` (VRANGESS). 
10199    /// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
10200    ///
10201    ///
10202    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
10203    ///
10204    /// Supported operand variants:
10205    ///
10206    /// ```text
10207    /// +---+--------------------+
10208    /// | # | Operands           |
10209    /// +---+--------------------+
10210    /// | 1 | Xmm, Xmm, Mem, Imm |
10211    /// | 2 | Xmm, Xmm, Xmm, Imm |
10212    /// +---+--------------------+
10213    /// ```
10214    #[inline]
10215    pub fn vrangess_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10216    where Assembler<'a>: VrangessMaskzEmitter<A, B, C, D> {
10217        <Self as VrangessMaskzEmitter<A, B, C, D>>::vrangess_maskz(self, op0, op1, op2, op3);
10218    }
10219    /// `VRANGESS_MASKZ_SAE` (VRANGESS). 
10220    /// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
10221    ///
10222    ///
10223    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
10224    ///
10225    /// Supported operand variants:
10226    ///
10227    /// ```text
10228    /// +---+--------------------+
10229    /// | # | Operands           |
10230    /// +---+--------------------+
10231    /// | 1 | Xmm, Xmm, Xmm, Imm |
10232    /// +---+--------------------+
10233    /// ```
10234    #[inline]
10235    pub fn vrangess_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10236    where Assembler<'a>: VrangessMaskzSaeEmitter<A, B, C, D> {
10237        <Self as VrangessMaskzSaeEmitter<A, B, C, D>>::vrangess_maskz_sae(self, op0, op1, op2, op3);
10238    }
10239    /// `VRANGESS_SAE` (VRANGESS). 
10240    /// This instruction calculates a range operation output from two input single-precision floating-point values in the low dword element of the first source operand (the second operand) and second source operand (the third operand). The range output is written to the low dword element of the destination operand (the first operand) under the writemask k1.
10241    ///
10242    ///
10243    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VRANGESS.html).
10244    ///
10245    /// Supported operand variants:
10246    ///
10247    /// ```text
10248    /// +---+--------------------+
10249    /// | # | Operands           |
10250    /// +---+--------------------+
10251    /// | 1 | Xmm, Xmm, Xmm, Imm |
10252    /// +---+--------------------+
10253    /// ```
10254    #[inline]
10255    pub fn vrangess_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10256    where Assembler<'a>: VrangessSaeEmitter<A, B, C, D> {
10257        <Self as VrangessSaeEmitter<A, B, C, D>>::vrangess_sae(self, op0, op1, op2, op3);
10258    }
10259    /// `VREDUCEPD` (VREDUCEPD). 
10260    /// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10261    ///
10262    ///
10263    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
10264    ///
10265    /// Supported operand variants:
10266    ///
10267    /// ```text
10268    /// +---+---------------+
10269    /// | # | Operands      |
10270    /// +---+---------------+
10271    /// | 1 | Xmm, Mem, Imm |
10272    /// | 2 | Xmm, Xmm, Imm |
10273    /// | 3 | Ymm, Mem, Imm |
10274    /// | 4 | Ymm, Ymm, Imm |
10275    /// | 5 | Zmm, Mem, Imm |
10276    /// | 6 | Zmm, Zmm, Imm |
10277    /// +---+---------------+
10278    /// ```
10279    #[inline]
10280    pub fn vreducepd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10281    where Assembler<'a>: VreducepdEmitter<A, B, C> {
10282        <Self as VreducepdEmitter<A, B, C>>::vreducepd(self, op0, op1, op2);
10283    }
10284    /// `VREDUCEPD_MASK` (VREDUCEPD). 
10285    /// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10286    ///
10287    ///
10288    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
10289    ///
10290    /// Supported operand variants:
10291    ///
10292    /// ```text
10293    /// +---+---------------+
10294    /// | # | Operands      |
10295    /// +---+---------------+
10296    /// | 1 | Xmm, Mem, Imm |
10297    /// | 2 | Xmm, Xmm, Imm |
10298    /// | 3 | Ymm, Mem, Imm |
10299    /// | 4 | Ymm, Ymm, Imm |
10300    /// | 5 | Zmm, Mem, Imm |
10301    /// | 6 | Zmm, Zmm, Imm |
10302    /// +---+---------------+
10303    /// ```
10304    #[inline]
10305    pub fn vreducepd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10306    where Assembler<'a>: VreducepdMaskEmitter<A, B, C> {
10307        <Self as VreducepdMaskEmitter<A, B, C>>::vreducepd_mask(self, op0, op1, op2);
10308    }
10309    /// `VREDUCEPD_MASK_SAE` (VREDUCEPD). 
10310    /// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10311    ///
10312    ///
10313    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
10314    ///
10315    /// Supported operand variants:
10316    ///
10317    /// ```text
10318    /// +---+---------------+
10319    /// | # | Operands      |
10320    /// +---+---------------+
10321    /// | 1 | Zmm, Zmm, Imm |
10322    /// +---+---------------+
10323    /// ```
10324    #[inline]
10325    pub fn vreducepd_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10326    where Assembler<'a>: VreducepdMaskSaeEmitter<A, B, C> {
10327        <Self as VreducepdMaskSaeEmitter<A, B, C>>::vreducepd_mask_sae(self, op0, op1, op2);
10328    }
10329    /// `VREDUCEPD_MASKZ` (VREDUCEPD). 
10330    /// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10331    ///
10332    ///
10333    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
10334    ///
10335    /// Supported operand variants:
10336    ///
10337    /// ```text
10338    /// +---+---------------+
10339    /// | # | Operands      |
10340    /// +---+---------------+
10341    /// | 1 | Xmm, Mem, Imm |
10342    /// | 2 | Xmm, Xmm, Imm |
10343    /// | 3 | Ymm, Mem, Imm |
10344    /// | 4 | Ymm, Ymm, Imm |
10345    /// | 5 | Zmm, Mem, Imm |
10346    /// | 6 | Zmm, Zmm, Imm |
10347    /// +---+---------------+
10348    /// ```
10349    #[inline]
10350    pub fn vreducepd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10351    where Assembler<'a>: VreducepdMaskzEmitter<A, B, C> {
10352        <Self as VreducepdMaskzEmitter<A, B, C>>::vreducepd_maskz(self, op0, op1, op2);
10353    }
10354    /// `VREDUCEPD_MASKZ_SAE` (VREDUCEPD). 
10355    /// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10356    ///
10357    ///
10358    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
10359    ///
10360    /// Supported operand variants:
10361    ///
10362    /// ```text
10363    /// +---+---------------+
10364    /// | # | Operands      |
10365    /// +---+---------------+
10366    /// | 1 | Zmm, Zmm, Imm |
10367    /// +---+---------------+
10368    /// ```
10369    #[inline]
10370    pub fn vreducepd_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10371    where Assembler<'a>: VreducepdMaskzSaeEmitter<A, B, C> {
10372        <Self as VreducepdMaskzSaeEmitter<A, B, C>>::vreducepd_maskz_sae(self, op0, op1, op2);
10373    }
10374    /// `VREDUCEPD_SAE` (VREDUCEPD). 
10375    /// Perform reduction transformation of the packed binary encoded double precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10376    ///
10377    ///
10378    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPD.html).
10379    ///
10380    /// Supported operand variants:
10381    ///
10382    /// ```text
10383    /// +---+---------------+
10384    /// | # | Operands      |
10385    /// +---+---------------+
10386    /// | 1 | Zmm, Zmm, Imm |
10387    /// +---+---------------+
10388    /// ```
10389    #[inline]
10390    pub fn vreducepd_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10391    where Assembler<'a>: VreducepdSaeEmitter<A, B, C> {
10392        <Self as VreducepdSaeEmitter<A, B, C>>::vreducepd_sae(self, op0, op1, op2);
10393    }
10394    /// `VREDUCEPS` (VREDUCEPS). 
10395    /// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10396    ///
10397    ///
10398    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
10399    ///
10400    /// Supported operand variants:
10401    ///
10402    /// ```text
10403    /// +---+---------------+
10404    /// | # | Operands      |
10405    /// +---+---------------+
10406    /// | 1 | Xmm, Mem, Imm |
10407    /// | 2 | Xmm, Xmm, Imm |
10408    /// | 3 | Ymm, Mem, Imm |
10409    /// | 4 | Ymm, Ymm, Imm |
10410    /// | 5 | Zmm, Mem, Imm |
10411    /// | 6 | Zmm, Zmm, Imm |
10412    /// +---+---------------+
10413    /// ```
10414    #[inline]
10415    pub fn vreduceps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10416    where Assembler<'a>: VreducepsEmitter<A, B, C> {
10417        <Self as VreducepsEmitter<A, B, C>>::vreduceps(self, op0, op1, op2);
10418    }
10419    /// `VREDUCEPS_MASK` (VREDUCEPS). 
10420    /// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10421    ///
10422    ///
10423    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
10424    ///
10425    /// Supported operand variants:
10426    ///
10427    /// ```text
10428    /// +---+---------------+
10429    /// | # | Operands      |
10430    /// +---+---------------+
10431    /// | 1 | Xmm, Mem, Imm |
10432    /// | 2 | Xmm, Xmm, Imm |
10433    /// | 3 | Ymm, Mem, Imm |
10434    /// | 4 | Ymm, Ymm, Imm |
10435    /// | 5 | Zmm, Mem, Imm |
10436    /// | 6 | Zmm, Zmm, Imm |
10437    /// +---+---------------+
10438    /// ```
10439    #[inline]
10440    pub fn vreduceps_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10441    where Assembler<'a>: VreducepsMaskEmitter<A, B, C> {
10442        <Self as VreducepsMaskEmitter<A, B, C>>::vreduceps_mask(self, op0, op1, op2);
10443    }
10444    /// `VREDUCEPS_MASK_SAE` (VREDUCEPS). 
10445    /// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10446    ///
10447    ///
10448    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
10449    ///
10450    /// Supported operand variants:
10451    ///
10452    /// ```text
10453    /// +---+---------------+
10454    /// | # | Operands      |
10455    /// +---+---------------+
10456    /// | 1 | Zmm, Zmm, Imm |
10457    /// +---+---------------+
10458    /// ```
10459    #[inline]
10460    pub fn vreduceps_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10461    where Assembler<'a>: VreducepsMaskSaeEmitter<A, B, C> {
10462        <Self as VreducepsMaskSaeEmitter<A, B, C>>::vreduceps_mask_sae(self, op0, op1, op2);
10463    }
10464    /// `VREDUCEPS_MASKZ` (VREDUCEPS). 
10465    /// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10466    ///
10467    ///
10468    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
10469    ///
10470    /// Supported operand variants:
10471    ///
10472    /// ```text
10473    /// +---+---------------+
10474    /// | # | Operands      |
10475    /// +---+---------------+
10476    /// | 1 | Xmm, Mem, Imm |
10477    /// | 2 | Xmm, Xmm, Imm |
10478    /// | 3 | Ymm, Mem, Imm |
10479    /// | 4 | Ymm, Ymm, Imm |
10480    /// | 5 | Zmm, Mem, Imm |
10481    /// | 6 | Zmm, Zmm, Imm |
10482    /// +---+---------------+
10483    /// ```
10484    #[inline]
10485    pub fn vreduceps_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10486    where Assembler<'a>: VreducepsMaskzEmitter<A, B, C> {
10487        <Self as VreducepsMaskzEmitter<A, B, C>>::vreduceps_maskz(self, op0, op1, op2);
10488    }
10489    /// `VREDUCEPS_MASKZ_SAE` (VREDUCEPS). 
10490    /// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10491    ///
10492    ///
10493    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
10494    ///
10495    /// Supported operand variants:
10496    ///
10497    /// ```text
10498    /// +---+---------------+
10499    /// | # | Operands      |
10500    /// +---+---------------+
10501    /// | 1 | Zmm, Zmm, Imm |
10502    /// +---+---------------+
10503    /// ```
10504    #[inline]
10505    pub fn vreduceps_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10506    where Assembler<'a>: VreducepsMaskzSaeEmitter<A, B, C> {
10507        <Self as VreducepsMaskzSaeEmitter<A, B, C>>::vreduceps_maskz_sae(self, op0, op1, op2);
10508    }
10509    /// `VREDUCEPS_SAE` (VREDUCEPS). 
10510    /// Perform reduction transformation of the packed binary encoded single-precision floating-point values in the source operand (the second operand) and store the reduced results in binary floating-point format to the destination operand (the first operand) under the writemask k1.
10511    ///
10512    ///
10513    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCEPS.html).
10514    ///
10515    /// Supported operand variants:
10516    ///
10517    /// ```text
10518    /// +---+---------------+
10519    /// | # | Operands      |
10520    /// +---+---------------+
10521    /// | 1 | Zmm, Zmm, Imm |
10522    /// +---+---------------+
10523    /// ```
10524    #[inline]
10525    pub fn vreduceps_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10526    where Assembler<'a>: VreducepsSaeEmitter<A, B, C> {
10527        <Self as VreducepsSaeEmitter<A, B, C>>::vreduceps_sae(self, op0, op1, op2);
10528    }
10529    /// `VREDUCESD` (VREDUCESD). 
10530    /// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
10531    ///
10532    ///
10533    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
10534    ///
10535    /// Supported operand variants:
10536    ///
10537    /// ```text
10538    /// +---+--------------------+
10539    /// | # | Operands           |
10540    /// +---+--------------------+
10541    /// | 1 | Xmm, Xmm, Mem, Imm |
10542    /// | 2 | Xmm, Xmm, Xmm, Imm |
10543    /// +---+--------------------+
10544    /// ```
10545    #[inline]
10546    pub fn vreducesd<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10547    where Assembler<'a>: VreducesdEmitter<A, B, C, D> {
10548        <Self as VreducesdEmitter<A, B, C, D>>::vreducesd(self, op0, op1, op2, op3);
10549    }
10550    /// `VREDUCESD_MASK` (VREDUCESD). 
10551    /// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
10552    ///
10553    ///
10554    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
10555    ///
10556    /// Supported operand variants:
10557    ///
10558    /// ```text
10559    /// +---+--------------------+
10560    /// | # | Operands           |
10561    /// +---+--------------------+
10562    /// | 1 | Xmm, Xmm, Mem, Imm |
10563    /// | 2 | Xmm, Xmm, Xmm, Imm |
10564    /// +---+--------------------+
10565    /// ```
10566    #[inline]
10567    pub fn vreducesd_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10568    where Assembler<'a>: VreducesdMaskEmitter<A, B, C, D> {
10569        <Self as VreducesdMaskEmitter<A, B, C, D>>::vreducesd_mask(self, op0, op1, op2, op3);
10570    }
10571    /// `VREDUCESD_MASK_SAE` (VREDUCESD). 
10572    /// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
10573    ///
10574    ///
10575    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
10576    ///
10577    /// Supported operand variants:
10578    ///
10579    /// ```text
10580    /// +---+--------------------+
10581    /// | # | Operands           |
10582    /// +---+--------------------+
10583    /// | 1 | Xmm, Xmm, Xmm, Imm |
10584    /// +---+--------------------+
10585    /// ```
10586    #[inline]
10587    pub fn vreducesd_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10588    where Assembler<'a>: VreducesdMaskSaeEmitter<A, B, C, D> {
10589        <Self as VreducesdMaskSaeEmitter<A, B, C, D>>::vreducesd_mask_sae(self, op0, op1, op2, op3);
10590    }
10591    /// `VREDUCESD_MASKZ` (VREDUCESD). 
10592    /// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
10593    ///
10594    ///
10595    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
10596    ///
10597    /// Supported operand variants:
10598    ///
10599    /// ```text
10600    /// +---+--------------------+
10601    /// | # | Operands           |
10602    /// +---+--------------------+
10603    /// | 1 | Xmm, Xmm, Mem, Imm |
10604    /// | 2 | Xmm, Xmm, Xmm, Imm |
10605    /// +---+--------------------+
10606    /// ```
10607    #[inline]
10608    pub fn vreducesd_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10609    where Assembler<'a>: VreducesdMaskzEmitter<A, B, C, D> {
10610        <Self as VreducesdMaskzEmitter<A, B, C, D>>::vreducesd_maskz(self, op0, op1, op2, op3);
10611    }
10612    /// `VREDUCESD_MASKZ_SAE` (VREDUCESD). 
10613    /// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
10614    ///
10615    ///
10616    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
10617    ///
10618    /// Supported operand variants:
10619    ///
10620    /// ```text
10621    /// +---+--------------------+
10622    /// | # | Operands           |
10623    /// +---+--------------------+
10624    /// | 1 | Xmm, Xmm, Xmm, Imm |
10625    /// +---+--------------------+
10626    /// ```
10627    #[inline]
10628    pub fn vreducesd_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10629    where Assembler<'a>: VreducesdMaskzSaeEmitter<A, B, C, D> {
10630        <Self as VreducesdMaskzSaeEmitter<A, B, C, D>>::vreducesd_maskz_sae(self, op0, op1, op2, op3);
10631    }
10632    /// `VREDUCESD_SAE` (VREDUCESD). 
10633    /// Perform a reduction transformation of the binary encoded double precision floating-point value in the low qword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low qword element of the destination operand (the first operand) under the writemask k1. Bits 127:64 of the destination operand are copied from respective qword elements of the first source operand (the second operand).
10634    ///
10635    ///
10636    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESD.html).
10637    ///
10638    /// Supported operand variants:
10639    ///
10640    /// ```text
10641    /// +---+--------------------+
10642    /// | # | Operands           |
10643    /// +---+--------------------+
10644    /// | 1 | Xmm, Xmm, Xmm, Imm |
10645    /// +---+--------------------+
10646    /// ```
10647    #[inline]
10648    pub fn vreducesd_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10649    where Assembler<'a>: VreducesdSaeEmitter<A, B, C, D> {
10650        <Self as VreducesdSaeEmitter<A, B, C, D>>::vreducesd_sae(self, op0, op1, op2, op3);
10651    }
10652    /// `VREDUCESS` (VREDUCESS). 
10653    /// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
10654    ///
10655    ///
10656    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
10657    ///
10658    /// Supported operand variants:
10659    ///
10660    /// ```text
10661    /// +---+--------------------+
10662    /// | # | Operands           |
10663    /// +---+--------------------+
10664    /// | 1 | Xmm, Xmm, Mem, Imm |
10665    /// | 2 | Xmm, Xmm, Xmm, Imm |
10666    /// +---+--------------------+
10667    /// ```
10668    #[inline]
10669    pub fn vreducess<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10670    where Assembler<'a>: VreducessEmitter<A, B, C, D> {
10671        <Self as VreducessEmitter<A, B, C, D>>::vreducess(self, op0, op1, op2, op3);
10672    }
10673    /// `VREDUCESS_MASK` (VREDUCESS). 
10674    /// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
10675    ///
10676    ///
10677    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
10678    ///
10679    /// Supported operand variants:
10680    ///
10681    /// ```text
10682    /// +---+--------------------+
10683    /// | # | Operands           |
10684    /// +---+--------------------+
10685    /// | 1 | Xmm, Xmm, Mem, Imm |
10686    /// | 2 | Xmm, Xmm, Xmm, Imm |
10687    /// +---+--------------------+
10688    /// ```
10689    #[inline]
10690    pub fn vreducess_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10691    where Assembler<'a>: VreducessMaskEmitter<A, B, C, D> {
10692        <Self as VreducessMaskEmitter<A, B, C, D>>::vreducess_mask(self, op0, op1, op2, op3);
10693    }
10694    /// `VREDUCESS_MASK_SAE` (VREDUCESS). 
10695    /// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
10696    ///
10697    ///
10698    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
10699    ///
10700    /// Supported operand variants:
10701    ///
10702    /// ```text
10703    /// +---+--------------------+
10704    /// | # | Operands           |
10705    /// +---+--------------------+
10706    /// | 1 | Xmm, Xmm, Xmm, Imm |
10707    /// +---+--------------------+
10708    /// ```
10709    #[inline]
10710    pub fn vreducess_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10711    where Assembler<'a>: VreducessMaskSaeEmitter<A, B, C, D> {
10712        <Self as VreducessMaskSaeEmitter<A, B, C, D>>::vreducess_mask_sae(self, op0, op1, op2, op3);
10713    }
10714    /// `VREDUCESS_MASKZ` (VREDUCESS). 
10715    /// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
10716    ///
10717    ///
10718    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
10719    ///
10720    /// Supported operand variants:
10721    ///
10722    /// ```text
10723    /// +---+--------------------+
10724    /// | # | Operands           |
10725    /// +---+--------------------+
10726    /// | 1 | Xmm, Xmm, Mem, Imm |
10727    /// | 2 | Xmm, Xmm, Xmm, Imm |
10728    /// +---+--------------------+
10729    /// ```
10730    #[inline]
10731    pub fn vreducess_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10732    where Assembler<'a>: VreducessMaskzEmitter<A, B, C, D> {
10733        <Self as VreducessMaskzEmitter<A, B, C, D>>::vreducess_maskz(self, op0, op1, op2, op3);
10734    }
10735    /// `VREDUCESS_MASKZ_SAE` (VREDUCESS). 
10736    /// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
10737    ///
10738    ///
10739    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
10740    ///
10741    /// Supported operand variants:
10742    ///
10743    /// ```text
10744    /// +---+--------------------+
10745    /// | # | Operands           |
10746    /// +---+--------------------+
10747    /// | 1 | Xmm, Xmm, Xmm, Imm |
10748    /// +---+--------------------+
10749    /// ```
10750    #[inline]
10751    pub fn vreducess_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10752    where Assembler<'a>: VreducessMaskzSaeEmitter<A, B, C, D> {
10753        <Self as VreducessMaskzSaeEmitter<A, B, C, D>>::vreducess_maskz_sae(self, op0, op1, op2, op3);
10754    }
10755    /// `VREDUCESS_SAE` (VREDUCESS). 
10756    /// Perform a reduction transformation of the binary encoded single-precision floating-point value in the low dword element of the second source operand (the third operand) and store the reduced result in binary floating-point format to the low dword element of the destination operand (the first operand) under the writemask k1. Bits 127:32 of the destination operand are copied from respective dword elements of the first source operand (the second operand).
10757    ///
10758    ///
10759    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VREDUCESS.html).
10760    ///
10761    /// Supported operand variants:
10762    ///
10763    /// ```text
10764    /// +---+--------------------+
10765    /// | # | Operands           |
10766    /// +---+--------------------+
10767    /// | 1 | Xmm, Xmm, Xmm, Imm |
10768    /// +---+--------------------+
10769    /// ```
10770    #[inline]
10771    pub fn vreducess_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
10772    where Assembler<'a>: VreducessSaeEmitter<A, B, C, D> {
10773        <Self as VreducessSaeEmitter<A, B, C, D>>::vreducess_sae(self, op0, op1, op2, op3);
10774    }
10775    /// `VXORPD` (VXORPD). 
10776    /// Performs a bitwise logical XOR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
10777    ///
10778    ///
10779    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPD.html).
10780    ///
10781    /// Supported operand variants:
10782    ///
10783    /// ```text
10784    /// +---+---------------+
10785    /// | # | Operands      |
10786    /// +---+---------------+
10787    /// | 1 | Xmm, Xmm, Mem |
10788    /// | 2 | Xmm, Xmm, Xmm |
10789    /// | 3 | Ymm, Ymm, Mem |
10790    /// | 4 | Ymm, Ymm, Ymm |
10791    /// | 5 | Zmm, Zmm, Mem |
10792    /// | 6 | Zmm, Zmm, Zmm |
10793    /// +---+---------------+
10794    /// ```
10795    #[inline]
10796    pub fn vxorpd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10797    where Assembler<'a>: VxorpdEmitter<A, B, C> {
10798        <Self as VxorpdEmitter<A, B, C>>::vxorpd(self, op0, op1, op2);
10799    }
10800    /// `VXORPD_MASK` (VXORPD). 
10801    /// Performs a bitwise logical XOR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
10802    ///
10803    ///
10804    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPD.html).
10805    ///
10806    /// Supported operand variants:
10807    ///
10808    /// ```text
10809    /// +---+---------------+
10810    /// | # | Operands      |
10811    /// +---+---------------+
10812    /// | 1 | Xmm, Xmm, Mem |
10813    /// | 2 | Xmm, Xmm, Xmm |
10814    /// | 3 | Ymm, Ymm, Mem |
10815    /// | 4 | Ymm, Ymm, Ymm |
10816    /// | 5 | Zmm, Zmm, Mem |
10817    /// | 6 | Zmm, Zmm, Zmm |
10818    /// +---+---------------+
10819    /// ```
10820    #[inline]
10821    pub fn vxorpd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10822    where Assembler<'a>: VxorpdMaskEmitter<A, B, C> {
10823        <Self as VxorpdMaskEmitter<A, B, C>>::vxorpd_mask(self, op0, op1, op2);
10824    }
10825    /// `VXORPD_MASKZ` (VXORPD). 
10826    /// Performs a bitwise logical XOR of the two, four or eight packed double precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand.
10827    ///
10828    ///
10829    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPD.html).
10830    ///
10831    /// Supported operand variants:
10832    ///
10833    /// ```text
10834    /// +---+---------------+
10835    /// | # | Operands      |
10836    /// +---+---------------+
10837    /// | 1 | Xmm, Xmm, Mem |
10838    /// | 2 | Xmm, Xmm, Xmm |
10839    /// | 3 | Ymm, Ymm, Mem |
10840    /// | 4 | Ymm, Ymm, Ymm |
10841    /// | 5 | Zmm, Zmm, Mem |
10842    /// | 6 | Zmm, Zmm, Zmm |
10843    /// +---+---------------+
10844    /// ```
10845    #[inline]
10846    pub fn vxorpd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10847    where Assembler<'a>: VxorpdMaskzEmitter<A, B, C> {
10848        <Self as VxorpdMaskzEmitter<A, B, C>>::vxorpd_maskz(self, op0, op1, op2);
10849    }
10850    /// `VXORPS` (VXORPS). 
10851    /// Performs a bitwise logical XOR of the four, eight or sixteen packed single-precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
10852    ///
10853    ///
10854    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPS.html).
10855    ///
10856    /// Supported operand variants:
10857    ///
10858    /// ```text
10859    /// +---+---------------+
10860    /// | # | Operands      |
10861    /// +---+---------------+
10862    /// | 1 | Xmm, Xmm, Mem |
10863    /// | 2 | Xmm, Xmm, Xmm |
10864    /// | 3 | Ymm, Ymm, Mem |
10865    /// | 4 | Ymm, Ymm, Ymm |
10866    /// | 5 | Zmm, Zmm, Mem |
10867    /// | 6 | Zmm, Zmm, Zmm |
10868    /// +---+---------------+
10869    /// ```
10870    #[inline]
10871    pub fn vxorps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10872    where Assembler<'a>: VxorpsEmitter<A, B, C> {
10873        <Self as VxorpsEmitter<A, B, C>>::vxorps(self, op0, op1, op2);
10874    }
10875    /// `VXORPS_MASK` (VXORPS). 
10876    /// Performs a bitwise logical XOR of the four, eight or sixteen packed single-precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
10877    ///
10878    ///
10879    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPS.html).
10880    ///
10881    /// Supported operand variants:
10882    ///
10883    /// ```text
10884    /// +---+---------------+
10885    /// | # | Operands      |
10886    /// +---+---------------+
10887    /// | 1 | Xmm, Xmm, Mem |
10888    /// | 2 | Xmm, Xmm, Xmm |
10889    /// | 3 | Ymm, Ymm, Mem |
10890    /// | 4 | Ymm, Ymm, Ymm |
10891    /// | 5 | Zmm, Zmm, Mem |
10892    /// | 6 | Zmm, Zmm, Zmm |
10893    /// +---+---------------+
10894    /// ```
10895    #[inline]
10896    pub fn vxorps_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10897    where Assembler<'a>: VxorpsMaskEmitter<A, B, C> {
10898        <Self as VxorpsMaskEmitter<A, B, C>>::vxorps_mask(self, op0, op1, op2);
10899    }
10900    /// `VXORPS_MASKZ` (VXORPS). 
10901    /// Performs a bitwise logical XOR of the four, eight or sixteen packed single-precision floating-point values from the first source operand and the second source operand, and stores the result in the destination operand
10902    ///
10903    ///
10904    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XORPS.html).
10905    ///
10906    /// Supported operand variants:
10907    ///
10908    /// ```text
10909    /// +---+---------------+
10910    /// | # | Operands      |
10911    /// +---+---------------+
10912    /// | 1 | Xmm, Xmm, Mem |
10913    /// | 2 | Xmm, Xmm, Xmm |
10914    /// | 3 | Ymm, Ymm, Mem |
10915    /// | 4 | Ymm, Ymm, Ymm |
10916    /// | 5 | Zmm, Zmm, Mem |
10917    /// | 6 | Zmm, Zmm, Zmm |
10918    /// +---+---------------+
10919    /// ```
10920    #[inline]
10921    pub fn vxorps_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
10922    where Assembler<'a>: VxorpsMaskzEmitter<A, B, C> {
10923        <Self as VxorpsMaskzEmitter<A, B, C>>::vxorps_maskz(self, op0, op1, op2);
10924    }
10925}