//! x86 "BASE" feature instruction emitters (asmkit/x86/features/BASE.rs).
//! Machine-generated trait/impl pairs mapping typed operand combinations to opcodes.
1use crate::x86::assembler::*;
2use crate::x86::operands::*;
3use super::super::opcodes::*;
4use crate::core::emitter::*;
5use crate::core::operand::*;
6
7/// A dummy operand that represents no register. Here just for simplicity.
8const NOREG: Operand = Operand::new();
9
10/// `AADD`.
11///
12/// Supported operand variants:
13///
14/// ```text
15/// +---+----------+
16/// | # | Operands |
17/// +---+----------+
18/// | 1 | Mem, Gpd |
19/// | 2 | Mem, Gpq |
20/// +---+----------+
21/// ```
22pub trait AaddEmitter<A, B> {
23    fn aadd(&mut self, op0: A, op1: B);
24}
25
26impl<'a> AaddEmitter<Mem, Gpd> for Assembler<'a> {
27    fn aadd(&mut self, op0: Mem, op1: Gpd) {
28        self.emit(AADD32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
29    }
30}
31
32impl<'a> AaddEmitter<Mem, Gpq> for Assembler<'a> {
33    fn aadd(&mut self, op0: Mem, op1: Gpq) {
34        self.emit(AADD64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
35    }
36}
37
38/// `AAND`.
39///
40/// Supported operand variants:
41///
42/// ```text
43/// +---+----------+
44/// | # | Operands |
45/// +---+----------+
46/// | 1 | Mem, Gpd |
47/// | 2 | Mem, Gpq |
48/// +---+----------+
49/// ```
50pub trait AandEmitter<A, B> {
51    fn aand(&mut self, op0: A, op1: B);
52}
53
54impl<'a> AandEmitter<Mem, Gpd> for Assembler<'a> {
55    fn aand(&mut self, op0: Mem, op1: Gpd) {
56        self.emit(AAND32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
57    }
58}
59
60impl<'a> AandEmitter<Mem, Gpq> for Assembler<'a> {
61    fn aand(&mut self, op0: Mem, op1: Gpq) {
62        self.emit(AAND64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
63    }
64}
65
66/// `ADC` (ADC). 
67/// Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
68///
69///
70/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADC.html).
71///
72/// Supported operand variants:
73///
74/// ```text
75/// +----+--------------+
76/// | #  | Operands     |
77/// +----+--------------+
78/// | 1  | GpbLo, GpbLo |
79/// | 2  | GpbLo, Imm   |
80/// | 3  | GpbLo, Mem   |
81/// | 4  | Gpd, Gpd     |
82/// | 5  | Gpd, Imm     |
83/// | 6  | Gpd, Mem     |
84/// | 7  | Gpq, Gpq     |
85/// | 8  | Gpq, Imm     |
86/// | 9  | Gpq, Mem     |
87/// | 10 | Gpw, Gpw     |
88/// | 11 | Gpw, Imm     |
89/// | 12 | Gpw, Mem     |
90/// | 13 | Mem, GpbLo   |
91/// | 14 | Mem, Gpd     |
92/// | 15 | Mem, Gpq     |
93/// | 16 | Mem, Gpw     |
94/// | 17 | Mem, Imm     |
95/// +----+--------------+
96/// ```
97pub trait AdcEmitter<A, B> {
98    fn adc(&mut self, op0: A, op1: B);
99}
100
101impl<'a> AdcEmitter<GpbLo, GpbLo> for Assembler<'a> {
102    fn adc(&mut self, op0: GpbLo, op1: GpbLo) {
103        self.emit(ADC8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
104    }
105}
106
107impl<'a> AdcEmitter<Mem, GpbLo> for Assembler<'a> {
108    fn adc(&mut self, op0: Mem, op1: GpbLo) {
109        self.emit(ADC8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
110    }
111}
112
113impl<'a> AdcEmitter<Gpw, Gpw> for Assembler<'a> {
114    fn adc(&mut self, op0: Gpw, op1: Gpw) {
115        self.emit(ADC16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
116    }
117}
118
119impl<'a> AdcEmitter<Mem, Gpw> for Assembler<'a> {
120    fn adc(&mut self, op0: Mem, op1: Gpw) {
121        self.emit(ADC16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
122    }
123}
124
125impl<'a> AdcEmitter<Gpd, Gpd> for Assembler<'a> {
126    fn adc(&mut self, op0: Gpd, op1: Gpd) {
127        self.emit(ADC32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
128    }
129}
130
131impl<'a> AdcEmitter<Mem, Gpd> for Assembler<'a> {
132    fn adc(&mut self, op0: Mem, op1: Gpd) {
133        self.emit(ADC32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
134    }
135}
136
137impl<'a> AdcEmitter<Gpq, Gpq> for Assembler<'a> {
138    fn adc(&mut self, op0: Gpq, op1: Gpq) {
139        self.emit(ADC64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
140    }
141}
142
143impl<'a> AdcEmitter<Mem, Gpq> for Assembler<'a> {
144    fn adc(&mut self, op0: Mem, op1: Gpq) {
145        self.emit(ADC64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
146    }
147}
148
149impl<'a> AdcEmitter<GpbLo, Mem> for Assembler<'a> {
150    fn adc(&mut self, op0: GpbLo, op1: Mem) {
151        self.emit(ADC8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
152    }
153}
154
155impl<'a> AdcEmitter<Gpw, Mem> for Assembler<'a> {
156    fn adc(&mut self, op0: Gpw, op1: Mem) {
157        self.emit(ADC16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
158    }
159}
160
161impl<'a> AdcEmitter<Gpd, Mem> for Assembler<'a> {
162    fn adc(&mut self, op0: Gpd, op1: Mem) {
163        self.emit(ADC32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
164    }
165}
166
167impl<'a> AdcEmitter<Gpq, Mem> for Assembler<'a> {
168    fn adc(&mut self, op0: Gpq, op1: Mem) {
169        self.emit(ADC64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
170    }
171}
172
173impl<'a> AdcEmitter<GpbLo, Imm> for Assembler<'a> {
174    fn adc(&mut self, op0: GpbLo, op1: Imm) {
175        self.emit(ADC8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
176    }
177}
178
179impl<'a> AdcEmitter<Gpw, Imm> for Assembler<'a> {
180    fn adc(&mut self, op0: Gpw, op1: Imm) {
181        self.emit(ADC16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
182    }
183}
184
185impl<'a> AdcEmitter<Gpd, Imm> for Assembler<'a> {
186    fn adc(&mut self, op0: Gpd, op1: Imm) {
187        self.emit(ADC32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
188    }
189}
190
191impl<'a> AdcEmitter<Gpq, Imm> for Assembler<'a> {
192    fn adc(&mut self, op0: Gpq, op1: Imm) {
193        self.emit(ADC64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
194    }
195}
196
197impl<'a> AdcEmitter<Mem, Imm> for Assembler<'a> {
198    fn adc(&mut self, op0: Mem, op1: Imm) {
199        self.emit(ADC8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
200    }
201}
202
203/// `ADD` (ADD). 
204/// Adds the destination operand (first operand) and the source operand (second operand) and then stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
205///
206///
207/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADD.html).
208///
209/// Supported operand variants:
210///
211/// ```text
212/// +----+--------------+
213/// | #  | Operands     |
214/// +----+--------------+
215/// | 1  | GpbLo, GpbLo |
216/// | 2  | GpbLo, Imm   |
217/// | 3  | GpbLo, Mem   |
218/// | 4  | Gpd, Gpd     |
219/// | 5  | Gpd, Imm     |
220/// | 6  | Gpd, Mem     |
221/// | 7  | Gpq, Gpq     |
222/// | 8  | Gpq, Imm     |
223/// | 9  | Gpq, Mem     |
224/// | 10 | Gpw, Gpw     |
225/// | 11 | Gpw, Imm     |
226/// | 12 | Gpw, Mem     |
227/// | 13 | Mem, GpbLo   |
228/// | 14 | Mem, Gpd     |
229/// | 15 | Mem, Gpq     |
230/// | 16 | Mem, Gpw     |
231/// | 17 | Mem, Imm     |
232/// +----+--------------+
233/// ```
234pub trait AddEmitter<A, B> {
235    fn add(&mut self, op0: A, op1: B);
236}
237
238impl<'a> AddEmitter<GpbLo, GpbLo> for Assembler<'a> {
239    fn add(&mut self, op0: GpbLo, op1: GpbLo) {
240        self.emit(ADD8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
241    }
242}
243
244impl<'a> AddEmitter<Mem, GpbLo> for Assembler<'a> {
245    fn add(&mut self, op0: Mem, op1: GpbLo) {
246        self.emit(ADD8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
247    }
248}
249
250impl<'a> AddEmitter<Gpw, Gpw> for Assembler<'a> {
251    fn add(&mut self, op0: Gpw, op1: Gpw) {
252        self.emit(ADD16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
253    }
254}
255
256impl<'a> AddEmitter<Mem, Gpw> for Assembler<'a> {
257    fn add(&mut self, op0: Mem, op1: Gpw) {
258        self.emit(ADD16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
259    }
260}
261
262impl<'a> AddEmitter<Gpd, Gpd> for Assembler<'a> {
263    fn add(&mut self, op0: Gpd, op1: Gpd) {
264        self.emit(ADD32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
265    }
266}
267
268impl<'a> AddEmitter<Mem, Gpd> for Assembler<'a> {
269    fn add(&mut self, op0: Mem, op1: Gpd) {
270        self.emit(ADD32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
271    }
272}
273
274impl<'a> AddEmitter<Gpq, Gpq> for Assembler<'a> {
275    fn add(&mut self, op0: Gpq, op1: Gpq) {
276        self.emit(ADD64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
277    }
278}
279
280impl<'a> AddEmitter<Mem, Gpq> for Assembler<'a> {
281    fn add(&mut self, op0: Mem, op1: Gpq) {
282        self.emit(ADD64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
283    }
284}
285
286impl<'a> AddEmitter<GpbLo, Mem> for Assembler<'a> {
287    fn add(&mut self, op0: GpbLo, op1: Mem) {
288        self.emit(ADD8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
289    }
290}
291
292impl<'a> AddEmitter<Gpw, Mem> for Assembler<'a> {
293    fn add(&mut self, op0: Gpw, op1: Mem) {
294        self.emit(ADD16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
295    }
296}
297
298impl<'a> AddEmitter<Gpd, Mem> for Assembler<'a> {
299    fn add(&mut self, op0: Gpd, op1: Mem) {
300        self.emit(ADD32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
301    }
302}
303
304impl<'a> AddEmitter<Gpq, Mem> for Assembler<'a> {
305    fn add(&mut self, op0: Gpq, op1: Mem) {
306        self.emit(ADD64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
307    }
308}
309
310impl<'a> AddEmitter<GpbLo, Imm> for Assembler<'a> {
311    fn add(&mut self, op0: GpbLo, op1: Imm) {
312        self.emit(ADD8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
313    }
314}
315
316impl<'a> AddEmitter<Gpw, Imm> for Assembler<'a> {
317    fn add(&mut self, op0: Gpw, op1: Imm) {
318        self.emit(ADD16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
319    }
320}
321
322impl<'a> AddEmitter<Gpd, Imm> for Assembler<'a> {
323    fn add(&mut self, op0: Gpd, op1: Imm) {
324        self.emit(ADD32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
325    }
326}
327
328impl<'a> AddEmitter<Gpq, Imm> for Assembler<'a> {
329    fn add(&mut self, op0: Gpq, op1: Imm) {
330        self.emit(ADD64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
331    }
332}
333
334impl<'a> AddEmitter<Mem, Imm> for Assembler<'a> {
335    fn add(&mut self, op0: Mem, op1: Imm) {
336        self.emit(ADD8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
337    }
338}
339
340/// `AND` (AND). 
341/// Performs a bitwise AND operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is set to 1 if both corresponding bits of the first and second operands are 1; otherwise, it is set to 0.
342///
343///
344/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AND.html).
345///
346/// Supported operand variants:
347///
348/// ```text
349/// +----+--------------+
350/// | #  | Operands     |
351/// +----+--------------+
352/// | 1  | GpbLo, GpbLo |
353/// | 2  | GpbLo, Imm   |
354/// | 3  | GpbLo, Mem   |
355/// | 4  | Gpd, Gpd     |
356/// | 5  | Gpd, Imm     |
357/// | 6  | Gpd, Mem     |
358/// | 7  | Gpq, Gpq     |
359/// | 8  | Gpq, Imm     |
360/// | 9  | Gpq, Mem     |
361/// | 10 | Gpw, Gpw     |
362/// | 11 | Gpw, Imm     |
363/// | 12 | Gpw, Mem     |
364/// | 13 | Mem, GpbLo   |
365/// | 14 | Mem, Gpd     |
366/// | 15 | Mem, Gpq     |
367/// | 16 | Mem, Gpw     |
368/// | 17 | Mem, Imm     |
369/// +----+--------------+
370/// ```
371pub trait AndEmitter<A, B> {
372    fn and(&mut self, op0: A, op1: B);
373}
374
375impl<'a> AndEmitter<GpbLo, GpbLo> for Assembler<'a> {
376    fn and(&mut self, op0: GpbLo, op1: GpbLo) {
377        self.emit(AND8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
378    }
379}
380
381impl<'a> AndEmitter<Mem, GpbLo> for Assembler<'a> {
382    fn and(&mut self, op0: Mem, op1: GpbLo) {
383        self.emit(AND8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
384    }
385}
386
387impl<'a> AndEmitter<Gpw, Gpw> for Assembler<'a> {
388    fn and(&mut self, op0: Gpw, op1: Gpw) {
389        self.emit(AND16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
390    }
391}
392
393impl<'a> AndEmitter<Mem, Gpw> for Assembler<'a> {
394    fn and(&mut self, op0: Mem, op1: Gpw) {
395        self.emit(AND16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
396    }
397}
398
399impl<'a> AndEmitter<Gpd, Gpd> for Assembler<'a> {
400    fn and(&mut self, op0: Gpd, op1: Gpd) {
401        self.emit(AND32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
402    }
403}
404
405impl<'a> AndEmitter<Mem, Gpd> for Assembler<'a> {
406    fn and(&mut self, op0: Mem, op1: Gpd) {
407        self.emit(AND32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
408    }
409}
410
411impl<'a> AndEmitter<Gpq, Gpq> for Assembler<'a> {
412    fn and(&mut self, op0: Gpq, op1: Gpq) {
413        self.emit(AND64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
414    }
415}
416
417impl<'a> AndEmitter<Mem, Gpq> for Assembler<'a> {
418    fn and(&mut self, op0: Mem, op1: Gpq) {
419        self.emit(AND64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
420    }
421}
422
423impl<'a> AndEmitter<GpbLo, Mem> for Assembler<'a> {
424    fn and(&mut self, op0: GpbLo, op1: Mem) {
425        self.emit(AND8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
426    }
427}
428
429impl<'a> AndEmitter<Gpw, Mem> for Assembler<'a> {
430    fn and(&mut self, op0: Gpw, op1: Mem) {
431        self.emit(AND16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
432    }
433}
434
435impl<'a> AndEmitter<Gpd, Mem> for Assembler<'a> {
436    fn and(&mut self, op0: Gpd, op1: Mem) {
437        self.emit(AND32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
438    }
439}
440
441impl<'a> AndEmitter<Gpq, Mem> for Assembler<'a> {
442    fn and(&mut self, op0: Gpq, op1: Mem) {
443        self.emit(AND64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
444    }
445}
446
447impl<'a> AndEmitter<GpbLo, Imm> for Assembler<'a> {
448    fn and(&mut self, op0: GpbLo, op1: Imm) {
449        self.emit(AND8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
450    }
451}
452
453impl<'a> AndEmitter<Gpw, Imm> for Assembler<'a> {
454    fn and(&mut self, op0: Gpw, op1: Imm) {
455        self.emit(AND16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
456    }
457}
458
459impl<'a> AndEmitter<Gpd, Imm> for Assembler<'a> {
460    fn and(&mut self, op0: Gpd, op1: Imm) {
461        self.emit(AND32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
462    }
463}
464
465impl<'a> AndEmitter<Gpq, Imm> for Assembler<'a> {
466    fn and(&mut self, op0: Gpq, op1: Imm) {
467        self.emit(AND64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
468    }
469}
470
471impl<'a> AndEmitter<Mem, Imm> for Assembler<'a> {
472    fn and(&mut self, op0: Mem, op1: Imm) {
473        self.emit(AND8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
474    }
475}
476
477/// `AOR`.
478///
479/// Supported operand variants:
480///
481/// ```text
482/// +---+----------+
483/// | # | Operands |
484/// +---+----------+
485/// | 1 | Mem, Gpd |
486/// | 2 | Mem, Gpq |
487/// +---+----------+
488/// ```
489pub trait AorEmitter<A, B> {
490    fn aor(&mut self, op0: A, op1: B);
491}
492
493impl<'a> AorEmitter<Mem, Gpd> for Assembler<'a> {
494    fn aor(&mut self, op0: Mem, op1: Gpd) {
495        self.emit(AOR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
496    }
497}
498
499impl<'a> AorEmitter<Mem, Gpq> for Assembler<'a> {
500    fn aor(&mut self, op0: Mem, op1: Gpq) {
501        self.emit(AOR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
502    }
503}
504
505/// `AXOR`.
506///
507/// Supported operand variants:
508///
509/// ```text
510/// +---+----------+
511/// | # | Operands |
512/// +---+----------+
513/// | 1 | Mem, Gpd |
514/// | 2 | Mem, Gpq |
515/// +---+----------+
516/// ```
517pub trait AxorEmitter<A, B> {
518    fn axor(&mut self, op0: A, op1: B);
519}
520
521impl<'a> AxorEmitter<Mem, Gpd> for Assembler<'a> {
522    fn axor(&mut self, op0: Mem, op1: Gpd) {
523        self.emit(AXOR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
524    }
525}
526
527impl<'a> AxorEmitter<Mem, Gpq> for Assembler<'a> {
528    fn axor(&mut self, op0: Mem, op1: Gpq) {
529        self.emit(AXOR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
530    }
531}
532
533/// `BSF` (BSF). 
534/// Searches the source operand (second operand) for the least significant set bit (1 bit). If a least significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content of the source operand is 0, the content of the destination operand is undefined.
535///
536///
537/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSF.html).
538///
539/// Supported operand variants:
540///
541/// ```text
542/// +---+----------+
543/// | # | Operands |
544/// +---+----------+
545/// | 1 | Gpd, Gpd |
546/// | 2 | Gpd, Mem |
547/// | 3 | Gpq, Gpq |
548/// | 4 | Gpq, Mem |
549/// | 5 | Gpw, Gpw |
550/// | 6 | Gpw, Mem |
551/// +---+----------+
552/// ```
553pub trait BsfEmitter<A, B> {
554    fn bsf(&mut self, op0: A, op1: B);
555}
556
557impl<'a> BsfEmitter<Gpw, Gpw> for Assembler<'a> {
558    fn bsf(&mut self, op0: Gpw, op1: Gpw) {
559        self.emit(BSF16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
560    }
561}
562
563impl<'a> BsfEmitter<Gpw, Mem> for Assembler<'a> {
564    fn bsf(&mut self, op0: Gpw, op1: Mem) {
565        self.emit(BSF16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
566    }
567}
568
569impl<'a> BsfEmitter<Gpd, Gpd> for Assembler<'a> {
570    fn bsf(&mut self, op0: Gpd, op1: Gpd) {
571        self.emit(BSF32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
572    }
573}
574
575impl<'a> BsfEmitter<Gpd, Mem> for Assembler<'a> {
576    fn bsf(&mut self, op0: Gpd, op1: Mem) {
577        self.emit(BSF32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
578    }
579}
580
581impl<'a> BsfEmitter<Gpq, Gpq> for Assembler<'a> {
582    fn bsf(&mut self, op0: Gpq, op1: Gpq) {
583        self.emit(BSF64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
584    }
585}
586
587impl<'a> BsfEmitter<Gpq, Mem> for Assembler<'a> {
588    fn bsf(&mut self, op0: Gpq, op1: Mem) {
589        self.emit(BSF64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
590    }
591}
592
593/// `BSR` (BSR). 
594/// Searches the source operand (second operand) for the most significant set bit (1 bit). If a most significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content source operand is 0, the content of the destination operand is undefined.
595///
596///
597/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSR.html).
598///
599/// Supported operand variants:
600///
601/// ```text
602/// +---+----------+
603/// | # | Operands |
604/// +---+----------+
605/// | 1 | Gpd, Gpd |
606/// | 2 | Gpd, Mem |
607/// | 3 | Gpq, Gpq |
608/// | 4 | Gpq, Mem |
609/// | 5 | Gpw, Gpw |
610/// | 6 | Gpw, Mem |
611/// +---+----------+
612/// ```
613pub trait BsrEmitter<A, B> {
614    fn bsr(&mut self, op0: A, op1: B);
615}
616
617impl<'a> BsrEmitter<Gpw, Gpw> for Assembler<'a> {
618    fn bsr(&mut self, op0: Gpw, op1: Gpw) {
619        self.emit(BSR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
620    }
621}
622
623impl<'a> BsrEmitter<Gpw, Mem> for Assembler<'a> {
624    fn bsr(&mut self, op0: Gpw, op1: Mem) {
625        self.emit(BSR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
626    }
627}
628
629impl<'a> BsrEmitter<Gpd, Gpd> for Assembler<'a> {
630    fn bsr(&mut self, op0: Gpd, op1: Gpd) {
631        self.emit(BSR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
632    }
633}
634
635impl<'a> BsrEmitter<Gpd, Mem> for Assembler<'a> {
636    fn bsr(&mut self, op0: Gpd, op1: Mem) {
637        self.emit(BSR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
638    }
639}
640
641impl<'a> BsrEmitter<Gpq, Gpq> for Assembler<'a> {
642    fn bsr(&mut self, op0: Gpq, op1: Gpq) {
643        self.emit(BSR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
644    }
645}
646
647impl<'a> BsrEmitter<Gpq, Mem> for Assembler<'a> {
648    fn bsr(&mut self, op0: Gpq, op1: Mem) {
649        self.emit(BSR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
650    }
651}
652
653/// `BT` (BT). 
654/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset (specified by the second operand) and stores the value of the bit in the CF flag. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
655///
656///
657/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BT.html).
658///
659/// Supported operand variants:
660///
661/// ```text
662/// +----+----------+
663/// | #  | Operands |
664/// +----+----------+
665/// | 1  | Gpd, Gpd |
666/// | 2  | Gpd, Imm |
667/// | 3  | Gpq, Gpq |
668/// | 4  | Gpq, Imm |
669/// | 5  | Gpw, Gpw |
670/// | 6  | Gpw, Imm |
671/// | 7  | Mem, Gpd |
672/// | 8  | Mem, Gpq |
673/// | 9  | Mem, Gpw |
674/// | 10 | Mem, Imm |
675/// +----+----------+
676/// ```
677pub trait BtEmitter<A, B> {
678    fn bt(&mut self, op0: A, op1: B);
679}
680
681impl<'a> BtEmitter<Gpw, Gpw> for Assembler<'a> {
682    fn bt(&mut self, op0: Gpw, op1: Gpw) {
683        self.emit(BT16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
684    }
685}
686
687impl<'a> BtEmitter<Mem, Gpw> for Assembler<'a> {
688    fn bt(&mut self, op0: Mem, op1: Gpw) {
689        self.emit(BT16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
690    }
691}
692
693impl<'a> BtEmitter<Gpd, Gpd> for Assembler<'a> {
694    fn bt(&mut self, op0: Gpd, op1: Gpd) {
695        self.emit(BT32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
696    }
697}
698
699impl<'a> BtEmitter<Mem, Gpd> for Assembler<'a> {
700    fn bt(&mut self, op0: Mem, op1: Gpd) {
701        self.emit(BT32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
702    }
703}
704
705impl<'a> BtEmitter<Gpq, Gpq> for Assembler<'a> {
706    fn bt(&mut self, op0: Gpq, op1: Gpq) {
707        self.emit(BT64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
708    }
709}
710
711impl<'a> BtEmitter<Mem, Gpq> for Assembler<'a> {
712    fn bt(&mut self, op0: Mem, op1: Gpq) {
713        self.emit(BT64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
714    }
715}
716
717impl<'a> BtEmitter<Gpw, Imm> for Assembler<'a> {
718    fn bt(&mut self, op0: Gpw, op1: Imm) {
719        self.emit(BT16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
720    }
721}
722
723impl<'a> BtEmitter<Mem, Imm> for Assembler<'a> {
724    fn bt(&mut self, op0: Mem, op1: Imm) {
725        self.emit(BT16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
726    }
727}
728
729impl<'a> BtEmitter<Gpd, Imm> for Assembler<'a> {
730    fn bt(&mut self, op0: Gpd, op1: Imm) {
731        self.emit(BT32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
732    }
733}
734
735impl<'a> BtEmitter<Gpq, Imm> for Assembler<'a> {
736    fn bt(&mut self, op0: Gpq, op1: Imm) {
737        self.emit(BT64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
738    }
739}
740
741/// `BTC` (BTC). 
742/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
743///
744///
745/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTC.html).
746///
747/// Supported operand variants:
748///
749/// ```text
750/// +----+----------+
751/// | #  | Operands |
752/// +----+----------+
753/// | 1  | Gpd, Gpd |
754/// | 2  | Gpd, Imm |
755/// | 3  | Gpq, Gpq |
756/// | 4  | Gpq, Imm |
757/// | 5  | Gpw, Gpw |
758/// | 6  | Gpw, Imm |
759/// | 7  | Mem, Gpd |
760/// | 8  | Mem, Gpq |
761/// | 9  | Mem, Gpw |
762/// | 10 | Mem, Imm |
763/// +----+----------+
764/// ```
765pub trait BtcEmitter<A, B> {
766    fn btc(&mut self, op0: A, op1: B);
767}
768
769impl<'a> BtcEmitter<Gpw, Imm> for Assembler<'a> {
770    fn btc(&mut self, op0: Gpw, op1: Imm) {
771        self.emit(BTC16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
772    }
773}
774
775impl<'a> BtcEmitter<Mem, Imm> for Assembler<'a> {
776    fn btc(&mut self, op0: Mem, op1: Imm) {
777        self.emit(BTC16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
778    }
779}
780
781impl<'a> BtcEmitter<Gpd, Imm> for Assembler<'a> {
782    fn btc(&mut self, op0: Gpd, op1: Imm) {
783        self.emit(BTC32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
784    }
785}
786
787impl<'a> BtcEmitter<Gpq, Imm> for Assembler<'a> {
788    fn btc(&mut self, op0: Gpq, op1: Imm) {
789        self.emit(BTC64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
790    }
791}
792
793impl<'a> BtcEmitter<Gpw, Gpw> for Assembler<'a> {
794    fn btc(&mut self, op0: Gpw, op1: Gpw) {
795        self.emit(BTC16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
796    }
797}
798
799impl<'a> BtcEmitter<Mem, Gpw> for Assembler<'a> {
800    fn btc(&mut self, op0: Mem, op1: Gpw) {
801        self.emit(BTC16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
802    }
803}
804
805impl<'a> BtcEmitter<Gpd, Gpd> for Assembler<'a> {
806    fn btc(&mut self, op0: Gpd, op1: Gpd) {
807        self.emit(BTC32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
808    }
809}
810
811impl<'a> BtcEmitter<Mem, Gpd> for Assembler<'a> {
812    fn btc(&mut self, op0: Mem, op1: Gpd) {
813        self.emit(BTC32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
814    }
815}
816
817impl<'a> BtcEmitter<Gpq, Gpq> for Assembler<'a> {
818    fn btc(&mut self, op0: Gpq, op1: Gpq) {
819        self.emit(BTC64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
820    }
821}
822
823impl<'a> BtcEmitter<Mem, Gpq> for Assembler<'a> {
824    fn btc(&mut self, op0: Mem, op1: Gpq) {
825        self.emit(BTC64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
826    }
827}
828
829/// `BTR` (BTR). 
830/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and clears the selected bit in the bit string to 0. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
831///
832///
833/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTR.html).
834///
835/// Supported operand variants:
836///
837/// ```text
838/// +----+----------+
839/// | #  | Operands |
840/// +----+----------+
841/// | 1  | Gpd, Gpd |
842/// | 2  | Gpd, Imm |
843/// | 3  | Gpq, Gpq |
844/// | 4  | Gpq, Imm |
845/// | 5  | Gpw, Gpw |
846/// | 6  | Gpw, Imm |
847/// | 7  | Mem, Gpd |
848/// | 8  | Mem, Gpq |
849/// | 9  | Mem, Gpw |
850/// | 10 | Mem, Imm |
851/// +----+----------+
852/// ```
853pub trait BtrEmitter<A, B> {
854    fn btr(&mut self, op0: A, op1: B);
855}
856
857impl<'a> BtrEmitter<Gpw, Gpw> for Assembler<'a> {
858    fn btr(&mut self, op0: Gpw, op1: Gpw) {
859        self.emit(BTR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
860    }
861}
862
863impl<'a> BtrEmitter<Mem, Gpw> for Assembler<'a> {
864    fn btr(&mut self, op0: Mem, op1: Gpw) {
865        self.emit(BTR16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
866    }
867}
868
869impl<'a> BtrEmitter<Gpd, Gpd> for Assembler<'a> {
870    fn btr(&mut self, op0: Gpd, op1: Gpd) {
871        self.emit(BTR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
872    }
873}
874
875impl<'a> BtrEmitter<Mem, Gpd> for Assembler<'a> {
876    fn btr(&mut self, op0: Mem, op1: Gpd) {
877        self.emit(BTR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
878    }
879}
880
881impl<'a> BtrEmitter<Gpq, Gpq> for Assembler<'a> {
882    fn btr(&mut self, op0: Gpq, op1: Gpq) {
883        self.emit(BTR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
884    }
885}
886
887impl<'a> BtrEmitter<Mem, Gpq> for Assembler<'a> {
888    fn btr(&mut self, op0: Mem, op1: Gpq) {
889        self.emit(BTR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
890    }
891}
892
893impl<'a> BtrEmitter<Gpw, Imm> for Assembler<'a> {
894    fn btr(&mut self, op0: Gpw, op1: Imm) {
895        self.emit(BTR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
896    }
897}
898
899impl<'a> BtrEmitter<Mem, Imm> for Assembler<'a> {
900    fn btr(&mut self, op0: Mem, op1: Imm) {
901        self.emit(BTR16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
902    }
903}
904
905impl<'a> BtrEmitter<Gpd, Imm> for Assembler<'a> {
906    fn btr(&mut self, op0: Gpd, op1: Imm) {
907        self.emit(BTR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
908    }
909}
910
911impl<'a> BtrEmitter<Gpq, Imm> for Assembler<'a> {
912    fn btr(&mut self, op0: Gpq, op1: Imm) {
913        self.emit(BTR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
914    }
915}
916
917/// `BTS` (BTS). 
918/// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and sets the selected bit in the bit string to 1. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
919///
920///
921/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTS.html).
922///
923/// Supported operand variants:
924///
925/// ```text
926/// +----+----------+
927/// | #  | Operands |
928/// +----+----------+
929/// | 1  | Gpd, Gpd |
930/// | 2  | Gpd, Imm |
931/// | 3  | Gpq, Gpq |
932/// | 4  | Gpq, Imm |
933/// | 5  | Gpw, Gpw |
934/// | 6  | Gpw, Imm |
935/// | 7  | Mem, Gpd |
936/// | 8  | Mem, Gpq |
937/// | 9  | Mem, Gpw |
938/// | 10 | Mem, Imm |
939/// +----+----------+
940/// ```
941pub trait BtsEmitter<A, B> {
942    fn bts(&mut self, op0: A, op1: B);
943}
944
945impl<'a> BtsEmitter<Gpw, Gpw> for Assembler<'a> {
946    fn bts(&mut self, op0: Gpw, op1: Gpw) {
947        self.emit(BTS16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
948    }
949}
950
951impl<'a> BtsEmitter<Mem, Gpw> for Assembler<'a> {
952    fn bts(&mut self, op0: Mem, op1: Gpw) {
953        self.emit(BTS16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
954    }
955}
956
957impl<'a> BtsEmitter<Gpd, Gpd> for Assembler<'a> {
958    fn bts(&mut self, op0: Gpd, op1: Gpd) {
959        self.emit(BTS32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
960    }
961}
962
963impl<'a> BtsEmitter<Mem, Gpd> for Assembler<'a> {
964    fn bts(&mut self, op0: Mem, op1: Gpd) {
965        self.emit(BTS32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
966    }
967}
968
969impl<'a> BtsEmitter<Gpq, Gpq> for Assembler<'a> {
970    fn bts(&mut self, op0: Gpq, op1: Gpq) {
971        self.emit(BTS64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
972    }
973}
974
975impl<'a> BtsEmitter<Mem, Gpq> for Assembler<'a> {
976    fn bts(&mut self, op0: Mem, op1: Gpq) {
977        self.emit(BTS64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
978    }
979}
980
981impl<'a> BtsEmitter<Gpw, Imm> for Assembler<'a> {
982    fn bts(&mut self, op0: Gpw, op1: Imm) {
983        self.emit(BTS16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
984    }
985}
986
987impl<'a> BtsEmitter<Mem, Imm> for Assembler<'a> {
988    fn bts(&mut self, op0: Mem, op1: Imm) {
989        self.emit(BTS16MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
990    }
991}
992
993impl<'a> BtsEmitter<Gpd, Imm> for Assembler<'a> {
994    fn bts(&mut self, op0: Gpd, op1: Imm) {
995        self.emit(BTS32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
996    }
997}
998
999impl<'a> BtsEmitter<Gpq, Imm> for Assembler<'a> {
1000    fn bts(&mut self, op0: Gpq, op1: Imm) {
1001        self.emit(BTS64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1002    }
1003}
1004
1005/// `CALL` (CALL). 
1006/// Saves procedure linking information on the stack and branches to the called procedure specified using the target operand. The target operand specifies the address of the first instruction in the called procedure. The operand can be an immediate value, a general-purpose register, or a memory location.
1007///
1008///
1009/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CALL.html).
1010///
1011/// Supported operand variants:
1012///
1013/// ```text
1014/// +---+----------+
1015/// | # | Operands |
1016/// +---+----------+
1017/// | 1 | Gpq      |
1018/// | 2 | Imm      |
1019/// | 3 | Label    |
1020/// | 4 | Mem      |
1021/// | 5 | Sym      |
1022/// +---+----------+
1023/// ```
1024pub trait CallEmitter<A> {
1025    fn call(&mut self, op0: A);
1026}
1027
1028impl<'a> CallEmitter<Imm> for Assembler<'a> {
1029    fn call(&mut self, op0: Imm) {
1030        self.emit(CALL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1031    }
1032}
1033
1034impl<'a> CallEmitter<Sym> for Assembler<'a> {
1035    fn call(&mut self, op0: Sym) {
1036        self.emit(CALL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1037    }
1038}
1039
1040impl<'a> CallEmitter<Label> for Assembler<'a> {
1041    fn call(&mut self, op0: Label) {
1042        self.emit(CALL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1043    }
1044}
1045
1046impl<'a> CallEmitter<Gpq> for Assembler<'a> {
1047    fn call(&mut self, op0: Gpq) {
1048        self.emit(CALLR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1049    }
1050}
1051
1052impl<'a> CallEmitter<Mem> for Assembler<'a> {
1053    fn call(&mut self, op0: Mem) {
1054        self.emit(CALLM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1055    }
1056}
1057
1058/// `CALLF`.
1059///
1060/// Supported operand variants:
1061///
1062/// ```text
1063/// +---+----------+
1064/// | # | Operands |
1065/// +---+----------+
1066/// | 1 | Mem      |
1067/// +---+----------+
1068/// ```
1069pub trait CallfEmitter<A> {
1070    fn callf(&mut self, op0: A);
1071}
1072
1073impl<'a> CallfEmitter<Mem> for Assembler<'a> {
1074    fn callf(&mut self, op0: Mem) {
1075        self.emit(CALLF16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1076    }
1077}
1078
1079/// `CBW`.
1080///
1081/// Supported operand variants:
1082///
1083/// ```text
1084/// +---+----------+
1085/// | # | Operands |
1086/// +---+----------+
1087/// | 1 | (none)   |
1088/// +---+----------+
1089/// ```
1090pub trait CbwEmitter {
1091    fn cbw(&mut self);
1092}
1093
1094impl<'a> CbwEmitter for Assembler<'a> {
1095    fn cbw(&mut self) {
1096        self.emit(CBW, &NOREG, &NOREG, &NOREG, &NOREG);
1097    }
1098}
1099
1100/// `CDQ`.
1101///
1102/// Supported operand variants:
1103///
1104/// ```text
1105/// +---+----------+
1106/// | # | Operands |
1107/// +---+----------+
1108/// | 1 | (none)   |
1109/// +---+----------+
1110/// ```
1111pub trait CdqEmitter {
1112    fn cdq(&mut self);
1113}
1114
1115impl<'a> CdqEmitter for Assembler<'a> {
1116    fn cdq(&mut self) {
1117        self.emit(CDQ, &NOREG, &NOREG, &NOREG, &NOREG);
1118    }
1119}
1120
1121/// `CDQE`.
1122///
1123/// Supported operand variants:
1124///
1125/// ```text
1126/// +---+----------+
1127/// | # | Operands |
1128/// +---+----------+
1129/// | 1 | (none)   |
1130/// +---+----------+
1131/// ```
1132pub trait CdqeEmitter {
1133    fn cdqe(&mut self);
1134}
1135
1136impl<'a> CdqeEmitter for Assembler<'a> {
1137    fn cdqe(&mut self) {
1138        self.emit(CDQE, &NOREG, &NOREG, &NOREG, &NOREG);
1139    }
1140}
1141
1142/// `CLC` (CLC). 
1143/// Clears the CF flag in the EFLAGS register. Operation is the same in all modes.
1144///
1145///
1146/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLC.html).
1147///
1148/// Supported operand variants:
1149///
1150/// ```text
1151/// +---+----------+
1152/// | # | Operands |
1153/// +---+----------+
1154/// | 1 | (none)   |
1155/// +---+----------+
1156/// ```
1157pub trait ClcEmitter {
1158    fn clc(&mut self);
1159}
1160
1161impl<'a> ClcEmitter for Assembler<'a> {
1162    fn clc(&mut self) {
1163        self.emit(CLC, &NOREG, &NOREG, &NOREG, &NOREG);
1164    }
1165}
1166
1167/// `CLD` (CLD). 
1168/// Clears the DF flag in the EFLAGS register. When the DF flag is set to 0, string operations increment the index registers (ESI and/or EDI). Operation is the same in all modes.
1169///
1170///
1171/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLD.html).
1172///
1173/// Supported operand variants:
1174///
1175/// ```text
1176/// +---+----------+
1177/// | # | Operands |
1178/// +---+----------+
1179/// | 1 | (none)   |
1180/// +---+----------+
1181/// ```
1182pub trait CldEmitter {
1183    fn cld(&mut self);
1184}
1185
1186impl<'a> CldEmitter for Assembler<'a> {
1187    fn cld(&mut self) {
1188        self.emit(CLD, &NOREG, &NOREG, &NOREG, &NOREG);
1189    }
1190}
1191
1192/// `CLFLUSH` (CLFLUSH). 
1193/// Invalidates from every level of the cache hierarchy in the cache coherence domain the cache line that contains the linear address specified with the memory operand. If that cache line contains modified data at any level of the cache hierarchy, that data is written back to memory. The source operand is a byte memory location.
1194///
1195///
1196/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLFLUSH.html).
1197///
1198/// Supported operand variants:
1199///
1200/// ```text
1201/// +---+----------+
1202/// | # | Operands |
1203/// +---+----------+
1204/// | 1 | Mem      |
1205/// +---+----------+
1206/// ```
1207pub trait ClflushEmitter<A> {
1208    fn clflush(&mut self, op0: A);
1209}
1210
1211impl<'a> ClflushEmitter<Mem> for Assembler<'a> {
1212    fn clflush(&mut self, op0: Mem) {
1213        self.emit(CLFLUSHM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1214    }
1215}
1216
1217/// `CLI` (CLI). 
1218/// In most cases, CLI clears the IF flag in the EFLAGS register and no other flags are affected. Clearing the IF flag causes the processor to ignore maskable external interrupts. The IF flag and the CLI and STI instruction have no effect on the generation of exceptions and NMI interrupts.
1219///
1220///
1221/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLI.html).
1222///
1223/// Supported operand variants:
1224///
1225/// ```text
1226/// +---+----------+
1227/// | # | Operands |
1228/// +---+----------+
1229/// | 1 | (none)   |
1230/// +---+----------+
1231/// ```
1232pub trait CliEmitter {
1233    fn cli(&mut self);
1234}
1235
1236impl<'a> CliEmitter for Assembler<'a> {
1237    fn cli(&mut self) {
1238        self.emit(CLI, &NOREG, &NOREG, &NOREG, &NOREG);
1239    }
1240}
1241
1242/// `CLTS` (CLTS). 
1243/// Clears the task-switched (TS) flag in the CR0 register. This instruction is intended for use in operating-system procedures. It is a privileged instruction that can only be executed at a CPL of 0. It is allowed to be executed in real-address mode to allow initialization for protected mode.
1244///
1245///
1246/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLTS.html).
1247///
1248/// Supported operand variants:
1249///
1250/// ```text
1251/// +---+----------+
1252/// | # | Operands |
1253/// +---+----------+
1254/// | 1 | (none)   |
1255/// +---+----------+
1256/// ```
1257pub trait CltsEmitter {
1258    fn clts(&mut self);
1259}
1260
1261impl<'a> CltsEmitter for Assembler<'a> {
1262    fn clts(&mut self) {
1263        self.emit(CLTS, &NOREG, &NOREG, &NOREG, &NOREG);
1264    }
1265}
1266
1267/// `CMC` (CMC). 
1268/// Complements the CF flag in the EFLAGS register. CMC operation is the same in non-64-bit modes and 64-bit mode.
1269///
1270///
1271/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMC.html).
1272///
1273/// Supported operand variants:
1274///
1275/// ```text
1276/// +---+----------+
1277/// | # | Operands |
1278/// +---+----------+
1279/// | 1 | (none)   |
1280/// +---+----------+
1281/// ```
1282pub trait CmcEmitter {
1283    fn cmc(&mut self);
1284}
1285
1286impl<'a> CmcEmitter for Assembler<'a> {
1287    fn cmc(&mut self) {
1288        self.emit(CMC, &NOREG, &NOREG, &NOREG, &NOREG);
1289    }
1290}
1291
1292/// `CMP` (CMP). 
1293/// Compares the first source operand with the second source operand and sets the status flags in the EFLAGS register according to the results. The comparison is performed by subtracting the second operand from the first operand and then setting the status flags in the same manner as the SUB instruction. When an immediate value is used as an operand, it is sign-extended to the length of the first operand.
1294///
1295///
1296/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMP.html).
1297///
1298/// Supported operand variants:
1299///
1300/// ```text
1301/// +----+--------------+
1302/// | #  | Operands     |
1303/// +----+--------------+
1304/// | 1  | GpbLo, GpbLo |
1305/// | 2  | GpbLo, Imm   |
1306/// | 3  | GpbLo, Mem   |
1307/// | 4  | Gpd, Gpd     |
1308/// | 5  | Gpd, Imm     |
1309/// | 6  | Gpd, Mem     |
1310/// | 7  | Gpq, Gpq     |
1311/// | 8  | Gpq, Imm     |
1312/// | 9  | Gpq, Mem     |
1313/// | 10 | Gpw, Gpw     |
1314/// | 11 | Gpw, Imm     |
1315/// | 12 | Gpw, Mem     |
1316/// | 13 | Mem, GpbLo   |
1317/// | 14 | Mem, Gpd     |
1318/// | 15 | Mem, Gpq     |
1319/// | 16 | Mem, Gpw     |
1320/// | 17 | Mem, Imm     |
1321/// +----+--------------+
1322/// ```
1323pub trait CmpEmitter<A, B> {
1324    fn cmp(&mut self, op0: A, op1: B);
1325}
1326
1327impl<'a> CmpEmitter<GpbLo, GpbLo> for Assembler<'a> {
1328    fn cmp(&mut self, op0: GpbLo, op1: GpbLo) {
1329        self.emit(CMP8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1330    }
1331}
1332
1333impl<'a> CmpEmitter<Mem, GpbLo> for Assembler<'a> {
1334    fn cmp(&mut self, op0: Mem, op1: GpbLo) {
1335        self.emit(CMP8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1336    }
1337}
1338
1339impl<'a> CmpEmitter<Gpw, Gpw> for Assembler<'a> {
1340    fn cmp(&mut self, op0: Gpw, op1: Gpw) {
1341        self.emit(CMP16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1342    }
1343}
1344
1345impl<'a> CmpEmitter<Mem, Gpw> for Assembler<'a> {
1346    fn cmp(&mut self, op0: Mem, op1: Gpw) {
1347        self.emit(CMP16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1348    }
1349}
1350
1351impl<'a> CmpEmitter<Gpd, Gpd> for Assembler<'a> {
1352    fn cmp(&mut self, op0: Gpd, op1: Gpd) {
1353        self.emit(CMP32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1354    }
1355}
1356
1357impl<'a> CmpEmitter<Mem, Gpd> for Assembler<'a> {
1358    fn cmp(&mut self, op0: Mem, op1: Gpd) {
1359        self.emit(CMP32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1360    }
1361}
1362
1363impl<'a> CmpEmitter<Gpq, Gpq> for Assembler<'a> {
1364    fn cmp(&mut self, op0: Gpq, op1: Gpq) {
1365        self.emit(CMP64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1366    }
1367}
1368
1369impl<'a> CmpEmitter<Mem, Gpq> for Assembler<'a> {
1370    fn cmp(&mut self, op0: Mem, op1: Gpq) {
1371        self.emit(CMP64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1372    }
1373}
1374
1375impl<'a> CmpEmitter<GpbLo, Mem> for Assembler<'a> {
1376    fn cmp(&mut self, op0: GpbLo, op1: Mem) {
1377        self.emit(CMP8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1378    }
1379}
1380
1381impl<'a> CmpEmitter<Gpw, Mem> for Assembler<'a> {
1382    fn cmp(&mut self, op0: Gpw, op1: Mem) {
1383        self.emit(CMP16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1384    }
1385}
1386
1387impl<'a> CmpEmitter<Gpd, Mem> for Assembler<'a> {
1388    fn cmp(&mut self, op0: Gpd, op1: Mem) {
1389        self.emit(CMP32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1390    }
1391}
1392
1393impl<'a> CmpEmitter<Gpq, Mem> for Assembler<'a> {
1394    fn cmp(&mut self, op0: Gpq, op1: Mem) {
1395        self.emit(CMP64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1396    }
1397}
1398
1399impl<'a> CmpEmitter<GpbLo, Imm> for Assembler<'a> {
1400    fn cmp(&mut self, op0: GpbLo, op1: Imm) {
1401        self.emit(CMP8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1402    }
1403}
1404
1405impl<'a> CmpEmitter<Gpw, Imm> for Assembler<'a> {
1406    fn cmp(&mut self, op0: Gpw, op1: Imm) {
1407        self.emit(CMP16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1408    }
1409}
1410
1411impl<'a> CmpEmitter<Gpd, Imm> for Assembler<'a> {
1412    fn cmp(&mut self, op0: Gpd, op1: Imm) {
1413        self.emit(CMP32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1414    }
1415}
1416
1417impl<'a> CmpEmitter<Gpq, Imm> for Assembler<'a> {
1418    fn cmp(&mut self, op0: Gpq, op1: Imm) {
1419        self.emit(CMP64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1420    }
1421}
1422
1423impl<'a> CmpEmitter<Mem, Imm> for Assembler<'a> {
1424    fn cmp(&mut self, op0: Mem, op1: Imm) {
1425        self.emit(CMP8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1426    }
1427}
1428
1429/// `CMPS` (CMPS). 
1430/// Compares the byte, word, doubleword, or quadword specified with the first source operand with the byte, word, doubleword, or quadword specified with the second source operand and sets the status flags in the EFLAGS register according to the results.
1431///
1432///
1433/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMPS%3ACMPSB%3ACMPSW%3ACMPSD%3ACMPSQ.html).
1434///
1435/// Supported operand variants:
1436///
1437/// ```text
1438/// +---+----------+
1439/// | # | Operands |
1440/// +---+----------+
1441/// | 1 | (none)   |
1442/// +---+----------+
1443/// ```
1444pub trait CmpsEmitter {
1445    fn cmps(&mut self);
1446}
1447
1448impl<'a> CmpsEmitter for Assembler<'a> {
1449    fn cmps(&mut self) {
1450        self.emit(CMPS8, &NOREG, &NOREG, &NOREG, &NOREG);
1451    }
1452}
1453
1454/// `CQO`.
1455///
1456/// Supported operand variants:
1457///
1458/// ```text
1459/// +---+----------+
1460/// | # | Operands |
1461/// +---+----------+
1462/// | 1 | (none)   |
1463/// +---+----------+
1464/// ```
1465pub trait CqoEmitter {
1466    fn cqo(&mut self);
1467}
1468
1469impl<'a> CqoEmitter for Assembler<'a> {
1470    fn cqo(&mut self) {
1471        self.emit(CQO, &NOREG, &NOREG, &NOREG, &NOREG);
1472    }
1473}
1474
1475/// `CWD`.
1476///
1477/// Supported operand variants:
1478///
1479/// ```text
1480/// +---+----------+
1481/// | # | Operands |
1482/// +---+----------+
1483/// | 1 | (none)   |
1484/// +---+----------+
1485/// ```
1486pub trait CwdEmitter {
1487    fn cwd(&mut self);
1488}
1489
1490impl<'a> CwdEmitter for Assembler<'a> {
1491    fn cwd(&mut self) {
1492        self.emit(CWD, &NOREG, &NOREG, &NOREG, &NOREG);
1493    }
1494}
1495
1496/// `CWDE`.
1497///
1498/// Supported operand variants:
1499///
1500/// ```text
1501/// +---+----------+
1502/// | # | Operands |
1503/// +---+----------+
1504/// | 1 | (none)   |
1505/// +---+----------+
1506/// ```
1507pub trait CwdeEmitter {
1508    fn cwde(&mut self);
1509}
1510
1511impl<'a> CwdeEmitter for Assembler<'a> {
1512    fn cwde(&mut self) {
1513        self.emit(CWDE, &NOREG, &NOREG, &NOREG, &NOREG);
1514    }
1515}
1516
1517/// `C_EX`.
1518///
1519/// Supported operand variants:
1520///
1521/// ```text
1522/// +---+----------+
1523/// | # | Operands |
1524/// +---+----------+
1525/// | 1 | (none)   |
1526/// +---+----------+
1527/// ```
1528pub trait CExEmitter {
1529    fn c_ex(&mut self);
1530}
1531
1532impl<'a> CExEmitter for Assembler<'a> {
1533    fn c_ex(&mut self) {
1534        self.emit(C_EX16, &NOREG, &NOREG, &NOREG, &NOREG);
1535    }
1536}
1537
1538/// `C_SEP`.
1539///
1540/// Supported operand variants:
1541///
1542/// ```text
1543/// +---+----------+
1544/// | # | Operands |
1545/// +---+----------+
1546/// | 1 | (none)   |
1547/// +---+----------+
1548/// ```
1549pub trait CSepEmitter {
1550    fn c_sep(&mut self);
1551}
1552
1553impl<'a> CSepEmitter for Assembler<'a> {
1554    fn c_sep(&mut self) {
1555        self.emit(C_SEP16, &NOREG, &NOREG, &NOREG, &NOREG);
1556    }
1557}
1558
1559/// `DEC` (DEC). 
1560/// Subtracts 1 from the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (To perform a decrement operation that updates the CF flag, use a SUB instruction with an immediate operand of 1.)
1561///
1562///
1563/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DEC.html).
1564///
1565/// Supported operand variants:
1566///
1567/// ```text
1568/// +---+----------+
1569/// | # | Operands |
1570/// +---+----------+
1571/// | 1 | GpbLo    |
1572/// | 2 | Gpd      |
1573/// | 3 | Gpq      |
1574/// | 4 | Gpw      |
1575/// | 5 | Mem      |
1576/// +---+----------+
1577/// ```
1578pub trait DecEmitter<A> {
1579    fn dec(&mut self, op0: A);
1580}
1581
1582impl<'a> DecEmitter<GpbLo> for Assembler<'a> {
1583    fn dec(&mut self, op0: GpbLo) {
1584        self.emit(DEC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1585    }
1586}
1587
1588impl<'a> DecEmitter<Mem> for Assembler<'a> {
1589    fn dec(&mut self, op0: Mem) {
1590        self.emit(DEC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1591    }
1592}
1593
1594impl<'a> DecEmitter<Gpw> for Assembler<'a> {
1595    fn dec(&mut self, op0: Gpw) {
1596        self.emit(DEC16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1597    }
1598}
1599
1600impl<'a> DecEmitter<Gpd> for Assembler<'a> {
1601    fn dec(&mut self, op0: Gpd) {
1602        self.emit(DEC32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1603    }
1604}
1605
1606impl<'a> DecEmitter<Gpq> for Assembler<'a> {
1607    fn dec(&mut self, op0: Gpq) {
1608        self.emit(DEC64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1609    }
1610}
1611
1612/// `DIV` (DIV). 
1613/// Divides unsigned the value in the AX, DX:AX, EDX:EAX, or RDX:RAX registers (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, EDX:EAX, or RDX:RAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor). Division using 64-bit operand is available only in 64-bit mode.
1614///
1615///
1616/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DIV.html).
1617///
1618/// Supported operand variants:
1619///
1620/// ```text
1621/// +---+----------+
1622/// | # | Operands |
1623/// +---+----------+
1624/// | 1 | GpbLo    |
1625/// | 2 | Gpd      |
1626/// | 3 | Gpq      |
1627/// | 4 | Gpw      |
1628/// | 5 | Mem      |
1629/// +---+----------+
1630/// ```
1631pub trait DivEmitter<A> {
1632    fn div(&mut self, op0: A);
1633}
1634
1635impl<'a> DivEmitter<GpbLo> for Assembler<'a> {
1636    fn div(&mut self, op0: GpbLo) {
1637        self.emit(DIV8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1638    }
1639}
1640
1641impl<'a> DivEmitter<Mem> for Assembler<'a> {
1642    fn div(&mut self, op0: Mem) {
1643        self.emit(DIV8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1644    }
1645}
1646
1647impl<'a> DivEmitter<Gpw> for Assembler<'a> {
1648    fn div(&mut self, op0: Gpw) {
1649        self.emit(DIV16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1650    }
1651}
1652
1653impl<'a> DivEmitter<Gpd> for Assembler<'a> {
1654    fn div(&mut self, op0: Gpd) {
1655        self.emit(DIV32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1656    }
1657}
1658
1659impl<'a> DivEmitter<Gpq> for Assembler<'a> {
1660    fn div(&mut self, op0: Gpq) {
1661        self.emit(DIV64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1662    }
1663}
1664
1665/// `ENTER` (ENTER). 
1666/// Creates a stack frame (comprising of space for dynamic storage and 1-32 frame pointer storage) for a procedure. The first operand (imm16) specifies the size of the dynamic storage in the stack frame (that is, the number of bytes of dynamically allocated on the stack for the procedure). The second operand (imm8) gives the lexical nesting level (0 to 31) of the procedure. The nesting level (imm8 mod 32) and the OperandSize attribute determine the size in bytes of the storage space for frame pointers.
1667///
1668///
1669/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ENTER.html).
1670///
1671/// Supported operand variants:
1672///
1673/// ```text
1674/// +---+----------+
1675/// | # | Operands |
1676/// +---+----------+
1677/// | 1 | Imm      |
1678/// +---+----------+
1679/// ```
1680pub trait EnterEmitter<A> {
1681    fn enter(&mut self, op0: A);
1682}
1683
1684impl<'a> EnterEmitter<Imm> for Assembler<'a> {
1685    fn enter(&mut self, op0: Imm) {
1686        self.emit(ENTER16I, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1687    }
1688}
1689
1690/// `FWAIT` (FWAIT). 
1691/// Causes the processor to check for and handle pending, unmasked, floating-point exceptions before proceeding. (FWAIT is an alternate mnemonic for WAIT.)
1692///
1693///
1694/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/WAIT%3AFWAIT.html).
1695///
1696/// Supported operand variants:
1697///
1698/// ```text
1699/// +---+----------+
1700/// | # | Operands |
1701/// +---+----------+
1702/// | 1 | (none)   |
1703/// +---+----------+
1704/// ```
1705pub trait FwaitEmitter {
1706    fn fwait(&mut self);
1707}
1708
1709impl<'a> FwaitEmitter for Assembler<'a> {
1710    fn fwait(&mut self) {
1711        self.emit(FWAIT, &NOREG, &NOREG, &NOREG, &NOREG);
1712    }
1713}
1714
1715/// `HLT` (HLT). 
1716/// Stops instruction execution and places the processor in a HALT state. An enabled interrupt (including NMI and SMI), a debug exception, the BINIT# signal, the INIT# signal, or the RESET# signal will resume execution. If an interrupt (including NMI) is used to resume execution after a HLT instruction, the saved instruction pointer (CS:EIP) points to the instruction following the HLT instruction.
1717///
1718///
1719/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/HLT.html).
1720///
1721/// Supported operand variants:
1722///
1723/// ```text
1724/// +---+----------+
1725/// | # | Operands |
1726/// +---+----------+
1727/// | 1 | (none)   |
1728/// +---+----------+
1729/// ```
1730pub trait HltEmitter {
1731    fn hlt(&mut self);
1732}
1733
1734impl<'a> HltEmitter for Assembler<'a> {
1735    fn hlt(&mut self) {
1736        self.emit(HLT, &NOREG, &NOREG, &NOREG, &NOREG);
1737    }
1738}
1739
1740/// `IDIV` (IDIV). 
1741/// Divides the (signed) value in the AX, DX:AX, or EDX:EAX (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, or EDX:EAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor).
1742///
1743///
1744/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IDIV.html).
1745///
1746/// Supported operand variants:
1747///
1748/// ```text
1749/// +---+----------+
1750/// | # | Operands |
1751/// +---+----------+
1752/// | 1 | GpbLo    |
1753/// | 2 | Gpd      |
1754/// | 3 | Gpq      |
1755/// | 4 | Gpw      |
1756/// | 5 | Mem      |
1757/// +---+----------+
1758/// ```
1759pub trait IdivEmitter<A> {
1760    fn idiv(&mut self, op0: A);
1761}
1762
1763impl<'a> IdivEmitter<GpbLo> for Assembler<'a> {
1764    fn idiv(&mut self, op0: GpbLo) {
1765        self.emit(IDIV8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1766    }
1767}
1768
1769impl<'a> IdivEmitter<Mem> for Assembler<'a> {
1770    fn idiv(&mut self, op0: Mem) {
1771        self.emit(IDIV8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1772    }
1773}
1774
1775impl<'a> IdivEmitter<Gpw> for Assembler<'a> {
1776    fn idiv(&mut self, op0: Gpw) {
1777        self.emit(IDIV16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1778    }
1779}
1780
1781impl<'a> IdivEmitter<Gpd> for Assembler<'a> {
1782    fn idiv(&mut self, op0: Gpd) {
1783        self.emit(IDIV32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1784    }
1785}
1786
1787impl<'a> IdivEmitter<Gpq> for Assembler<'a> {
1788    fn idiv(&mut self, op0: Gpq) {
1789        self.emit(IDIV64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1790    }
1791}
1792
1793/// `IMUL` (IMUL). 
1794/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
1795///
1796///
1797/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
1798///
1799/// Supported operand variants:
1800///
1801/// ```text
1802/// +---+----------+
1803/// | # | Operands |
1804/// +---+----------+
1805/// | 1 | GpbLo    |
1806/// | 2 | Gpd      |
1807/// | 3 | Gpq      |
1808/// | 4 | Gpw      |
1809/// | 5 | Mem      |
1810/// +---+----------+
1811/// ```
1812pub trait ImulEmitter_1<A> {
1813    fn imul_1(&mut self, op0: A);
1814}
1815
1816impl<'a> ImulEmitter_1<GpbLo> for Assembler<'a> {
1817    fn imul_1(&mut self, op0: GpbLo) {
1818        self.emit(IMUL8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1819    }
1820}
1821
1822impl<'a> ImulEmitter_1<Mem> for Assembler<'a> {
1823    fn imul_1(&mut self, op0: Mem) {
1824        self.emit(IMUL8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1825    }
1826}
1827
1828impl<'a> ImulEmitter_1<Gpw> for Assembler<'a> {
1829    fn imul_1(&mut self, op0: Gpw) {
1830        self.emit(IMUL16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1831    }
1832}
1833
1834impl<'a> ImulEmitter_1<Gpd> for Assembler<'a> {
1835    fn imul_1(&mut self, op0: Gpd) {
1836        self.emit(IMUL32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1837    }
1838}
1839
1840impl<'a> ImulEmitter_1<Gpq> for Assembler<'a> {
1841    fn imul_1(&mut self, op0: Gpq) {
1842        self.emit(IMUL64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
1843    }
1844}
1845
1846/// `IMUL` (IMUL). 
1847/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
1848///
1849///
1850/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
1851///
1852/// Supported operand variants:
1853///
1854/// ```text
1855/// +---+----------+
1856/// | # | Operands |
1857/// +---+----------+
1858/// | 1 | Gpd, Gpd |
1859/// | 2 | Gpd, Mem |
1860/// | 3 | Gpq, Gpq |
1861/// | 4 | Gpq, Mem |
1862/// | 5 | Gpw, Gpw |
1863/// | 6 | Gpw, Mem |
1864/// +---+----------+
1865/// ```
1866pub trait ImulEmitter_2<A, B> {
1867    fn imul_2(&mut self, op0: A, op1: B);
1868}
1869
1870impl<'a> ImulEmitter_2<Gpw, Gpw> for Assembler<'a> {
1871    fn imul_2(&mut self, op0: Gpw, op1: Gpw) {
1872        self.emit(IMUL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1873    }
1874}
1875
1876impl<'a> ImulEmitter_2<Gpw, Mem> for Assembler<'a> {
1877    fn imul_2(&mut self, op0: Gpw, op1: Mem) {
1878        self.emit(IMUL16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1879    }
1880}
1881
1882impl<'a> ImulEmitter_2<Gpd, Gpd> for Assembler<'a> {
1883    fn imul_2(&mut self, op0: Gpd, op1: Gpd) {
1884        self.emit(IMUL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1885    }
1886}
1887
1888impl<'a> ImulEmitter_2<Gpd, Mem> for Assembler<'a> {
1889    fn imul_2(&mut self, op0: Gpd, op1: Mem) {
1890        self.emit(IMUL32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1891    }
1892}
1893
1894impl<'a> ImulEmitter_2<Gpq, Gpq> for Assembler<'a> {
1895    fn imul_2(&mut self, op0: Gpq, op1: Gpq) {
1896        self.emit(IMUL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1897    }
1898}
1899
1900impl<'a> ImulEmitter_2<Gpq, Mem> for Assembler<'a> {
1901    fn imul_2(&mut self, op0: Gpq, op1: Mem) {
1902        self.emit(IMUL64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1903    }
1904}
1905
1906/// `IMUL` (IMUL). 
1907/// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
1908///
1909///
1910/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
1911///
1912/// Supported operand variants:
1913///
1914/// ```text
1915/// +---+---------------+
1916/// | # | Operands      |
1917/// +---+---------------+
1918/// | 1 | Gpd, Gpd, Imm |
1919/// | 2 | Gpd, Mem, Imm |
1920/// | 3 | Gpq, Gpq, Imm |
1921/// | 4 | Gpq, Mem, Imm |
1922/// | 5 | Gpw, Gpw, Imm |
1923/// | 6 | Gpw, Mem, Imm |
1924/// +---+---------------+
1925/// ```
1926pub trait ImulEmitter_3<A, B, C> {
1927    fn imul_3(&mut self, op0: A, op1: B, op2: C);
1928}
1929
1930impl<'a> ImulEmitter_3<Gpw, Gpw, Imm> for Assembler<'a> {
1931    fn imul_3(&mut self, op0: Gpw, op1: Gpw, op2: Imm) {
1932        self.emit(IMUL16RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1933    }
1934}
1935
1936impl<'a> ImulEmitter_3<Gpw, Mem, Imm> for Assembler<'a> {
1937    fn imul_3(&mut self, op0: Gpw, op1: Mem, op2: Imm) {
1938        self.emit(IMUL16RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1939    }
1940}
1941
1942impl<'a> ImulEmitter_3<Gpd, Gpd, Imm> for Assembler<'a> {
1943    fn imul_3(&mut self, op0: Gpd, op1: Gpd, op2: Imm) {
1944        self.emit(IMUL32RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1945    }
1946}
1947
1948impl<'a> ImulEmitter_3<Gpd, Mem, Imm> for Assembler<'a> {
1949    fn imul_3(&mut self, op0: Gpd, op1: Mem, op2: Imm) {
1950        self.emit(IMUL32RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1951    }
1952}
1953
1954impl<'a> ImulEmitter_3<Gpq, Gpq, Imm> for Assembler<'a> {
1955    fn imul_3(&mut self, op0: Gpq, op1: Gpq, op2: Imm) {
1956        self.emit(IMUL64RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1957    }
1958}
1959
1960impl<'a> ImulEmitter_3<Gpq, Mem, Imm> for Assembler<'a> {
1961    fn imul_3(&mut self, op0: Gpq, op1: Mem, op2: Imm) {
1962        self.emit(IMUL64RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1963    }
1964}
1965
1966/// `IN` (IN). 
1967/// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
1968///
1969///
1970/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
1971///
1972/// Supported operand variants:
1973///
1974/// ```text
1975/// +---+----------+
1976/// | # | Operands |
1977/// +---+----------+
1978/// | 1 | (none)   |
1979/// +---+----------+
1980/// ```
1981pub trait InEmitter {
1982    fn r#in(&mut self);
1983}
1984
1985impl<'a> InEmitter for Assembler<'a> {
1986    fn r#in(&mut self) {
1987        self.emit(IN8, &NOREG, &NOREG, &NOREG, &NOREG);
1988    }
1989}
1990
1991/// `IN` (IN). 
1992/// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
1993///
1994///
1995/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
1996///
1997/// Supported operand variants:
1998///
1999/// ```text
2000/// +---+------------+
2001/// | # | Operands   |
2002/// +---+------------+
2003/// | 1 | GpbLo, Imm |
2004/// | 2 | Gpd, Imm   |
2005/// | 3 | Gpq, Imm   |
2006/// | 4 | Gpw, Imm   |
2007/// +---+------------+
2008/// ```
2009pub trait InEmitter_2<A, B> {
2010    fn r#in_2(&mut self, op0: A, op1: B);
2011}
2012
2013impl<'a> InEmitter_2<GpbLo, Imm> for Assembler<'a> {
2014    fn r#in_2(&mut self, op0: GpbLo, op1: Imm) {
2015        self.emit(IN8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2016    }
2017}
2018
2019impl<'a> InEmitter_2<Gpw, Imm> for Assembler<'a> {
2020    fn r#in_2(&mut self, op0: Gpw, op1: Imm) {
2021        self.emit(IN16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2022    }
2023}
2024
2025impl<'a> InEmitter_2<Gpd, Imm> for Assembler<'a> {
2026    fn r#in_2(&mut self, op0: Gpd, op1: Imm) {
2027        self.emit(IN32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2028    }
2029}
2030
2031impl<'a> InEmitter_2<Gpq, Imm> for Assembler<'a> {
2032    fn r#in_2(&mut self, op0: Gpq, op1: Imm) {
2033        self.emit(IN64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
2034    }
2035}
2036
2037/// `INC` (INC). 
2038/// Adds 1 to the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (Use a ADD instruction with an immediate operand of 1 to perform an increment operation that does updates the CF flag.)
2039///
2040///
2041/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INC.html).
2042///
2043/// Supported operand variants:
2044///
2045/// ```text
2046/// +---+----------+
2047/// | # | Operands |
2048/// +---+----------+
2049/// | 1 | GpbLo    |
2050/// | 2 | Gpd      |
2051/// | 3 | Gpq      |
2052/// | 4 | Gpw      |
2053/// | 5 | Mem      |
2054/// +---+----------+
2055/// ```
2056pub trait IncEmitter<A> {
2057    fn inc(&mut self, op0: A);
2058}
2059
2060impl<'a> IncEmitter<GpbLo> for Assembler<'a> {
2061    fn inc(&mut self, op0: GpbLo) {
2062        self.emit(INC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2063    }
2064}
2065
2066impl<'a> IncEmitter<Mem> for Assembler<'a> {
2067    fn inc(&mut self, op0: Mem) {
2068        self.emit(INC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2069    }
2070}
2071
2072impl<'a> IncEmitter<Gpw> for Assembler<'a> {
2073    fn inc(&mut self, op0: Gpw) {
2074        self.emit(INC16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2075    }
2076}
2077
2078impl<'a> IncEmitter<Gpd> for Assembler<'a> {
2079    fn inc(&mut self, op0: Gpd) {
2080        self.emit(INC32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2081    }
2082}
2083
2084impl<'a> IncEmitter<Gpq> for Assembler<'a> {
2085    fn inc(&mut self, op0: Gpq) {
2086        self.emit(INC64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2087    }
2088}
2089
2090/// `INS` (INS). 
2091/// Copies the data from the I/O port specified with the source operand (second operand) to the destination operand (first operand). The source operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The destination operand is a memory location, the address of which is read from either the ES:DI, ES:EDI or the RDI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The ES segment cannot be overridden with a segment override prefix.) The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
2092///
2093///
2094/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INS%3AINSB%3AINSW%3AINSD.html).
2095///
2096/// Supported operand variants:
2097///
2098/// ```text
2099/// +---+----------+
2100/// | # | Operands |
2101/// +---+----------+
2102/// | 1 | (none)   |
2103/// +---+----------+
2104/// ```
2105pub trait InsEmitter {
2106    fn ins(&mut self);
2107}
2108
2109impl<'a> InsEmitter for Assembler<'a> {
2110    fn ins(&mut self) {
2111        self.emit(INS8, &NOREG, &NOREG, &NOREG, &NOREG);
2112    }
2113}
2114
2115/// `INT` (INT). 
2116/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned intermediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
2117///
2118///
2119/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
2120///
2121/// Supported operand variants:
2122///
2123/// ```text
2124/// +---+----------+
2125/// | # | Operands |
2126/// +---+----------+
2127/// | 1 | Imm      |
2128/// +---+----------+
2129/// ```
2130pub trait IntEmitter<A> {
2131    fn int(&mut self, op0: A);
2132}
2133
2134impl<'a> IntEmitter<Imm> for Assembler<'a> {
2135    fn int(&mut self, op0: Imm) {
2136        self.emit(INTI, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2137    }
2138}
2139
2140/// `INT1` (INT1). 
2141/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned intermediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
2142///
2143///
2144/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
2145///
2146/// Supported operand variants:
2147///
2148/// ```text
2149/// +---+----------+
2150/// | # | Operands |
2151/// +---+----------+
2152/// | 1 | (none)   |
2153/// +---+----------+
2154/// ```
2155pub trait Int1Emitter {
2156    fn int1(&mut self);
2157}
2158
2159impl<'a> Int1Emitter for Assembler<'a> {
2160    fn int1(&mut self) {
2161        self.emit(INT1, &NOREG, &NOREG, &NOREG, &NOREG);
2162    }
2163}
2164
2165/// `INT3` (INT3). 
2166/// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned intermediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
2167///
2168///
2169/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
2170///
2171/// Supported operand variants:
2172///
2173/// ```text
2174/// +---+----------+
2175/// | # | Operands |
2176/// +---+----------+
2177/// | 1 | (none)   |
2178/// +---+----------+
2179/// ```
2180pub trait Int3Emitter {
2181    fn int3(&mut self);
2182}
2183
2184impl<'a> Int3Emitter for Assembler<'a> {
2185    fn int3(&mut self) {
2186        self.emit(INT3, &NOREG, &NOREG, &NOREG, &NOREG);
2187    }
2188}
2189
2190/// `IRET` (IRET). 
2191/// Returns program control from an exception or interrupt handler to a program or procedure that was interrupted by an exception, an external interrupt, or a software-generated interrupt. These instructions are also used to perform a return from a nested task. (A nested task is created when a CALL instruction is used to initiate a task switch or when an interrupt or exception causes a task switch to an interrupt or exception handler.) See the section titled “Task Linking” in Chapter 8 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 3A.
2192///
2193///
2194/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IRET%3AIRETD%3AIRETQ.html).
2195///
2196/// Supported operand variants:
2197///
2198/// ```text
2199/// +---+----------+
2200/// | # | Operands |
2201/// +---+----------+
2202/// | 1 | (none)   |
2203/// +---+----------+
2204/// ```
2205pub trait IretEmitter {
2206    fn iret(&mut self);
2207}
2208
2209impl<'a> IretEmitter for Assembler<'a> {
2210    fn iret(&mut self) {
2211        self.emit(IRET16, &NOREG, &NOREG, &NOREG, &NOREG);
2212    }
2213}
2214
2215/// `JA` (JA). 
2216/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2217///
2218///
2219/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2220///
2221/// Supported operand variants:
2222///
2223/// ```text
2224/// +---+----------+
2225/// | # | Operands |
2226/// +---+----------+
2227/// | 1 | Imm      |
2228/// | 2 | Label    |
2229/// | 3 | Sym      |
2230/// +---+----------+
2231/// ```
2232pub trait JaEmitter<A> {
2233    fn ja(&mut self, op0: A);
2234}
2235
2236impl<'a> JaEmitter<Imm> for Assembler<'a> {
2237    fn ja(&mut self, op0: Imm) {
2238        self.emit(JA, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2239    }
2240}
2241
2242impl<'a> JaEmitter<Sym> for Assembler<'a> {
2243    fn ja(&mut self, op0: Sym) {
2244        self.emit(JA, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2245    }
2246}
2247
2248impl<'a> JaEmitter<Label> for Assembler<'a> {
2249    fn ja(&mut self, op0: Label) {
2250        self.emit(JA, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2251    }
2252}
2253
2254/// `JBE` (JBE). 
2255/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2256///
2257///
2258/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2259///
2260/// Supported operand variants:
2261///
2262/// ```text
2263/// +---+----------+
2264/// | # | Operands |
2265/// +---+----------+
2266/// | 1 | Imm      |
2267/// | 2 | Label    |
2268/// | 3 | Sym      |
2269/// +---+----------+
2270/// ```
2271pub trait JbeEmitter<A> {
2272    fn jbe(&mut self, op0: A);
2273}
2274
2275impl<'a> JbeEmitter<Imm> for Assembler<'a> {
2276    fn jbe(&mut self, op0: Imm) {
2277        self.emit(JBE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2278    }
2279}
2280
2281impl<'a> JbeEmitter<Sym> for Assembler<'a> {
2282    fn jbe(&mut self, op0: Sym) {
2283        self.emit(JBE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2284    }
2285}
2286
2287impl<'a> JbeEmitter<Label> for Assembler<'a> {
2288    fn jbe(&mut self, op0: Label) {
2289        self.emit(JBE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2290    }
2291}
2292
2293/// `JC` (JC). 
2294/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2295///
2296///
2297/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2298///
2299/// Supported operand variants:
2300///
2301/// ```text
2302/// +---+----------+
2303/// | # | Operands |
2304/// +---+----------+
2305/// | 1 | Imm      |
2306/// | 2 | Label    |
2307/// | 3 | Sym      |
2308/// +---+----------+
2309/// ```
2310pub trait JcEmitter<A> {
2311    fn jc(&mut self, op0: A);
2312}
2313
2314impl<'a> JcEmitter<Imm> for Assembler<'a> {
2315    fn jc(&mut self, op0: Imm) {
2316        self.emit(JC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2317    }
2318}
2319
2320impl<'a> JcEmitter<Sym> for Assembler<'a> {
2321    fn jc(&mut self, op0: Sym) {
2322        self.emit(JC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2323    }
2324}
2325
2326impl<'a> JcEmitter<Label> for Assembler<'a> {
2327    fn jc(&mut self, op0: Label) {
2328        self.emit(JC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2329    }
2330}
2331
2332/// `JCXZ` (JCXZ). 
2333/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2334///
2335///
2336/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2337///
2338/// Supported operand variants:
2339///
2340/// ```text
2341/// +---+----------+
2342/// | # | Operands |
2343/// +---+----------+
2344/// | 1 | Imm      |
2345/// | 2 | Label    |
2346/// | 3 | Sym      |
2347/// +---+----------+
2348/// ```
2349pub trait JcxzEmitter<A> {
2350    fn jcxz(&mut self, op0: A);
2351}
2352
2353impl<'a> JcxzEmitter<Imm> for Assembler<'a> {
2354    fn jcxz(&mut self, op0: Imm) {
2355        self.emit(JCXZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2356    }
2357}
2358
2359impl<'a> JcxzEmitter<Sym> for Assembler<'a> {
2360    fn jcxz(&mut self, op0: Sym) {
2361        self.emit(JCXZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2362    }
2363}
2364
2365impl<'a> JcxzEmitter<Label> for Assembler<'a> {
2366    fn jcxz(&mut self, op0: Label) {
2367        self.emit(JCXZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2368    }
2369}
2370
2371/// `JG` (JG). 
2372/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2373///
2374///
2375/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2376///
2377/// Supported operand variants:
2378///
2379/// ```text
2380/// +---+----------+
2381/// | # | Operands |
2382/// +---+----------+
2383/// | 1 | Imm      |
2384/// | 2 | Label    |
2385/// | 3 | Sym      |
2386/// +---+----------+
2387/// ```
2388pub trait JgEmitter<A> {
2389    fn jg(&mut self, op0: A);
2390}
2391
2392impl<'a> JgEmitter<Imm> for Assembler<'a> {
2393    fn jg(&mut self, op0: Imm) {
2394        self.emit(JG, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2395    }
2396}
2397
2398impl<'a> JgEmitter<Sym> for Assembler<'a> {
2399    fn jg(&mut self, op0: Sym) {
2400        self.emit(JG, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2401    }
2402}
2403
2404impl<'a> JgEmitter<Label> for Assembler<'a> {
2405    fn jg(&mut self, op0: Label) {
2406        self.emit(JG, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2407    }
2408}
2409
2410/// `JGE` (JGE). 
2411/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2412///
2413///
2414/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2415///
2416/// Supported operand variants:
2417///
2418/// ```text
2419/// +---+----------+
2420/// | # | Operands |
2421/// +---+----------+
2422/// | 1 | Imm      |
2423/// | 2 | Label    |
2424/// | 3 | Sym      |
2425/// +---+----------+
2426/// ```
2427pub trait JgeEmitter<A> {
2428    fn jge(&mut self, op0: A);
2429}
2430
2431impl<'a> JgeEmitter<Imm> for Assembler<'a> {
2432    fn jge(&mut self, op0: Imm) {
2433        self.emit(JGE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2434    }
2435}
2436
2437impl<'a> JgeEmitter<Sym> for Assembler<'a> {
2438    fn jge(&mut self, op0: Sym) {
2439        self.emit(JGE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2440    }
2441}
2442
2443impl<'a> JgeEmitter<Label> for Assembler<'a> {
2444    fn jge(&mut self, op0: Label) {
2445        self.emit(JGE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2446    }
2447}
2448
2449/// `JL` (JL). 
2450/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2451///
2452///
2453/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2454///
2455/// Supported operand variants:
2456///
2457/// ```text
2458/// +---+----------+
2459/// | # | Operands |
2460/// +---+----------+
2461/// | 1 | Imm      |
2462/// | 2 | Label    |
2463/// | 3 | Sym      |
2464/// +---+----------+
2465/// ```
2466pub trait JlEmitter<A> {
2467    fn jl(&mut self, op0: A);
2468}
2469
2470impl<'a> JlEmitter<Imm> for Assembler<'a> {
2471    fn jl(&mut self, op0: Imm) {
2472        self.emit(JL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2473    }
2474}
2475
2476impl<'a> JlEmitter<Sym> for Assembler<'a> {
2477    fn jl(&mut self, op0: Sym) {
2478        self.emit(JL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2479    }
2480}
2481
2482impl<'a> JlEmitter<Label> for Assembler<'a> {
2483    fn jl(&mut self, op0: Label) {
2484        self.emit(JL, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2485    }
2486}
2487
2488/// `JLE` (JLE). 
2489/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2490///
2491///
2492/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2493///
2494/// Supported operand variants:
2495///
2496/// ```text
2497/// +---+----------+
2498/// | # | Operands |
2499/// +---+----------+
2500/// | 1 | Imm      |
2501/// | 2 | Label    |
2502/// | 3 | Sym      |
2503/// +---+----------+
2504/// ```
2505pub trait JleEmitter<A> {
2506    fn jle(&mut self, op0: A);
2507}
2508
2509impl<'a> JleEmitter<Imm> for Assembler<'a> {
2510    fn jle(&mut self, op0: Imm) {
2511        self.emit(JLE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2512    }
2513}
2514
2515impl<'a> JleEmitter<Sym> for Assembler<'a> {
2516    fn jle(&mut self, op0: Sym) {
2517        self.emit(JLE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2518    }
2519}
2520
2521impl<'a> JleEmitter<Label> for Assembler<'a> {
2522    fn jle(&mut self, op0: Label) {
2523        self.emit(JLE, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2524    }
2525}
2526
2527/// `JMP` (JMP). 
2528/// Transfers program control to a different point in the instruction stream without recording return information. The destination (target) operand specifies the address of the instruction being jumped to. This operand can be an immediate value, a general-purpose register, or a memory location.
2529///
2530///
2531/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/JMP.html).
2532///
2533/// Supported operand variants:
2534///
2535/// ```text
2536/// +---+----------+
2537/// | # | Operands |
2538/// +---+----------+
2539/// | 1 | Gpq      |
2540/// | 2 | Imm      |
2541/// | 3 | Label    |
2542/// | 4 | Mem      |
2543/// | 5 | Sym      |
2544/// +---+----------+
2545/// ```
2546pub trait JmpEmitter<A> {
2547    fn jmp(&mut self, op0: A);
2548}
2549
2550impl<'a> JmpEmitter<Imm> for Assembler<'a> {
2551    fn jmp(&mut self, op0: Imm) {
2552        self.emit(JMP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2553    }
2554}
2555
2556impl<'a> JmpEmitter<Sym> for Assembler<'a> {
2557    fn jmp(&mut self, op0: Sym) {
2558        self.emit(JMP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2559    }
2560}
2561
2562impl<'a> JmpEmitter<Label> for Assembler<'a> {
2563    fn jmp(&mut self, op0: Label) {
2564        self.emit(JMP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2565    }
2566}
2567
2568impl<'a> JmpEmitter<Gpq> for Assembler<'a> {
2569    fn jmp(&mut self, op0: Gpq) {
2570        self.emit(JMPR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2571    }
2572}
2573
2574impl<'a> JmpEmitter<Mem> for Assembler<'a> {
2575    fn jmp(&mut self, op0: Mem) {
2576        self.emit(JMPM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2577    }
2578}
2579
2580/// `JMPF`.
2581///
2582/// Supported operand variants:
2583///
2584/// ```text
2585/// +---+----------+
2586/// | # | Operands |
2587/// +---+----------+
2588/// | 1 | Mem      |
2589/// +---+----------+
2590/// ```
2591pub trait JmpfEmitter<A> {
2592    fn jmpf(&mut self, op0: A);
2593}
2594
2595impl<'a> JmpfEmitter<Mem> for Assembler<'a> {
2596    fn jmpf(&mut self, op0: Mem) {
2597        self.emit(JMPF16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2598    }
2599}
2600
2601/// `JNC` (JNC). 
2602/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2603///
2604///
2605/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2606///
2607/// Supported operand variants:
2608///
2609/// ```text
2610/// +---+----------+
2611/// | # | Operands |
2612/// +---+----------+
2613/// | 1 | Imm      |
2614/// | 2 | Label    |
2615/// | 3 | Sym      |
2616/// +---+----------+
2617/// ```
2618pub trait JncEmitter<A> {
2619    fn jnc(&mut self, op0: A);
2620}
2621
2622impl<'a> JncEmitter<Imm> for Assembler<'a> {
2623    fn jnc(&mut self, op0: Imm) {
2624        self.emit(JNC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2625    }
2626}
2627
2628impl<'a> JncEmitter<Sym> for Assembler<'a> {
2629    fn jnc(&mut self, op0: Sym) {
2630        self.emit(JNC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2631    }
2632}
2633
2634impl<'a> JncEmitter<Label> for Assembler<'a> {
2635    fn jnc(&mut self, op0: Label) {
2636        self.emit(JNC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2637    }
2638}
2639
2640/// `JNO` (JNO). 
2641/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2642///
2643///
2644/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2645///
2646/// Supported operand variants:
2647///
2648/// ```text
2649/// +---+----------+
2650/// | # | Operands |
2651/// +---+----------+
2652/// | 1 | Imm      |
2653/// | 2 | Label    |
2654/// | 3 | Sym      |
2655/// +---+----------+
2656/// ```
2657pub trait JnoEmitter<A> {
2658    fn jno(&mut self, op0: A);
2659}
2660
2661impl<'a> JnoEmitter<Imm> for Assembler<'a> {
2662    fn jno(&mut self, op0: Imm) {
2663        self.emit(JNO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2664    }
2665}
2666
2667impl<'a> JnoEmitter<Sym> for Assembler<'a> {
2668    fn jno(&mut self, op0: Sym) {
2669        self.emit(JNO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2670    }
2671}
2672
2673impl<'a> JnoEmitter<Label> for Assembler<'a> {
2674    fn jno(&mut self, op0: Label) {
2675        self.emit(JNO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2676    }
2677}
2678
2679/// `JNP` (JNP). 
2680/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2681///
2682///
2683/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2684///
2685/// Supported operand variants:
2686///
2687/// ```text
2688/// +---+----------+
2689/// | # | Operands |
2690/// +---+----------+
2691/// | 1 | Imm      |
2692/// | 2 | Label    |
2693/// | 3 | Sym      |
2694/// +---+----------+
2695/// ```
2696pub trait JnpEmitter<A> {
2697    fn jnp(&mut self, op0: A);
2698}
2699
2700impl<'a> JnpEmitter<Imm> for Assembler<'a> {
2701    fn jnp(&mut self, op0: Imm) {
2702        self.emit(JNP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2703    }
2704}
2705
2706impl<'a> JnpEmitter<Sym> for Assembler<'a> {
2707    fn jnp(&mut self, op0: Sym) {
2708        self.emit(JNP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2709    }
2710}
2711
2712impl<'a> JnpEmitter<Label> for Assembler<'a> {
2713    fn jnp(&mut self, op0: Label) {
2714        self.emit(JNP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2715    }
2716}
2717
2718/// `JNS` (JNS). 
2719/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2720///
2721///
2722/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2723///
2724/// Supported operand variants:
2725///
2726/// ```text
2727/// +---+----------+
2728/// | # | Operands |
2729/// +---+----------+
2730/// | 1 | Imm      |
2731/// | 2 | Label    |
2732/// | 3 | Sym      |
2733/// +---+----------+
2734/// ```
2735pub trait JnsEmitter<A> {
2736    fn jns(&mut self, op0: A);
2737}
2738
2739impl<'a> JnsEmitter<Imm> for Assembler<'a> {
2740    fn jns(&mut self, op0: Imm) {
2741        self.emit(JNS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2742    }
2743}
2744
2745impl<'a> JnsEmitter<Sym> for Assembler<'a> {
2746    fn jns(&mut self, op0: Sym) {
2747        self.emit(JNS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2748    }
2749}
2750
2751impl<'a> JnsEmitter<Label> for Assembler<'a> {
2752    fn jns(&mut self, op0: Label) {
2753        self.emit(JNS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2754    }
2755}
2756
2757/// `JNZ` (JNZ). 
2758/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2759///
2760///
2761/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2762///
2763/// Supported operand variants:
2764///
2765/// ```text
2766/// +---+----------+
2767/// | # | Operands |
2768/// +---+----------+
2769/// | 1 | Imm      |
2770/// | 2 | Label    |
2771/// | 3 | Sym      |
2772/// +---+----------+
2773/// ```
2774pub trait JnzEmitter<A> {
2775    fn jnz(&mut self, op0: A);
2776}
2777
2778impl<'a> JnzEmitter<Imm> for Assembler<'a> {
2779    fn jnz(&mut self, op0: Imm) {
2780        self.emit(JNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2781    }
2782}
2783
2784impl<'a> JnzEmitter<Sym> for Assembler<'a> {
2785    fn jnz(&mut self, op0: Sym) {
2786        self.emit(JNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2787    }
2788}
2789
2790impl<'a> JnzEmitter<Label> for Assembler<'a> {
2791    fn jnz(&mut self, op0: Label) {
2792        self.emit(JNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2793    }
2794}
2795
2796/// `JO` (JO). 
2797/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2798///
2799///
2800/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2801///
2802/// Supported operand variants:
2803///
2804/// ```text
2805/// +---+----------+
2806/// | # | Operands |
2807/// +---+----------+
2808/// | 1 | Imm      |
2809/// | 2 | Label    |
2810/// | 3 | Sym      |
2811/// +---+----------+
2812/// ```
2813pub trait JoEmitter<A> {
2814    fn jo(&mut self, op0: A);
2815}
2816
2817impl<'a> JoEmitter<Imm> for Assembler<'a> {
2818    fn jo(&mut self, op0: Imm) {
2819        self.emit(JO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2820    }
2821}
2822
2823impl<'a> JoEmitter<Sym> for Assembler<'a> {
2824    fn jo(&mut self, op0: Sym) {
2825        self.emit(JO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2826    }
2827}
2828
2829impl<'a> JoEmitter<Label> for Assembler<'a> {
2830    fn jo(&mut self, op0: Label) {
2831        self.emit(JO, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2832    }
2833}
2834
2835/// `JP` (JP). 
2836/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2837///
2838///
2839/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2840///
2841/// Supported operand variants:
2842///
2843/// ```text
2844/// +---+----------+
2845/// | # | Operands |
2846/// +---+----------+
2847/// | 1 | Imm      |
2848/// | 2 | Label    |
2849/// | 3 | Sym      |
2850/// +---+----------+
2851/// ```
2852pub trait JpEmitter<A> {
2853    fn jp(&mut self, op0: A);
2854}
2855
2856impl<'a> JpEmitter<Imm> for Assembler<'a> {
2857    fn jp(&mut self, op0: Imm) {
2858        self.emit(JP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2859    }
2860}
2861
2862impl<'a> JpEmitter<Sym> for Assembler<'a> {
2863    fn jp(&mut self, op0: Sym) {
2864        self.emit(JP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2865    }
2866}
2867
2868impl<'a> JpEmitter<Label> for Assembler<'a> {
2869    fn jp(&mut self, op0: Label) {
2870        self.emit(JP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2871    }
2872}
2873
2874/// `JS` (JS). 
2875/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2876///
2877///
2878/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2879///
2880/// Supported operand variants:
2881///
2882/// ```text
2883/// +---+----------+
2884/// | # | Operands |
2885/// +---+----------+
2886/// | 1 | Imm      |
2887/// | 2 | Label    |
2888/// | 3 | Sym      |
2889/// +---+----------+
2890/// ```
2891pub trait JsEmitter<A> {
2892    fn js(&mut self, op0: A);
2893}
2894
2895impl<'a> JsEmitter<Imm> for Assembler<'a> {
2896    fn js(&mut self, op0: Imm) {
2897        self.emit(JS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2898    }
2899}
2900
2901impl<'a> JsEmitter<Sym> for Assembler<'a> {
2902    fn js(&mut self, op0: Sym) {
2903        self.emit(JS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2904    }
2905}
2906
2907impl<'a> JsEmitter<Label> for Assembler<'a> {
2908    fn js(&mut self, op0: Label) {
2909        self.emit(JS, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2910    }
2911}
2912
2913/// `JZ` (JZ). 
2914/// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
2915///
2916///
2917/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
2918///
2919/// Supported operand variants:
2920///
2921/// ```text
2922/// +---+----------+
2923/// | # | Operands |
2924/// +---+----------+
2925/// | 1 | Imm      |
2926/// | 2 | Label    |
2927/// | 3 | Sym      |
2928/// +---+----------+
2929/// ```
2930pub trait JzEmitter<A> {
2931    fn jz(&mut self, op0: A);
2932}
2933
2934impl<'a> JzEmitter<Imm> for Assembler<'a> {
2935    fn jz(&mut self, op0: Imm) {
2936        self.emit(JZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2937    }
2938}
2939
2940impl<'a> JzEmitter<Sym> for Assembler<'a> {
2941    fn jz(&mut self, op0: Sym) {
2942        self.emit(JZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2943    }
2944}
2945
2946impl<'a> JzEmitter<Label> for Assembler<'a> {
2947    fn jz(&mut self, op0: Label) {
2948        self.emit(JZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
2949    }
2950}
2951
/// `JCC` — generic condition-code jump.
///
/// Checks the state of one or more of the status flags in the EFLAGS register
/// (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state
/// (condition), performs a jump to the target instruction specified by the
/// destination operand. If the condition is not satisfied, the jump is not
/// performed and execution continues with the following instruction.
///
/// NOTE(review): the generated header labeled this trait "`JCC` (JO)", which
/// looks like a generator artifact — `JO` already has its own emitter above.
/// No condition-code operand is taken here, so presumably the condition is
/// carried by the `JCC` opcode constant itself — verify against the opcode
/// table in `super::super::opcodes`.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Imm      |
/// | 2 | Label    |
/// | 3 | Sym      |
/// +---+----------+
/// ```
pub trait JccEmitter<A> {
    fn jcc(&mut self, op0: A);
}

impl<'a> JccEmitter<Imm> for Assembler<'a> {
    fn jcc(&mut self, op0: Imm) {
        self.emit(JCC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

impl<'a> JccEmitter<Sym> for Assembler<'a> {
    fn jcc(&mut self, op0: Sym) {
        self.emit(JCC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

impl<'a> JccEmitter<Label> for Assembler<'a> {
    fn jcc(&mut self, op0: Label) {
        self.emit(JCC, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
2990
2991/// `LAHF`.
2992///
2993/// Supported operand variants:
2994///
2995/// ```text
2996/// +---+----------+
2997/// | # | Operands |
2998/// +---+----------+
2999/// | 1 | (none)   |
3000/// +---+----------+
3001/// ```
3002pub trait LahfEmitter {
3003    fn lahf(&mut self);
3004}
3005
3006impl<'a> LahfEmitter for Assembler<'a> {
3007    fn lahf(&mut self) {
3008        self.emit(LAHF, &NOREG, &NOREG, &NOREG, &NOREG);
3009    }
3010}
3011
3012/// `LAR` (LAR). 
3013/// Loads the access rights from the segment descriptor specified by the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the flag register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. If the source operand is a memory address, only 16 bits of data are accessed. The destination operand is a general-purpose register.
3014///
3015///
3016/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LAR.html).
3017///
3018/// Supported operand variants:
3019///
3020/// ```text
3021/// +---+----------+
3022/// | # | Operands |
3023/// +---+----------+
3024/// | 1 | Gpd, Gpw |
3025/// | 2 | Gpd, Mem |
3026/// | 3 | Gpq, Gpw |
3027/// | 4 | Gpq, Mem |
3028/// | 5 | Gpw, Gpw |
3029/// | 6 | Gpw, Mem |
3030/// +---+----------+
3031/// ```
3032pub trait LarEmitter<A, B> {
3033    fn lar(&mut self, op0: A, op1: B);
3034}
3035
3036impl<'a> LarEmitter<Gpw, Gpw> for Assembler<'a> {
3037    fn lar(&mut self, op0: Gpw, op1: Gpw) {
3038        self.emit(LAR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3039    }
3040}
3041
3042impl<'a> LarEmitter<Gpw, Mem> for Assembler<'a> {
3043    fn lar(&mut self, op0: Gpw, op1: Mem) {
3044        self.emit(LAR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3045    }
3046}
3047
3048impl<'a> LarEmitter<Gpd, Gpw> for Assembler<'a> {
3049    fn lar(&mut self, op0: Gpd, op1: Gpw) {
3050        self.emit(LAR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3051    }
3052}
3053
3054impl<'a> LarEmitter<Gpd, Mem> for Assembler<'a> {
3055    fn lar(&mut self, op0: Gpd, op1: Mem) {
3056        self.emit(LAR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3057    }
3058}
3059
3060impl<'a> LarEmitter<Gpq, Gpw> for Assembler<'a> {
3061    fn lar(&mut self, op0: Gpq, op1: Gpw) {
3062        self.emit(LAR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3063    }
3064}
3065
3066impl<'a> LarEmitter<Gpq, Mem> for Assembler<'a> {
3067    fn lar(&mut self, op0: Gpq, op1: Mem) {
3068        self.emit(LAR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3069    }
3070}
3071
3072/// `LDTILECFG`.
3073///
3074/// Supported operand variants:
3075///
3076/// ```text
3077/// +---+----------+
3078/// | # | Operands |
3079/// +---+----------+
3080/// | 1 | Mem      |
3081/// +---+----------+
3082/// ```
3083pub trait LdtilecfgEmitter<A> {
3084    fn ldtilecfg(&mut self, op0: A);
3085}
3086
3087impl<'a> LdtilecfgEmitter<Mem> for Assembler<'a> {
3088    fn ldtilecfg(&mut self, op0: Mem) {
3089        self.emit(LDTILECFGM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3090    }
3091}
3092
3093/// `LEA` (LEA). 
3094/// Computes the effective address of the second operand (the source operand) and stores it in the first operand (destination operand). The source operand is a memory address (offset part) specified with one of the processors addressing modes; the destination operand is a general-purpose register. The address-size and operand-size attributes affect the action performed by this instruction, as shown in the following table. The operand-size attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the attribute of the code segment.
3095///
3096///
3097/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEA.html).
3098///
3099/// Supported operand variants:
3100///
3101/// ```text
3102/// +---+----------+
3103/// | # | Operands |
3104/// +---+----------+
3105/// | 1 | Gpd, Mem |
3106/// | 2 | Gpq, Mem |
3107/// | 3 | Gpw, Mem |
3108/// +---+----------+
3109/// ```
3110pub trait LeaEmitter<A, B> {
3111    fn lea(&mut self, op0: A, op1: B);
3112}
3113
3114impl<'a> LeaEmitter<Gpw, Mem> for Assembler<'a> {
3115    fn lea(&mut self, op0: Gpw, op1: Mem) {
3116        self.emit(LEA16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3117    }
3118}
3119
3120impl<'a> LeaEmitter<Gpd, Mem> for Assembler<'a> {
3121    fn lea(&mut self, op0: Gpd, op1: Mem) {
3122        self.emit(LEA32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3123    }
3124}
3125
3126impl<'a> LeaEmitter<Gpq, Mem> for Assembler<'a> {
3127    fn lea(&mut self, op0: Gpq, op1: Mem) {
3128        self.emit(LEA64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3129    }
3130}
3131
3132/// `LEAVE` (LEAVE). 
3133/// Releases the stack frame set up by an earlier ENTER instruction. The LEAVE instruction copies the frame pointer (in the EBP register) into the stack pointer register (ESP), which releases the stack space allocated to the stack frame. The old frame pointer (the frame pointer for the calling procedure that was saved by the ENTER instruction) is then popped from the stack into the EBP register, restoring the calling procedure’s stack frame.
3134///
3135///
3136/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEAVE.html).
3137///
3138/// Supported operand variants:
3139///
3140/// ```text
3141/// +---+----------+
3142/// | # | Operands |
3143/// +---+----------+
3144/// | 1 | (none)   |
3145/// +---+----------+
3146/// ```
3147pub trait LeaveEmitter {
3148    fn leave(&mut self);
3149}
3150
3151impl<'a> LeaveEmitter for Assembler<'a> {
3152    fn leave(&mut self) {
3153        self.emit(LEAVE16, &NOREG, &NOREG, &NOREG, &NOREG);
3154    }
3155}
3156
3157/// `LFS` (LFS). 
3158/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
3159///
3160///
3161/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
3162///
3163/// Supported operand variants:
3164///
3165/// ```text
3166/// +---+----------+
3167/// | # | Operands |
3168/// +---+----------+
3169/// | 1 | Gpd, Mem |
3170/// | 2 | Gpq, Mem |
3171/// | 3 | Gpw, Mem |
3172/// +---+----------+
3173/// ```
3174pub trait LfsEmitter<A, B> {
3175    fn lfs(&mut self, op0: A, op1: B);
3176}
3177
3178impl<'a> LfsEmitter<Gpw, Mem> for Assembler<'a> {
3179    fn lfs(&mut self, op0: Gpw, op1: Mem) {
3180        self.emit(LFS16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3181    }
3182}
3183
3184impl<'a> LfsEmitter<Gpd, Mem> for Assembler<'a> {
3185    fn lfs(&mut self, op0: Gpd, op1: Mem) {
3186        self.emit(LFS32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3187    }
3188}
3189
3190impl<'a> LfsEmitter<Gpq, Mem> for Assembler<'a> {
3191    fn lfs(&mut self, op0: Gpq, op1: Mem) {
3192        self.emit(LFS64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3193    }
3194}
3195
3196/// `LGDT` (LGDT). 
3197/// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
3198///
3199///
3200/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
3201///
3202/// Supported operand variants:
3203///
3204/// ```text
3205/// +---+----------+
3206/// | # | Operands |
3207/// +---+----------+
3208/// | 1 | Mem      |
3209/// +---+----------+
3210/// ```
3211pub trait LgdtEmitter<A> {
3212    fn lgdt(&mut self, op0: A);
3213}
3214
3215impl<'a> LgdtEmitter<Mem> for Assembler<'a> {
3216    fn lgdt(&mut self, op0: Mem) {
3217        self.emit(LGDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3218    }
3219}
3220
3221/// `LGS` (LGS). 
3222/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
3223///
3224///
3225/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
3226///
3227/// Supported operand variants:
3228///
3229/// ```text
3230/// +---+----------+
3231/// | # | Operands |
3232/// +---+----------+
3233/// | 1 | Gpd, Mem |
3234/// | 2 | Gpq, Mem |
3235/// | 3 | Gpw, Mem |
3236/// +---+----------+
3237/// ```
3238pub trait LgsEmitter<A, B> {
3239    fn lgs(&mut self, op0: A, op1: B);
3240}
3241
3242impl<'a> LgsEmitter<Gpw, Mem> for Assembler<'a> {
3243    fn lgs(&mut self, op0: Gpw, op1: Mem) {
3244        self.emit(LGS16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3245    }
3246}
3247
3248impl<'a> LgsEmitter<Gpd, Mem> for Assembler<'a> {
3249    fn lgs(&mut self, op0: Gpd, op1: Mem) {
3250        self.emit(LGS32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3251    }
3252}
3253
3254impl<'a> LgsEmitter<Gpq, Mem> for Assembler<'a> {
3255    fn lgs(&mut self, op0: Gpq, op1: Mem) {
3256        self.emit(LGS64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3257    }
3258}
3259
3260/// `LIDT` (LIDT). 
3261/// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
3262///
3263///
3264/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
3265///
3266/// Supported operand variants:
3267///
3268/// ```text
3269/// +---+----------+
3270/// | # | Operands |
3271/// +---+----------+
3272/// | 1 | Mem      |
3273/// +---+----------+
3274/// ```
3275pub trait LidtEmitter<A> {
3276    fn lidt(&mut self, op0: A);
3277}
3278
3279impl<'a> LidtEmitter<Mem> for Assembler<'a> {
3280    fn lidt(&mut self, op0: Mem) {
3281        self.emit(LIDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3282    }
3283}
3284
3285/// `LLDT` (LLDT). 
3286/// Loads the source operand into the segment selector field of the local descriptor table register (LDTR). The source operand (a general-purpose register or a memory location) contains a segment selector that points to a local descriptor table (LDT). After the segment selector is loaded in the LDTR, the processor uses the segment selector to locate the segment descriptor for the LDT in the global descriptor table (GDT). It then loads the segment limit and base address for the LDT from the segment descriptor into the LDTR. The segment registers DS, ES, SS, FS, GS, and CS are not affected by this instruction, nor is the LDTR field in the task state segment (TSS) for the current task.
3287///
3288///
3289/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LLDT.html).
3290///
3291/// Supported operand variants:
3292///
3293/// ```text
3294/// +---+----------+
3295/// | # | Operands |
3296/// +---+----------+
3297/// | 1 | Gpd      |
3298/// | 2 | Mem      |
3299/// +---+----------+
3300/// ```
3301pub trait LldtEmitter<A> {
3302    fn lldt(&mut self, op0: A);
3303}
3304
3305impl<'a> LldtEmitter<Gpd> for Assembler<'a> {
3306    fn lldt(&mut self, op0: Gpd) {
3307        self.emit(LLDTR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3308    }
3309}
3310
3311impl<'a> LldtEmitter<Mem> for Assembler<'a> {
3312    fn lldt(&mut self, op0: Mem) {
3313        self.emit(LLDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3314    }
3315}
3316
3317/// `LMSW` (LMSW). 
3318/// Loads the source operand into the machine status word, bits 0 through 15 of register CR0. The source operand can be a 16-bit general-purpose register or a memory location. Only the low-order 4 bits of the source operand (which contains the PE, MP, EM, and TS flags) are loaded into CR0. The PG, CD, NW, AM, WP, NE, and ET flags of CR0 are not affected. The operand-size attribute has no effect on this instruction.
3319///
3320///
3321/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LMSW.html).
3322///
3323/// Supported operand variants:
3324///
3325/// ```text
3326/// +---+----------+
3327/// | # | Operands |
3328/// +---+----------+
3329/// | 1 | Gpd      |
3330/// | 2 | Mem      |
3331/// +---+----------+
3332/// ```
3333pub trait LmswEmitter<A> {
3334    fn lmsw(&mut self, op0: A);
3335}
3336
3337impl<'a> LmswEmitter<Gpd> for Assembler<'a> {
3338    fn lmsw(&mut self, op0: Gpd) {
3339        self.emit(LMSWR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3340    }
3341}
3342
3343impl<'a> LmswEmitter<Mem> for Assembler<'a> {
3344    fn lmsw(&mut self, op0: Mem) {
3345        self.emit(LMSWM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3346    }
3347}
3348
3349/// `LODS` (LODS). 
3350/// Loads a byte, word, or doubleword from the source operand into the AL, AX, or EAX register, respectively. The source operand is a memory location, the address of which is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The DS segment may be overridden with a segment override prefix.
3351///
3352///
3353/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LODS%3ALODSB%3ALODSW%3ALODSD%3ALODSQ.html).
3354///
3355/// Supported operand variants:
3356///
3357/// ```text
3358/// +---+----------+
3359/// | # | Operands |
3360/// +---+----------+
3361/// | 1 | (none)   |
3362/// +---+----------+
3363/// ```
3364pub trait LodsEmitter {
3365    fn lods(&mut self);
3366}
3367
3368impl<'a> LodsEmitter for Assembler<'a> {
3369    fn lods(&mut self) {
3370        self.emit(LODS8, &NOREG, &NOREG, &NOREG, &NOREG);
3371    }
3372}
3373
3374/// `LOOP` (LOOP). 
3375/// Performs a loop operation using the RCX, ECX or CX register as a counter (depending on whether address size is 64 bits, 32 bits, or 16 bits). Note that the LOOP instruction ignores REX.W; but 64-bit address size can be over-ridden using a 67H prefix.
3376///
3377///
3378/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LOOP%3ALOOPcc.html).
3379///
3380/// Supported operand variants:
3381///
3382/// ```text
3383/// +---+----------+
3384/// | # | Operands |
3385/// +---+----------+
3386/// | 1 | Imm      |
3387/// | 2 | Label    |
3388/// | 3 | Sym      |
3389/// +---+----------+
3390/// ```
3391pub trait LoopEmitter<A> {
3392    fn r#loop(&mut self, op0: A);
3393}
3394
3395impl<'a> LoopEmitter<Imm> for Assembler<'a> {
3396    fn r#loop(&mut self, op0: Imm) {
3397        self.emit(LOOP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3398    }
3399}
3400
3401impl<'a> LoopEmitter<Sym> for Assembler<'a> {
3402    fn r#loop(&mut self, op0: Sym) {
3403        self.emit(LOOP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3404    }
3405}
3406
3407impl<'a> LoopEmitter<Label> for Assembler<'a> {
3408    fn r#loop(&mut self, op0: Label) {
3409        self.emit(LOOP, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3410    }
3411}
3412
3413/// `LOOPNZ`.
3414///
3415/// Supported operand variants:
3416///
3417/// ```text
3418/// +---+----------+
3419/// | # | Operands |
3420/// +---+----------+
3421/// | 1 | Imm      |
3422/// | 2 | Label    |
3423/// | 3 | Sym      |
3424/// +---+----------+
3425/// ```
3426pub trait LoopnzEmitter<A> {
3427    fn loopnz(&mut self, op0: A);
3428}
3429
3430impl<'a> LoopnzEmitter<Imm> for Assembler<'a> {
3431    fn loopnz(&mut self, op0: Imm) {
3432        self.emit(LOOPNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3433    }
3434}
3435
3436impl<'a> LoopnzEmitter<Sym> for Assembler<'a> {
3437    fn loopnz(&mut self, op0: Sym) {
3438        self.emit(LOOPNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3439    }
3440}
3441
3442impl<'a> LoopnzEmitter<Label> for Assembler<'a> {
3443    fn loopnz(&mut self, op0: Label) {
3444        self.emit(LOOPNZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3445    }
3446}
3447
3448/// `LOOPZ`.
3449///
3450/// Supported operand variants:
3451///
3452/// ```text
3453/// +---+----------+
3454/// | # | Operands |
3455/// +---+----------+
3456/// | 1 | Imm      |
3457/// | 2 | Label    |
3458/// | 3 | Sym      |
3459/// +---+----------+
3460/// ```
3461pub trait LoopzEmitter<A> {
3462    fn loopz(&mut self, op0: A);
3463}
3464
3465impl<'a> LoopzEmitter<Imm> for Assembler<'a> {
3466    fn loopz(&mut self, op0: Imm) {
3467        self.emit(LOOPZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3468    }
3469}
3470
3471impl<'a> LoopzEmitter<Sym> for Assembler<'a> {
3472    fn loopz(&mut self, op0: Sym) {
3473        self.emit(LOOPZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3474    }
3475}
3476
3477impl<'a> LoopzEmitter<Label> for Assembler<'a> {
3478    fn loopz(&mut self, op0: Label) {
3479        self.emit(LOOPZ, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3480    }
3481}
3482
3483/// `LSL` (LSL). 
3484/// Loads the unscrambled segment limit from the segment descriptor specified with the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the EFLAGS register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. The destination operand is a general-purpose register.
3485///
3486///
3487/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LSL.html).
3488///
3489/// Supported operand variants:
3490///
3491/// ```text
3492/// +---+----------+
3493/// | # | Operands |
3494/// +---+----------+
3495/// | 1 | Gpd, Gpw |
3496/// | 2 | Gpd, Mem |
3497/// | 3 | Gpq, Gpw |
3498/// | 4 | Gpq, Mem |
3499/// | 5 | Gpw, Gpw |
3500/// | 6 | Gpw, Mem |
3501/// +---+----------+
3502/// ```
3503pub trait LslEmitter<A, B> {
3504    fn lsl(&mut self, op0: A, op1: B);
3505}
3506
3507impl<'a> LslEmitter<Gpw, Gpw> for Assembler<'a> {
3508    fn lsl(&mut self, op0: Gpw, op1: Gpw) {
3509        self.emit(LSL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3510    }
3511}
3512
3513impl<'a> LslEmitter<Gpw, Mem> for Assembler<'a> {
3514    fn lsl(&mut self, op0: Gpw, op1: Mem) {
3515        self.emit(LSL16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3516    }
3517}
3518
3519impl<'a> LslEmitter<Gpd, Gpw> for Assembler<'a> {
3520    fn lsl(&mut self, op0: Gpd, op1: Gpw) {
3521        self.emit(LSL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3522    }
3523}
3524
3525impl<'a> LslEmitter<Gpd, Mem> for Assembler<'a> {
3526    fn lsl(&mut self, op0: Gpd, op1: Mem) {
3527        self.emit(LSL32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3528    }
3529}
3530
3531impl<'a> LslEmitter<Gpq, Gpw> for Assembler<'a> {
3532    fn lsl(&mut self, op0: Gpq, op1: Gpw) {
3533        self.emit(LSL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3534    }
3535}
3536
3537impl<'a> LslEmitter<Gpq, Mem> for Assembler<'a> {
3538    fn lsl(&mut self, op0: Gpq, op1: Mem) {
3539        self.emit(LSL64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3540    }
3541}
3542
3543/// `LSS` (LSS). 
3544/// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
3545///
3546///
3547/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
3548///
3549/// Supported operand variants:
3550///
3551/// ```text
3552/// +---+----------+
3553/// | # | Operands |
3554/// +---+----------+
3555/// | 1 | Gpd, Mem |
3556/// | 2 | Gpq, Mem |
3557/// | 3 | Gpw, Mem |
3558/// +---+----------+
3559/// ```
3560pub trait LssEmitter<A, B> {
3561    fn lss(&mut self, op0: A, op1: B);
3562}
3563
3564impl<'a> LssEmitter<Gpw, Mem> for Assembler<'a> {
3565    fn lss(&mut self, op0: Gpw, op1: Mem) {
3566        self.emit(LSS16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3567    }
3568}
3569
3570impl<'a> LssEmitter<Gpd, Mem> for Assembler<'a> {
3571    fn lss(&mut self, op0: Gpd, op1: Mem) {
3572        self.emit(LSS32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3573    }
3574}
3575
3576impl<'a> LssEmitter<Gpq, Mem> for Assembler<'a> {
3577    fn lss(&mut self, op0: Gpq, op1: Mem) {
3578        self.emit(LSS64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3579    }
3580}
3581
3582/// `LTR` (LTR). 
3583/// Loads the source operand into the segment selector field of the task register. The source operand (a general-purpose register or a memory location) contains a segment selector that points to a task state segment (TSS). After the segment selector is loaded in the task register, the processor uses the segment selector to locate the segment descriptor for the TSS in the global descriptor table (GDT). It then loads the segment limit and base address for the TSS from the segment descriptor into the task register. The task pointed to by the task register is marked busy, but a switch to the task does not occur.
3584///
3585///
3586/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LTR.html).
3587///
3588/// Supported operand variants:
3589///
3590/// ```text
3591/// +---+----------+
3592/// | # | Operands |
3593/// +---+----------+
3594/// | 1 | Gpd      |
3595/// | 2 | Mem      |
3596/// +---+----------+
3597/// ```
3598pub trait LtrEmitter<A> {
3599    fn ltr(&mut self, op0: A);
3600}
3601
3602impl<'a> LtrEmitter<Gpd> for Assembler<'a> {
3603    fn ltr(&mut self, op0: Gpd) {
3604        self.emit(LTRR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3605    }
3606}
3607
3608impl<'a> LtrEmitter<Mem> for Assembler<'a> {
3609    fn ltr(&mut self, op0: Mem) {
3610        self.emit(LTRM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
3611    }
3612}
3613
3614/// `MOV` (MOV). 
3615/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination register can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
3616///
3617///
3618/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
3619///
3620/// Supported operand variants:
3621///
3622/// ```text
3623/// +----+------------------------+
3624/// | #  | Operands               |
3625/// +----+------------------------+
3626/// | 1  | AbsoluteAddress, GpbLo |
3627/// | 2  | AbsoluteAddress, Gpd   |
3628/// | 3  | AbsoluteAddress, Gpq   |
3629/// | 4  | AbsoluteAddress, Gpw   |
3630/// | 5  | GpbLo, AbsoluteAddress |
3631/// | 6  | GpbLo, GpbLo           |
3632/// | 7  | GpbLo, Imm             |
3633/// | 8  | GpbLo, Mem             |
3634/// | 9  | Gpd, AbsoluteAddress   |
3635/// | 10 | Gpd, Gpd               |
3636/// | 11 | Gpd, Imm               |
3637/// | 12 | Gpd, Mem               |
3638/// | 13 | Gpq, AbsoluteAddress   |
3639/// | 14 | Gpq, Gpq               |
3640/// | 15 | Gpq, Imm               |
3641/// | 16 | Gpq, Mem               |
3642/// | 17 | Gpw, AbsoluteAddress   |
3643/// | 18 | Gpw, Gpw               |
3644/// | 19 | Gpw, Imm               |
3645/// | 20 | Gpw, Mem               |
3646/// | 21 | Mem, GpbLo             |
3647/// | 22 | Mem, Gpd               |
3648/// | 23 | Mem, Gpq               |
3649/// | 24 | Mem, Gpw               |
3650/// | 25 | Mem, Imm               |
3651/// +----+------------------------+
3652/// ```
3653pub trait MovEmitter<A, B> {
3654    fn mov(&mut self, op0: A, op1: B);
3655}
3656
3657impl<'a> MovEmitter<GpbLo, GpbLo> for Assembler<'a> {
3658    fn mov(&mut self, op0: GpbLo, op1: GpbLo) {
3659        self.emit(MOV8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3660    }
3661}
3662
3663impl<'a> MovEmitter<Mem, GpbLo> for Assembler<'a> {
3664    fn mov(&mut self, op0: Mem, op1: GpbLo) {
3665        self.emit(MOV8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3666    }
3667}
3668
3669impl<'a> MovEmitter<Gpw, Gpw> for Assembler<'a> {
3670    fn mov(&mut self, op0: Gpw, op1: Gpw) {
3671        self.emit(MOV16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3672    }
3673}
3674
3675impl<'a> MovEmitter<Mem, Gpw> for Assembler<'a> {
3676    fn mov(&mut self, op0: Mem, op1: Gpw) {
3677        self.emit(MOV16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3678    }
3679}
3680
3681impl<'a> MovEmitter<Gpd, Gpd> for Assembler<'a> {
3682    fn mov(&mut self, op0: Gpd, op1: Gpd) {
3683        self.emit(MOV32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3684    }
3685}
3686
3687impl<'a> MovEmitter<Mem, Gpd> for Assembler<'a> {
3688    fn mov(&mut self, op0: Mem, op1: Gpd) {
3689        self.emit(MOV32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3690    }
3691}
3692
3693impl<'a> MovEmitter<Gpq, Gpq> for Assembler<'a> {
3694    fn mov(&mut self, op0: Gpq, op1: Gpq) {
3695        self.emit(MOV64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3696    }
3697}
3698
3699impl<'a> MovEmitter<Mem, Gpq> for Assembler<'a> {
3700    fn mov(&mut self, op0: Mem, op1: Gpq) {
3701        self.emit(MOV64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3702    }
3703}
3704
3705impl<'a> MovEmitter<GpbLo, Mem> for Assembler<'a> {
3706    fn mov(&mut self, op0: GpbLo, op1: Mem) {
3707        self.emit(MOV8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3708    }
3709}
3710
3711impl<'a> MovEmitter<Gpw, Mem> for Assembler<'a> {
3712    fn mov(&mut self, op0: Gpw, op1: Mem) {
3713        self.emit(MOV16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3714    }
3715}
3716
3717impl<'a> MovEmitter<Gpd, Mem> for Assembler<'a> {
3718    fn mov(&mut self, op0: Gpd, op1: Mem) {
3719        self.emit(MOV32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3720    }
3721}
3722
3723impl<'a> MovEmitter<Gpq, Mem> for Assembler<'a> {
3724    fn mov(&mut self, op0: Gpq, op1: Mem) {
3725        self.emit(MOV64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3726    }
3727}
3728
3729impl<'a> MovEmitter<GpbLo, AbsoluteAddress> for Assembler<'a> {
3730    fn mov(&mut self, op0: GpbLo, op1: AbsoluteAddress) {
3731        self.emit(MOV8RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3732    }
3733}
3734
3735impl<'a> MovEmitter<Gpw, AbsoluteAddress> for Assembler<'a> {
3736    fn mov(&mut self, op0: Gpw, op1: AbsoluteAddress) {
3737        self.emit(MOV16RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3738    }
3739}
3740
3741impl<'a> MovEmitter<Gpd, AbsoluteAddress> for Assembler<'a> {
3742    fn mov(&mut self, op0: Gpd, op1: AbsoluteAddress) {
3743        self.emit(MOV32RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3744    }
3745}
3746
3747impl<'a> MovEmitter<Gpq, AbsoluteAddress> for Assembler<'a> {
3748    fn mov(&mut self, op0: Gpq, op1: AbsoluteAddress) {
3749        self.emit(MOV64RA, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3750    }
3751}
3752
3753impl<'a> MovEmitter<AbsoluteAddress, GpbLo> for Assembler<'a> {
3754    fn mov(&mut self, op0: AbsoluteAddress, op1: GpbLo) {
3755        self.emit(MOV8AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3756    }
3757}
3758
3759impl<'a> MovEmitter<AbsoluteAddress, Gpw> for Assembler<'a> {
3760    fn mov(&mut self, op0: AbsoluteAddress, op1: Gpw) {
3761        self.emit(MOV16AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3762    }
3763}
3764
3765impl<'a> MovEmitter<AbsoluteAddress, Gpd> for Assembler<'a> {
3766    fn mov(&mut self, op0: AbsoluteAddress, op1: Gpd) {
3767        self.emit(MOV32AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3768    }
3769}
3770
3771impl<'a> MovEmitter<AbsoluteAddress, Gpq> for Assembler<'a> {
3772    fn mov(&mut self, op0: AbsoluteAddress, op1: Gpq) {
3773        self.emit(MOV64AR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3774    }
3775}
3776
3777impl<'a> MovEmitter<GpbLo, Imm> for Assembler<'a> {
3778    fn mov(&mut self, op0: GpbLo, op1: Imm) {
3779        self.emit(MOV8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3780    }
3781}
3782
3783impl<'a> MovEmitter<Gpw, Imm> for Assembler<'a> {
3784    fn mov(&mut self, op0: Gpw, op1: Imm) {
3785        self.emit(MOV16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3786    }
3787}
3788
3789impl<'a> MovEmitter<Gpd, Imm> for Assembler<'a> {
3790    fn mov(&mut self, op0: Gpd, op1: Imm) {
3791        self.emit(MOV32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3792    }
3793}
3794
3795impl<'a> MovEmitter<Gpq, Imm> for Assembler<'a> {
3796    fn mov(&mut self, op0: Gpq, op1: Imm) {
3797        self.emit(MOV64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3798    }
3799}
3800
3801impl<'a> MovEmitter<Mem, Imm> for Assembler<'a> {
3802    fn mov(&mut self, op0: Mem, op1: Imm) {
3803        self.emit(MOV8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3804    }
3805}
3806
/// `MOVS` (MOVS). 
/// Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified with the first operand (destination operand). Both the source and destination operands are located in memory. The address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be overridden with a segment override prefix, but the ES segment cannot be overridden.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVS%3AMOVSB%3AMOVSW%3AMOVSD%3AMOVSQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait MovsEmitter {
    fn movs(&mut self);
}

impl<'a> MovsEmitter for Assembler<'a> {
    fn movs(&mut self) {
        // Emits the byte-granularity string-move opcode entry (`MOVS8`); both
        // operands are implicit (DS:(E)SI -> ES:(E)DI), so no explicit
        // operands are passed.
        self.emit(MOVS8, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
3831
3832/// `MOVSX` (MOVSX). 
3833/// Copies the contents of the source operand (register or memory location) to the destination operand (register) and sign extends the value to 16 or 32 bits (see Figure 7-6 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The size of the converted value depends on the operand-size attribute.
3834///
3835///
3836/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVSX%3AMOVSXD.html).
3837///
3838/// Supported operand variants:
3839///
3840/// ```text
3841/// +----+------------+
3842/// | #  | Operands   |
3843/// +----+------------+
3844/// | 1  | Gpd, GpbLo |
3845/// | 2  | Gpd, Gpd   |
3846/// | 3  | Gpd, Gpw   |
3847/// | 4  | Gpd, Mem   |
3848/// | 5  | Gpq, GpbLo |
3849/// | 6  | Gpq, Gpd   |
3850/// | 7  | Gpq, Gpw   |
3851/// | 8  | Gpq, Mem   |
3852/// | 9  | Gpw, GpbLo |
3853/// | 10 | Gpw, Gpd   |
3854/// | 11 | Gpw, Gpw   |
3855/// | 12 | Gpw, Mem   |
3856/// +----+------------+
3857/// ```
3858pub trait MovsxEmitter<A, B> {
3859    fn movsx(&mut self, op0: A, op1: B);
3860}
3861
3862impl<'a> MovsxEmitter<Gpw, Gpd> for Assembler<'a> {
3863    fn movsx(&mut self, op0: Gpw, op1: Gpd) {
3864        self.emit(MOVSXR16R32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3865    }
3866}
3867
3868impl<'a> MovsxEmitter<Gpw, Mem> for Assembler<'a> {
3869    fn movsx(&mut self, op0: Gpw, op1: Mem) {
3870        self.emit(MOVSXR16M32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3871    }
3872}
3873
3874impl<'a> MovsxEmitter<Gpd, Gpd> for Assembler<'a> {
3875    fn movsx(&mut self, op0: Gpd, op1: Gpd) {
3876        self.emit(MOVSXR32R32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3877    }
3878}
3879
3880impl<'a> MovsxEmitter<Gpd, Mem> for Assembler<'a> {
3881    fn movsx(&mut self, op0: Gpd, op1: Mem) {
3882        self.emit(MOVSXR32M32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3883    }
3884}
3885
3886impl<'a> MovsxEmitter<Gpq, Gpd> for Assembler<'a> {
3887    fn movsx(&mut self, op0: Gpq, op1: Gpd) {
3888        self.emit(MOVSXR64R32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3889    }
3890}
3891
3892impl<'a> MovsxEmitter<Gpq, Mem> for Assembler<'a> {
3893    fn movsx(&mut self, op0: Gpq, op1: Mem) {
3894        self.emit(MOVSXR64M32, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3895    }
3896}
3897
3898impl<'a> MovsxEmitter<Gpw, GpbLo> for Assembler<'a> {
3899    fn movsx(&mut self, op0: Gpw, op1: GpbLo) {
3900        self.emit(MOVSXR16R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3901    }
3902}
3903
3904impl<'a> MovsxEmitter<Gpd, GpbLo> for Assembler<'a> {
3905    fn movsx(&mut self, op0: Gpd, op1: GpbLo) {
3906        self.emit(MOVSXR32R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3907    }
3908}
3909
3910impl<'a> MovsxEmitter<Gpq, GpbLo> for Assembler<'a> {
3911    fn movsx(&mut self, op0: Gpq, op1: GpbLo) {
3912        self.emit(MOVSXR64R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3913    }
3914}
3915
3916impl<'a> MovsxEmitter<Gpw, Gpw> for Assembler<'a> {
3917    fn movsx(&mut self, op0: Gpw, op1: Gpw) {
3918        self.emit(MOVSXR16R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3919    }
3920}
3921
3922impl<'a> MovsxEmitter<Gpd, Gpw> for Assembler<'a> {
3923    fn movsx(&mut self, op0: Gpd, op1: Gpw) {
3924        self.emit(MOVSXR32R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3925    }
3926}
3927
3928impl<'a> MovsxEmitter<Gpq, Gpw> for Assembler<'a> {
3929    fn movsx(&mut self, op0: Gpq, op1: Gpw) {
3930        self.emit(MOVSXR64R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3931    }
3932}
3933
3934/// `MOVZX` (MOVZX). 
3935/// Copies the contents of the source operand (register or memory location) to the destination operand (register) and zero extends the value. The size of the converted value depends on the operand-size attribute.
3936///
3937///
3938/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVZX.html).
3939///
3940/// Supported operand variants:
3941///
3942/// ```text
3943/// +---+------------+
3944/// | # | Operands   |
3945/// +---+------------+
3946/// | 1 | Gpd, GpbLo |
3947/// | 2 | Gpd, Gpw   |
3948/// | 3 | Gpd, Mem   |
3949/// | 4 | Gpq, GpbLo |
3950/// | 5 | Gpq, Gpw   |
3951/// | 6 | Gpq, Mem   |
3952/// | 7 | Gpw, GpbLo |
3953/// | 8 | Gpw, Gpw   |
3954/// | 9 | Gpw, Mem   |
3955/// +---+------------+
3956/// ```
3957pub trait MovzxEmitter<A, B> {
3958    fn movzx(&mut self, op0: A, op1: B);
3959}
3960
3961impl<'a> MovzxEmitter<Gpw, GpbLo> for Assembler<'a> {
3962    fn movzx(&mut self, op0: Gpw, op1: GpbLo) {
3963        self.emit(MOVZXR16R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3964    }
3965}
3966
3967impl<'a> MovzxEmitter<Gpw, Mem> for Assembler<'a> {
3968    fn movzx(&mut self, op0: Gpw, op1: Mem) {
3969        self.emit(MOVZXR16M8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3970    }
3971}
3972
3973impl<'a> MovzxEmitter<Gpd, GpbLo> for Assembler<'a> {
3974    fn movzx(&mut self, op0: Gpd, op1: GpbLo) {
3975        self.emit(MOVZXR32R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3976    }
3977}
3978
3979impl<'a> MovzxEmitter<Gpd, Mem> for Assembler<'a> {
3980    fn movzx(&mut self, op0: Gpd, op1: Mem) {
3981        self.emit(MOVZXR32M8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3982    }
3983}
3984
3985impl<'a> MovzxEmitter<Gpq, GpbLo> for Assembler<'a> {
3986    fn movzx(&mut self, op0: Gpq, op1: GpbLo) {
3987        self.emit(MOVZXR64R8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3988    }
3989}
3990
3991impl<'a> MovzxEmitter<Gpq, Mem> for Assembler<'a> {
3992    fn movzx(&mut self, op0: Gpq, op1: Mem) {
3993        self.emit(MOVZXR64M8, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
3994    }
3995}
3996
3997impl<'a> MovzxEmitter<Gpw, Gpw> for Assembler<'a> {
3998    fn movzx(&mut self, op0: Gpw, op1: Gpw) {
3999        self.emit(MOVZXR16R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4000    }
4001}
4002
4003impl<'a> MovzxEmitter<Gpd, Gpw> for Assembler<'a> {
4004    fn movzx(&mut self, op0: Gpd, op1: Gpw) {
4005        self.emit(MOVZXR32R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4006    }
4007}
4008
4009impl<'a> MovzxEmitter<Gpq, Gpw> for Assembler<'a> {
4010    fn movzx(&mut self, op0: Gpq, op1: Gpw) {
4011        self.emit(MOVZXR64R16, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4012    }
4013}
4014
4015/// `MOV_CR2G`.
4016///
4017/// Supported operand variants:
4018///
4019/// ```text
4020/// +---+-----------+
4021/// | # | Operands  |
4022/// +---+-----------+
4023/// | 1 | Gpq, CReg |
4024/// +---+-----------+
4025/// ```
4026pub trait MovCr2gEmitter<A, B> {
4027    fn mov_cr2g(&mut self, op0: A, op1: B);
4028}
4029
4030impl<'a> MovCr2gEmitter<Gpq, CReg> for Assembler<'a> {
4031    fn mov_cr2g(&mut self, op0: Gpq, op1: CReg) {
4032        self.emit(MOV_CR2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4033    }
4034}
4035
4036/// `MOV_DR2G`.
4037///
4038/// Supported operand variants:
4039///
4040/// ```text
4041/// +---+-----------+
4042/// | # | Operands  |
4043/// +---+-----------+
4044/// | 1 | Gpq, DReg |
4045/// +---+-----------+
4046/// ```
4047pub trait MovDr2gEmitter<A, B> {
4048    fn mov_dr2g(&mut self, op0: A, op1: B);
4049}
4050
4051impl<'a> MovDr2gEmitter<Gpq, DReg> for Assembler<'a> {
4052    fn mov_dr2g(&mut self, op0: Gpq, op1: DReg) {
4053        self.emit(MOV_DR2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4054    }
4055}
4056
4057/// `MOV_G2CR`.
4058///
4059/// Supported operand variants:
4060///
4061/// ```text
4062/// +---+-----------+
4063/// | # | Operands  |
4064/// +---+-----------+
4065/// | 1 | CReg, Gpq |
4066/// +---+-----------+
4067/// ```
4068pub trait MovG2crEmitter<A, B> {
4069    fn mov_g2cr(&mut self, op0: A, op1: B);
4070}
4071
4072impl<'a> MovG2crEmitter<CReg, Gpq> for Assembler<'a> {
4073    fn mov_g2cr(&mut self, op0: CReg, op1: Gpq) {
4074        self.emit(MOV_G2CRRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4075    }
4076}
4077
4078/// `MOV_G2DR`.
4079///
4080/// Supported operand variants:
4081///
4082/// ```text
4083/// +---+-----------+
4084/// | # | Operands  |
4085/// +---+-----------+
4086/// | 1 | DReg, Gpq |
4087/// +---+-----------+
4088/// ```
4089pub trait MovG2drEmitter<A, B> {
4090    fn mov_g2dr(&mut self, op0: A, op1: B);
4091}
4092
4093impl<'a> MovG2drEmitter<DReg, Gpq> for Assembler<'a> {
4094    fn mov_g2dr(&mut self, op0: DReg, op1: Gpq) {
4095        self.emit(MOV_G2DRRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4096    }
4097}
4098
4099/// `MOV_G2S` (MOV). 
4100/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination register can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
4101///
4102///
4103/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
4104///
4105/// Supported operand variants:
4106///
4107/// ```text
4108/// +---+-----------+
4109/// | # | Operands  |
4110/// +---+-----------+
4111/// | 1 | SReg, Gpd |
4112/// | 2 | SReg, Mem |
4113/// +---+-----------+
4114/// ```
4115pub trait MovG2sEmitter<A, B> {
4116    fn mov_g2s(&mut self, op0: A, op1: B);
4117}
4118
4119impl<'a> MovG2sEmitter<SReg, Gpd> for Assembler<'a> {
4120    fn mov_g2s(&mut self, op0: SReg, op1: Gpd) {
4121        self.emit(MOV_G2SRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4122    }
4123}
4124
4125impl<'a> MovG2sEmitter<SReg, Mem> for Assembler<'a> {
4126    fn mov_g2s(&mut self, op0: SReg, op1: Mem) {
4127        self.emit(MOV_G2SRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4128    }
4129}
4130
4131/// `MOV_S2G` (MOV). 
4132/// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination register can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
4133///
4134///
4135/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
4136///
4137/// Supported operand variants:
4138///
4139/// ```text
4140/// +---+-----------+
4141/// | # | Operands  |
4142/// +---+-----------+
4143/// | 1 | Gpd, SReg |
4144/// | 2 | Mem, SReg |
4145/// +---+-----------+
4146/// ```
4147pub trait MovS2gEmitter<A, B> {
4148    fn mov_s2g(&mut self, op0: A, op1: B);
4149}
4150
4151impl<'a> MovS2gEmitter<Gpd, SReg> for Assembler<'a> {
4152    fn mov_s2g(&mut self, op0: Gpd, op1: SReg) {
4153        self.emit(MOV_S2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4154    }
4155}
4156
4157impl<'a> MovS2gEmitter<Mem, SReg> for Assembler<'a> {
4158    fn mov_s2g(&mut self, op0: Mem, op1: SReg) {
4159        self.emit(MOV_S2GMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4160    }
4161}
4162
4163/// `MUL` (MUL). 
4164/// Performs an unsigned multiplication of the first operand (destination operand) and the second operand (source operand) and stores the result in the destination operand. The destination operand is an implied operand located in register AL, AX or EAX (depending on the size of the operand); the source operand is located in a general-purpose register or a memory location. The action of this instruction and the location of the result depends on the opcode and the operand size as shown in Table 4-9.
4165///
4166///
4167/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MUL.html).
4168///
4169/// Supported operand variants:
4170///
4171/// ```text
4172/// +---+----------+
4173/// | # | Operands |
4174/// +---+----------+
4175/// | 1 | GpbLo    |
4176/// | 2 | Gpd      |
4177/// | 3 | Gpq      |
4178/// | 4 | Gpw      |
4179/// | 5 | Mem      |
4180/// +---+----------+
4181/// ```
4182pub trait MulEmitter<A> {
4183    fn mul(&mut self, op0: A);
4184}
4185
4186impl<'a> MulEmitter<GpbLo> for Assembler<'a> {
4187    fn mul(&mut self, op0: GpbLo) {
4188        self.emit(MUL8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4189    }
4190}
4191
4192impl<'a> MulEmitter<Mem> for Assembler<'a> {
4193    fn mul(&mut self, op0: Mem) {
4194        self.emit(MUL8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4195    }
4196}
4197
4198impl<'a> MulEmitter<Gpw> for Assembler<'a> {
4199    fn mul(&mut self, op0: Gpw) {
4200        self.emit(MUL16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4201    }
4202}
4203
4204impl<'a> MulEmitter<Gpd> for Assembler<'a> {
4205    fn mul(&mut self, op0: Gpd) {
4206        self.emit(MUL32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4207    }
4208}
4209
4210impl<'a> MulEmitter<Gpq> for Assembler<'a> {
4211    fn mul(&mut self, op0: Gpq) {
4212        self.emit(MUL64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4213    }
4214}
4215
4216/// `NEG` (NEG). 
4217/// Replaces the value of operand (the destination operand) with its two's complement. (This operation is equivalent to subtracting the operand from 0.) The destination operand is located in a general-purpose register or a memory location.
4218///
4219///
4220/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NEG.html).
4221///
4222/// Supported operand variants:
4223///
4224/// ```text
4225/// +---+----------+
4226/// | # | Operands |
4227/// +---+----------+
4228/// | 1 | GpbLo    |
4229/// | 2 | Gpd      |
4230/// | 3 | Gpq      |
4231/// | 4 | Gpw      |
4232/// | 5 | Mem      |
4233/// +---+----------+
4234/// ```
4235pub trait NegEmitter<A> {
4236    fn neg(&mut self, op0: A);
4237}
4238
4239impl<'a> NegEmitter<GpbLo> for Assembler<'a> {
4240    fn neg(&mut self, op0: GpbLo) {
4241        self.emit(NEG8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4242    }
4243}
4244
4245impl<'a> NegEmitter<Mem> for Assembler<'a> {
4246    fn neg(&mut self, op0: Mem) {
4247        self.emit(NEG8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4248    }
4249}
4250
4251impl<'a> NegEmitter<Gpw> for Assembler<'a> {
4252    fn neg(&mut self, op0: Gpw) {
4253        self.emit(NEG16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4254    }
4255}
4256
4257impl<'a> NegEmitter<Gpd> for Assembler<'a> {
4258    fn neg(&mut self, op0: Gpd) {
4259        self.emit(NEG32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4260    }
4261}
4262
4263impl<'a> NegEmitter<Gpq> for Assembler<'a> {
4264    fn neg(&mut self, op0: Gpq) {
4265        self.emit(NEG64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4266    }
4267}
4268
/// `NOP` (NOP). 
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait NopEmitter {
    fn nop(&mut self);
}

impl<'a> NopEmitter for Assembler<'a> {
    fn nop(&mut self) {
        // Operand-less form (`NOP` opcode entry); the operand-carrying
        // multi-byte forms are exposed separately via `NopEmitter_1`.
        self.emit(NOP, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
4293
4294/// `NOP` (NOP). 
4295/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
4296///
4297///
4298/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
4299///
4300/// Supported operand variants:
4301///
4302/// ```text
4303/// +---+----------+
4304/// | # | Operands |
4305/// +---+----------+
4306/// | 1 | Gpd      |
4307/// | 2 | Gpq      |
4308/// | 3 | Gpw      |
4309/// | 4 | Mem      |
4310/// +---+----------+
4311/// ```
4312pub trait NopEmitter_1<A> {
4313    fn nop_1(&mut self, op0: A);
4314}
4315
4316impl<'a> NopEmitter_1<Gpw> for Assembler<'a> {
4317    fn nop_1(&mut self, op0: Gpw) {
4318        self.emit(NOP16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4319    }
4320}
4321
4322impl<'a> NopEmitter_1<Mem> for Assembler<'a> {
4323    fn nop_1(&mut self, op0: Mem) {
4324        self.emit(NOP16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4325    }
4326}
4327
4328impl<'a> NopEmitter_1<Gpd> for Assembler<'a> {
4329    fn nop_1(&mut self, op0: Gpd) {
4330        self.emit(NOP32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4331    }
4332}
4333
4334impl<'a> NopEmitter_1<Gpq> for Assembler<'a> {
4335    fn nop_1(&mut self, op0: Gpq) {
4336        self.emit(NOP64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4337    }
4338}
4339
4340/// `NOT` (NOT). 
4341/// Performs a bitwise NOT operation (each 1 is set to 0, and each 0 is set to 1) on the destination operand and stores the result in the destination operand location. The destination operand can be a register or a memory location.
4342///
4343///
4344/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOT.html).
4345///
4346/// Supported operand variants:
4347///
4348/// ```text
4349/// +---+----------+
4350/// | # | Operands |
4351/// +---+----------+
4352/// | 1 | GpbLo    |
4353/// | 2 | Gpd      |
4354/// | 3 | Gpq      |
4355/// | 4 | Gpw      |
4356/// | 5 | Mem      |
4357/// +---+----------+
4358/// ```
4359pub trait NotEmitter<A> {
4360    fn not(&mut self, op0: A);
4361}
4362
4363impl<'a> NotEmitter<GpbLo> for Assembler<'a> {
4364    fn not(&mut self, op0: GpbLo) {
4365        self.emit(NOT8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4366    }
4367}
4368
4369impl<'a> NotEmitter<Mem> for Assembler<'a> {
4370    fn not(&mut self, op0: Mem) {
4371        self.emit(NOT8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4372    }
4373}
4374
4375impl<'a> NotEmitter<Gpw> for Assembler<'a> {
4376    fn not(&mut self, op0: Gpw) {
4377        self.emit(NOT16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4378    }
4379}
4380
4381impl<'a> NotEmitter<Gpd> for Assembler<'a> {
4382    fn not(&mut self, op0: Gpd) {
4383        self.emit(NOT32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4384    }
4385}
4386
4387impl<'a> NotEmitter<Gpq> for Assembler<'a> {
4388    fn not(&mut self, op0: Gpq) {
4389        self.emit(NOT64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4390    }
4391}
4392
4393/// `OR` (OR). 
4394/// Performs a bitwise inclusive OR operation between the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result of the OR instruction is set to 0 if both corresponding bits of the first and second operands are 0; otherwise, each bit is set to 1.
4395///
4396///
4397/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OR.html).
4398///
4399/// Supported operand variants:
4400///
4401/// ```text
4402/// +----+--------------+
4403/// | #  | Operands     |
4404/// +----+--------------+
4405/// | 1  | GpbLo, GpbLo |
4406/// | 2  | GpbLo, Imm   |
4407/// | 3  | GpbLo, Mem   |
4408/// | 4  | Gpd, Gpd     |
4409/// | 5  | Gpd, Imm     |
4410/// | 6  | Gpd, Mem     |
4411/// | 7  | Gpq, Gpq     |
4412/// | 8  | Gpq, Imm     |
4413/// | 9  | Gpq, Mem     |
4414/// | 10 | Gpw, Gpw     |
4415/// | 11 | Gpw, Imm     |
4416/// | 12 | Gpw, Mem     |
4417/// | 13 | Mem, GpbLo   |
4418/// | 14 | Mem, Gpd     |
4419/// | 15 | Mem, Gpq     |
4420/// | 16 | Mem, Gpw     |
4421/// | 17 | Mem, Imm     |
4422/// +----+--------------+
4423/// ```
4424pub trait OrEmitter<A, B> {
4425    fn or(&mut self, op0: A, op1: B);
4426}
4427
4428impl<'a> OrEmitter<GpbLo, GpbLo> for Assembler<'a> {
4429    fn or(&mut self, op0: GpbLo, op1: GpbLo) {
4430        self.emit(OR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4431    }
4432}
4433
4434impl<'a> OrEmitter<Mem, GpbLo> for Assembler<'a> {
4435    fn or(&mut self, op0: Mem, op1: GpbLo) {
4436        self.emit(OR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4437    }
4438}
4439
4440impl<'a> OrEmitter<Gpw, Gpw> for Assembler<'a> {
4441    fn or(&mut self, op0: Gpw, op1: Gpw) {
4442        self.emit(OR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4443    }
4444}
4445
4446impl<'a> OrEmitter<Mem, Gpw> for Assembler<'a> {
4447    fn or(&mut self, op0: Mem, op1: Gpw) {
4448        self.emit(OR16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4449    }
4450}
4451
4452impl<'a> OrEmitter<Gpd, Gpd> for Assembler<'a> {
4453    fn or(&mut self, op0: Gpd, op1: Gpd) {
4454        self.emit(OR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4455    }
4456}
4457
4458impl<'a> OrEmitter<Mem, Gpd> for Assembler<'a> {
4459    fn or(&mut self, op0: Mem, op1: Gpd) {
4460        self.emit(OR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4461    }
4462}
4463
4464impl<'a> OrEmitter<Gpq, Gpq> for Assembler<'a> {
4465    fn or(&mut self, op0: Gpq, op1: Gpq) {
4466        self.emit(OR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4467    }
4468}
4469
4470impl<'a> OrEmitter<Mem, Gpq> for Assembler<'a> {
4471    fn or(&mut self, op0: Mem, op1: Gpq) {
4472        self.emit(OR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4473    }
4474}
4475
4476impl<'a> OrEmitter<GpbLo, Mem> for Assembler<'a> {
4477    fn or(&mut self, op0: GpbLo, op1: Mem) {
4478        self.emit(OR8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4479    }
4480}
4481
4482impl<'a> OrEmitter<Gpw, Mem> for Assembler<'a> {
4483    fn or(&mut self, op0: Gpw, op1: Mem) {
4484        self.emit(OR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4485    }
4486}
4487
4488impl<'a> OrEmitter<Gpd, Mem> for Assembler<'a> {
4489    fn or(&mut self, op0: Gpd, op1: Mem) {
4490        self.emit(OR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4491    }
4492}
4493
4494impl<'a> OrEmitter<Gpq, Mem> for Assembler<'a> {
4495    fn or(&mut self, op0: Gpq, op1: Mem) {
4496        self.emit(OR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4497    }
4498}
4499
4500impl<'a> OrEmitter<GpbLo, Imm> for Assembler<'a> {
4501    fn or(&mut self, op0: GpbLo, op1: Imm) {
4502        self.emit(OR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4503    }
4504}
4505
4506impl<'a> OrEmitter<Gpw, Imm> for Assembler<'a> {
4507    fn or(&mut self, op0: Gpw, op1: Imm) {
4508        self.emit(OR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4509    }
4510}
4511
4512impl<'a> OrEmitter<Gpd, Imm> for Assembler<'a> {
4513    fn or(&mut self, op0: Gpd, op1: Imm) {
4514        self.emit(OR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4515    }
4516}
4517
4518impl<'a> OrEmitter<Gpq, Imm> for Assembler<'a> {
4519    fn or(&mut self, op0: Gpq, op1: Imm) {
4520        self.emit(OR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4521    }
4522}
4523
4524impl<'a> OrEmitter<Mem, Imm> for Assembler<'a> {
4525    fn or(&mut self, op0: Mem, op1: Imm) {
4526        self.emit(OR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4527    }
4528}
4529
/// `OUT` (OUT). 
/// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as a source operand allows I/O ports from 0 to 65,535 to be accessed.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait OutEmitter {
    fn r#out(&mut self);
}

impl<'a> OutEmitter for Assembler<'a> {
    fn r#out(&mut self) {
        // Zero-operand form: all four operand slots carry the NOREG placeholder.
        // NOTE(review): presumably this encodes the implicit `OUT DX, AL` form
        // (opcode name OUT8) — confirm against the opcode table.
        self.emit(OUT8, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
4554
4555/// `OUT` (OUT). 
4556/// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as a source operand allows I/O ports from 0 to 65,535 to be accessed.
4557///
4558///
4559/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
4560///
4561/// Supported operand variants:
4562///
4563/// ```text
4564/// +---+------------+
4565/// | # | Operands   |
4566/// +---+------------+
4567/// | 1 | GpbLo, Imm |
4568/// | 2 | Gpd, Imm   |
4569/// | 3 | Gpq, Imm   |
4570/// | 4 | Gpw, Imm   |
4571/// +---+------------+
4572/// ```
4573pub trait OutEmitter_2<A, B> {
4574    fn r#out_2(&mut self, op0: A, op1: B);
4575}
4576
4577impl<'a> OutEmitter_2<GpbLo, Imm> for Assembler<'a> {
4578    fn r#out_2(&mut self, op0: GpbLo, op1: Imm) {
4579        self.emit(OUT8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4580    }
4581}
4582
4583impl<'a> OutEmitter_2<Gpw, Imm> for Assembler<'a> {
4584    fn r#out_2(&mut self, op0: Gpw, op1: Imm) {
4585        self.emit(OUT16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4586    }
4587}
4588
4589impl<'a> OutEmitter_2<Gpd, Imm> for Assembler<'a> {
4590    fn r#out_2(&mut self, op0: Gpd, op1: Imm) {
4591        self.emit(OUT32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4592    }
4593}
4594
4595impl<'a> OutEmitter_2<Gpq, Imm> for Assembler<'a> {
4596    fn r#out_2(&mut self, op0: Gpq, op1: Imm) {
4597        self.emit(OUT64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4598    }
4599}
4600
/// `OUTS` (OUTS). 
/// Copies data from the source operand (second operand) to the I/O port specified with the destination operand (first operand). The source operand is a memory location, the address of which is read from either the DS:SI, DS:ESI or the RSI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The DS segment may be overridden with a segment override prefix.) The destination operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUTS%3AOUTSB%3AOUTSW%3AOUTSD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait OutsEmitter {
    fn outs(&mut self);
}

impl<'a> OutsEmitter for Assembler<'a> {
    fn outs(&mut self) {
        // Zero-operand form (operands are implicit per the doc above: DS:(E/R)SI
        // source, DX port); all four operand slots carry the NOREG placeholder.
        self.emit(OUTS8, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
4625
/// `PAUSE` (NOP). 
/// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait PauseEmitter {
    fn pause(&mut self);
}

impl<'a> PauseEmitter for Assembler<'a> {
    fn pause(&mut self) {
        // PAUSE takes no operands; all four operand slots carry the NOREG placeholder.
        self.emit(PAUSE, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
4650
4651/// `POP` (POP). 
4652/// Loads the value from the top of the stack to the location specified with the destination operand (or explicit opcode) and then increments the stack pointer. The destination operand can be a general-purpose register, memory location, or segment register.
4653///
4654///
4655/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POP.html).
4656///
4657/// Supported operand variants:
4658///
4659/// ```text
4660/// +---+----------+
4661/// | # | Operands |
4662/// +---+----------+
4663/// | 1 | Gpq      |
4664/// | 2 | Gpw      |
4665/// | 3 | Mem      |
4666/// +---+----------+
4667/// ```
4668pub trait PopEmitter<A> {
4669    fn pop(&mut self, op0: A);
4670}
4671
4672impl<'a> PopEmitter<Gpw> for Assembler<'a> {
4673    fn pop(&mut self, op0: Gpw) {
4674        self.emit(POP16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4675    }
4676}
4677
4678impl<'a> PopEmitter<Gpq> for Assembler<'a> {
4679    fn pop(&mut self, op0: Gpq) {
4680        self.emit(POPR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4681    }
4682}
4683
4684impl<'a> PopEmitter<Mem> for Assembler<'a> {
4685    fn pop(&mut self, op0: Mem) {
4686        self.emit(POP16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4687    }
4688}
4689
/// `POPF` (POPF). 
/// Pops a doubleword (POPFD) from the top of the stack (if the current operand-size attribute is 32) and stores the value in the EFLAGS register, or pops a word from the top of the stack (if the operand-size attribute is 16) and stores it in the lower 16 bits of the EFLAGS register (that is, the FLAGS register). These instructions reverse the operation of the PUSHF/PUSHFD/PUSHFQ instructions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POPF%3APOPFD%3APOPFQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait PopfEmitter {
    fn popf(&mut self);
}

impl<'a> PopfEmitter for Assembler<'a> {
    fn popf(&mut self) {
        // Zero-operand form. NOTE(review): the opcode name POPF16 suggests the
        // 16-bit FLAGS variant is emitted — confirm against the opcode table.
        self.emit(POPF16, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
4714
4715/// `POP_SEG`.
4716///
4717/// Supported operand variants:
4718///
4719/// ```text
4720/// +---+----------+
4721/// | # | Operands |
4722/// +---+----------+
4723/// | 1 | SReg     |
4724/// +---+----------+
4725/// ```
4726pub trait PopSegEmitter<A> {
4727    fn pop_seg(&mut self, op0: A);
4728}
4729
4730impl<'a> PopSegEmitter<SReg> for Assembler<'a> {
4731    fn pop_seg(&mut self, op0: SReg) {
4732        self.emit(POP_SEG16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4733    }
4734}
4735
4736/// `PUSH` (PUSH). 
4737/// Decrements the stack pointer and then stores the source operand on the top of the stack. Address and operand sizes are determined and used as follows
4738///
4739///
4740/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSH.html).
4741///
4742/// Supported operand variants:
4743///
4744/// ```text
4745/// +---+----------+
4746/// | # | Operands |
4747/// +---+----------+
4748/// | 1 | Gpq      |
4749/// | 2 | Gpw      |
4750/// | 3 | Imm      |
4751/// | 4 | Mem      |
4752/// +---+----------+
4753/// ```
4754pub trait PushEmitter<A> {
4755    fn push(&mut self, op0: A);
4756}
4757
4758impl<'a> PushEmitter<Gpw> for Assembler<'a> {
4759    fn push(&mut self, op0: Gpw) {
4760        self.emit(PUSH16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4761    }
4762}
4763
4764impl<'a> PushEmitter<Gpq> for Assembler<'a> {
4765    fn push(&mut self, op0: Gpq) {
4766        self.emit(PUSHR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4767    }
4768}
4769
4770impl<'a> PushEmitter<Imm> for Assembler<'a> {
4771    fn push(&mut self, op0: Imm) {
4772        self.emit(PUSH16I, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4773    }
4774}
4775
4776impl<'a> PushEmitter<Mem> for Assembler<'a> {
4777    fn push(&mut self, op0: Mem) {
4778        self.emit(PUSH16M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4779    }
4780}
4781
/// `PUSHF` (PUSHF). 
/// Decrements the stack pointer by 4 (if the current operand-size attribute is 32) and pushes the entire contents of the EFLAGS register onto the stack, or decrements the stack pointer by 2 (if the operand-size attribute is 16) and pushes the lower 16 bits of the EFLAGS register (that is, the FLAGS register) onto the stack. These instructions reverse the operation of the POPF/POPFD instructions.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSHF%3APUSHFD%3APUSHFQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait PushfEmitter {
    fn pushf(&mut self);
}

impl<'a> PushfEmitter for Assembler<'a> {
    fn pushf(&mut self) {
        // Zero-operand form. NOTE(review): the opcode name PUSHF16 suggests the
        // 16-bit FLAGS variant is emitted — confirm against the opcode table.
        self.emit(PUSHF16, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
4806
4807/// `PUSH_SEG`.
4808///
4809/// Supported operand variants:
4810///
4811/// ```text
4812/// +---+----------+
4813/// | # | Operands |
4814/// +---+----------+
4815/// | 1 | SReg     |
4816/// +---+----------+
4817/// ```
4818pub trait PushSegEmitter<A> {
4819    fn push_seg(&mut self, op0: A);
4820}
4821
4822impl<'a> PushSegEmitter<SReg> for Assembler<'a> {
4823    fn push_seg(&mut self, op0: SReg) {
4824        self.emit(PUSH_SEG16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
4825    }
4826}
4827
4828/// `RCL` (RCL). 
4829/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
4830///
4831///
4832/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
4833///
4834/// Supported operand variants:
4835///
4836/// ```text
4837/// +----+--------------+
4838/// | #  | Operands     |
4839/// +----+--------------+
4840/// | 1  | GpbLo, GpbLo |
4841/// | 2  | GpbLo, Imm   |
4842/// | 3  | Gpd, GpbLo   |
4843/// | 4  | Gpd, Imm     |
4844/// | 5  | Gpq, GpbLo   |
4845/// | 6  | Gpq, Imm     |
4846/// | 7  | Gpw, GpbLo   |
4847/// | 8  | Gpw, Imm     |
4848/// | 9  | Mem, GpbLo   |
4849/// | 10 | Mem, Imm     |
4850/// +----+--------------+
4851/// ```
4852pub trait RclEmitter<A, B> {
4853    fn rcl(&mut self, op0: A, op1: B);
4854}
4855
4856impl<'a> RclEmitter<GpbLo, Imm> for Assembler<'a> {
4857    fn rcl(&mut self, op0: GpbLo, op1: Imm) {
4858        self.emit(RCL8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4859    }
4860}
4861
4862impl<'a> RclEmitter<Mem, Imm> for Assembler<'a> {
4863    fn rcl(&mut self, op0: Mem, op1: Imm) {
4864        self.emit(RCL8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4865    }
4866}
4867
4868impl<'a> RclEmitter<Gpw, Imm> for Assembler<'a> {
4869    fn rcl(&mut self, op0: Gpw, op1: Imm) {
4870        self.emit(RCL16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4871    }
4872}
4873
4874impl<'a> RclEmitter<Gpd, Imm> for Assembler<'a> {
4875    fn rcl(&mut self, op0: Gpd, op1: Imm) {
4876        self.emit(RCL32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4877    }
4878}
4879
4880impl<'a> RclEmitter<Gpq, Imm> for Assembler<'a> {
4881    fn rcl(&mut self, op0: Gpq, op1: Imm) {
4882        self.emit(RCL64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4883    }
4884}
4885
4886impl<'a> RclEmitter<GpbLo, GpbLo> for Assembler<'a> {
4887    fn rcl(&mut self, op0: GpbLo, op1: GpbLo) {
4888        self.emit(RCL8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4889    }
4890}
4891
4892impl<'a> RclEmitter<Mem, GpbLo> for Assembler<'a> {
4893    fn rcl(&mut self, op0: Mem, op1: GpbLo) {
4894        self.emit(RCL8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4895    }
4896}
4897
4898impl<'a> RclEmitter<Gpw, GpbLo> for Assembler<'a> {
4899    fn rcl(&mut self, op0: Gpw, op1: GpbLo) {
4900        self.emit(RCL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4901    }
4902}
4903
4904impl<'a> RclEmitter<Gpd, GpbLo> for Assembler<'a> {
4905    fn rcl(&mut self, op0: Gpd, op1: GpbLo) {
4906        self.emit(RCL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4907    }
4908}
4909
4910impl<'a> RclEmitter<Gpq, GpbLo> for Assembler<'a> {
4911    fn rcl(&mut self, op0: Gpq, op1: GpbLo) {
4912        self.emit(RCL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4913    }
4914}
4915
4916/// `RCR` (RCR). 
4917/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
4918///
4919///
4920/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
4921///
4922/// Supported operand variants:
4923///
4924/// ```text
4925/// +----+--------------+
4926/// | #  | Operands     |
4927/// +----+--------------+
4928/// | 1  | GpbLo, GpbLo |
4929/// | 2  | GpbLo, Imm   |
4930/// | 3  | Gpd, GpbLo   |
4931/// | 4  | Gpd, Imm     |
4932/// | 5  | Gpq, GpbLo   |
4933/// | 6  | Gpq, Imm     |
4934/// | 7  | Gpw, GpbLo   |
4935/// | 8  | Gpw, Imm     |
4936/// | 9  | Mem, GpbLo   |
4937/// | 10 | Mem, Imm     |
4938/// +----+--------------+
4939/// ```
4940pub trait RcrEmitter<A, B> {
4941    fn rcr(&mut self, op0: A, op1: B);
4942}
4943
4944impl<'a> RcrEmitter<GpbLo, Imm> for Assembler<'a> {
4945    fn rcr(&mut self, op0: GpbLo, op1: Imm) {
4946        self.emit(RCR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4947    }
4948}
4949
4950impl<'a> RcrEmitter<Mem, Imm> for Assembler<'a> {
4951    fn rcr(&mut self, op0: Mem, op1: Imm) {
4952        self.emit(RCR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4953    }
4954}
4955
4956impl<'a> RcrEmitter<Gpw, Imm> for Assembler<'a> {
4957    fn rcr(&mut self, op0: Gpw, op1: Imm) {
4958        self.emit(RCR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4959    }
4960}
4961
4962impl<'a> RcrEmitter<Gpd, Imm> for Assembler<'a> {
4963    fn rcr(&mut self, op0: Gpd, op1: Imm) {
4964        self.emit(RCR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4965    }
4966}
4967
4968impl<'a> RcrEmitter<Gpq, Imm> for Assembler<'a> {
4969    fn rcr(&mut self, op0: Gpq, op1: Imm) {
4970        self.emit(RCR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4971    }
4972}
4973
4974impl<'a> RcrEmitter<GpbLo, GpbLo> for Assembler<'a> {
4975    fn rcr(&mut self, op0: GpbLo, op1: GpbLo) {
4976        self.emit(RCR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4977    }
4978}
4979
4980impl<'a> RcrEmitter<Mem, GpbLo> for Assembler<'a> {
4981    fn rcr(&mut self, op0: Mem, op1: GpbLo) {
4982        self.emit(RCR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4983    }
4984}
4985
4986impl<'a> RcrEmitter<Gpw, GpbLo> for Assembler<'a> {
4987    fn rcr(&mut self, op0: Gpw, op1: GpbLo) {
4988        self.emit(RCR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4989    }
4990}
4991
4992impl<'a> RcrEmitter<Gpd, GpbLo> for Assembler<'a> {
4993    fn rcr(&mut self, op0: Gpd, op1: GpbLo) {
4994        self.emit(RCR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4995    }
4996}
4997
4998impl<'a> RcrEmitter<Gpq, GpbLo> for Assembler<'a> {
4999    fn rcr(&mut self, op0: Gpq, op1: GpbLo) {
5000        self.emit(RCR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5001    }
5002}
5003
/// `RET` (RET). 
/// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait RetEmitter {
    fn ret(&mut self);
}

impl<'a> RetEmitter for Assembler<'a> {
    fn ret(&mut self) {
        // Zero-operand form (no stack-adjust immediate); see `RetEmitter_1` for
        // the immediate-carrying variant.
        self.emit(RET, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
5028
5029/// `RET` (RET). 
5030/// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
5031///
5032///
5033/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
5034///
5035/// Supported operand variants:
5036///
5037/// ```text
5038/// +---+----------+
5039/// | # | Operands |
5040/// +---+----------+
5041/// | 1 | Imm      |
5042/// +---+----------+
5043/// ```
5044pub trait RetEmitter_1<A> {
5045    fn ret_1(&mut self, op0: A);
5046}
5047
5048impl<'a> RetEmitter_1<Imm> for Assembler<'a> {
5049    fn ret_1(&mut self, op0: Imm) {
5050        self.emit(RETI, op0.as_operand(), &NOREG, &NOREG, &NOREG);
5051    }
5052}
5053
/// `RETF`.
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait RetfEmitter {
    fn retf(&mut self);
}

impl<'a> RetfEmitter for Assembler<'a> {
    fn retf(&mut self) {
        // Zero-operand far return; see `RetfEmitter_1` for the immediate variant.
        self.emit(RETF16, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
5074
5075/// `RETF`.
5076///
5077/// Supported operand variants:
5078///
5079/// ```text
5080/// +---+----------+
5081/// | # | Operands |
5082/// +---+----------+
5083/// | 1 | Imm      |
5084/// +---+----------+
5085/// ```
5086pub trait RetfEmitter_1<A> {
5087    fn retf_1(&mut self, op0: A);
5088}
5089
5090impl<'a> RetfEmitter_1<Imm> for Assembler<'a> {
5091    fn retf_1(&mut self, op0: Imm) {
5092        self.emit(RETF16I, op0.as_operand(), &NOREG, &NOREG, &NOREG);
5093    }
5094}
5095
5096/// `ROL` (ROL). 
5097/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
5098///
5099///
5100/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
5101///
5102/// Supported operand variants:
5103///
5104/// ```text
5105/// +----+--------------+
5106/// | #  | Operands     |
5107/// +----+--------------+
5108/// | 1  | GpbLo, GpbLo |
5109/// | 2  | GpbLo, Imm   |
5110/// | 3  | Gpd, GpbLo   |
5111/// | 4  | Gpd, Imm     |
5112/// | 5  | Gpq, GpbLo   |
5113/// | 6  | Gpq, Imm     |
5114/// | 7  | Gpw, GpbLo   |
5115/// | 8  | Gpw, Imm     |
5116/// | 9  | Mem, GpbLo   |
5117/// | 10 | Mem, Imm     |
5118/// +----+--------------+
5119/// ```
5120pub trait RolEmitter<A, B> {
5121    fn rol(&mut self, op0: A, op1: B);
5122}
5123
5124impl<'a> RolEmitter<GpbLo, Imm> for Assembler<'a> {
5125    fn rol(&mut self, op0: GpbLo, op1: Imm) {
5126        self.emit(ROL8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5127    }
5128}
5129
5130impl<'a> RolEmitter<Mem, Imm> for Assembler<'a> {
5131    fn rol(&mut self, op0: Mem, op1: Imm) {
5132        self.emit(ROL8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5133    }
5134}
5135
5136impl<'a> RolEmitter<Gpw, Imm> for Assembler<'a> {
5137    fn rol(&mut self, op0: Gpw, op1: Imm) {
5138        self.emit(ROL16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5139    }
5140}
5141
5142impl<'a> RolEmitter<Gpd, Imm> for Assembler<'a> {
5143    fn rol(&mut self, op0: Gpd, op1: Imm) {
5144        self.emit(ROL32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5145    }
5146}
5147
5148impl<'a> RolEmitter<Gpq, Imm> for Assembler<'a> {
5149    fn rol(&mut self, op0: Gpq, op1: Imm) {
5150        self.emit(ROL64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5151    }
5152}
5153
5154impl<'a> RolEmitter<GpbLo, GpbLo> for Assembler<'a> {
5155    fn rol(&mut self, op0: GpbLo, op1: GpbLo) {
5156        self.emit(ROL8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5157    }
5158}
5159
5160impl<'a> RolEmitter<Mem, GpbLo> for Assembler<'a> {
5161    fn rol(&mut self, op0: Mem, op1: GpbLo) {
5162        self.emit(ROL8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5163    }
5164}
5165
5166impl<'a> RolEmitter<Gpw, GpbLo> for Assembler<'a> {
5167    fn rol(&mut self, op0: Gpw, op1: GpbLo) {
5168        self.emit(ROL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5169    }
5170}
5171
5172impl<'a> RolEmitter<Gpd, GpbLo> for Assembler<'a> {
5173    fn rol(&mut self, op0: Gpd, op1: GpbLo) {
5174        self.emit(ROL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5175    }
5176}
5177
5178impl<'a> RolEmitter<Gpq, GpbLo> for Assembler<'a> {
5179    fn rol(&mut self, op0: Gpq, op1: GpbLo) {
5180        self.emit(ROL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5181    }
5182}
5183
5184/// `ROR` (ROR). 
5185/// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
5186///
5187///
5188/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
5189///
5190/// Supported operand variants:
5191///
5192/// ```text
5193/// +----+--------------+
5194/// | #  | Operands     |
5195/// +----+--------------+
5196/// | 1  | GpbLo, GpbLo |
5197/// | 2  | GpbLo, Imm   |
5198/// | 3  | Gpd, GpbLo   |
5199/// | 4  | Gpd, Imm     |
5200/// | 5  | Gpq, GpbLo   |
5201/// | 6  | Gpq, Imm     |
5202/// | 7  | Gpw, GpbLo   |
5203/// | 8  | Gpw, Imm     |
5204/// | 9  | Mem, GpbLo   |
5205/// | 10 | Mem, Imm     |
5206/// +----+--------------+
5207/// ```
5208pub trait RorEmitter<A, B> {
5209    fn ror(&mut self, op0: A, op1: B);
5210}
5211
5212impl<'a> RorEmitter<GpbLo, Imm> for Assembler<'a> {
5213    fn ror(&mut self, op0: GpbLo, op1: Imm) {
5214        self.emit(ROR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5215    }
5216}
5217
5218impl<'a> RorEmitter<Mem, Imm> for Assembler<'a> {
5219    fn ror(&mut self, op0: Mem, op1: Imm) {
5220        self.emit(ROR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5221    }
5222}
5223
5224impl<'a> RorEmitter<Gpw, Imm> for Assembler<'a> {
5225    fn ror(&mut self, op0: Gpw, op1: Imm) {
5226        self.emit(ROR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5227    }
5228}
5229
5230impl<'a> RorEmitter<Gpd, Imm> for Assembler<'a> {
5231    fn ror(&mut self, op0: Gpd, op1: Imm) {
5232        self.emit(ROR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5233    }
5234}
5235
5236impl<'a> RorEmitter<Gpq, Imm> for Assembler<'a> {
5237    fn ror(&mut self, op0: Gpq, op1: Imm) {
5238        self.emit(ROR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5239    }
5240}
5241
5242impl<'a> RorEmitter<GpbLo, GpbLo> for Assembler<'a> {
5243    fn ror(&mut self, op0: GpbLo, op1: GpbLo) {
5244        self.emit(ROR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5245    }
5246}
5247
5248impl<'a> RorEmitter<Mem, GpbLo> for Assembler<'a> {
5249    fn ror(&mut self, op0: Mem, op1: GpbLo) {
5250        self.emit(ROR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5251    }
5252}
5253
5254impl<'a> RorEmitter<Gpw, GpbLo> for Assembler<'a> {
5255    fn ror(&mut self, op0: Gpw, op1: GpbLo) {
5256        self.emit(ROR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5257    }
5258}
5259
5260impl<'a> RorEmitter<Gpd, GpbLo> for Assembler<'a> {
5261    fn ror(&mut self, op0: Gpd, op1: GpbLo) {
5262        self.emit(ROR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5263    }
5264}
5265
5266impl<'a> RorEmitter<Gpq, GpbLo> for Assembler<'a> {
5267    fn ror(&mut self, op0: Gpq, op1: GpbLo) {
5268        self.emit(ROR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5269    }
5270}
5271
/// `SAHF` (SAHF). 
/// Loads the SF, ZF, AF, PF, and CF flags of the EFLAGS register with values from the corresponding bits in the AH register (bits 7, 6, 4, 2, and 0, respectively). Bits 1, 3, and 5 of register AH are ignored; the corresponding reserved bits (1, 3, and 5) in the EFLAGS register remain as shown in the “Operation” section below.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAHF.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait SahfEmitter {
    fn sahf(&mut self);
}

impl<'a> SahfEmitter for Assembler<'a> {
    fn sahf(&mut self) {
        // SAHF has no explicit operands (AH is implicit, per the doc above), so
        // all four operand slots carry the NOREG placeholder.
        self.emit(SAHF, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
5296
5297/// `SAR` (SAR). 
5298/// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
5299///
5300///
5301/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
5302///
5303/// Supported operand variants:
5304///
5305/// ```text
5306/// +----+--------------+
5307/// | #  | Operands     |
5308/// +----+--------------+
5309/// | 1  | GpbLo, GpbLo |
5310/// | 2  | GpbLo, Imm   |
5311/// | 3  | Gpd, GpbLo   |
5312/// | 4  | Gpd, Imm     |
5313/// | 5  | Gpq, GpbLo   |
5314/// | 6  | Gpq, Imm     |
5315/// | 7  | Gpw, GpbLo   |
5316/// | 8  | Gpw, Imm     |
5317/// | 9  | Mem, GpbLo   |
5318/// | 10 | Mem, Imm     |
5319/// +----+--------------+
5320/// ```
5321pub trait SarEmitter<A, B> {
5322    fn sar(&mut self, op0: A, op1: B);
5323}
5324
5325impl<'a> SarEmitter<GpbLo, Imm> for Assembler<'a> {
5326    fn sar(&mut self, op0: GpbLo, op1: Imm) {
5327        self.emit(SAR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5328    }
5329}
5330
5331impl<'a> SarEmitter<Mem, Imm> for Assembler<'a> {
5332    fn sar(&mut self, op0: Mem, op1: Imm) {
5333        self.emit(SAR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5334    }
5335}
5336
5337impl<'a> SarEmitter<Gpw, Imm> for Assembler<'a> {
5338    fn sar(&mut self, op0: Gpw, op1: Imm) {
5339        self.emit(SAR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5340    }
5341}
5342
5343impl<'a> SarEmitter<Gpd, Imm> for Assembler<'a> {
5344    fn sar(&mut self, op0: Gpd, op1: Imm) {
5345        self.emit(SAR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5346    }
5347}
5348
5349impl<'a> SarEmitter<Gpq, Imm> for Assembler<'a> {
5350    fn sar(&mut self, op0: Gpq, op1: Imm) {
5351        self.emit(SAR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5352    }
5353}
5354
5355impl<'a> SarEmitter<GpbLo, GpbLo> for Assembler<'a> {
5356    fn sar(&mut self, op0: GpbLo, op1: GpbLo) {
5357        self.emit(SAR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5358    }
5359}
5360
5361impl<'a> SarEmitter<Mem, GpbLo> for Assembler<'a> {
5362    fn sar(&mut self, op0: Mem, op1: GpbLo) {
5363        self.emit(SAR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5364    }
5365}
5366
5367impl<'a> SarEmitter<Gpw, GpbLo> for Assembler<'a> {
5368    fn sar(&mut self, op0: Gpw, op1: GpbLo) {
5369        self.emit(SAR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5370    }
5371}
5372
5373impl<'a> SarEmitter<Gpd, GpbLo> for Assembler<'a> {
5374    fn sar(&mut self, op0: Gpd, op1: GpbLo) {
5375        self.emit(SAR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5376    }
5377}
5378
5379impl<'a> SarEmitter<Gpq, GpbLo> for Assembler<'a> {
5380    fn sar(&mut self, op0: Gpq, op1: GpbLo) {
5381        self.emit(SAR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
5382    }
5383}
5384
/// `SBB` (SBB).
/// Adds the source operand (second operand) and the carry (CF) flag, and subtracts the result from the destination operand (first operand). The result of the subtraction is stored in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SBB.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | #  | Operands     |
/// +----+--------------+
/// | 1  | GpbLo, GpbLo |
/// | 2  | GpbLo, Imm   |
/// | 3  | GpbLo, Mem   |
/// | 4  | Gpd, Gpd     |
/// | 5  | Gpd, Imm     |
/// | 6  | Gpd, Mem     |
/// | 7  | Gpq, Gpq     |
/// | 8  | Gpq, Imm     |
/// | 9  | Gpq, Mem     |
/// | 10 | Gpw, Gpw     |
/// | 11 | Gpw, Imm     |
/// | 12 | Gpw, Mem     |
/// | 13 | Mem, GpbLo   |
/// | 14 | Mem, Gpd     |
/// | 15 | Mem, Gpq     |
/// | 16 | Mem, Gpw     |
/// | 17 | Mem, Imm     |
/// +----+--------------+
/// ```
pub trait SbbEmitter<A, B> {
    fn sbb(&mut self, op0: A, op1: B);
}

// Register/memory destination with register source, per operand width (8/16/32/64).
impl<'a> SbbEmitter<GpbLo, GpbLo> for Assembler<'a> {
    fn sbb(&mut self, op0: GpbLo, op1: GpbLo) {
        self.emit(SBB8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Mem, GpbLo> for Assembler<'a> {
    fn sbb(&mut self, op0: Mem, op1: GpbLo) {
        self.emit(SBB8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpw, Gpw> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpw, op1: Gpw) {
        self.emit(SBB16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Mem, Gpw> for Assembler<'a> {
    fn sbb(&mut self, op0: Mem, op1: Gpw) {
        self.emit(SBB16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpd, Gpd> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpd, op1: Gpd) {
        self.emit(SBB32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Mem, Gpd> for Assembler<'a> {
    fn sbb(&mut self, op0: Mem, op1: Gpd) {
        self.emit(SBB32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpq, Gpq> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpq, op1: Gpq) {
        self.emit(SBB64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Mem, Gpq> for Assembler<'a> {
    fn sbb(&mut self, op0: Mem, op1: Gpq) {
        self.emit(SBB64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// Register destination with memory source, per operand width.
impl<'a> SbbEmitter<GpbLo, Mem> for Assembler<'a> {
    fn sbb(&mut self, op0: GpbLo, op1: Mem) {
        self.emit(SBB8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpw, Mem> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpw, op1: Mem) {
        self.emit(SBB16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpd, Mem> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpd, op1: Mem) {
        self.emit(SBB32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpq, Mem> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpq, op1: Mem) {
        self.emit(SBB64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// Register destination with immediate source, per operand width.
impl<'a> SbbEmitter<GpbLo, Imm> for Assembler<'a> {
    fn sbb(&mut self, op0: GpbLo, op1: Imm) {
        self.emit(SBB8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpw, Imm> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpw, op1: Imm) {
        self.emit(SBB16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpd, Imm> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpd, op1: Imm) {
        self.emit(SBB32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> SbbEmitter<Gpq, Imm> for Assembler<'a> {
    fn sbb(&mut self, op0: Gpq, op1: Imm) {
        self.emit(SBB64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// NOTE(review): the `Mem, Imm` variant always emits the 8-bit opcode (SBB8MI);
// 16/32/64-bit memory-immediate forms are not exposed through this trait.
impl<'a> SbbEmitter<Mem, Imm> for Assembler<'a> {
    fn sbb(&mut self, op0: Mem, op1: Imm) {
        self.emit(SBB8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}
5521
/// `SCAS` (SCAS).
/// In non-64-bit modes and in default 64-bit mode: this instruction compares a byte, word, doubleword or quadword specified using a memory operand with the value in AL, AX, or EAX. It then sets status flags in EFLAGS recording the results. The memory operand address is read from ES:(E)DI register (depending on the address-size attribute of the instruction and the current operational mode). Note that ES cannot be overridden with a segment override prefix.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SCAS%3ASCASB%3ASCASW%3ASCASD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | (none)   |
/// +---+----------+
/// ```
pub trait ScasEmitter {
    fn scas(&mut self);
}

impl<'a> ScasEmitter for Assembler<'a> {
    fn scas(&mut self) {
        // Emits the byte form (SCAS8, i.e. SCASB); wider forms are not exposed here.
        self.emit(SCAS8, &NOREG, &NOREG, &NOREG, &NOREG);
    }
}
5546
/// `SETA` (SETA).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "above" (unsigned `>`), i.e. CF = 0 and ZF = 0.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetaEmitter<A> {
    fn seta(&mut self, op0: A);
}

// `seta r8`
impl<'a> SetaEmitter<GpbLo> for Assembler<'a> {
    fn seta(&mut self, op0: GpbLo) {
        self.emit(SETA8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `seta m8`
impl<'a> SetaEmitter<Mem> for Assembler<'a> {
    fn seta(&mut self, op0: Mem) {
        self.emit(SETA8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5578
/// `SETBE` (SETBE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "below or equal" (unsigned `<=`), i.e. CF = 1 or ZF = 1.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetbeEmitter<A> {
    fn setbe(&mut self, op0: A);
}

// `setbe r8`
impl<'a> SetbeEmitter<GpbLo> for Assembler<'a> {
    fn setbe(&mut self, op0: GpbLo) {
        self.emit(SETBE8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setbe m8`
impl<'a> SetbeEmitter<Mem> for Assembler<'a> {
    fn setbe(&mut self, op0: Mem) {
        self.emit(SETBE8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5610
/// `SETC` (SETC).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "carry", i.e. CF = 1 (unsigned below).
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetcEmitter<A> {
    fn setc(&mut self, op0: A);
}

// `setc r8`
impl<'a> SetcEmitter<GpbLo> for Assembler<'a> {
    fn setc(&mut self, op0: GpbLo) {
        self.emit(SETC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setc m8`
impl<'a> SetcEmitter<Mem> for Assembler<'a> {
    fn setc(&mut self, op0: Mem) {
        self.emit(SETC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5642
/// `SETG` (SETG).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "greater" (signed `>`), i.e. ZF = 0 and SF = OF.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetgEmitter<A> {
    fn setg(&mut self, op0: A);
}

// `setg r8`
impl<'a> SetgEmitter<GpbLo> for Assembler<'a> {
    fn setg(&mut self, op0: GpbLo) {
        self.emit(SETG8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setg m8`
impl<'a> SetgEmitter<Mem> for Assembler<'a> {
    fn setg(&mut self, op0: Mem) {
        self.emit(SETG8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5674
/// `SETGE` (SETGE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "greater or equal" (signed `>=`), i.e. SF = OF.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetgeEmitter<A> {
    fn setge(&mut self, op0: A);
}

// `setge r8`
impl<'a> SetgeEmitter<GpbLo> for Assembler<'a> {
    fn setge(&mut self, op0: GpbLo) {
        self.emit(SETGE8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setge m8`
impl<'a> SetgeEmitter<Mem> for Assembler<'a> {
    fn setge(&mut self, op0: Mem) {
        self.emit(SETGE8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5706
/// `SETL` (SETL).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "less" (signed `<`), i.e. SF != OF.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetlEmitter<A> {
    fn setl(&mut self, op0: A);
}

// `setl r8`
impl<'a> SetlEmitter<GpbLo> for Assembler<'a> {
    fn setl(&mut self, op0: GpbLo) {
        self.emit(SETL8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setl m8`
impl<'a> SetlEmitter<Mem> for Assembler<'a> {
    fn setl(&mut self, op0: Mem) {
        self.emit(SETL8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5738
/// `SETLE` (SETLE).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "less or equal" (signed `<=`), i.e. ZF = 1 or SF != OF.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetleEmitter<A> {
    fn setle(&mut self, op0: A);
}

// `setle r8`
impl<'a> SetleEmitter<GpbLo> for Assembler<'a> {
    fn setle(&mut self, op0: GpbLo) {
        self.emit(SETLE8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setle m8`
impl<'a> SetleEmitter<Mem> for Assembler<'a> {
    fn setle(&mut self, op0: Mem) {
        self.emit(SETLE8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5770
/// `SETNC` (SETNC).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "not carry", i.e. CF = 0.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetncEmitter<A> {
    fn setnc(&mut self, op0: A);
}

// `setnc r8`
impl<'a> SetncEmitter<GpbLo> for Assembler<'a> {
    fn setnc(&mut self, op0: GpbLo) {
        self.emit(SETNC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setnc m8`
impl<'a> SetncEmitter<Mem> for Assembler<'a> {
    fn setnc(&mut self, op0: Mem) {
        self.emit(SETNC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5802
/// `SETNO` (SETNO).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "not overflow", i.e. OF = 0.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetnoEmitter<A> {
    fn setno(&mut self, op0: A);
}

// `setno r8`
impl<'a> SetnoEmitter<GpbLo> for Assembler<'a> {
    fn setno(&mut self, op0: GpbLo) {
        self.emit(SETNO8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setno m8`
impl<'a> SetnoEmitter<Mem> for Assembler<'a> {
    fn setno(&mut self, op0: Mem) {
        self.emit(SETNO8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5834
/// `SETNP` (SETNP).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "not parity" (parity odd), i.e. PF = 0.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetnpEmitter<A> {
    fn setnp(&mut self, op0: A);
}

// `setnp r8`
impl<'a> SetnpEmitter<GpbLo> for Assembler<'a> {
    fn setnp(&mut self, op0: GpbLo) {
        self.emit(SETNP8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setnp m8`
impl<'a> SetnpEmitter<Mem> for Assembler<'a> {
    fn setnp(&mut self, op0: Mem) {
        self.emit(SETNP8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5866
/// `SETNS` (SETNS).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "not sign" (non-negative result), i.e. SF = 0.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetnsEmitter<A> {
    fn setns(&mut self, op0: A);
}

// `setns r8`
impl<'a> SetnsEmitter<GpbLo> for Assembler<'a> {
    fn setns(&mut self, op0: GpbLo) {
        self.emit(SETNS8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setns m8`
impl<'a> SetnsEmitter<Mem> for Assembler<'a> {
    fn setns(&mut self, op0: Mem) {
        self.emit(SETNS8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5898
/// `SETNZ` (SETNZ).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "not zero" (not equal), i.e. ZF = 0.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetnzEmitter<A> {
    fn setnz(&mut self, op0: A);
}

// `setnz r8`
impl<'a> SetnzEmitter<GpbLo> for Assembler<'a> {
    fn setnz(&mut self, op0: GpbLo) {
        self.emit(SETNZ8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setnz m8`
impl<'a> SetnzEmitter<Mem> for Assembler<'a> {
    fn setnz(&mut self, op0: Mem) {
        self.emit(SETNZ8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5930
/// `SETO` (SETO).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "overflow", i.e. OF = 1.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetoEmitter<A> {
    fn seto(&mut self, op0: A);
}

// `seto r8`
impl<'a> SetoEmitter<GpbLo> for Assembler<'a> {
    fn seto(&mut self, op0: GpbLo) {
        self.emit(SETO8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `seto m8`
impl<'a> SetoEmitter<Mem> for Assembler<'a> {
    fn seto(&mut self, op0: Mem) {
        self.emit(SETO8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5962
/// `SETP` (SETP).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "parity" (parity even), i.e. PF = 1.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetpEmitter<A> {
    fn setp(&mut self, op0: A);
}

// `setp r8`
impl<'a> SetpEmitter<GpbLo> for Assembler<'a> {
    fn setp(&mut self, op0: GpbLo) {
        self.emit(SETP8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setp m8`
impl<'a> SetpEmitter<Mem> for Assembler<'a> {
    fn setp(&mut self, op0: Mem) {
        self.emit(SETP8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
5994
/// `SETS` (SETS).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "sign" (negative result), i.e. SF = 1.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetsEmitter<A> {
    fn sets(&mut self, op0: A);
}

// `sets r8`
impl<'a> SetsEmitter<GpbLo> for Assembler<'a> {
    fn sets(&mut self, op0: GpbLo) {
        self.emit(SETS8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `sets m8`
impl<'a> SetsEmitter<Mem> for Assembler<'a> {
    fn sets(&mut self, op0: Mem) {
        self.emit(SETS8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
6026
/// `SETZ` (SETZ).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// Condition tested: "zero" (equal), i.e. ZF = 1.
/// Both variants write a single byte (0 or 1).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetzEmitter<A> {
    fn setz(&mut self, op0: A);
}

// `setz r8`
impl<'a> SetzEmitter<GpbLo> for Assembler<'a> {
    fn setz(&mut self, op0: GpbLo) {
        self.emit(SETZ8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setz m8`
impl<'a> SetzEmitter<Mem> for Assembler<'a> {
    fn setz(&mut self, op0: Mem) {
        self.emit(SETZ8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
6058
/// `SETCC` (SETcc).
/// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
///
/// NOTE(review): unlike the fixed-condition `SETA`/`SETZ`/... traits above, this
/// emits a generic SETCC opcode; presumably the condition code is carried by the
/// `SETCC8R`/`SETCC8M` opcode encoding or supplied elsewhere — confirm against
/// the opcode table. (The original "(SETO)" in this header looked like a
/// generation artifact.)
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | GpbLo    |
/// | 2 | Mem      |
/// +---+----------+
/// ```
pub trait SetccEmitter<A> {
    fn setcc(&mut self, op0: A);
}

// `setcc r8`
impl<'a> SetccEmitter<GpbLo> for Assembler<'a> {
    fn setcc(&mut self, op0: GpbLo) {
        self.emit(SETCC8R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}

// `setcc m8`
impl<'a> SetccEmitter<Mem> for Assembler<'a> {
    fn setcc(&mut self, op0: Mem) {
        self.emit(SETCC8M, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
6090
/// `SGDT`.
/// Stores the content of the global descriptor table register (GDTR) in the
/// destination memory operand.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SGDT.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+----------+
/// | # | Operands |
/// +---+----------+
/// | 1 | Mem      |
/// +---+----------+
/// ```
pub trait SgdtEmitter<A> {
    fn sgdt(&mut self, op0: A);
}

impl<'a> SgdtEmitter<Mem> for Assembler<'a> {
    fn sgdt(&mut self, op0: Mem) {
        self.emit(SGDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
    }
}
6111
/// `SHL` (SHL).
/// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | #  | Operands     |
/// +----+--------------+
/// | 1  | GpbLo, GpbLo |
/// | 2  | GpbLo, Imm   |
/// | 3  | Gpd, GpbLo   |
/// | 4  | Gpd, Imm     |
/// | 5  | Gpq, GpbLo   |
/// | 6  | Gpq, Imm     |
/// | 7  | Gpw, GpbLo   |
/// | 8  | Gpw, Imm     |
/// | 9  | Mem, GpbLo   |
/// | 10 | Mem, Imm     |
/// +----+--------------+
/// ```
pub trait ShlEmitter<A, B> {
    fn shl(&mut self, op0: A, op1: B);
}

// Immediate-count variants, per destination width (8/16/32/64).
impl<'a> ShlEmitter<GpbLo, Imm> for Assembler<'a> {
    fn shl(&mut self, op0: GpbLo, op1: Imm) {
        self.emit(SHL8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// NOTE(review): the `Mem, Imm` variant always emits the 8-bit opcode (SHL8MI);
// wider memory-immediate forms are not exposed through this trait.
impl<'a> ShlEmitter<Mem, Imm> for Assembler<'a> {
    fn shl(&mut self, op0: Mem, op1: Imm) {
        self.emit(SHL8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Gpw, Imm> for Assembler<'a> {
    fn shl(&mut self, op0: Gpw, op1: Imm) {
        self.emit(SHL16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Gpd, Imm> for Assembler<'a> {
    fn shl(&mut self, op0: Gpd, op1: Imm) {
        self.emit(SHL32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Gpq, Imm> for Assembler<'a> {
    fn shl(&mut self, op0: Gpq, op1: Imm) {
        self.emit(SHL64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// Register-count variants. The ISA encodes variable shift counts in CL;
// presumably the emitter expects CL here — confirm against the opcode table.
impl<'a> ShlEmitter<GpbLo, GpbLo> for Assembler<'a> {
    fn shl(&mut self, op0: GpbLo, op1: GpbLo) {
        self.emit(SHL8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Mem, GpbLo> for Assembler<'a> {
    fn shl(&mut self, op0: Mem, op1: GpbLo) {
        self.emit(SHL8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Gpw, GpbLo> for Assembler<'a> {
    fn shl(&mut self, op0: Gpw, op1: GpbLo) {
        self.emit(SHL16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Gpd, GpbLo> for Assembler<'a> {
    fn shl(&mut self, op0: Gpd, op1: GpbLo) {
        self.emit(SHL32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShlEmitter<Gpq, GpbLo> for Assembler<'a> {
    fn shl(&mut self, op0: Gpq, op1: GpbLo) {
        self.emit(SHL64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}
6199
/// `SHLD` (SHLD).
/// The SHLD instruction is used for multi-precision shifts of 64 bits or more.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHLD.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+-----------------+
/// | #  | Operands        |
/// +----+-----------------+
/// | 1  | Gpd, Gpd, GpbLo |
/// | 2  | Gpd, Gpd, Imm   |
/// | 3  | Gpq, Gpq, GpbLo |
/// | 4  | Gpq, Gpq, Imm   |
/// | 5  | Gpw, Gpw, GpbLo |
/// | 6  | Gpw, Gpw, Imm   |
/// | 7  | Mem, Gpd, GpbLo |
/// | 8  | Mem, Gpd, Imm   |
/// | 9  | Mem, Gpq, GpbLo |
/// | 10 | Mem, Gpq, Imm   |
/// | 11 | Mem, Gpw, GpbLo |
/// | 12 | Mem, Gpw, Imm   |
/// +----+-----------------+
/// ```
pub trait ShldEmitter<A, B, C> {
    fn shld(&mut self, op0: A, op1: B, op2: C);
}

// Immediate-count variants, per destination width (16/32/64).
impl<'a> ShldEmitter<Gpw, Gpw, Imm> for Assembler<'a> {
    fn shld(&mut self, op0: Gpw, op1: Gpw, op2: Imm) {
        self.emit(SHLD16RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Mem, Gpw, Imm> for Assembler<'a> {
    fn shld(&mut self, op0: Mem, op1: Gpw, op2: Imm) {
        self.emit(SHLD16MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Gpd, Gpd, Imm> for Assembler<'a> {
    fn shld(&mut self, op0: Gpd, op1: Gpd, op2: Imm) {
        self.emit(SHLD32RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Mem, Gpd, Imm> for Assembler<'a> {
    fn shld(&mut self, op0: Mem, op1: Gpd, op2: Imm) {
        self.emit(SHLD32MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Gpq, Gpq, Imm> for Assembler<'a> {
    fn shld(&mut self, op0: Gpq, op1: Gpq, op2: Imm) {
        self.emit(SHLD64RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Mem, Gpq, Imm> for Assembler<'a> {
    fn shld(&mut self, op0: Mem, op1: Gpq, op2: Imm) {
        self.emit(SHLD64MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

// Register-count variants. The ISA encodes the variable count in CL;
// presumably the emitter expects CL here — confirm against the opcode table.
impl<'a> ShldEmitter<Gpw, Gpw, GpbLo> for Assembler<'a> {
    fn shld(&mut self, op0: Gpw, op1: Gpw, op2: GpbLo) {
        self.emit(SHLD16RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Mem, Gpw, GpbLo> for Assembler<'a> {
    fn shld(&mut self, op0: Mem, op1: Gpw, op2: GpbLo) {
        self.emit(SHLD16MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Gpd, Gpd, GpbLo> for Assembler<'a> {
    fn shld(&mut self, op0: Gpd, op1: Gpd, op2: GpbLo) {
        self.emit(SHLD32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Mem, Gpd, GpbLo> for Assembler<'a> {
    fn shld(&mut self, op0: Mem, op1: Gpd, op2: GpbLo) {
        self.emit(SHLD32MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Gpq, Gpq, GpbLo> for Assembler<'a> {
    fn shld(&mut self, op0: Gpq, op1: Gpq, op2: GpbLo) {
        self.emit(SHLD64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}

impl<'a> ShldEmitter<Mem, Gpq, GpbLo> for Assembler<'a> {
    fn shld(&mut self, op0: Mem, op1: Gpq, op2: GpbLo) {
        self.emit(SHLD64MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
    }
}
6301
/// `SHR` (SHR).
/// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | #  | Operands     |
/// +----+--------------+
/// | 1  | GpbLo, GpbLo |
/// | 2  | GpbLo, Imm   |
/// | 3  | Gpd, GpbLo   |
/// | 4  | Gpd, Imm     |
/// | 5  | Gpq, GpbLo   |
/// | 6  | Gpq, Imm     |
/// | 7  | Gpw, GpbLo   |
/// | 8  | Gpw, Imm     |
/// | 9  | Mem, GpbLo   |
/// | 10 | Mem, Imm     |
/// +----+--------------+
/// ```
pub trait ShrEmitter<A, B> {
    fn shr(&mut self, op0: A, op1: B);
}

// Immediate-count variants, per destination width (8/16/32/64).
impl<'a> ShrEmitter<GpbLo, Imm> for Assembler<'a> {
    fn shr(&mut self, op0: GpbLo, op1: Imm) {
        self.emit(SHR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// NOTE(review): the `Mem, Imm` variant always emits the 8-bit opcode (SHR8MI);
// wider memory-immediate forms are not exposed through this trait.
impl<'a> ShrEmitter<Mem, Imm> for Assembler<'a> {
    fn shr(&mut self, op0: Mem, op1: Imm) {
        self.emit(SHR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShrEmitter<Gpw, Imm> for Assembler<'a> {
    fn shr(&mut self, op0: Gpw, op1: Imm) {
        self.emit(SHR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShrEmitter<Gpd, Imm> for Assembler<'a> {
    fn shr(&mut self, op0: Gpd, op1: Imm) {
        self.emit(SHR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShrEmitter<Gpq, Imm> for Assembler<'a> {
    fn shr(&mut self, op0: Gpq, op1: Imm) {
        self.emit(SHR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

// Register-count variants. The ISA encodes variable shift counts in CL;
// presumably the emitter expects CL here — confirm against the opcode table.
impl<'a> ShrEmitter<GpbLo, GpbLo> for Assembler<'a> {
    fn shr(&mut self, op0: GpbLo, op1: GpbLo) {
        self.emit(SHR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShrEmitter<Mem, GpbLo> for Assembler<'a> {
    fn shr(&mut self, op0: Mem, op1: GpbLo) {
        self.emit(SHR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> ShrEmitter<Gpw, GpbLo> for Assembler<'a> {
    fn shr(&mut self, op0: Gpw, op1: GpbLo) {
        self.emit(SHR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}
6377
6378impl<'a> ShrEmitter<Gpd, GpbLo> for Assembler<'a> {
6379    fn shr(&mut self, op0: Gpd, op1: GpbLo) {
6380        self.emit(SHR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6381    }
6382}
6383
6384impl<'a> ShrEmitter<Gpq, GpbLo> for Assembler<'a> {
6385    fn shr(&mut self, op0: Gpq, op1: GpbLo) {
6386        self.emit(SHR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6387    }
6388}
6389
6390/// `SHRD` (SHRD). 
6391/// The SHRD instruction is useful for multi-precision shifts of 64 bits or more.
6392///
6393///
6394/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHRD.html).
6395///
6396/// Supported operand variants:
6397///
6398/// ```text
6399/// +----+-----------------+
6400/// | #  | Operands        |
6401/// +----+-----------------+
6402/// | 1  | Gpd, Gpd, GpbLo |
6403/// | 2  | Gpd, Gpd, Imm   |
6404/// | 3  | Gpq, Gpq, GpbLo |
6405/// | 4  | Gpq, Gpq, Imm   |
6406/// | 5  | Gpw, Gpw, GpbLo |
6407/// | 6  | Gpw, Gpw, Imm   |
6408/// | 7  | Mem, Gpd, GpbLo |
6409/// | 8  | Mem, Gpd, Imm   |
6410/// | 9  | Mem, Gpq, GpbLo |
6411/// | 10 | Mem, Gpq, Imm   |
6412/// | 11 | Mem, Gpw, GpbLo |
6413/// | 12 | Mem, Gpw, Imm   |
6414/// +----+-----------------+
6415/// ```
6416pub trait ShrdEmitter<A, B, C> {
6417    fn shrd(&mut self, op0: A, op1: B, op2: C);
6418}
6419
6420impl<'a> ShrdEmitter<Gpw, Gpw, Imm> for Assembler<'a> {
6421    fn shrd(&mut self, op0: Gpw, op1: Gpw, op2: Imm) {
6422        self.emit(SHRD16RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6423    }
6424}
6425
6426impl<'a> ShrdEmitter<Mem, Gpw, Imm> for Assembler<'a> {
6427    fn shrd(&mut self, op0: Mem, op1: Gpw, op2: Imm) {
6428        self.emit(SHRD16MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6429    }
6430}
6431
6432impl<'a> ShrdEmitter<Gpd, Gpd, Imm> for Assembler<'a> {
6433    fn shrd(&mut self, op0: Gpd, op1: Gpd, op2: Imm) {
6434        self.emit(SHRD32RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6435    }
6436}
6437
6438impl<'a> ShrdEmitter<Mem, Gpd, Imm> for Assembler<'a> {
6439    fn shrd(&mut self, op0: Mem, op1: Gpd, op2: Imm) {
6440        self.emit(SHRD32MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6441    }
6442}
6443
6444impl<'a> ShrdEmitter<Gpq, Gpq, Imm> for Assembler<'a> {
6445    fn shrd(&mut self, op0: Gpq, op1: Gpq, op2: Imm) {
6446        self.emit(SHRD64RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6447    }
6448}
6449
6450impl<'a> ShrdEmitter<Mem, Gpq, Imm> for Assembler<'a> {
6451    fn shrd(&mut self, op0: Mem, op1: Gpq, op2: Imm) {
6452        self.emit(SHRD64MRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6453    }
6454}
6455
6456impl<'a> ShrdEmitter<Gpw, Gpw, GpbLo> for Assembler<'a> {
6457    fn shrd(&mut self, op0: Gpw, op1: Gpw, op2: GpbLo) {
6458        self.emit(SHRD16RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6459    }
6460}
6461
6462impl<'a> ShrdEmitter<Mem, Gpw, GpbLo> for Assembler<'a> {
6463    fn shrd(&mut self, op0: Mem, op1: Gpw, op2: GpbLo) {
6464        self.emit(SHRD16MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6465    }
6466}
6467
6468impl<'a> ShrdEmitter<Gpd, Gpd, GpbLo> for Assembler<'a> {
6469    fn shrd(&mut self, op0: Gpd, op1: Gpd, op2: GpbLo) {
6470        self.emit(SHRD32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6471    }
6472}
6473
6474impl<'a> ShrdEmitter<Mem, Gpd, GpbLo> for Assembler<'a> {
6475    fn shrd(&mut self, op0: Mem, op1: Gpd, op2: GpbLo) {
6476        self.emit(SHRD32MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6477    }
6478}
6479
6480impl<'a> ShrdEmitter<Gpq, Gpq, GpbLo> for Assembler<'a> {
6481    fn shrd(&mut self, op0: Gpq, op1: Gpq, op2: GpbLo) {
6482        self.emit(SHRD64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6483    }
6484}
6485
6486impl<'a> ShrdEmitter<Mem, Gpq, GpbLo> for Assembler<'a> {
6487    fn shrd(&mut self, op0: Mem, op1: Gpq, op2: GpbLo) {
6488        self.emit(SHRD64MRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6489    }
6490}
6491
6492/// `SIDT`.
6493///
6494/// Supported operand variants:
6495///
6496/// ```text
6497/// +---+----------+
6498/// | # | Operands |
6499/// +---+----------+
6500/// | 1 | Mem      |
6501/// +---+----------+
6502/// ```
6503pub trait SidtEmitter<A> {
6504    fn sidt(&mut self, op0: A);
6505}
6506
6507impl<'a> SidtEmitter<Mem> for Assembler<'a> {
6508    fn sidt(&mut self, op0: Mem) {
6509        self.emit(SIDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6510    }
6511}
6512
6513/// `SLDT` (SLDT). 
6514/// Stores the segment selector from the local descriptor table register (LDTR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the segment descriptor (located in the GDT) for the current LDT. This instruction can only be executed in protected mode.
6515///
6516///
6517/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SLDT.html).
6518///
6519/// Supported operand variants:
6520///
6521/// ```text
6522/// +---+----------+
6523/// | # | Operands |
6524/// +---+----------+
6525/// | 1 | Gpd      |
6526/// | 2 | Mem      |
6527/// +---+----------+
6528/// ```
6529pub trait SldtEmitter<A> {
6530    fn sldt(&mut self, op0: A);
6531}
6532
6533impl<'a> SldtEmitter<Gpd> for Assembler<'a> {
6534    fn sldt(&mut self, op0: Gpd) {
6535        self.emit(SLDTR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6536    }
6537}
6538
6539impl<'a> SldtEmitter<Mem> for Assembler<'a> {
6540    fn sldt(&mut self, op0: Mem) {
6541        self.emit(SLDTM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6542    }
6543}
6544
6545/// `SMSW` (SMSW). 
6546/// Stores the machine status word (bits 0 through 15 of control register CR0) into the destination operand. The destination operand can be a general-purpose register or a memory location.
6547///
6548///
6549/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SMSW.html).
6550///
6551/// Supported operand variants:
6552///
6553/// ```text
6554/// +---+----------+
6555/// | # | Operands |
6556/// +---+----------+
6557/// | 1 | Gpd      |
6558/// | 2 | Gpq      |
6559/// | 3 | Gpw      |
6560/// | 4 | Mem      |
6561/// +---+----------+
6562/// ```
6563pub trait SmswEmitter<A> {
6564    fn smsw(&mut self, op0: A);
6565}
6566
6567impl<'a> SmswEmitter<Mem> for Assembler<'a> {
6568    fn smsw(&mut self, op0: Mem) {
6569        self.emit(SMSWM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6570    }
6571}
6572
6573impl<'a> SmswEmitter<Gpw> for Assembler<'a> {
6574    fn smsw(&mut self, op0: Gpw) {
6575        self.emit(SMSW16R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6576    }
6577}
6578
6579impl<'a> SmswEmitter<Gpd> for Assembler<'a> {
6580    fn smsw(&mut self, op0: Gpd) {
6581        self.emit(SMSW32R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6582    }
6583}
6584
6585impl<'a> SmswEmitter<Gpq> for Assembler<'a> {
6586    fn smsw(&mut self, op0: Gpq) {
6587        self.emit(SMSW64R, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6588    }
6589}
6590
6591/// `STC` (STC). 
6592/// Sets the CF flag in the EFLAGS register. Operation is the same in all modes.
6593///
6594///
6595/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STC.html).
6596///
6597/// Supported operand variants:
6598///
6599/// ```text
6600/// +---+----------+
6601/// | # | Operands |
6602/// +---+----------+
6603/// | 1 | (none)   |
6604/// +---+----------+
6605/// ```
6606pub trait StcEmitter {
6607    fn stc(&mut self);
6608}
6609
6610impl<'a> StcEmitter for Assembler<'a> {
6611    fn stc(&mut self) {
6612        self.emit(STC, &NOREG, &NOREG, &NOREG, &NOREG);
6613    }
6614}
6615
6616/// `STD` (STD). 
6617/// Sets the DF flag in the EFLAGS register. When the DF flag is set to 1, string operations decrement the index registers (ESI and/or EDI). Operation is the same in all modes.
6618///
6619///
6620/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STD.html).
6621///
6622/// Supported operand variants:
6623///
6624/// ```text
6625/// +---+----------+
6626/// | # | Operands |
6627/// +---+----------+
6628/// | 1 | (none)   |
6629/// +---+----------+
6630/// ```
6631pub trait StdEmitter {
6632    fn std(&mut self);
6633}
6634
6635impl<'a> StdEmitter for Assembler<'a> {
6636    fn std(&mut self) {
6637        self.emit(STD, &NOREG, &NOREG, &NOREG, &NOREG);
6638    }
6639}
6640
6641/// `STI` (STI). 
6642/// In most cases, STI sets the interrupt flag (IF) in the EFLAGS register. This allows the processor to respond to maskable hardware interrupts.
6643///
6644///
6645/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STI.html).
6646///
6647/// Supported operand variants:
6648///
6649/// ```text
6650/// +---+----------+
6651/// | # | Operands |
6652/// +---+----------+
6653/// | 1 | (none)   |
6654/// +---+----------+
6655/// ```
6656pub trait StiEmitter {
6657    fn sti(&mut self);
6658}
6659
6660impl<'a> StiEmitter for Assembler<'a> {
6661    fn sti(&mut self) {
6662        self.emit(STI, &NOREG, &NOREG, &NOREG, &NOREG);
6663    }
6664}
6665
6666/// `STOS` (STOS). 
6667/// In non-64-bit and default 64-bit mode; stores a byte, word, or doubleword from the AL, AX, or EAX register (respectively) into the destination operand. The destination operand is a memory location, the address of which is read from either the ES:EDI or ES:DI register (depending on the address-size attribute of the instruction and the mode of operation). The ES segment cannot be overridden with a segment override prefix.
6668///
6669///
6670/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STOS%3ASTOSB%3ASTOSW%3ASTOSD%3ASTOSQ.html).
6671///
6672/// Supported operand variants:
6673///
6674/// ```text
6675/// +---+----------+
6676/// | # | Operands |
6677/// +---+----------+
6678/// | 1 | (none)   |
6679/// +---+----------+
6680/// ```
6681pub trait StosEmitter {
6682    fn stos(&mut self);
6683}
6684
6685impl<'a> StosEmitter for Assembler<'a> {
6686    fn stos(&mut self) {
6687        self.emit(STOS8, &NOREG, &NOREG, &NOREG, &NOREG);
6688    }
6689}
6690
6691/// `STR` (STR). 
6692/// Stores the segment selector from the task register (TR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the task state segment (TSS) for the currently running task.
6693///
6694///
6695/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STR.html).
6696///
6697/// Supported operand variants:
6698///
6699/// ```text
6700/// +---+----------+
6701/// | # | Operands |
6702/// +---+----------+
6703/// | 1 | Gpd      |
6704/// | 2 | Mem      |
6705/// +---+----------+
6706/// ```
6707pub trait StrEmitter<A> {
6708    fn str(&mut self, op0: A);
6709}
6710
6711impl<'a> StrEmitter<Gpd> for Assembler<'a> {
6712    fn str(&mut self, op0: Gpd) {
6713        self.emit(STRR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6714    }
6715}
6716
6717impl<'a> StrEmitter<Mem> for Assembler<'a> {
6718    fn str(&mut self, op0: Mem) {
6719        self.emit(STRM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6720    }
6721}
6722
6723/// `STTILECFG`.
6724///
6725/// Supported operand variants:
6726///
6727/// ```text
6728/// +---+----------+
6729/// | # | Operands |
6730/// +---+----------+
6731/// | 1 | Mem      |
6732/// +---+----------+
6733/// ```
6734pub trait SttilecfgEmitter<A> {
6735    fn sttilecfg(&mut self, op0: A);
6736}
6737
6738impl<'a> SttilecfgEmitter<Mem> for Assembler<'a> {
6739    fn sttilecfg(&mut self, op0: Mem) {
6740        self.emit(STTILECFGM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
6741    }
6742}
6743
6744/// `SUB` (SUB). 
6745/// Subtracts the second operand (source operand) from the first operand (destination operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, register, or memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
6746///
6747///
6748/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SUB.html).
6749///
6750/// Supported operand variants:
6751///
6752/// ```text
6753/// +----+--------------+
6754/// | #  | Operands     |
6755/// +----+--------------+
6756/// | 1  | GpbLo, GpbLo |
6757/// | 2  | GpbLo, Imm   |
6758/// | 3  | GpbLo, Mem   |
6759/// | 4  | Gpd, Gpd     |
6760/// | 5  | Gpd, Imm     |
6761/// | 6  | Gpd, Mem     |
6762/// | 7  | Gpq, Gpq     |
6763/// | 8  | Gpq, Imm     |
6764/// | 9  | Gpq, Mem     |
6765/// | 10 | Gpw, Gpw     |
6766/// | 11 | Gpw, Imm     |
6767/// | 12 | Gpw, Mem     |
6768/// | 13 | Mem, GpbLo   |
6769/// | 14 | Mem, Gpd     |
6770/// | 15 | Mem, Gpq     |
6771/// | 16 | Mem, Gpw     |
6772/// | 17 | Mem, Imm     |
6773/// +----+--------------+
6774/// ```
6775pub trait SubEmitter<A, B> {
6776    fn sub(&mut self, op0: A, op1: B);
6777}
6778
6779impl<'a> SubEmitter<GpbLo, GpbLo> for Assembler<'a> {
6780    fn sub(&mut self, op0: GpbLo, op1: GpbLo) {
6781        self.emit(SUB8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6782    }
6783}
6784
6785impl<'a> SubEmitter<Mem, GpbLo> for Assembler<'a> {
6786    fn sub(&mut self, op0: Mem, op1: GpbLo) {
6787        self.emit(SUB8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6788    }
6789}
6790
6791impl<'a> SubEmitter<Gpw, Gpw> for Assembler<'a> {
6792    fn sub(&mut self, op0: Gpw, op1: Gpw) {
6793        self.emit(SUB16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6794    }
6795}
6796
6797impl<'a> SubEmitter<Mem, Gpw> for Assembler<'a> {
6798    fn sub(&mut self, op0: Mem, op1: Gpw) {
6799        self.emit(SUB16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6800    }
6801}
6802
6803impl<'a> SubEmitter<Gpd, Gpd> for Assembler<'a> {
6804    fn sub(&mut self, op0: Gpd, op1: Gpd) {
6805        self.emit(SUB32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6806    }
6807}
6808
6809impl<'a> SubEmitter<Mem, Gpd> for Assembler<'a> {
6810    fn sub(&mut self, op0: Mem, op1: Gpd) {
6811        self.emit(SUB32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6812    }
6813}
6814
6815impl<'a> SubEmitter<Gpq, Gpq> for Assembler<'a> {
6816    fn sub(&mut self, op0: Gpq, op1: Gpq) {
6817        self.emit(SUB64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6818    }
6819}
6820
6821impl<'a> SubEmitter<Mem, Gpq> for Assembler<'a> {
6822    fn sub(&mut self, op0: Mem, op1: Gpq) {
6823        self.emit(SUB64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6824    }
6825}
6826
6827impl<'a> SubEmitter<GpbLo, Mem> for Assembler<'a> {
6828    fn sub(&mut self, op0: GpbLo, op1: Mem) {
6829        self.emit(SUB8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6830    }
6831}
6832
6833impl<'a> SubEmitter<Gpw, Mem> for Assembler<'a> {
6834    fn sub(&mut self, op0: Gpw, op1: Mem) {
6835        self.emit(SUB16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6836    }
6837}
6838
6839impl<'a> SubEmitter<Gpd, Mem> for Assembler<'a> {
6840    fn sub(&mut self, op0: Gpd, op1: Mem) {
6841        self.emit(SUB32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6842    }
6843}
6844
6845impl<'a> SubEmitter<Gpq, Mem> for Assembler<'a> {
6846    fn sub(&mut self, op0: Gpq, op1: Mem) {
6847        self.emit(SUB64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6848    }
6849}
6850
6851impl<'a> SubEmitter<GpbLo, Imm> for Assembler<'a> {
6852    fn sub(&mut self, op0: GpbLo, op1: Imm) {
6853        self.emit(SUB8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6854    }
6855}
6856
6857impl<'a> SubEmitter<Gpw, Imm> for Assembler<'a> {
6858    fn sub(&mut self, op0: Gpw, op1: Imm) {
6859        self.emit(SUB16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6860    }
6861}
6862
6863impl<'a> SubEmitter<Gpd, Imm> for Assembler<'a> {
6864    fn sub(&mut self, op0: Gpd, op1: Imm) {
6865        self.emit(SUB32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6866    }
6867}
6868
6869impl<'a> SubEmitter<Gpq, Imm> for Assembler<'a> {
6870    fn sub(&mut self, op0: Gpq, op1: Imm) {
6871        self.emit(SUB64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6872    }
6873}
6874
6875impl<'a> SubEmitter<Mem, Imm> for Assembler<'a> {
6876    fn sub(&mut self, op0: Mem, op1: Imm) {
6877        self.emit(SUB8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
6878    }
6879}
6880
6881/// `SWAPGS` (SWAPGS). 
6882/// SWAPGS exchanges the current GS base register value with the value contained in MSR address C0000102H (IA32_KERNEL_GS_BASE). The SWAPGS instruction is a privileged instruction intended for use by system software.
6883///
6884///
6885/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SWAPGS.html).
6886///
6887/// Supported operand variants:
6888///
6889/// ```text
6890/// +---+----------+
6891/// | # | Operands |
6892/// +---+----------+
6893/// | 1 | (none)   |
6894/// +---+----------+
6895/// ```
6896pub trait SwapgsEmitter {
6897    fn swapgs(&mut self);
6898}
6899
6900impl<'a> SwapgsEmitter for Assembler<'a> {
6901    fn swapgs(&mut self) {
6902        self.emit(SWAPGS, &NOREG, &NOREG, &NOREG, &NOREG);
6903    }
6904}
6905
6906/// `SYSCALL` (SYSCALL). 
6907/// SYSCALL invokes an OS system-call handler at privilege level 0. It does so by loading RIP from the IA32_LSTAR MSR (after saving the address of the instruction following SYSCALL into RCX). (The WRMSR instruction ensures that the IA32_LSTAR MSR always contain a canonical address.)
6908///
6909///
6910/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSCALL.html).
6911///
6912/// Supported operand variants:
6913///
6914/// ```text
6915/// +---+----------+
6916/// | # | Operands |
6917/// +---+----------+
6918/// | 1 | (none)   |
6919/// +---+----------+
6920/// ```
6921pub trait SyscallEmitter {
6922    fn syscall(&mut self);
6923}
6924
6925impl<'a> SyscallEmitter for Assembler<'a> {
6926    fn syscall(&mut self) {
6927        self.emit(SYSCALL, &NOREG, &NOREG, &NOREG, &NOREG);
6928    }
6929}
6930
6931/// `SYSRET` (SYSRET). 
6932/// SYSRET is a companion instruction to the SYSCALL instruction. It returns from an OS system-call handler to user code at privilege level 3. It does so by loading RIP from RCX and loading RFLAGS from R11.1 With a 64-bit operand size, SYSRET remains in 64-bit mode; otherwise, it enters compatibility mode and only the low 32 bits of the registers are loaded.
6933///
6934///
6935/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSRET.html).
6936///
6937/// Supported operand variants:
6938///
6939/// ```text
6940/// +---+----------+
6941/// | # | Operands |
6942/// +---+----------+
6943/// | 1 | (none)   |
6944/// +---+----------+
6945/// ```
6946pub trait SysretEmitter {
6947    fn sysret(&mut self);
6948}
6949
6950impl<'a> SysretEmitter for Assembler<'a> {
6951    fn sysret(&mut self) {
6952        self.emit(SYSRET, &NOREG, &NOREG, &NOREG, &NOREG);
6953    }
6954}
6955
6956/// `TCMMIMFP16PS`.
6957///
6958/// Supported operand variants:
6959///
6960/// ```text
6961/// +---+---------------+
6962/// | # | Operands      |
6963/// +---+---------------+
6964/// | 1 | Tmm, Tmm, Tmm |
6965/// +---+---------------+
6966/// ```
6967pub trait Tcmmimfp16psEmitter<A, B, C> {
6968    fn tcmmimfp16ps(&mut self, op0: A, op1: B, op2: C);
6969}
6970
6971impl<'a> Tcmmimfp16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
6972    fn tcmmimfp16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
6973        self.emit(TCMMIMFP16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6974    }
6975}
6976
6977/// `TCMMRLFP16PS`.
6978///
6979/// Supported operand variants:
6980///
6981/// ```text
6982/// +---+---------------+
6983/// | # | Operands      |
6984/// +---+---------------+
6985/// | 1 | Tmm, Tmm, Tmm |
6986/// +---+---------------+
6987/// ```
6988pub trait Tcmmrlfp16psEmitter<A, B, C> {
6989    fn tcmmrlfp16ps(&mut self, op0: A, op1: B, op2: C);
6990}
6991
6992impl<'a> Tcmmrlfp16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
6993    fn tcmmrlfp16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
6994        self.emit(TCMMRLFP16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6995    }
6996}
6997
6998/// `TDPBF16PS`.
6999///
7000/// Supported operand variants:
7001///
7002/// ```text
7003/// +---+---------------+
7004/// | # | Operands      |
7005/// +---+---------------+
7006/// | 1 | Tmm, Tmm, Tmm |
7007/// +---+---------------+
7008/// ```
7009pub trait Tdpbf16psEmitter<A, B, C> {
7010    fn tdpbf16ps(&mut self, op0: A, op1: B, op2: C);
7011}
7012
7013impl<'a> Tdpbf16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
7014    fn tdpbf16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
7015        self.emit(TDPBF16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7016    }
7017}
7018
7019/// `TDPBSSD`.
7020///
7021/// Supported operand variants:
7022///
7023/// ```text
7024/// +---+---------------+
7025/// | # | Operands      |
7026/// +---+---------------+
7027/// | 1 | Tmm, Tmm, Tmm |
7028/// +---+---------------+
7029/// ```
7030pub trait TdpbssdEmitter<A, B, C> {
7031    fn tdpbssd(&mut self, op0: A, op1: B, op2: C);
7032}
7033
7034impl<'a> TdpbssdEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
7035    fn tdpbssd(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
7036        self.emit(TDPBSSDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7037    }
7038}
7039
7040/// `TDPBSUD`.
7041///
7042/// Supported operand variants:
7043///
7044/// ```text
7045/// +---+---------------+
7046/// | # | Operands      |
7047/// +---+---------------+
7048/// | 1 | Tmm, Tmm, Tmm |
7049/// +---+---------------+
7050/// ```
7051pub trait TdpbsudEmitter<A, B, C> {
7052    fn tdpbsud(&mut self, op0: A, op1: B, op2: C);
7053}
7054
7055impl<'a> TdpbsudEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
7056    fn tdpbsud(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
7057        self.emit(TDPBSUDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7058    }
7059}
7060
7061/// `TDPBUSD`.
7062///
7063/// Supported operand variants:
7064///
7065/// ```text
7066/// +---+---------------+
7067/// | # | Operands      |
7068/// +---+---------------+
7069/// | 1 | Tmm, Tmm, Tmm |
7070/// +---+---------------+
7071/// ```
7072pub trait TdpbusdEmitter<A, B, C> {
7073    fn tdpbusd(&mut self, op0: A, op1: B, op2: C);
7074}
7075
7076impl<'a> TdpbusdEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
7077    fn tdpbusd(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
7078        self.emit(TDPBUSDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7079    }
7080}
7081
7082/// `TDPBUUD`.
7083///
7084/// Supported operand variants:
7085///
7086/// ```text
7087/// +---+---------------+
7088/// | # | Operands      |
7089/// +---+---------------+
7090/// | 1 | Tmm, Tmm, Tmm |
7091/// +---+---------------+
7092/// ```
7093pub trait TdpbuudEmitter<A, B, C> {
7094    fn tdpbuud(&mut self, op0: A, op1: B, op2: C);
7095}
7096
7097impl<'a> TdpbuudEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
7098    fn tdpbuud(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
7099        self.emit(TDPBUUDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7100    }
7101}
7102
7103/// `TDPFP16PS`.
7104///
7105/// Supported operand variants:
7106///
7107/// ```text
7108/// +---+---------------+
7109/// | # | Operands      |
7110/// +---+---------------+
7111/// | 1 | Tmm, Tmm, Tmm |
7112/// +---+---------------+
7113/// ```
7114pub trait Tdpfp16psEmitter<A, B, C> {
7115    fn tdpfp16ps(&mut self, op0: A, op1: B, op2: C);
7116}
7117
7118impl<'a> Tdpfp16psEmitter<Tmm, Tmm, Tmm> for Assembler<'a> {
7119    fn tdpfp16ps(&mut self, op0: Tmm, op1: Tmm, op2: Tmm) {
7120        self.emit(TDPFP16PSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7121    }
7122}
7123
7124/// `TEST` (TEST). 
7125/// Computes the bit-wise logical AND of first operand (source 1 operand) and the second operand (source 2 operand) and sets the SF, ZF, and PF status flags according to the result. The result is then discarded.
7126///
7127///
7128/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TEST.html).
7129///
7130/// Supported operand variants:
7131///
7132/// ```text
7133/// +----+--------------+
7134/// | #  | Operands     |
7135/// +----+--------------+
7136/// | 1  | GpbLo, GpbLo |
7137/// | 2  | GpbLo, Imm   |
7138/// | 3  | Gpd, Gpd     |
7139/// | 4  | Gpd, Imm     |
7140/// | 5  | Gpq, Gpq     |
7141/// | 6  | Gpq, Imm     |
7142/// | 7  | Gpw, Gpw     |
7143/// | 8  | Gpw, Imm     |
7144/// | 9  | Mem, GpbLo   |
7145/// | 10 | Mem, Gpd     |
7146/// | 11 | Mem, Gpq     |
7147/// | 12 | Mem, Gpw     |
7148/// | 13 | Mem, Imm     |
7149/// +----+--------------+
7150/// ```
7151pub trait TestEmitter<A, B> {
7152    fn test(&mut self, op0: A, op1: B);
7153}
7154
7155impl<'a> TestEmitter<GpbLo, GpbLo> for Assembler<'a> {
7156    fn test(&mut self, op0: GpbLo, op1: GpbLo) {
7157        self.emit(TEST8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7158    }
7159}
7160
7161impl<'a> TestEmitter<Mem, GpbLo> for Assembler<'a> {
7162    fn test(&mut self, op0: Mem, op1: GpbLo) {
7163        self.emit(TEST8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7164    }
7165}
7166
7167impl<'a> TestEmitter<Gpw, Gpw> for Assembler<'a> {
7168    fn test(&mut self, op0: Gpw, op1: Gpw) {
7169        self.emit(TEST16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7170    }
7171}
7172
7173impl<'a> TestEmitter<Mem, Gpw> for Assembler<'a> {
7174    fn test(&mut self, op0: Mem, op1: Gpw) {
7175        self.emit(TEST16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7176    }
7177}
7178
7179impl<'a> TestEmitter<Gpd, Gpd> for Assembler<'a> {
7180    fn test(&mut self, op0: Gpd, op1: Gpd) {
7181        self.emit(TEST32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7182    }
7183}
7184
7185impl<'a> TestEmitter<Mem, Gpd> for Assembler<'a> {
7186    fn test(&mut self, op0: Mem, op1: Gpd) {
7187        self.emit(TEST32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7188    }
7189}
7190
7191impl<'a> TestEmitter<Gpq, Gpq> for Assembler<'a> {
7192    fn test(&mut self, op0: Gpq, op1: Gpq) {
7193        self.emit(TEST64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7194    }
7195}
7196
7197impl<'a> TestEmitter<Mem, Gpq> for Assembler<'a> {
7198    fn test(&mut self, op0: Mem, op1: Gpq) {
7199        self.emit(TEST64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7200    }
7201}
7202
7203impl<'a> TestEmitter<GpbLo, Imm> for Assembler<'a> {
7204    fn test(&mut self, op0: GpbLo, op1: Imm) {
7205        self.emit(TEST8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7206    }
7207}
7208
7209impl<'a> TestEmitter<Gpw, Imm> for Assembler<'a> {
7210    fn test(&mut self, op0: Gpw, op1: Imm) {
7211        self.emit(TEST16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7212    }
7213}
7214
7215impl<'a> TestEmitter<Gpd, Imm> for Assembler<'a> {
7216    fn test(&mut self, op0: Gpd, op1: Imm) {
7217        self.emit(TEST32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7218    }
7219}
7220
7221impl<'a> TestEmitter<Gpq, Imm> for Assembler<'a> {
7222    fn test(&mut self, op0: Gpq, op1: Imm) {
7223        self.emit(TEST64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7224    }
7225}
7226
7227impl<'a> TestEmitter<Mem, Imm> for Assembler<'a> {
7228    fn test(&mut self, op0: Mem, op1: Imm) {
7229        self.emit(TEST8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7230    }
7231}
7232
7233/// `TILELOADD`.
7234///
7235/// Supported operand variants:
7236///
7237/// ```text
7238/// +---+----------+
7239/// | # | Operands |
7240/// +---+----------+
7241/// | 1 | Tmm, Mem |
7242/// +---+----------+
7243/// ```
7244pub trait TileloaddEmitter<A, B> {
7245    fn tileloadd(&mut self, op0: A, op1: B);
7246}
7247
7248impl<'a> TileloaddEmitter<Tmm, Mem> for Assembler<'a> {
7249    fn tileloadd(&mut self, op0: Tmm, op1: Mem) {
7250        self.emit(TILELOADDRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7251    }
7252}
7253
7254/// `TILELOADDT1`.
7255///
7256/// Supported operand variants:
7257///
7258/// ```text
7259/// +---+----------+
7260/// | # | Operands |
7261/// +---+----------+
7262/// | 1 | Tmm, Mem |
7263/// +---+----------+
7264/// ```
7265pub trait Tileloaddt1Emitter<A, B> {
7266    fn tileloaddt1(&mut self, op0: A, op1: B);
7267}
7268
7269impl<'a> Tileloaddt1Emitter<Tmm, Mem> for Assembler<'a> {
7270    fn tileloaddt1(&mut self, op0: Tmm, op1: Mem) {
7271        self.emit(TILELOADDT1RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7272    }
7273}
7274
7275/// `TILERELEASE` (TILERELEASE). 
7276/// This instruction returns TILECFG and TILEDATA to the INIT state.
7277///
7278///
7279/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TILERELEASE.html).
7280///
7281/// Supported operand variants:
7282///
7283/// ```text
7284/// +---+----------+
7285/// | # | Operands |
7286/// +---+----------+
7287/// | 1 | (none)   |
7288/// +---+----------+
7289/// ```
7290pub trait TilereleaseEmitter {
7291    fn tilerelease(&mut self);
7292}
7293
7294impl<'a> TilereleaseEmitter for Assembler<'a> {
7295    fn tilerelease(&mut self) {
7296        self.emit(TILERELEASE, &NOREG, &NOREG, &NOREG, &NOREG);
7297    }
7298}
7299
7300/// `TILESTORED`.
7301///
7302/// Supported operand variants:
7303///
7304/// ```text
7305/// +---+----------+
7306/// | # | Operands |
7307/// +---+----------+
7308/// | 1 | Mem, Tmm |
7309/// +---+----------+
7310/// ```
7311pub trait TilestoredEmitter<A, B> {
7312    fn tilestored(&mut self, op0: A, op1: B);
7313}
7314
7315impl<'a> TilestoredEmitter<Mem, Tmm> for Assembler<'a> {
7316    fn tilestored(&mut self, op0: Mem, op1: Tmm) {
7317        self.emit(TILESTOREDMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7318    }
7319}
7320
7321/// `TILEZERO`.
7322///
7323/// Supported operand variants:
7324///
7325/// ```text
7326/// +---+----------+
7327/// | # | Operands |
7328/// +---+----------+
7329/// | 1 | Tmm      |
7330/// +---+----------+
7331/// ```
7332pub trait TilezeroEmitter<A> {
7333    fn tilezero(&mut self, op0: A);
7334}
7335
7336impl<'a> TilezeroEmitter<Tmm> for Assembler<'a> {
7337    fn tilezero(&mut self, op0: Tmm) {
7338        self.emit(TILEZEROR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
7339    }
7340}
7341
7342/// `UD0`.
7343///
7344/// Supported operand variants:
7345///
7346/// ```text
7347/// +---+----------+
7348/// | # | Operands |
7349/// +---+----------+
7350/// | 1 | Gpd, Gpd |
7351/// | 2 | Gpd, Mem |
7352/// | 3 | Gpq, Gpq |
7353/// | 4 | Gpq, Mem |
7354/// | 5 | Gpw, Gpw |
7355/// | 6 | Gpw, Mem |
7356/// +---+----------+
7357/// ```
7358pub trait Ud0Emitter<A, B> {
7359    fn ud0(&mut self, op0: A, op1: B);
7360}
7361
7362impl<'a> Ud0Emitter<Gpw, Gpw> for Assembler<'a> {
7363    fn ud0(&mut self, op0: Gpw, op1: Gpw) {
7364        self.emit(UD0_16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7365    }
7366}
7367
7368impl<'a> Ud0Emitter<Gpw, Mem> for Assembler<'a> {
7369    fn ud0(&mut self, op0: Gpw, op1: Mem) {
7370        self.emit(UD0_16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7371    }
7372}
7373
7374impl<'a> Ud0Emitter<Gpd, Gpd> for Assembler<'a> {
7375    fn ud0(&mut self, op0: Gpd, op1: Gpd) {
7376        self.emit(UD0_32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7377    }
7378}
7379
7380impl<'a> Ud0Emitter<Gpd, Mem> for Assembler<'a> {
7381    fn ud0(&mut self, op0: Gpd, op1: Mem) {
7382        self.emit(UD0_32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7383    }
7384}
7385
7386impl<'a> Ud0Emitter<Gpq, Gpq> for Assembler<'a> {
7387    fn ud0(&mut self, op0: Gpq, op1: Gpq) {
7388        self.emit(UD0_64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7389    }
7390}
7391
7392impl<'a> Ud0Emitter<Gpq, Mem> for Assembler<'a> {
7393    fn ud0(&mut self, op0: Gpq, op1: Mem) {
7394        self.emit(UD0_64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7395    }
7396}
7397
7398/// `UD1` (UD1). 
7399/// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
7400///
7401///
7402/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
7403///
7404/// Supported operand variants:
7405///
7406/// ```text
7407/// +---+----------+
7408/// | # | Operands |
7409/// +---+----------+
7410/// | 1 | Gpd, Gpd |
7411/// | 2 | Gpd, Mem |
7412/// | 3 | Gpq, Gpq |
7413/// | 4 | Gpq, Mem |
7414/// | 5 | Gpw, Gpw |
7415/// | 6 | Gpw, Mem |
7416/// +---+----------+
7417/// ```
7418pub trait Ud1Emitter<A, B> {
7419    fn ud1(&mut self, op0: A, op1: B);
7420}
7421
7422impl<'a> Ud1Emitter<Gpw, Gpw> for Assembler<'a> {
7423    fn ud1(&mut self, op0: Gpw, op1: Gpw) {
7424        self.emit(UD1_16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7425    }
7426}
7427
7428impl<'a> Ud1Emitter<Gpw, Mem> for Assembler<'a> {
7429    fn ud1(&mut self, op0: Gpw, op1: Mem) {
7430        self.emit(UD1_16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7431    }
7432}
7433
7434impl<'a> Ud1Emitter<Gpd, Gpd> for Assembler<'a> {
7435    fn ud1(&mut self, op0: Gpd, op1: Gpd) {
7436        self.emit(UD1_32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7437    }
7438}
7439
7440impl<'a> Ud1Emitter<Gpd, Mem> for Assembler<'a> {
7441    fn ud1(&mut self, op0: Gpd, op1: Mem) {
7442        self.emit(UD1_32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7443    }
7444}
7445
7446impl<'a> Ud1Emitter<Gpq, Gpq> for Assembler<'a> {
7447    fn ud1(&mut self, op0: Gpq, op1: Gpq) {
7448        self.emit(UD1_64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7449    }
7450}
7451
7452impl<'a> Ud1Emitter<Gpq, Mem> for Assembler<'a> {
7453    fn ud1(&mut self, op0: Gpq, op1: Mem) {
7454        self.emit(UD1_64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7455    }
7456}
7457
7458/// `UD2` (UD2). 
7459/// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
7460///
7461///
7462/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
7463///
7464/// Supported operand variants:
7465///
7466/// ```text
7467/// +---+----------+
7468/// | # | Operands |
7469/// +---+----------+
7470/// | 1 | (none)   |
7471/// +---+----------+
7472/// ```
7473pub trait Ud2Emitter {
7474    fn ud2(&mut self);
7475}
7476
7477impl<'a> Ud2Emitter for Assembler<'a> {
7478    fn ud2(&mut self) {
7479        self.emit(UD2, &NOREG, &NOREG, &NOREG, &NOREG);
7480    }
7481}
7482
7483/// `VADDPH`.
7484///
7485/// Supported operand variants:
7486///
7487/// ```text
7488/// +---+---------------+
7489/// | # | Operands      |
7490/// +---+---------------+
7491/// | 1 | Xmm, Xmm, Mem |
7492/// | 2 | Xmm, Xmm, Xmm |
7493/// | 3 | Ymm, Ymm, Mem |
7494/// | 4 | Ymm, Ymm, Ymm |
7495/// | 5 | Zmm, Zmm, Mem |
7496/// | 6 | Zmm, Zmm, Zmm |
7497/// +---+---------------+
7498/// ```
7499pub trait VaddphEmitter<A, B, C> {
7500    fn vaddph(&mut self, op0: A, op1: B, op2: C);
7501}
7502
7503impl<'a> VaddphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7504    fn vaddph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7505        self.emit(VADDPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7506    }
7507}
7508
7509impl<'a> VaddphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7510    fn vaddph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7511        self.emit(VADDPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7512    }
7513}
7514
7515impl<'a> VaddphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7516    fn vaddph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7517        self.emit(VADDPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7518    }
7519}
7520
7521impl<'a> VaddphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7522    fn vaddph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7523        self.emit(VADDPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7524    }
7525}
7526
7527impl<'a> VaddphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7528    fn vaddph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7529        self.emit(VADDPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7530    }
7531}
7532
7533impl<'a> VaddphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7534    fn vaddph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7535        self.emit(VADDPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7536    }
7537}
7538
7539/// `VADDPH_ER`.
7540///
7541/// Supported operand variants:
7542///
7543/// ```text
7544/// +---+---------------+
7545/// | # | Operands      |
7546/// +---+---------------+
7547/// | 1 | Zmm, Zmm, Zmm |
7548/// +---+---------------+
7549/// ```
7550pub trait VaddphErEmitter<A, B, C> {
7551    fn vaddph_er(&mut self, op0: A, op1: B, op2: C);
7552}
7553
7554impl<'a> VaddphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7555    fn vaddph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7556        self.emit(VADDPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7557    }
7558}
7559
7560/// `VADDPH_MASK`.
7561///
7562/// Supported operand variants:
7563///
7564/// ```text
7565/// +---+---------------+
7566/// | # | Operands      |
7567/// +---+---------------+
7568/// | 1 | Xmm, Xmm, Mem |
7569/// | 2 | Xmm, Xmm, Xmm |
7570/// | 3 | Ymm, Ymm, Mem |
7571/// | 4 | Ymm, Ymm, Ymm |
7572/// | 5 | Zmm, Zmm, Mem |
7573/// | 6 | Zmm, Zmm, Zmm |
7574/// +---+---------------+
7575/// ```
7576pub trait VaddphMaskEmitter<A, B, C> {
7577    fn vaddph_mask(&mut self, op0: A, op1: B, op2: C);
7578}
7579
7580impl<'a> VaddphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7581    fn vaddph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7582        self.emit(VADDPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7583    }
7584}
7585
7586impl<'a> VaddphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7587    fn vaddph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7588        self.emit(VADDPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7589    }
7590}
7591
7592impl<'a> VaddphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7593    fn vaddph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7594        self.emit(VADDPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7595    }
7596}
7597
7598impl<'a> VaddphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7599    fn vaddph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7600        self.emit(VADDPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7601    }
7602}
7603
7604impl<'a> VaddphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7605    fn vaddph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7606        self.emit(VADDPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7607    }
7608}
7609
7610impl<'a> VaddphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7611    fn vaddph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7612        self.emit(VADDPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7613    }
7614}
7615
7616/// `VADDPH_MASK_ER`.
7617///
7618/// Supported operand variants:
7619///
7620/// ```text
7621/// +---+---------------+
7622/// | # | Operands      |
7623/// +---+---------------+
7624/// | 1 | Zmm, Zmm, Zmm |
7625/// +---+---------------+
7626/// ```
7627pub trait VaddphMaskErEmitter<A, B, C> {
7628    fn vaddph_mask_er(&mut self, op0: A, op1: B, op2: C);
7629}
7630
7631impl<'a> VaddphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7632    fn vaddph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7633        self.emit(VADDPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7634    }
7635}
7636
7637/// `VADDPH_MASKZ`.
7638///
7639/// Supported operand variants:
7640///
7641/// ```text
7642/// +---+---------------+
7643/// | # | Operands      |
7644/// +---+---------------+
7645/// | 1 | Xmm, Xmm, Mem |
7646/// | 2 | Xmm, Xmm, Xmm |
7647/// | 3 | Ymm, Ymm, Mem |
7648/// | 4 | Ymm, Ymm, Ymm |
7649/// | 5 | Zmm, Zmm, Mem |
7650/// | 6 | Zmm, Zmm, Zmm |
7651/// +---+---------------+
7652/// ```
7653pub trait VaddphMaskzEmitter<A, B, C> {
7654    fn vaddph_maskz(&mut self, op0: A, op1: B, op2: C);
7655}
7656
7657impl<'a> VaddphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7658    fn vaddph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7659        self.emit(VADDPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7660    }
7661}
7662
7663impl<'a> VaddphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7664    fn vaddph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7665        self.emit(VADDPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7666    }
7667}
7668
7669impl<'a> VaddphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7670    fn vaddph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7671        self.emit(VADDPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7672    }
7673}
7674
7675impl<'a> VaddphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7676    fn vaddph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7677        self.emit(VADDPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7678    }
7679}
7680
7681impl<'a> VaddphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7682    fn vaddph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7683        self.emit(VADDPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7684    }
7685}
7686
7687impl<'a> VaddphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7688    fn vaddph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7689        self.emit(VADDPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7690    }
7691}
7692
7693/// `VADDPH_MASKZ_ER`.
7694///
7695/// Supported operand variants:
7696///
7697/// ```text
7698/// +---+---------------+
7699/// | # | Operands      |
7700/// +---+---------------+
7701/// | 1 | Zmm, Zmm, Zmm |
7702/// +---+---------------+
7703/// ```
7704pub trait VaddphMaskzErEmitter<A, B, C> {
7705    fn vaddph_maskz_er(&mut self, op0: A, op1: B, op2: C);
7706}
7707
7708impl<'a> VaddphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7709    fn vaddph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7710        self.emit(VADDPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7711    }
7712}
7713
7714/// `VADDSH`.
7715///
7716/// Supported operand variants:
7717///
7718/// ```text
7719/// +---+---------------+
7720/// | # | Operands      |
7721/// +---+---------------+
7722/// | 1 | Xmm, Xmm, Mem |
7723/// | 2 | Xmm, Xmm, Xmm |
7724/// +---+---------------+
7725/// ```
7726pub trait VaddshEmitter<A, B, C> {
7727    fn vaddsh(&mut self, op0: A, op1: B, op2: C);
7728}
7729
7730impl<'a> VaddshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7731    fn vaddsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7732        self.emit(VADDSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7733    }
7734}
7735
7736impl<'a> VaddshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7737    fn vaddsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7738        self.emit(VADDSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7739    }
7740}
7741
7742/// `VADDSH_ER`.
7743///
7744/// Supported operand variants:
7745///
7746/// ```text
7747/// +---+---------------+
7748/// | # | Operands      |
7749/// +---+---------------+
7750/// | 1 | Xmm, Xmm, Xmm |
7751/// +---+---------------+
7752/// ```
7753pub trait VaddshErEmitter<A, B, C> {
7754    fn vaddsh_er(&mut self, op0: A, op1: B, op2: C);
7755}
7756
7757impl<'a> VaddshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7758    fn vaddsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7759        self.emit(VADDSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7760    }
7761}
7762
7763/// `VADDSH_MASK`.
7764///
7765/// Supported operand variants:
7766///
7767/// ```text
7768/// +---+---------------+
7769/// | # | Operands      |
7770/// +---+---------------+
7771/// | 1 | Xmm, Xmm, Mem |
7772/// | 2 | Xmm, Xmm, Xmm |
7773/// +---+---------------+
7774/// ```
7775pub trait VaddshMaskEmitter<A, B, C> {
7776    fn vaddsh_mask(&mut self, op0: A, op1: B, op2: C);
7777}
7778
7779impl<'a> VaddshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7780    fn vaddsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7781        self.emit(VADDSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7782    }
7783}
7784
7785impl<'a> VaddshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7786    fn vaddsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7787        self.emit(VADDSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7788    }
7789}
7790
7791/// `VADDSH_MASK_ER`.
7792///
7793/// Supported operand variants:
7794///
7795/// ```text
7796/// +---+---------------+
7797/// | # | Operands      |
7798/// +---+---------------+
7799/// | 1 | Xmm, Xmm, Xmm |
7800/// +---+---------------+
7801/// ```
7802pub trait VaddshMaskErEmitter<A, B, C> {
7803    fn vaddsh_mask_er(&mut self, op0: A, op1: B, op2: C);
7804}
7805
7806impl<'a> VaddshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7807    fn vaddsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7808        self.emit(VADDSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7809    }
7810}
7811
7812/// `VADDSH_MASKZ`.
7813///
7814/// Supported operand variants:
7815///
7816/// ```text
7817/// +---+---------------+
7818/// | # | Operands      |
7819/// +---+---------------+
7820/// | 1 | Xmm, Xmm, Mem |
7821/// | 2 | Xmm, Xmm, Xmm |
7822/// +---+---------------+
7823/// ```
7824pub trait VaddshMaskzEmitter<A, B, C> {
7825    fn vaddsh_maskz(&mut self, op0: A, op1: B, op2: C);
7826}
7827
7828impl<'a> VaddshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7829    fn vaddsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7830        self.emit(VADDSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7831    }
7832}
7833
7834impl<'a> VaddshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7835    fn vaddsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7836        self.emit(VADDSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7837    }
7838}
7839
7840/// `VADDSH_MASKZ_ER`.
7841///
7842/// Supported operand variants:
7843///
7844/// ```text
7845/// +---+---------------+
7846/// | # | Operands      |
7847/// +---+---------------+
7848/// | 1 | Xmm, Xmm, Xmm |
7849/// +---+---------------+
7850/// ```
7851pub trait VaddshMaskzErEmitter<A, B, C> {
7852    fn vaddsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
7853}
7854
7855impl<'a> VaddshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7856    fn vaddsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7857        self.emit(VADDSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7858    }
7859}
7860
7861/// `VAESDEC` (VAESDEC). 
7862/// This instruction performs a single round of the AES decryption flow using the Equivalent Inverse Cipher, using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
7863///
7864///
7865/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDEC.html).
7866///
7867/// Supported operand variants:
7868///
7869/// ```text
7870/// +---+---------------+
7871/// | # | Operands      |
7872/// +---+---------------+
7873/// | 1 | Xmm, Xmm, Mem |
7874/// | 2 | Xmm, Xmm, Xmm |
7875/// | 3 | Ymm, Ymm, Mem |
7876/// | 4 | Ymm, Ymm, Ymm |
7877/// | 5 | Zmm, Zmm, Mem |
7878/// | 6 | Zmm, Zmm, Zmm |
7879/// +---+---------------+
7880/// ```
7881pub trait VaesdecEmitter<A, B, C> {
7882    fn vaesdec(&mut self, op0: A, op1: B, op2: C);
7883}
7884
7885impl<'a> VaesdecEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7886    fn vaesdec(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7887        self.emit(VAESDEC128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7888    }
7889}
7890
7891impl<'a> VaesdecEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7892    fn vaesdec(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7893        self.emit(VAESDEC128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7894    }
7895}
7896
7897impl<'a> VaesdecEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7898    fn vaesdec(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7899        self.emit(VAESDEC256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7900    }
7901}
7902
7903impl<'a> VaesdecEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7904    fn vaesdec(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7905        self.emit(VAESDEC256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7906    }
7907}
7908
7909impl<'a> VaesdecEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7910    fn vaesdec(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7911        self.emit(VAESDEC512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7912    }
7913}
7914
7915impl<'a> VaesdecEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7916    fn vaesdec(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7917        self.emit(VAESDEC512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7918    }
7919}
7920
7921/// `VAESDECLAST` (VAESDECLAST). 
7922/// This instruction performs the last round of the AES decryption flow using the Equivalent Inverse Cipher, using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
7923///
7924///
7925/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDECLAST.html).
7926///
7927/// Supported operand variants:
7928///
7929/// ```text
7930/// +---+---------------+
7931/// | # | Operands      |
7932/// +---+---------------+
7933/// | 1 | Xmm, Xmm, Mem |
7934/// | 2 | Xmm, Xmm, Xmm |
7935/// | 3 | Ymm, Ymm, Mem |
7936/// | 4 | Ymm, Ymm, Ymm |
7937/// | 5 | Zmm, Zmm, Mem |
7938/// | 6 | Zmm, Zmm, Zmm |
7939/// +---+---------------+
7940/// ```
7941pub trait VaesdeclastEmitter<A, B, C> {
7942    fn vaesdeclast(&mut self, op0: A, op1: B, op2: C);
7943}
7944
7945impl<'a> VaesdeclastEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7946    fn vaesdeclast(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7947        self.emit(VAESDECLAST128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7948    }
7949}
7950
7951impl<'a> VaesdeclastEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7952    fn vaesdeclast(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7953        self.emit(VAESDECLAST128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7954    }
7955}
7956
7957impl<'a> VaesdeclastEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7958    fn vaesdeclast(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7959        self.emit(VAESDECLAST256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7960    }
7961}
7962
7963impl<'a> VaesdeclastEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7964    fn vaesdeclast(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7965        self.emit(VAESDECLAST256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7966    }
7967}
7968
7969impl<'a> VaesdeclastEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7970    fn vaesdeclast(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7971        self.emit(VAESDECLAST512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7972    }
7973}
7974
7975impl<'a> VaesdeclastEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7976    fn vaesdeclast(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7977        self.emit(VAESDECLAST512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7978    }
7979}
7980
7981/// `VAESENC` (VAESENC). 
7982/// This instruction performs a single round of an AES encryption flow using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
7983///
7984///
7985/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENC.html).
7986///
7987/// Supported operand variants:
7988///
7989/// ```text
7990/// +---+---------------+
7991/// | # | Operands      |
7992/// +---+---------------+
7993/// | 1 | Xmm, Xmm, Mem |
7994/// | 2 | Xmm, Xmm, Xmm |
7995/// | 3 | Ymm, Ymm, Mem |
7996/// | 4 | Ymm, Ymm, Ymm |
7997/// | 5 | Zmm, Zmm, Mem |
7998/// | 6 | Zmm, Zmm, Zmm |
7999/// +---+---------------+
8000/// ```
8001pub trait VaesencEmitter<A, B, C> {
8002    fn vaesenc(&mut self, op0: A, op1: B, op2: C);
8003}
8004
8005impl<'a> VaesencEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8006    fn vaesenc(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8007        self.emit(VAESENC128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8008    }
8009}
8010
8011impl<'a> VaesencEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8012    fn vaesenc(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8013        self.emit(VAESENC128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8014    }
8015}
8016
8017impl<'a> VaesencEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8018    fn vaesenc(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8019        self.emit(VAESENC256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8020    }
8021}
8022
8023impl<'a> VaesencEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8024    fn vaesenc(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8025        self.emit(VAESENC256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8026    }
8027}
8028
8029impl<'a> VaesencEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8030    fn vaesenc(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8031        self.emit(VAESENC512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8032    }
8033}
8034
8035impl<'a> VaesencEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8036    fn vaesenc(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8037        self.emit(VAESENC512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8038    }
8039}
8040
8041/// `VAESENCLAST` (VAESENCLAST). 
8042/// This instruction performs the last round of an AES encryption flow using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
8043///
8044///
8045/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENCLAST.html).
8046///
8047/// Supported operand variants:
8048///
8049/// ```text
8050/// +---+---------------+
8051/// | # | Operands      |
8052/// +---+---------------+
8053/// | 1 | Xmm, Xmm, Mem |
8054/// | 2 | Xmm, Xmm, Xmm |
8055/// | 3 | Ymm, Ymm, Mem |
8056/// | 4 | Ymm, Ymm, Ymm |
8057/// | 5 | Zmm, Zmm, Mem |
8058/// | 6 | Zmm, Zmm, Zmm |
8059/// +---+---------------+
8060/// ```
8061pub trait VaesenclastEmitter<A, B, C> {
8062    fn vaesenclast(&mut self, op0: A, op1: B, op2: C);
8063}
8064
8065impl<'a> VaesenclastEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8066    fn vaesenclast(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8067        self.emit(VAESENCLAST128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8068    }
8069}
8070
8071impl<'a> VaesenclastEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8072    fn vaesenclast(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8073        self.emit(VAESENCLAST128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8074    }
8075}
8076
8077impl<'a> VaesenclastEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8078    fn vaesenclast(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8079        self.emit(VAESENCLAST256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8080    }
8081}
8082
8083impl<'a> VaesenclastEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8084    fn vaesenclast(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8085        self.emit(VAESENCLAST256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8086    }
8087}
8088
8089impl<'a> VaesenclastEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8090    fn vaesenclast(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8091        self.emit(VAESENCLAST512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8092    }
8093}
8094
8095impl<'a> VaesenclastEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8096    fn vaesenclast(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8097        self.emit(VAESENCLAST512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8098    }
8099}
8100
8101/// `VAESIMC` (VAESIMC). 
8102/// Perform the InvMixColumns transformation on the source operand and store the result in the destination operand. The destination operand is an XMM register. The source operand can be an XMM register or a 128-bit memory location.
8103///
8104///
8105/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESIMC.html).
8106///
8107/// Supported operand variants:
8108///
8109/// ```text
8110/// +---+----------+
8111/// | # | Operands |
8112/// +---+----------+
8113/// | 1 | Xmm, Mem |
8114/// | 2 | Xmm, Xmm |
8115/// +---+----------+
8116/// ```
8117pub trait VaesimcEmitter<A, B> {
8118    fn vaesimc(&mut self, op0: A, op1: B);
8119}
8120
8121impl<'a> VaesimcEmitter<Xmm, Xmm> for Assembler<'a> {
8122    fn vaesimc(&mut self, op0: Xmm, op1: Xmm) {
8123        self.emit(VAESIMCRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8124    }
8125}
8126
8127impl<'a> VaesimcEmitter<Xmm, Mem> for Assembler<'a> {
8128    fn vaesimc(&mut self, op0: Xmm, op1: Mem) {
8129        self.emit(VAESIMCRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8130    }
8131}
8132
8133/// `VAESKEYGENASSIST` (VAESKEYGENASSIST). 
8134/// Assist in expanding the AES cipher key, by computing steps towards generating a round key for encryption, using 128-bit data specified in the source operand and an 8-bit round constant specified as an immediate, store the result in the destination operand.
8135///
8136///
8137/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESKEYGENASSIST.html).
8138///
8139/// Supported operand variants:
8140///
8141/// ```text
8142/// +---+---------------+
8143/// | # | Operands      |
8144/// +---+---------------+
8145/// | 1 | Xmm, Mem, Imm |
8146/// | 2 | Xmm, Xmm, Imm |
8147/// +---+---------------+
8148/// ```
8149pub trait VaeskeygenassistEmitter<A, B, C> {
8150    fn vaeskeygenassist(&mut self, op0: A, op1: B, op2: C);
8151}
8152
8153impl<'a> VaeskeygenassistEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
8154    fn vaeskeygenassist(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
8155        self.emit(VAESKEYGENASSISTRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8156    }
8157}
8158
8159impl<'a> VaeskeygenassistEmitter<Xmm, Mem, Imm> for Assembler<'a> {
8160    fn vaeskeygenassist(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
8161        self.emit(VAESKEYGENASSISTRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8162    }
8163}
8164
8165/// `VBCSTNEBF162PS`.
8166///
8167/// Supported operand variants:
8168///
8169/// ```text
8170/// +---+----------+
8171/// | # | Operands |
8172/// +---+----------+
8173/// | 1 | Xmm, Mem |
8174/// | 2 | Ymm, Mem |
8175/// +---+----------+
8176/// ```
8177pub trait Vbcstnebf162psEmitter<A, B> {
8178    fn vbcstnebf162ps(&mut self, op0: A, op1: B);
8179}
8180
8181impl<'a> Vbcstnebf162psEmitter<Xmm, Mem> for Assembler<'a> {
8182    fn vbcstnebf162ps(&mut self, op0: Xmm, op1: Mem) {
8183        self.emit(VBCSTNEBF162PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8184    }
8185}
8186
8187impl<'a> Vbcstnebf162psEmitter<Ymm, Mem> for Assembler<'a> {
8188    fn vbcstnebf162ps(&mut self, op0: Ymm, op1: Mem) {
8189        self.emit(VBCSTNEBF162PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8190    }
8191}
8192
8193/// `VBCSTNESH2PS`.
8194///
8195/// Supported operand variants:
8196///
8197/// ```text
8198/// +---+----------+
8199/// | # | Operands |
8200/// +---+----------+
8201/// | 1 | Xmm, Mem |
8202/// | 2 | Ymm, Mem |
8203/// +---+----------+
8204/// ```
8205pub trait Vbcstnesh2psEmitter<A, B> {
8206    fn vbcstnesh2ps(&mut self, op0: A, op1: B);
8207}
8208
8209impl<'a> Vbcstnesh2psEmitter<Xmm, Mem> for Assembler<'a> {
8210    fn vbcstnesh2ps(&mut self, op0: Xmm, op1: Mem) {
8211        self.emit(VBCSTNESH2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8212    }
8213}
8214
8215impl<'a> Vbcstnesh2psEmitter<Ymm, Mem> for Assembler<'a> {
8216    fn vbcstnesh2ps(&mut self, op0: Ymm, op1: Mem) {
8217        self.emit(VBCSTNESH2PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8218    }
8219}
8220
8221/// `VCMPPH`.
8222///
8223/// Supported operand variants:
8224///
8225/// ```text
8226/// +---+---------------------+
8227/// | # | Operands            |
8228/// +---+---------------------+
8229/// | 1 | KReg, Xmm, Mem, Imm |
8230/// | 2 | KReg, Xmm, Xmm, Imm |
8231/// | 3 | KReg, Ymm, Mem, Imm |
8232/// | 4 | KReg, Ymm, Ymm, Imm |
8233/// | 5 | KReg, Zmm, Mem, Imm |
8234/// | 6 | KReg, Zmm, Zmm, Imm |
8235/// +---+---------------------+
8236/// ```
8237pub trait VcmpphEmitter<A, B, C, D> {
8238    fn vcmpph(&mut self, op0: A, op1: B, op2: C, op3: D);
8239}
8240
8241impl<'a> VcmpphEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
8242    fn vcmpph(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
8243        self.emit(VCMPPH128KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8244    }
8245}
8246
8247impl<'a> VcmpphEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
8248    fn vcmpph(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
8249        self.emit(VCMPPH128KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8250    }
8251}
8252
8253impl<'a> VcmpphEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
8254    fn vcmpph(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
8255        self.emit(VCMPPH256KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8256    }
8257}
8258
8259impl<'a> VcmpphEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
8260    fn vcmpph(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
8261        self.emit(VCMPPH256KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8262    }
8263}
8264
8265impl<'a> VcmpphEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
8266    fn vcmpph(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
8267        self.emit(VCMPPH512KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8268    }
8269}
8270
8271impl<'a> VcmpphEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
8272    fn vcmpph(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
8273        self.emit(VCMPPH512KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8274    }
8275}
8276
8277/// `VCMPPH_MASK`.
8278///
8279/// Supported operand variants:
8280///
8281/// ```text
8282/// +---+---------------------+
8283/// | # | Operands            |
8284/// +---+---------------------+
8285/// | 1 | KReg, Xmm, Mem, Imm |
8286/// | 2 | KReg, Xmm, Xmm, Imm |
8287/// | 3 | KReg, Ymm, Mem, Imm |
8288/// | 4 | KReg, Ymm, Ymm, Imm |
8289/// | 5 | KReg, Zmm, Mem, Imm |
8290/// | 6 | KReg, Zmm, Zmm, Imm |
8291/// +---+---------------------+
8292/// ```
8293pub trait VcmpphMaskEmitter<A, B, C, D> {
8294    fn vcmpph_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
8295}
8296
8297impl<'a> VcmpphMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
8298    fn vcmpph_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
8299        self.emit(VCMPPH128KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8300    }
8301}
8302
8303impl<'a> VcmpphMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
8304    fn vcmpph_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
8305        self.emit(VCMPPH128KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8306    }
8307}
8308
8309impl<'a> VcmpphMaskEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
8310    fn vcmpph_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
8311        self.emit(VCMPPH256KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8312    }
8313}
8314
8315impl<'a> VcmpphMaskEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
8316    fn vcmpph_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
8317        self.emit(VCMPPH256KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8318    }
8319}
8320
8321impl<'a> VcmpphMaskEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
8322    fn vcmpph_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
8323        self.emit(VCMPPH512KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8324    }
8325}
8326
8327impl<'a> VcmpphMaskEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
8328    fn vcmpph_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
8329        self.emit(VCMPPH512KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8330    }
8331}
8332
8333/// `VCMPPH_MASK_SAE`.
8334///
8335/// Supported operand variants:
8336///
8337/// ```text
8338/// +---+---------------------+
8339/// | # | Operands            |
8340/// +---+---------------------+
8341/// | 1 | KReg, Zmm, Zmm, Imm |
8342/// +---+---------------------+
8343/// ```
8344pub trait VcmpphMaskSaeEmitter<A, B, C, D> {
8345    fn vcmpph_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
8346}
8347
8348impl<'a> VcmpphMaskSaeEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
8349    fn vcmpph_mask_sae(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
8350        self.emit(VCMPPH512KRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8351    }
8352}
8353
8354/// `VCMPPH_SAE`.
8355///
8356/// Supported operand variants:
8357///
8358/// ```text
8359/// +---+---------------------+
8360/// | # | Operands            |
8361/// +---+---------------------+
8362/// | 1 | KReg, Zmm, Zmm, Imm |
8363/// +---+---------------------+
8364/// ```
8365pub trait VcmpphSaeEmitter<A, B, C, D> {
8366    fn vcmpph_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
8367}
8368
8369impl<'a> VcmpphSaeEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
8370    fn vcmpph_sae(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
8371        self.emit(VCMPPH512KRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8372    }
8373}
8374
8375/// `VCMPSH`.
8376///
8377/// Supported operand variants:
8378///
8379/// ```text
8380/// +---+---------------------+
8381/// | # | Operands            |
8382/// +---+---------------------+
8383/// | 1 | KReg, Xmm, Mem, Imm |
8384/// | 2 | KReg, Xmm, Xmm, Imm |
8385/// +---+---------------------+
8386/// ```
8387pub trait VcmpshEmitter<A, B, C, D> {
8388    fn vcmpsh(&mut self, op0: A, op1: B, op2: C, op3: D);
8389}
8390
8391impl<'a> VcmpshEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
8392    fn vcmpsh(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
8393        self.emit(VCMPSHKRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8394    }
8395}
8396
8397impl<'a> VcmpshEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
8398    fn vcmpsh(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
8399        self.emit(VCMPSHKRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8400    }
8401}
8402
8403/// `VCMPSH_MASK`.
8404///
8405/// Supported operand variants:
8406///
8407/// ```text
8408/// +---+---------------------+
8409/// | # | Operands            |
8410/// +---+---------------------+
8411/// | 1 | KReg, Xmm, Mem, Imm |
8412/// | 2 | KReg, Xmm, Xmm, Imm |
8413/// +---+---------------------+
8414/// ```
8415pub trait VcmpshMaskEmitter<A, B, C, D> {
8416    fn vcmpsh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
8417}
8418
8419impl<'a> VcmpshMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
8420    fn vcmpsh_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
8421        self.emit(VCMPSHKRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8422    }
8423}
8424
8425impl<'a> VcmpshMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
8426    fn vcmpsh_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
8427        self.emit(VCMPSHKRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8428    }
8429}
8430
8431/// `VCMPSH_MASK_SAE`.
8432///
8433/// Supported operand variants:
8434///
8435/// ```text
8436/// +---+---------------------+
8437/// | # | Operands            |
8438/// +---+---------------------+
8439/// | 1 | KReg, Xmm, Xmm, Imm |
8440/// +---+---------------------+
8441/// ```
8442pub trait VcmpshMaskSaeEmitter<A, B, C, D> {
8443    fn vcmpsh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
8444}
8445
8446impl<'a> VcmpshMaskSaeEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
8447    fn vcmpsh_mask_sae(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
8448        self.emit(VCMPSHKRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8449    }
8450}
8451
8452/// `VCMPSH_SAE`.
8453///
8454/// Supported operand variants:
8455///
8456/// ```text
8457/// +---+---------------------+
8458/// | # | Operands            |
8459/// +---+---------------------+
8460/// | 1 | KReg, Xmm, Xmm, Imm |
8461/// +---+---------------------+
8462/// ```
8463pub trait VcmpshSaeEmitter<A, B, C, D> {
8464    fn vcmpsh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
8465}
8466
8467impl<'a> VcmpshSaeEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
8468    fn vcmpsh_sae(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
8469        self.emit(VCMPSHKRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
8470    }
8471}
8472
8473/// `VCOMISH`.
8474///
8475/// Supported operand variants:
8476///
8477/// ```text
8478/// +---+----------+
8479/// | # | Operands |
8480/// +---+----------+
8481/// | 1 | Xmm, Mem |
8482/// | 2 | Xmm, Xmm |
8483/// +---+----------+
8484/// ```
8485pub trait VcomishEmitter<A, B> {
8486    fn vcomish(&mut self, op0: A, op1: B);
8487}
8488
8489impl<'a> VcomishEmitter<Xmm, Xmm> for Assembler<'a> {
8490    fn vcomish(&mut self, op0: Xmm, op1: Xmm) {
8491        self.emit(VCOMISHRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8492    }
8493}
8494
8495impl<'a> VcomishEmitter<Xmm, Mem> for Assembler<'a> {
8496    fn vcomish(&mut self, op0: Xmm, op1: Mem) {
8497        self.emit(VCOMISHRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8498    }
8499}
8500
8501/// `VCOMISH_SAE`.
8502///
8503/// Supported operand variants:
8504///
8505/// ```text
8506/// +---+----------+
8507/// | # | Operands |
8508/// +---+----------+
8509/// | 1 | Xmm, Xmm |
8510/// +---+----------+
8511/// ```
8512pub trait VcomishSaeEmitter<A, B> {
8513    fn vcomish_sae(&mut self, op0: A, op1: B);
8514}
8515
8516impl<'a> VcomishSaeEmitter<Xmm, Xmm> for Assembler<'a> {
8517    fn vcomish_sae(&mut self, op0: Xmm, op1: Xmm) {
8518        self.emit(VCOMISHRR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8519    }
8520}
8521
8522/// `VCVTDQ2PH`.
8523///
8524/// Supported operand variants:
8525///
8526/// ```text
8527/// +---+----------+
8528/// | # | Operands |
8529/// +---+----------+
8530/// | 1 | Xmm, Mem |
8531/// | 2 | Xmm, Xmm |
8532/// | 3 | Xmm, Ymm |
8533/// | 4 | Ymm, Mem |
8534/// | 5 | Ymm, Zmm |
8535/// +---+----------+
8536/// ```
8537pub trait Vcvtdq2phEmitter<A, B> {
8538    fn vcvtdq2ph(&mut self, op0: A, op1: B);
8539}
8540
8541impl<'a> Vcvtdq2phEmitter<Xmm, Xmm> for Assembler<'a> {
8542    fn vcvtdq2ph(&mut self, op0: Xmm, op1: Xmm) {
8543        self.emit(VCVTDQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8544    }
8545}
8546
8547impl<'a> Vcvtdq2phEmitter<Xmm, Mem> for Assembler<'a> {
8548    fn vcvtdq2ph(&mut self, op0: Xmm, op1: Mem) {
8549        self.emit(VCVTDQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8550    }
8551}
8552
8553impl<'a> Vcvtdq2phEmitter<Xmm, Ymm> for Assembler<'a> {
8554    fn vcvtdq2ph(&mut self, op0: Xmm, op1: Ymm) {
8555        self.emit(VCVTDQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8556    }
8557}
8558
8559impl<'a> Vcvtdq2phEmitter<Ymm, Zmm> for Assembler<'a> {
8560    fn vcvtdq2ph(&mut self, op0: Ymm, op1: Zmm) {
8561        self.emit(VCVTDQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8562    }
8563}
8564
8565impl<'a> Vcvtdq2phEmitter<Ymm, Mem> for Assembler<'a> {
8566    fn vcvtdq2ph(&mut self, op0: Ymm, op1: Mem) {
8567        self.emit(VCVTDQ2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8568    }
8569}
8570
8571/// `VCVTDQ2PH_ER`.
8572///
8573/// Supported operand variants:
8574///
8575/// ```text
8576/// +---+----------+
8577/// | # | Operands |
8578/// +---+----------+
8579/// | 1 | Ymm, Zmm |
8580/// +---+----------+
8581/// ```
8582pub trait Vcvtdq2phErEmitter<A, B> {
8583    fn vcvtdq2ph_er(&mut self, op0: A, op1: B);
8584}
8585
8586impl<'a> Vcvtdq2phErEmitter<Ymm, Zmm> for Assembler<'a> {
8587    fn vcvtdq2ph_er(&mut self, op0: Ymm, op1: Zmm) {
8588        self.emit(VCVTDQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8589    }
8590}
8591
8592/// `VCVTDQ2PH_MASK`.
8593///
8594/// Supported operand variants:
8595///
8596/// ```text
8597/// +---+----------+
8598/// | # | Operands |
8599/// +---+----------+
8600/// | 1 | Xmm, Mem |
8601/// | 2 | Xmm, Xmm |
8602/// | 3 | Xmm, Ymm |
8603/// | 4 | Ymm, Mem |
8604/// | 5 | Ymm, Zmm |
8605/// +---+----------+
8606/// ```
8607pub trait Vcvtdq2phMaskEmitter<A, B> {
8608    fn vcvtdq2ph_mask(&mut self, op0: A, op1: B);
8609}
8610
8611impl<'a> Vcvtdq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
8612    fn vcvtdq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
8613        self.emit(VCVTDQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8614    }
8615}
8616
8617impl<'a> Vcvtdq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
8618    fn vcvtdq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
8619        self.emit(VCVTDQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8620    }
8621}
8622
8623impl<'a> Vcvtdq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
8624    fn vcvtdq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
8625        self.emit(VCVTDQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8626    }
8627}
8628
8629impl<'a> Vcvtdq2phMaskEmitter<Ymm, Zmm> for Assembler<'a> {
8630    fn vcvtdq2ph_mask(&mut self, op0: Ymm, op1: Zmm) {
8631        self.emit(VCVTDQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8632    }
8633}
8634
8635impl<'a> Vcvtdq2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
8636    fn vcvtdq2ph_mask(&mut self, op0: Ymm, op1: Mem) {
8637        self.emit(VCVTDQ2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8638    }
8639}
8640
8641/// `VCVTDQ2PH_MASK_ER`.
8642///
8643/// Supported operand variants:
8644///
8645/// ```text
8646/// +---+----------+
8647/// | # | Operands |
8648/// +---+----------+
8649/// | 1 | Ymm, Zmm |
8650/// +---+----------+
8651/// ```
8652pub trait Vcvtdq2phMaskErEmitter<A, B> {
8653    fn vcvtdq2ph_mask_er(&mut self, op0: A, op1: B);
8654}
8655
8656impl<'a> Vcvtdq2phMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
8657    fn vcvtdq2ph_mask_er(&mut self, op0: Ymm, op1: Zmm) {
8658        self.emit(VCVTDQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8659    }
8660}
8661
8662/// `VCVTDQ2PH_MASKZ`.
8663///
8664/// Supported operand variants:
8665///
8666/// ```text
8667/// +---+----------+
8668/// | # | Operands |
8669/// +---+----------+
8670/// | 1 | Xmm, Mem |
8671/// | 2 | Xmm, Xmm |
8672/// | 3 | Xmm, Ymm |
8673/// | 4 | Ymm, Mem |
8674/// | 5 | Ymm, Zmm |
8675/// +---+----------+
8676/// ```
8677pub trait Vcvtdq2phMaskzEmitter<A, B> {
8678    fn vcvtdq2ph_maskz(&mut self, op0: A, op1: B);
8679}
8680
8681impl<'a> Vcvtdq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
8682    fn vcvtdq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
8683        self.emit(VCVTDQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8684    }
8685}
8686
8687impl<'a> Vcvtdq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
8688    fn vcvtdq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
8689        self.emit(VCVTDQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8690    }
8691}
8692
8693impl<'a> Vcvtdq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
8694    fn vcvtdq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
8695        self.emit(VCVTDQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8696    }
8697}
8698
8699impl<'a> Vcvtdq2phMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
8700    fn vcvtdq2ph_maskz(&mut self, op0: Ymm, op1: Zmm) {
8701        self.emit(VCVTDQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8702    }
8703}
8704
8705impl<'a> Vcvtdq2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
8706    fn vcvtdq2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
8707        self.emit(VCVTDQ2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8708    }
8709}
8710
8711/// `VCVTDQ2PH_MASKZ_ER`.
8712///
8713/// Supported operand variants:
8714///
8715/// ```text
8716/// +---+----------+
8717/// | # | Operands |
8718/// +---+----------+
8719/// | 1 | Ymm, Zmm |
8720/// +---+----------+
8721/// ```
8722pub trait Vcvtdq2phMaskzErEmitter<A, B> {
8723    fn vcvtdq2ph_maskz_er(&mut self, op0: A, op1: B);
8724}
8725
8726impl<'a> Vcvtdq2phMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
8727    fn vcvtdq2ph_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
8728        self.emit(VCVTDQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8729    }
8730}
8731
8732/// `VCVTNEEBF162PS`.
8733///
8734/// Supported operand variants:
8735///
8736/// ```text
8737/// +---+----------+
8738/// | # | Operands |
8739/// +---+----------+
8740/// | 1 | Xmm, Mem |
8741/// | 2 | Ymm, Mem |
8742/// +---+----------+
8743/// ```
8744pub trait Vcvtneebf162psEmitter<A, B> {
8745    fn vcvtneebf162ps(&mut self, op0: A, op1: B);
8746}
8747
8748impl<'a> Vcvtneebf162psEmitter<Xmm, Mem> for Assembler<'a> {
8749    fn vcvtneebf162ps(&mut self, op0: Xmm, op1: Mem) {
8750        self.emit(VCVTNEEBF162PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8751    }
8752}
8753
8754impl<'a> Vcvtneebf162psEmitter<Ymm, Mem> for Assembler<'a> {
8755    fn vcvtneebf162ps(&mut self, op0: Ymm, op1: Mem) {
8756        self.emit(VCVTNEEBF162PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8757    }
8758}
8759
8760/// `VCVTNEEPH2PS`.
8761///
8762/// Supported operand variants:
8763///
8764/// ```text
8765/// +---+----------+
8766/// | # | Operands |
8767/// +---+----------+
8768/// | 1 | Xmm, Mem |
8769/// | 2 | Ymm, Mem |
8770/// +---+----------+
8771/// ```
8772pub trait Vcvtneeph2psEmitter<A, B> {
8773    fn vcvtneeph2ps(&mut self, op0: A, op1: B);
8774}
8775
8776impl<'a> Vcvtneeph2psEmitter<Xmm, Mem> for Assembler<'a> {
8777    fn vcvtneeph2ps(&mut self, op0: Xmm, op1: Mem) {
8778        self.emit(VCVTNEEPH2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8779    }
8780}
8781
8782impl<'a> Vcvtneeph2psEmitter<Ymm, Mem> for Assembler<'a> {
8783    fn vcvtneeph2ps(&mut self, op0: Ymm, op1: Mem) {
8784        self.emit(VCVTNEEPH2PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8785    }
8786}
8787
8788/// `VCVTNEOBF162PS`.
8789///
8790/// Supported operand variants:
8791///
8792/// ```text
8793/// +---+----------+
8794/// | # | Operands |
8795/// +---+----------+
8796/// | 1 | Xmm, Mem |
8797/// | 2 | Ymm, Mem |
8798/// +---+----------+
8799/// ```
8800pub trait Vcvtneobf162psEmitter<A, B> {
8801    fn vcvtneobf162ps(&mut self, op0: A, op1: B);
8802}
8803
8804impl<'a> Vcvtneobf162psEmitter<Xmm, Mem> for Assembler<'a> {
8805    fn vcvtneobf162ps(&mut self, op0: Xmm, op1: Mem) {
8806        self.emit(VCVTNEOBF162PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8807    }
8808}
8809
8810impl<'a> Vcvtneobf162psEmitter<Ymm, Mem> for Assembler<'a> {
8811    fn vcvtneobf162ps(&mut self, op0: Ymm, op1: Mem) {
8812        self.emit(VCVTNEOBF162PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8813    }
8814}
8815
8816/// `VCVTNEOPH2PS`.
8817///
8818/// Supported operand variants:
8819///
8820/// ```text
8821/// +---+----------+
8822/// | # | Operands |
8823/// +---+----------+
8824/// | 1 | Xmm, Mem |
8825/// | 2 | Ymm, Mem |
8826/// +---+----------+
8827/// ```
8828pub trait Vcvtneoph2psEmitter<A, B> {
8829    fn vcvtneoph2ps(&mut self, op0: A, op1: B);
8830}
8831
8832impl<'a> Vcvtneoph2psEmitter<Xmm, Mem> for Assembler<'a> {
8833    fn vcvtneoph2ps(&mut self, op0: Xmm, op1: Mem) {
8834        self.emit(VCVTNEOPH2PS128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8835    }
8836}
8837
8838impl<'a> Vcvtneoph2psEmitter<Ymm, Mem> for Assembler<'a> {
8839    fn vcvtneoph2ps(&mut self, op0: Ymm, op1: Mem) {
8840        self.emit(VCVTNEOPH2PS256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8841    }
8842}
8843
8844/// `VCVTPD2PH`.
8845///
8846/// Supported operand variants:
8847///
8848/// ```text
8849/// +---+----------+
8850/// | # | Operands |
8851/// +---+----------+
8852/// | 1 | Xmm, Mem |
8853/// | 2 | Xmm, Xmm |
8854/// | 3 | Xmm, Ymm |
8855/// | 4 | Xmm, Zmm |
8856/// +---+----------+
8857/// ```
8858pub trait Vcvtpd2phEmitter<A, B> {
8859    fn vcvtpd2ph(&mut self, op0: A, op1: B);
8860}
8861
8862impl<'a> Vcvtpd2phEmitter<Xmm, Xmm> for Assembler<'a> {
8863    fn vcvtpd2ph(&mut self, op0: Xmm, op1: Xmm) {
8864        self.emit(VCVTPD2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8865    }
8866}
8867
8868impl<'a> Vcvtpd2phEmitter<Xmm, Mem> for Assembler<'a> {
8869    fn vcvtpd2ph(&mut self, op0: Xmm, op1: Mem) {
8870        self.emit(VCVTPD2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8871    }
8872}
8873
8874impl<'a> Vcvtpd2phEmitter<Xmm, Ymm> for Assembler<'a> {
8875    fn vcvtpd2ph(&mut self, op0: Xmm, op1: Ymm) {
8876        self.emit(VCVTPD2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8877    }
8878}
8879
8880impl<'a> Vcvtpd2phEmitter<Xmm, Zmm> for Assembler<'a> {
8881    fn vcvtpd2ph(&mut self, op0: Xmm, op1: Zmm) {
8882        self.emit(VCVTPD2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8883    }
8884}
8885
8886/// `VCVTPD2PH_ER`.
8887///
8888/// Supported operand variants:
8889///
8890/// ```text
8891/// +---+----------+
8892/// | # | Operands |
8893/// +---+----------+
8894/// | 1 | Xmm, Zmm |
8895/// +---+----------+
8896/// ```
8897pub trait Vcvtpd2phErEmitter<A, B> {
8898    fn vcvtpd2ph_er(&mut self, op0: A, op1: B);
8899}
8900
8901impl<'a> Vcvtpd2phErEmitter<Xmm, Zmm> for Assembler<'a> {
8902    fn vcvtpd2ph_er(&mut self, op0: Xmm, op1: Zmm) {
8903        self.emit(VCVTPD2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8904    }
8905}
8906
8907/// `VCVTPD2PH_MASK`.
8908///
8909/// Supported operand variants:
8910///
8911/// ```text
8912/// +---+----------+
8913/// | # | Operands |
8914/// +---+----------+
8915/// | 1 | Xmm, Mem |
8916/// | 2 | Xmm, Xmm |
8917/// | 3 | Xmm, Ymm |
8918/// | 4 | Xmm, Zmm |
8919/// +---+----------+
8920/// ```
8921pub trait Vcvtpd2phMaskEmitter<A, B> {
8922    fn vcvtpd2ph_mask(&mut self, op0: A, op1: B);
8923}
8924
8925impl<'a> Vcvtpd2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
8926    fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
8927        self.emit(VCVTPD2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8928    }
8929}
8930
8931impl<'a> Vcvtpd2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
8932    fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Mem) {
8933        self.emit(VCVTPD2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8934    }
8935}
8936
8937impl<'a> Vcvtpd2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
8938    fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
8939        self.emit(VCVTPD2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8940    }
8941}
8942
8943impl<'a> Vcvtpd2phMaskEmitter<Xmm, Zmm> for Assembler<'a> {
8944    fn vcvtpd2ph_mask(&mut self, op0: Xmm, op1: Zmm) {
8945        self.emit(VCVTPD2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8946    }
8947}
8948
8949/// `VCVTPD2PH_MASK_ER`.
8950///
8951/// Supported operand variants:
8952///
8953/// ```text
8954/// +---+----------+
8955/// | # | Operands |
8956/// +---+----------+
8957/// | 1 | Xmm, Zmm |
8958/// +---+----------+
8959/// ```
8960pub trait Vcvtpd2phMaskErEmitter<A, B> {
8961    fn vcvtpd2ph_mask_er(&mut self, op0: A, op1: B);
8962}
8963
8964impl<'a> Vcvtpd2phMaskErEmitter<Xmm, Zmm> for Assembler<'a> {
8965    fn vcvtpd2ph_mask_er(&mut self, op0: Xmm, op1: Zmm) {
8966        self.emit(VCVTPD2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8967    }
8968}
8969
8970/// `VCVTPD2PH_MASKZ`.
8971///
8972/// Supported operand variants:
8973///
8974/// ```text
8975/// +---+----------+
8976/// | # | Operands |
8977/// +---+----------+
8978/// | 1 | Xmm, Mem |
8979/// | 2 | Xmm, Xmm |
8980/// | 3 | Xmm, Ymm |
8981/// | 4 | Xmm, Zmm |
8982/// +---+----------+
8983/// ```
8984pub trait Vcvtpd2phMaskzEmitter<A, B> {
8985    fn vcvtpd2ph_maskz(&mut self, op0: A, op1: B);
8986}
8987
8988impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
8989    fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
8990        self.emit(VCVTPD2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8991    }
8992}
8993
8994impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
8995    fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
8996        self.emit(VCVTPD2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8997    }
8998}
8999
9000impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
9001    fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
9002        self.emit(VCVTPD2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9003    }
9004}
9005
9006impl<'a> Vcvtpd2phMaskzEmitter<Xmm, Zmm> for Assembler<'a> {
9007    fn vcvtpd2ph_maskz(&mut self, op0: Xmm, op1: Zmm) {
9008        self.emit(VCVTPD2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9009    }
9010}
9011
9012/// `VCVTPD2PH_MASKZ_ER`.
9013///
9014/// Supported operand variants:
9015///
9016/// ```text
9017/// +---+----------+
9018/// | # | Operands |
9019/// +---+----------+
9020/// | 1 | Xmm, Zmm |
9021/// +---+----------+
9022/// ```
9023pub trait Vcvtpd2phMaskzErEmitter<A, B> {
9024    fn vcvtpd2ph_maskz_er(&mut self, op0: A, op1: B);
9025}
9026
9027impl<'a> Vcvtpd2phMaskzErEmitter<Xmm, Zmm> for Assembler<'a> {
9028    fn vcvtpd2ph_maskz_er(&mut self, op0: Xmm, op1: Zmm) {
9029        self.emit(VCVTPD2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9030    }
9031}
9032
9033/// `VCVTPH2DQ`.
9034///
9035/// Supported operand variants:
9036///
9037/// ```text
9038/// +---+----------+
9039/// | # | Operands |
9040/// +---+----------+
9041/// | 1 | Xmm, Mem |
9042/// | 2 | Xmm, Xmm |
9043/// | 3 | Ymm, Mem |
9044/// | 4 | Ymm, Xmm |
9045/// | 5 | Zmm, Mem |
9046/// | 6 | Zmm, Ymm |
9047/// +---+----------+
9048/// ```
9049pub trait Vcvtph2dqEmitter<A, B> {
9050    fn vcvtph2dq(&mut self, op0: A, op1: B);
9051}
9052
9053impl<'a> Vcvtph2dqEmitter<Xmm, Xmm> for Assembler<'a> {
9054    fn vcvtph2dq(&mut self, op0: Xmm, op1: Xmm) {
9055        self.emit(VCVTPH2DQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9056    }
9057}
9058
9059impl<'a> Vcvtph2dqEmitter<Xmm, Mem> for Assembler<'a> {
9060    fn vcvtph2dq(&mut self, op0: Xmm, op1: Mem) {
9061        self.emit(VCVTPH2DQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9062    }
9063}
9064
9065impl<'a> Vcvtph2dqEmitter<Ymm, Xmm> for Assembler<'a> {
9066    fn vcvtph2dq(&mut self, op0: Ymm, op1: Xmm) {
9067        self.emit(VCVTPH2DQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9068    }
9069}
9070
9071impl<'a> Vcvtph2dqEmitter<Ymm, Mem> for Assembler<'a> {
9072    fn vcvtph2dq(&mut self, op0: Ymm, op1: Mem) {
9073        self.emit(VCVTPH2DQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9074    }
9075}
9076
9077impl<'a> Vcvtph2dqEmitter<Zmm, Ymm> for Assembler<'a> {
9078    fn vcvtph2dq(&mut self, op0: Zmm, op1: Ymm) {
9079        self.emit(VCVTPH2DQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9080    }
9081}
9082
9083impl<'a> Vcvtph2dqEmitter<Zmm, Mem> for Assembler<'a> {
9084    fn vcvtph2dq(&mut self, op0: Zmm, op1: Mem) {
9085        self.emit(VCVTPH2DQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9086    }
9087}
9088
9089/// `VCVTPH2DQ_ER`.
9090///
9091/// Supported operand variants:
9092///
9093/// ```text
9094/// +---+----------+
9095/// | # | Operands |
9096/// +---+----------+
9097/// | 1 | Zmm, Ymm |
9098/// +---+----------+
9099/// ```
9100pub trait Vcvtph2dqErEmitter<A, B> {
9101    fn vcvtph2dq_er(&mut self, op0: A, op1: B);
9102}
9103
9104impl<'a> Vcvtph2dqErEmitter<Zmm, Ymm> for Assembler<'a> {
9105    fn vcvtph2dq_er(&mut self, op0: Zmm, op1: Ymm) {
9106        self.emit(VCVTPH2DQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9107    }
9108}
9109
9110/// `VCVTPH2DQ_MASK`.
9111///
9112/// Supported operand variants:
9113///
9114/// ```text
9115/// +---+----------+
9116/// | # | Operands |
9117/// +---+----------+
9118/// | 1 | Xmm, Mem |
9119/// | 2 | Xmm, Xmm |
9120/// | 3 | Ymm, Mem |
9121/// | 4 | Ymm, Xmm |
9122/// | 5 | Zmm, Mem |
9123/// | 6 | Zmm, Ymm |
9124/// +---+----------+
9125/// ```
9126pub trait Vcvtph2dqMaskEmitter<A, B> {
9127    fn vcvtph2dq_mask(&mut self, op0: A, op1: B);
9128}
9129
9130impl<'a> Vcvtph2dqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
9131    fn vcvtph2dq_mask(&mut self, op0: Xmm, op1: Xmm) {
9132        self.emit(VCVTPH2DQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9133    }
9134}
9135
9136impl<'a> Vcvtph2dqMaskEmitter<Xmm, Mem> for Assembler<'a> {
9137    fn vcvtph2dq_mask(&mut self, op0: Xmm, op1: Mem) {
9138        self.emit(VCVTPH2DQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9139    }
9140}
9141
9142impl<'a> Vcvtph2dqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
9143    fn vcvtph2dq_mask(&mut self, op0: Ymm, op1: Xmm) {
9144        self.emit(VCVTPH2DQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9145    }
9146}
9147
9148impl<'a> Vcvtph2dqMaskEmitter<Ymm, Mem> for Assembler<'a> {
9149    fn vcvtph2dq_mask(&mut self, op0: Ymm, op1: Mem) {
9150        self.emit(VCVTPH2DQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9151    }
9152}
9153
9154impl<'a> Vcvtph2dqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
9155    fn vcvtph2dq_mask(&mut self, op0: Zmm, op1: Ymm) {
9156        self.emit(VCVTPH2DQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9157    }
9158}
9159
9160impl<'a> Vcvtph2dqMaskEmitter<Zmm, Mem> for Assembler<'a> {
9161    fn vcvtph2dq_mask(&mut self, op0: Zmm, op1: Mem) {
9162        self.emit(VCVTPH2DQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9163    }
9164}
9165
9166/// `VCVTPH2DQ_MASK_ER`.
9167///
9168/// Supported operand variants:
9169///
9170/// ```text
9171/// +---+----------+
9172/// | # | Operands |
9173/// +---+----------+
9174/// | 1 | Zmm, Ymm |
9175/// +---+----------+
9176/// ```
9177pub trait Vcvtph2dqMaskErEmitter<A, B> {
9178    fn vcvtph2dq_mask_er(&mut self, op0: A, op1: B);
9179}
9180
9181impl<'a> Vcvtph2dqMaskErEmitter<Zmm, Ymm> for Assembler<'a> {
9182    fn vcvtph2dq_mask_er(&mut self, op0: Zmm, op1: Ymm) {
9183        self.emit(VCVTPH2DQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9184    }
9185}
9186
9187/// `VCVTPH2DQ_MASKZ`.
9188///
9189/// Supported operand variants:
9190///
9191/// ```text
9192/// +---+----------+
9193/// | # | Operands |
9194/// +---+----------+
9195/// | 1 | Xmm, Mem |
9196/// | 2 | Xmm, Xmm |
9197/// | 3 | Ymm, Mem |
9198/// | 4 | Ymm, Xmm |
9199/// | 5 | Zmm, Mem |
9200/// | 6 | Zmm, Ymm |
9201/// +---+----------+
9202/// ```
9203pub trait Vcvtph2dqMaskzEmitter<A, B> {
9204    fn vcvtph2dq_maskz(&mut self, op0: A, op1: B);
9205}
9206
9207impl<'a> Vcvtph2dqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
9208    fn vcvtph2dq_maskz(&mut self, op0: Xmm, op1: Xmm) {
9209        self.emit(VCVTPH2DQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9210    }
9211}
9212
9213impl<'a> Vcvtph2dqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
9214    fn vcvtph2dq_maskz(&mut self, op0: Xmm, op1: Mem) {
9215        self.emit(VCVTPH2DQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9216    }
9217}
9218
9219impl<'a> Vcvtph2dqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
9220    fn vcvtph2dq_maskz(&mut self, op0: Ymm, op1: Xmm) {
9221        self.emit(VCVTPH2DQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9222    }
9223}
9224
9225impl<'a> Vcvtph2dqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
9226    fn vcvtph2dq_maskz(&mut self, op0: Ymm, op1: Mem) {
9227        self.emit(VCVTPH2DQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9228    }
9229}
9230
9231impl<'a> Vcvtph2dqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
9232    fn vcvtph2dq_maskz(&mut self, op0: Zmm, op1: Ymm) {
9233        self.emit(VCVTPH2DQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9234    }
9235}
9236
9237impl<'a> Vcvtph2dqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
9238    fn vcvtph2dq_maskz(&mut self, op0: Zmm, op1: Mem) {
9239        self.emit(VCVTPH2DQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9240    }
9241}
9242
9243/// `VCVTPH2DQ_MASKZ_ER`.
9244///
9245/// Supported operand variants:
9246///
9247/// ```text
9248/// +---+----------+
9249/// | # | Operands |
9250/// +---+----------+
9251/// | 1 | Zmm, Ymm |
9252/// +---+----------+
9253/// ```
9254pub trait Vcvtph2dqMaskzErEmitter<A, B> {
9255    fn vcvtph2dq_maskz_er(&mut self, op0: A, op1: B);
9256}
9257
9258impl<'a> Vcvtph2dqMaskzErEmitter<Zmm, Ymm> for Assembler<'a> {
9259    fn vcvtph2dq_maskz_er(&mut self, op0: Zmm, op1: Ymm) {
9260        self.emit(VCVTPH2DQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9261    }
9262}
9263
9264/// `VCVTPH2PD`.
9265///
9266/// Supported operand variants:
9267///
9268/// ```text
9269/// +---+----------+
9270/// | # | Operands |
9271/// +---+----------+
9272/// | 1 | Xmm, Mem |
9273/// | 2 | Xmm, Xmm |
9274/// | 3 | Ymm, Mem |
9275/// | 4 | Ymm, Xmm |
9276/// | 5 | Zmm, Mem |
9277/// | 6 | Zmm, Xmm |
9278/// +---+----------+
9279/// ```
9280pub trait Vcvtph2pdEmitter<A, B> {
9281    fn vcvtph2pd(&mut self, op0: A, op1: B);
9282}
9283
9284impl<'a> Vcvtph2pdEmitter<Xmm, Xmm> for Assembler<'a> {
9285    fn vcvtph2pd(&mut self, op0: Xmm, op1: Xmm) {
9286        self.emit(VCVTPH2PD128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9287    }
9288}
9289
9290impl<'a> Vcvtph2pdEmitter<Xmm, Mem> for Assembler<'a> {
9291    fn vcvtph2pd(&mut self, op0: Xmm, op1: Mem) {
9292        self.emit(VCVTPH2PD128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9293    }
9294}
9295
9296impl<'a> Vcvtph2pdEmitter<Ymm, Xmm> for Assembler<'a> {
9297    fn vcvtph2pd(&mut self, op0: Ymm, op1: Xmm) {
9298        self.emit(VCVTPH2PD256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9299    }
9300}
9301
9302impl<'a> Vcvtph2pdEmitter<Ymm, Mem> for Assembler<'a> {
9303    fn vcvtph2pd(&mut self, op0: Ymm, op1: Mem) {
9304        self.emit(VCVTPH2PD256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9305    }
9306}
9307
9308impl<'a> Vcvtph2pdEmitter<Zmm, Xmm> for Assembler<'a> {
9309    fn vcvtph2pd(&mut self, op0: Zmm, op1: Xmm) {
9310        self.emit(VCVTPH2PD512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9311    }
9312}
9313
9314impl<'a> Vcvtph2pdEmitter<Zmm, Mem> for Assembler<'a> {
9315    fn vcvtph2pd(&mut self, op0: Zmm, op1: Mem) {
9316        self.emit(VCVTPH2PD512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9317    }
9318}
9319
9320/// `VCVTPH2PD_MASK`.
9321///
9322/// Supported operand variants:
9323///
9324/// ```text
9325/// +---+----------+
9326/// | # | Operands |
9327/// +---+----------+
9328/// | 1 | Xmm, Mem |
9329/// | 2 | Xmm, Xmm |
9330/// | 3 | Ymm, Mem |
9331/// | 4 | Ymm, Xmm |
9332/// | 5 | Zmm, Mem |
9333/// | 6 | Zmm, Xmm |
9334/// +---+----------+
9335/// ```
9336pub trait Vcvtph2pdMaskEmitter<A, B> {
9337    fn vcvtph2pd_mask(&mut self, op0: A, op1: B);
9338}
9339
9340impl<'a> Vcvtph2pdMaskEmitter<Xmm, Xmm> for Assembler<'a> {
9341    fn vcvtph2pd_mask(&mut self, op0: Xmm, op1: Xmm) {
9342        self.emit(VCVTPH2PD128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9343    }
9344}
9345
9346impl<'a> Vcvtph2pdMaskEmitter<Xmm, Mem> for Assembler<'a> {
9347    fn vcvtph2pd_mask(&mut self, op0: Xmm, op1: Mem) {
9348        self.emit(VCVTPH2PD128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9349    }
9350}
9351
9352impl<'a> Vcvtph2pdMaskEmitter<Ymm, Xmm> for Assembler<'a> {
9353    fn vcvtph2pd_mask(&mut self, op0: Ymm, op1: Xmm) {
9354        self.emit(VCVTPH2PD256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9355    }
9356}
9357
9358impl<'a> Vcvtph2pdMaskEmitter<Ymm, Mem> for Assembler<'a> {
9359    fn vcvtph2pd_mask(&mut self, op0: Ymm, op1: Mem) {
9360        self.emit(VCVTPH2PD256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9361    }
9362}
9363
9364impl<'a> Vcvtph2pdMaskEmitter<Zmm, Xmm> for Assembler<'a> {
9365    fn vcvtph2pd_mask(&mut self, op0: Zmm, op1: Xmm) {
9366        self.emit(VCVTPH2PD512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9367    }
9368}
9369
9370impl<'a> Vcvtph2pdMaskEmitter<Zmm, Mem> for Assembler<'a> {
9371    fn vcvtph2pd_mask(&mut self, op0: Zmm, op1: Mem) {
9372        self.emit(VCVTPH2PD512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9373    }
9374}
9375
9376/// `VCVTPH2PD_MASK_SAE`.
9377///
9378/// Supported operand variants:
9379///
9380/// ```text
9381/// +---+----------+
9382/// | # | Operands |
9383/// +---+----------+
9384/// | 1 | Zmm, Xmm |
9385/// +---+----------+
9386/// ```
9387pub trait Vcvtph2pdMaskSaeEmitter<A, B> {
9388    fn vcvtph2pd_mask_sae(&mut self, op0: A, op1: B);
9389}
9390
9391impl<'a> Vcvtph2pdMaskSaeEmitter<Zmm, Xmm> for Assembler<'a> {
9392    fn vcvtph2pd_mask_sae(&mut self, op0: Zmm, op1: Xmm) {
9393        self.emit(VCVTPH2PD512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9394    }
9395}
9396
9397/// `VCVTPH2PD_MASKZ`.
9398///
9399/// Supported operand variants:
9400///
9401/// ```text
9402/// +---+----------+
9403/// | # | Operands |
9404/// +---+----------+
9405/// | 1 | Xmm, Mem |
9406/// | 2 | Xmm, Xmm |
9407/// | 3 | Ymm, Mem |
9408/// | 4 | Ymm, Xmm |
9409/// | 5 | Zmm, Mem |
9410/// | 6 | Zmm, Xmm |
9411/// +---+----------+
9412/// ```
9413pub trait Vcvtph2pdMaskzEmitter<A, B> {
9414    fn vcvtph2pd_maskz(&mut self, op0: A, op1: B);
9415}
9416
9417impl<'a> Vcvtph2pdMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
9418    fn vcvtph2pd_maskz(&mut self, op0: Xmm, op1: Xmm) {
9419        self.emit(VCVTPH2PD128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9420    }
9421}
9422
9423impl<'a> Vcvtph2pdMaskzEmitter<Xmm, Mem> for Assembler<'a> {
9424    fn vcvtph2pd_maskz(&mut self, op0: Xmm, op1: Mem) {
9425        self.emit(VCVTPH2PD128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9426    }
9427}
9428
9429impl<'a> Vcvtph2pdMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
9430    fn vcvtph2pd_maskz(&mut self, op0: Ymm, op1: Xmm) {
9431        self.emit(VCVTPH2PD256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9432    }
9433}
9434
9435impl<'a> Vcvtph2pdMaskzEmitter<Ymm, Mem> for Assembler<'a> {
9436    fn vcvtph2pd_maskz(&mut self, op0: Ymm, op1: Mem) {
9437        self.emit(VCVTPH2PD256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9438    }
9439}
9440
9441impl<'a> Vcvtph2pdMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
9442    fn vcvtph2pd_maskz(&mut self, op0: Zmm, op1: Xmm) {
9443        self.emit(VCVTPH2PD512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9444    }
9445}
9446
9447impl<'a> Vcvtph2pdMaskzEmitter<Zmm, Mem> for Assembler<'a> {
9448    fn vcvtph2pd_maskz(&mut self, op0: Zmm, op1: Mem) {
9449        self.emit(VCVTPH2PD512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9450    }
9451}
9452
9453/// `VCVTPH2PD_MASKZ_SAE`.
9454///
9455/// Supported operand variants:
9456///
9457/// ```text
9458/// +---+----------+
9459/// | # | Operands |
9460/// +---+----------+
9461/// | 1 | Zmm, Xmm |
9462/// +---+----------+
9463/// ```
9464pub trait Vcvtph2pdMaskzSaeEmitter<A, B> {
9465    fn vcvtph2pd_maskz_sae(&mut self, op0: A, op1: B);
9466}
9467
9468impl<'a> Vcvtph2pdMaskzSaeEmitter<Zmm, Xmm> for Assembler<'a> {
9469    fn vcvtph2pd_maskz_sae(&mut self, op0: Zmm, op1: Xmm) {
9470        self.emit(VCVTPH2PD512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9471    }
9472}
9473
9474/// `VCVTPH2PD_SAE`.
9475///
9476/// Supported operand variants:
9477///
9478/// ```text
9479/// +---+----------+
9480/// | # | Operands |
9481/// +---+----------+
9482/// | 1 | Zmm, Xmm |
9483/// +---+----------+
9484/// ```
9485pub trait Vcvtph2pdSaeEmitter<A, B> {
9486    fn vcvtph2pd_sae(&mut self, op0: A, op1: B);
9487}
9488
9489impl<'a> Vcvtph2pdSaeEmitter<Zmm, Xmm> for Assembler<'a> {
9490    fn vcvtph2pd_sae(&mut self, op0: Zmm, op1: Xmm) {
9491        self.emit(VCVTPH2PD512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9492    }
9493}
9494
9495/// `VCVTPH2PSX` (VCVTPH2PSX). 
9496/// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
9497///
9498///
9499/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
9500///
9501/// Supported operand variants:
9502///
9503/// ```text
9504/// +---+----------+
9505/// | # | Operands |
9506/// +---+----------+
9507/// | 1 | Xmm, Mem |
9508/// | 2 | Xmm, Xmm |
9509/// | 3 | Ymm, Mem |
9510/// | 4 | Ymm, Xmm |
9511/// | 5 | Zmm, Mem |
9512/// | 6 | Zmm, Ymm |
9513/// +---+----------+
9514/// ```
9515pub trait Vcvtph2psxEmitter<A, B> {
9516    fn vcvtph2psx(&mut self, op0: A, op1: B);
9517}
9518
9519impl<'a> Vcvtph2psxEmitter<Xmm, Xmm> for Assembler<'a> {
9520    fn vcvtph2psx(&mut self, op0: Xmm, op1: Xmm) {
9521        self.emit(VCVTPH2PSX128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9522    }
9523}
9524
9525impl<'a> Vcvtph2psxEmitter<Xmm, Mem> for Assembler<'a> {
9526    fn vcvtph2psx(&mut self, op0: Xmm, op1: Mem) {
9527        self.emit(VCVTPH2PSX128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9528    }
9529}
9530
9531impl<'a> Vcvtph2psxEmitter<Ymm, Xmm> for Assembler<'a> {
9532    fn vcvtph2psx(&mut self, op0: Ymm, op1: Xmm) {
9533        self.emit(VCVTPH2PSX256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9534    }
9535}
9536
9537impl<'a> Vcvtph2psxEmitter<Ymm, Mem> for Assembler<'a> {
9538    fn vcvtph2psx(&mut self, op0: Ymm, op1: Mem) {
9539        self.emit(VCVTPH2PSX256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9540    }
9541}
9542
9543impl<'a> Vcvtph2psxEmitter<Zmm, Ymm> for Assembler<'a> {
9544    fn vcvtph2psx(&mut self, op0: Zmm, op1: Ymm) {
9545        self.emit(VCVTPH2PSX512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9546    }
9547}
9548
9549impl<'a> Vcvtph2psxEmitter<Zmm, Mem> for Assembler<'a> {
9550    fn vcvtph2psx(&mut self, op0: Zmm, op1: Mem) {
9551        self.emit(VCVTPH2PSX512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9552    }
9553}
9554
9555/// `VCVTPH2PSX_MASK` (VCVTPH2PSX). 
9556/// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
9557///
9558///
9559/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
9560///
9561/// Supported operand variants:
9562///
9563/// ```text
9564/// +---+----------+
9565/// | # | Operands |
9566/// +---+----------+
9567/// | 1 | Xmm, Mem |
9568/// | 2 | Xmm, Xmm |
9569/// | 3 | Ymm, Mem |
9570/// | 4 | Ymm, Xmm |
9571/// | 5 | Zmm, Mem |
9572/// | 6 | Zmm, Ymm |
9573/// +---+----------+
9574/// ```
9575pub trait Vcvtph2psxMaskEmitter<A, B> {
9576    fn vcvtph2psx_mask(&mut self, op0: A, op1: B);
9577}
9578
9579impl<'a> Vcvtph2psxMaskEmitter<Xmm, Xmm> for Assembler<'a> {
9580    fn vcvtph2psx_mask(&mut self, op0: Xmm, op1: Xmm) {
9581        self.emit(VCVTPH2PSX128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9582    }
9583}
9584
9585impl<'a> Vcvtph2psxMaskEmitter<Xmm, Mem> for Assembler<'a> {
9586    fn vcvtph2psx_mask(&mut self, op0: Xmm, op1: Mem) {
9587        self.emit(VCVTPH2PSX128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9588    }
9589}
9590
9591impl<'a> Vcvtph2psxMaskEmitter<Ymm, Xmm> for Assembler<'a> {
9592    fn vcvtph2psx_mask(&mut self, op0: Ymm, op1: Xmm) {
9593        self.emit(VCVTPH2PSX256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9594    }
9595}
9596
9597impl<'a> Vcvtph2psxMaskEmitter<Ymm, Mem> for Assembler<'a> {
9598    fn vcvtph2psx_mask(&mut self, op0: Ymm, op1: Mem) {
9599        self.emit(VCVTPH2PSX256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9600    }
9601}
9602
9603impl<'a> Vcvtph2psxMaskEmitter<Zmm, Ymm> for Assembler<'a> {
9604    fn vcvtph2psx_mask(&mut self, op0: Zmm, op1: Ymm) {
9605        self.emit(VCVTPH2PSX512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9606    }
9607}
9608
9609impl<'a> Vcvtph2psxMaskEmitter<Zmm, Mem> for Assembler<'a> {
9610    fn vcvtph2psx_mask(&mut self, op0: Zmm, op1: Mem) {
9611        self.emit(VCVTPH2PSX512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9612    }
9613}
9614
9615/// `VCVTPH2PSX_MASK_SAE` (VCVTPH2PSX). 
9616/// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
9617///
9618///
9619/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
9620///
9621/// Supported operand variants:
9622///
9623/// ```text
9624/// +---+----------+
9625/// | # | Operands |
9626/// +---+----------+
9627/// | 1 | Zmm, Ymm |
9628/// +---+----------+
9629/// ```
9630pub trait Vcvtph2psxMaskSaeEmitter<A, B> {
9631    fn vcvtph2psx_mask_sae(&mut self, op0: A, op1: B);
9632}
9633
9634impl<'a> Vcvtph2psxMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
9635    fn vcvtph2psx_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
9636        self.emit(VCVTPH2PSX512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9637    }
9638}
9639
9640/// `VCVTPH2PSX_MASKZ` (VCVTPH2PSX). 
9641/// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
9642///
9643///
9644/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
9645///
9646/// Supported operand variants:
9647///
9648/// ```text
9649/// +---+----------+
9650/// | # | Operands |
9651/// +---+----------+
9652/// | 1 | Xmm, Mem |
9653/// | 2 | Xmm, Xmm |
9654/// | 3 | Ymm, Mem |
9655/// | 4 | Ymm, Xmm |
9656/// | 5 | Zmm, Mem |
9657/// | 6 | Zmm, Ymm |
9658/// +---+----------+
9659/// ```
9660pub trait Vcvtph2psxMaskzEmitter<A, B> {
9661    fn vcvtph2psx_maskz(&mut self, op0: A, op1: B);
9662}
9663
9664impl<'a> Vcvtph2psxMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
9665    fn vcvtph2psx_maskz(&mut self, op0: Xmm, op1: Xmm) {
9666        self.emit(VCVTPH2PSX128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9667    }
9668}
9669
9670impl<'a> Vcvtph2psxMaskzEmitter<Xmm, Mem> for Assembler<'a> {
9671    fn vcvtph2psx_maskz(&mut self, op0: Xmm, op1: Mem) {
9672        self.emit(VCVTPH2PSX128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9673    }
9674}
9675
9676impl<'a> Vcvtph2psxMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
9677    fn vcvtph2psx_maskz(&mut self, op0: Ymm, op1: Xmm) {
9678        self.emit(VCVTPH2PSX256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9679    }
9680}
9681
9682impl<'a> Vcvtph2psxMaskzEmitter<Ymm, Mem> for Assembler<'a> {
9683    fn vcvtph2psx_maskz(&mut self, op0: Ymm, op1: Mem) {
9684        self.emit(VCVTPH2PSX256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9685    }
9686}
9687
9688impl<'a> Vcvtph2psxMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
9689    fn vcvtph2psx_maskz(&mut self, op0: Zmm, op1: Ymm) {
9690        self.emit(VCVTPH2PSX512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9691    }
9692}
9693
9694impl<'a> Vcvtph2psxMaskzEmitter<Zmm, Mem> for Assembler<'a> {
9695    fn vcvtph2psx_maskz(&mut self, op0: Zmm, op1: Mem) {
9696        self.emit(VCVTPH2PSX512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9697    }
9698}
9699
9700/// `VCVTPH2PSX_MASKZ_SAE` (VCVTPH2PSX). 
9701/// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
9702///
9703///
9704/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
9705///
9706/// Supported operand variants:
9707///
9708/// ```text
9709/// +---+----------+
9710/// | # | Operands |
9711/// +---+----------+
9712/// | 1 | Zmm, Ymm |
9713/// +---+----------+
9714/// ```
9715pub trait Vcvtph2psxMaskzSaeEmitter<A, B> {
9716    fn vcvtph2psx_maskz_sae(&mut self, op0: A, op1: B);
9717}
9718
9719impl<'a> Vcvtph2psxMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
9720    fn vcvtph2psx_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
9721        self.emit(VCVTPH2PSX512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9722    }
9723}
9724
9725/// `VCVTPH2PSX_SAE` (VCVTPH2PSX). 
9726/// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
9727///
9728///
9729/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
9730///
9731/// Supported operand variants:
9732///
9733/// ```text
9734/// +---+----------+
9735/// | # | Operands |
9736/// +---+----------+
9737/// | 1 | Zmm, Ymm |
9738/// +---+----------+
9739/// ```
9740pub trait Vcvtph2psxSaeEmitter<A, B> {
9741    fn vcvtph2psx_sae(&mut self, op0: A, op1: B);
9742}
9743
9744impl<'a> Vcvtph2psxSaeEmitter<Zmm, Ymm> for Assembler<'a> {
9745    fn vcvtph2psx_sae(&mut self, op0: Zmm, op1: Ymm) {
9746        self.emit(VCVTPH2PSX512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9747    }
9748}
9749
9750/// `VCVTPH2QQ`.
9751///
9752/// Supported operand variants:
9753///
9754/// ```text
9755/// +---+----------+
9756/// | # | Operands |
9757/// +---+----------+
9758/// | 1 | Xmm, Mem |
9759/// | 2 | Xmm, Xmm |
9760/// | 3 | Ymm, Mem |
9761/// | 4 | Ymm, Xmm |
9762/// | 5 | Zmm, Mem |
9763/// | 6 | Zmm, Xmm |
9764/// +---+----------+
9765/// ```
9766pub trait Vcvtph2qqEmitter<A, B> {
9767    fn vcvtph2qq(&mut self, op0: A, op1: B);
9768}
9769
9770impl<'a> Vcvtph2qqEmitter<Xmm, Xmm> for Assembler<'a> {
9771    fn vcvtph2qq(&mut self, op0: Xmm, op1: Xmm) {
9772        self.emit(VCVTPH2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9773    }
9774}
9775
9776impl<'a> Vcvtph2qqEmitter<Xmm, Mem> for Assembler<'a> {
9777    fn vcvtph2qq(&mut self, op0: Xmm, op1: Mem) {
9778        self.emit(VCVTPH2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9779    }
9780}
9781
9782impl<'a> Vcvtph2qqEmitter<Ymm, Xmm> for Assembler<'a> {
9783    fn vcvtph2qq(&mut self, op0: Ymm, op1: Xmm) {
9784        self.emit(VCVTPH2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9785    }
9786}
9787
9788impl<'a> Vcvtph2qqEmitter<Ymm, Mem> for Assembler<'a> {
9789    fn vcvtph2qq(&mut self, op0: Ymm, op1: Mem) {
9790        self.emit(VCVTPH2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9791    }
9792}
9793
9794impl<'a> Vcvtph2qqEmitter<Zmm, Xmm> for Assembler<'a> {
9795    fn vcvtph2qq(&mut self, op0: Zmm, op1: Xmm) {
9796        self.emit(VCVTPH2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9797    }
9798}
9799
9800impl<'a> Vcvtph2qqEmitter<Zmm, Mem> for Assembler<'a> {
9801    fn vcvtph2qq(&mut self, op0: Zmm, op1: Mem) {
9802        self.emit(VCVTPH2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9803    }
9804}
9805
9806/// `VCVTPH2QQ_ER`.
9807///
9808/// Supported operand variants:
9809///
9810/// ```text
9811/// +---+----------+
9812/// | # | Operands |
9813/// +---+----------+
9814/// | 1 | Zmm, Xmm |
9815/// +---+----------+
9816/// ```
9817pub trait Vcvtph2qqErEmitter<A, B> {
9818    fn vcvtph2qq_er(&mut self, op0: A, op1: B);
9819}
9820
9821impl<'a> Vcvtph2qqErEmitter<Zmm, Xmm> for Assembler<'a> {
9822    fn vcvtph2qq_er(&mut self, op0: Zmm, op1: Xmm) {
9823        self.emit(VCVTPH2QQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9824    }
9825}
9826
9827/// `VCVTPH2QQ_MASK`.
9828///
9829/// Supported operand variants:
9830///
9831/// ```text
9832/// +---+----------+
9833/// | # | Operands |
9834/// +---+----------+
9835/// | 1 | Xmm, Mem |
9836/// | 2 | Xmm, Xmm |
9837/// | 3 | Ymm, Mem |
9838/// | 4 | Ymm, Xmm |
9839/// | 5 | Zmm, Mem |
9840/// | 6 | Zmm, Xmm |
9841/// +---+----------+
9842/// ```
9843pub trait Vcvtph2qqMaskEmitter<A, B> {
9844    fn vcvtph2qq_mask(&mut self, op0: A, op1: B);
9845}
9846
9847impl<'a> Vcvtph2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
9848    fn vcvtph2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
9849        self.emit(VCVTPH2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9850    }
9851}
9852
9853impl<'a> Vcvtph2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
9854    fn vcvtph2qq_mask(&mut self, op0: Xmm, op1: Mem) {
9855        self.emit(VCVTPH2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9856    }
9857}
9858
9859impl<'a> Vcvtph2qqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
9860    fn vcvtph2qq_mask(&mut self, op0: Ymm, op1: Xmm) {
9861        self.emit(VCVTPH2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9862    }
9863}
9864
9865impl<'a> Vcvtph2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
9866    fn vcvtph2qq_mask(&mut self, op0: Ymm, op1: Mem) {
9867        self.emit(VCVTPH2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9868    }
9869}
9870
9871impl<'a> Vcvtph2qqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
9872    fn vcvtph2qq_mask(&mut self, op0: Zmm, op1: Xmm) {
9873        self.emit(VCVTPH2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9874    }
9875}
9876
9877impl<'a> Vcvtph2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
9878    fn vcvtph2qq_mask(&mut self, op0: Zmm, op1: Mem) {
9879        self.emit(VCVTPH2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9880    }
9881}
9882
9883/// `VCVTPH2QQ_MASK_ER`.
9884///
9885/// Supported operand variants:
9886///
9887/// ```text
9888/// +---+----------+
9889/// | # | Operands |
9890/// +---+----------+
9891/// | 1 | Zmm, Xmm |
9892/// +---+----------+
9893/// ```
9894pub trait Vcvtph2qqMaskErEmitter<A, B> {
9895    fn vcvtph2qq_mask_er(&mut self, op0: A, op1: B);
9896}
9897
9898impl<'a> Vcvtph2qqMaskErEmitter<Zmm, Xmm> for Assembler<'a> {
9899    fn vcvtph2qq_mask_er(&mut self, op0: Zmm, op1: Xmm) {
9900        self.emit(VCVTPH2QQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9901    }
9902}
9903
9904/// `VCVTPH2QQ_MASKZ`.
9905///
9906/// Supported operand variants:
9907///
9908/// ```text
9909/// +---+----------+
9910/// | # | Operands |
9911/// +---+----------+
9912/// | 1 | Xmm, Mem |
9913/// | 2 | Xmm, Xmm |
9914/// | 3 | Ymm, Mem |
9915/// | 4 | Ymm, Xmm |
9916/// | 5 | Zmm, Mem |
9917/// | 6 | Zmm, Xmm |
9918/// +---+----------+
9919/// ```
9920pub trait Vcvtph2qqMaskzEmitter<A, B> {
9921    fn vcvtph2qq_maskz(&mut self, op0: A, op1: B);
9922}
9923
9924impl<'a> Vcvtph2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
9925    fn vcvtph2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
9926        self.emit(VCVTPH2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9927    }
9928}
9929
9930impl<'a> Vcvtph2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
9931    fn vcvtph2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
9932        self.emit(VCVTPH2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9933    }
9934}
9935
9936impl<'a> Vcvtph2qqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
9937    fn vcvtph2qq_maskz(&mut self, op0: Ymm, op1: Xmm) {
9938        self.emit(VCVTPH2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9939    }
9940}
9941
9942impl<'a> Vcvtph2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
9943    fn vcvtph2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
9944        self.emit(VCVTPH2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9945    }
9946}
9947
9948impl<'a> Vcvtph2qqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
9949    fn vcvtph2qq_maskz(&mut self, op0: Zmm, op1: Xmm) {
9950        self.emit(VCVTPH2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9951    }
9952}
9953
9954impl<'a> Vcvtph2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
9955    fn vcvtph2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
9956        self.emit(VCVTPH2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9957    }
9958}
9959
9960/// `VCVTPH2QQ_MASKZ_ER`.
9961///
9962/// Supported operand variants:
9963///
9964/// ```text
9965/// +---+----------+
9966/// | # | Operands |
9967/// +---+----------+
9968/// | 1 | Zmm, Xmm |
9969/// +---+----------+
9970/// ```
9971pub trait Vcvtph2qqMaskzErEmitter<A, B> {
9972    fn vcvtph2qq_maskz_er(&mut self, op0: A, op1: B);
9973}
9974
9975impl<'a> Vcvtph2qqMaskzErEmitter<Zmm, Xmm> for Assembler<'a> {
9976    fn vcvtph2qq_maskz_er(&mut self, op0: Zmm, op1: Xmm) {
9977        self.emit(VCVTPH2QQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
9978    }
9979}
9980
9981/// `VCVTPH2UDQ`.
9982///
9983/// Supported operand variants:
9984///
9985/// ```text
9986/// +---+----------+
9987/// | # | Operands |
9988/// +---+----------+
9989/// | 1 | Xmm, Mem |
9990/// | 2 | Xmm, Xmm |
9991/// | 3 | Ymm, Mem |
9992/// | 4 | Ymm, Xmm |
9993/// | 5 | Zmm, Mem |
9994/// | 6 | Zmm, Ymm |
9995/// +---+----------+
9996/// ```
9997pub trait Vcvtph2udqEmitter<A, B> {
9998    fn vcvtph2udq(&mut self, op0: A, op1: B);
9999}
10000
10001impl<'a> Vcvtph2udqEmitter<Xmm, Xmm> for Assembler<'a> {
10002    fn vcvtph2udq(&mut self, op0: Xmm, op1: Xmm) {
10003        self.emit(VCVTPH2UDQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10004    }
10005}
10006
10007impl<'a> Vcvtph2udqEmitter<Xmm, Mem> for Assembler<'a> {
10008    fn vcvtph2udq(&mut self, op0: Xmm, op1: Mem) {
10009        self.emit(VCVTPH2UDQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10010    }
10011}
10012
10013impl<'a> Vcvtph2udqEmitter<Ymm, Xmm> for Assembler<'a> {
10014    fn vcvtph2udq(&mut self, op0: Ymm, op1: Xmm) {
10015        self.emit(VCVTPH2UDQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10016    }
10017}
10018
10019impl<'a> Vcvtph2udqEmitter<Ymm, Mem> for Assembler<'a> {
10020    fn vcvtph2udq(&mut self, op0: Ymm, op1: Mem) {
10021        self.emit(VCVTPH2UDQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10022    }
10023}
10024
10025impl<'a> Vcvtph2udqEmitter<Zmm, Ymm> for Assembler<'a> {
10026    fn vcvtph2udq(&mut self, op0: Zmm, op1: Ymm) {
10027        self.emit(VCVTPH2UDQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10028    }
10029}
10030
10031impl<'a> Vcvtph2udqEmitter<Zmm, Mem> for Assembler<'a> {
10032    fn vcvtph2udq(&mut self, op0: Zmm, op1: Mem) {
10033        self.emit(VCVTPH2UDQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10034    }
10035}
10036
10037/// `VCVTPH2UDQ_ER`.
10038///
10039/// Supported operand variants:
10040///
10041/// ```text
10042/// +---+----------+
10043/// | # | Operands |
10044/// +---+----------+
10045/// | 1 | Zmm, Ymm |
10046/// +---+----------+
10047/// ```
10048pub trait Vcvtph2udqErEmitter<A, B> {
10049    fn vcvtph2udq_er(&mut self, op0: A, op1: B);
10050}
10051
10052impl<'a> Vcvtph2udqErEmitter<Zmm, Ymm> for Assembler<'a> {
10053    fn vcvtph2udq_er(&mut self, op0: Zmm, op1: Ymm) {
10054        self.emit(VCVTPH2UDQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10055    }
10056}
10057
10058/// `VCVTPH2UDQ_MASK`.
10059///
10060/// Supported operand variants:
10061///
10062/// ```text
10063/// +---+----------+
10064/// | # | Operands |
10065/// +---+----------+
10066/// | 1 | Xmm, Mem |
10067/// | 2 | Xmm, Xmm |
10068/// | 3 | Ymm, Mem |
10069/// | 4 | Ymm, Xmm |
10070/// | 5 | Zmm, Mem |
10071/// | 6 | Zmm, Ymm |
10072/// +---+----------+
10073/// ```
10074pub trait Vcvtph2udqMaskEmitter<A, B> {
10075    fn vcvtph2udq_mask(&mut self, op0: A, op1: B);
10076}
10077
10078impl<'a> Vcvtph2udqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
10079    fn vcvtph2udq_mask(&mut self, op0: Xmm, op1: Xmm) {
10080        self.emit(VCVTPH2UDQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10081    }
10082}
10083
10084impl<'a> Vcvtph2udqMaskEmitter<Xmm, Mem> for Assembler<'a> {
10085    fn vcvtph2udq_mask(&mut self, op0: Xmm, op1: Mem) {
10086        self.emit(VCVTPH2UDQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10087    }
10088}
10089
10090impl<'a> Vcvtph2udqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
10091    fn vcvtph2udq_mask(&mut self, op0: Ymm, op1: Xmm) {
10092        self.emit(VCVTPH2UDQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10093    }
10094}
10095
10096impl<'a> Vcvtph2udqMaskEmitter<Ymm, Mem> for Assembler<'a> {
10097    fn vcvtph2udq_mask(&mut self, op0: Ymm, op1: Mem) {
10098        self.emit(VCVTPH2UDQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10099    }
10100}
10101
10102impl<'a> Vcvtph2udqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
10103    fn vcvtph2udq_mask(&mut self, op0: Zmm, op1: Ymm) {
10104        self.emit(VCVTPH2UDQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10105    }
10106}
10107
10108impl<'a> Vcvtph2udqMaskEmitter<Zmm, Mem> for Assembler<'a> {
10109    fn vcvtph2udq_mask(&mut self, op0: Zmm, op1: Mem) {
10110        self.emit(VCVTPH2UDQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10111    }
10112}
10113
10114/// `VCVTPH2UDQ_MASK_ER`.
10115///
10116/// Supported operand variants:
10117///
10118/// ```text
10119/// +---+----------+
10120/// | # | Operands |
10121/// +---+----------+
10122/// | 1 | Zmm, Ymm |
10123/// +---+----------+
10124/// ```
10125pub trait Vcvtph2udqMaskErEmitter<A, B> {
10126    fn vcvtph2udq_mask_er(&mut self, op0: A, op1: B);
10127}
10128
10129impl<'a> Vcvtph2udqMaskErEmitter<Zmm, Ymm> for Assembler<'a> {
10130    fn vcvtph2udq_mask_er(&mut self, op0: Zmm, op1: Ymm) {
10131        self.emit(VCVTPH2UDQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10132    }
10133}
10134
10135/// `VCVTPH2UDQ_MASKZ`.
10136///
10137/// Supported operand variants:
10138///
10139/// ```text
10140/// +---+----------+
10141/// | # | Operands |
10142/// +---+----------+
10143/// | 1 | Xmm, Mem |
10144/// | 2 | Xmm, Xmm |
10145/// | 3 | Ymm, Mem |
10146/// | 4 | Ymm, Xmm |
10147/// | 5 | Zmm, Mem |
10148/// | 6 | Zmm, Ymm |
10149/// +---+----------+
10150/// ```
10151pub trait Vcvtph2udqMaskzEmitter<A, B> {
10152    fn vcvtph2udq_maskz(&mut self, op0: A, op1: B);
10153}
10154
10155impl<'a> Vcvtph2udqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
10156    fn vcvtph2udq_maskz(&mut self, op0: Xmm, op1: Xmm) {
10157        self.emit(VCVTPH2UDQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10158    }
10159}
10160
10161impl<'a> Vcvtph2udqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
10162    fn vcvtph2udq_maskz(&mut self, op0: Xmm, op1: Mem) {
10163        self.emit(VCVTPH2UDQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10164    }
10165}
10166
10167impl<'a> Vcvtph2udqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
10168    fn vcvtph2udq_maskz(&mut self, op0: Ymm, op1: Xmm) {
10169        self.emit(VCVTPH2UDQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10170    }
10171}
10172
10173impl<'a> Vcvtph2udqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
10174    fn vcvtph2udq_maskz(&mut self, op0: Ymm, op1: Mem) {
10175        self.emit(VCVTPH2UDQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10176    }
10177}
10178
10179impl<'a> Vcvtph2udqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
10180    fn vcvtph2udq_maskz(&mut self, op0: Zmm, op1: Ymm) {
10181        self.emit(VCVTPH2UDQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10182    }
10183}
10184
10185impl<'a> Vcvtph2udqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
10186    fn vcvtph2udq_maskz(&mut self, op0: Zmm, op1: Mem) {
10187        self.emit(VCVTPH2UDQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10188    }
10189}
10190
10191/// `VCVTPH2UDQ_MASKZ_ER`.
10192///
10193/// Supported operand variants:
10194///
10195/// ```text
10196/// +---+----------+
10197/// | # | Operands |
10198/// +---+----------+
10199/// | 1 | Zmm, Ymm |
10200/// +---+----------+
10201/// ```
10202pub trait Vcvtph2udqMaskzErEmitter<A, B> {
10203    fn vcvtph2udq_maskz_er(&mut self, op0: A, op1: B);
10204}
10205
10206impl<'a> Vcvtph2udqMaskzErEmitter<Zmm, Ymm> for Assembler<'a> {
10207    fn vcvtph2udq_maskz_er(&mut self, op0: Zmm, op1: Ymm) {
10208        self.emit(VCVTPH2UDQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10209    }
10210}
10211
10212/// `VCVTPH2UQQ`.
10213///
10214/// Supported operand variants:
10215///
10216/// ```text
10217/// +---+----------+
10218/// | # | Operands |
10219/// +---+----------+
10220/// | 1 | Xmm, Mem |
10221/// | 2 | Xmm, Xmm |
10222/// | 3 | Ymm, Mem |
10223/// | 4 | Ymm, Xmm |
10224/// | 5 | Zmm, Mem |
10225/// | 6 | Zmm, Xmm |
10226/// +---+----------+
10227/// ```
10228pub trait Vcvtph2uqqEmitter<A, B> {
10229    fn vcvtph2uqq(&mut self, op0: A, op1: B);
10230}
10231
10232impl<'a> Vcvtph2uqqEmitter<Xmm, Xmm> for Assembler<'a> {
10233    fn vcvtph2uqq(&mut self, op0: Xmm, op1: Xmm) {
10234        self.emit(VCVTPH2UQQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10235    }
10236}
10237
10238impl<'a> Vcvtph2uqqEmitter<Xmm, Mem> for Assembler<'a> {
10239    fn vcvtph2uqq(&mut self, op0: Xmm, op1: Mem) {
10240        self.emit(VCVTPH2UQQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10241    }
10242}
10243
10244impl<'a> Vcvtph2uqqEmitter<Ymm, Xmm> for Assembler<'a> {
10245    fn vcvtph2uqq(&mut self, op0: Ymm, op1: Xmm) {
10246        self.emit(VCVTPH2UQQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10247    }
10248}
10249
10250impl<'a> Vcvtph2uqqEmitter<Ymm, Mem> for Assembler<'a> {
10251    fn vcvtph2uqq(&mut self, op0: Ymm, op1: Mem) {
10252        self.emit(VCVTPH2UQQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10253    }
10254}
10255
10256impl<'a> Vcvtph2uqqEmitter<Zmm, Xmm> for Assembler<'a> {
10257    fn vcvtph2uqq(&mut self, op0: Zmm, op1: Xmm) {
10258        self.emit(VCVTPH2UQQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10259    }
10260}
10261
10262impl<'a> Vcvtph2uqqEmitter<Zmm, Mem> for Assembler<'a> {
10263    fn vcvtph2uqq(&mut self, op0: Zmm, op1: Mem) {
10264        self.emit(VCVTPH2UQQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10265    }
10266}
10267
10268/// `VCVTPH2UQQ_ER`.
10269///
10270/// Supported operand variants:
10271///
10272/// ```text
10273/// +---+----------+
10274/// | # | Operands |
10275/// +---+----------+
10276/// | 1 | Zmm, Xmm |
10277/// +---+----------+
10278/// ```
10279pub trait Vcvtph2uqqErEmitter<A, B> {
10280    fn vcvtph2uqq_er(&mut self, op0: A, op1: B);
10281}
10282
10283impl<'a> Vcvtph2uqqErEmitter<Zmm, Xmm> for Assembler<'a> {
10284    fn vcvtph2uqq_er(&mut self, op0: Zmm, op1: Xmm) {
10285        self.emit(VCVTPH2UQQ512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10286    }
10287}
10288
10289/// `VCVTPH2UQQ_MASK`.
10290///
10291/// Supported operand variants:
10292///
10293/// ```text
10294/// +---+----------+
10295/// | # | Operands |
10296/// +---+----------+
10297/// | 1 | Xmm, Mem |
10298/// | 2 | Xmm, Xmm |
10299/// | 3 | Ymm, Mem |
10300/// | 4 | Ymm, Xmm |
10301/// | 5 | Zmm, Mem |
10302/// | 6 | Zmm, Xmm |
10303/// +---+----------+
10304/// ```
10305pub trait Vcvtph2uqqMaskEmitter<A, B> {
10306    fn vcvtph2uqq_mask(&mut self, op0: A, op1: B);
10307}
10308
10309impl<'a> Vcvtph2uqqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
10310    fn vcvtph2uqq_mask(&mut self, op0: Xmm, op1: Xmm) {
10311        self.emit(VCVTPH2UQQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10312    }
10313}
10314
10315impl<'a> Vcvtph2uqqMaskEmitter<Xmm, Mem> for Assembler<'a> {
10316    fn vcvtph2uqq_mask(&mut self, op0: Xmm, op1: Mem) {
10317        self.emit(VCVTPH2UQQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10318    }
10319}
10320
10321impl<'a> Vcvtph2uqqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
10322    fn vcvtph2uqq_mask(&mut self, op0: Ymm, op1: Xmm) {
10323        self.emit(VCVTPH2UQQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10324    }
10325}
10326
10327impl<'a> Vcvtph2uqqMaskEmitter<Ymm, Mem> for Assembler<'a> {
10328    fn vcvtph2uqq_mask(&mut self, op0: Ymm, op1: Mem) {
10329        self.emit(VCVTPH2UQQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10330    }
10331}
10332
10333impl<'a> Vcvtph2uqqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
10334    fn vcvtph2uqq_mask(&mut self, op0: Zmm, op1: Xmm) {
10335        self.emit(VCVTPH2UQQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10336    }
10337}
10338
10339impl<'a> Vcvtph2uqqMaskEmitter<Zmm, Mem> for Assembler<'a> {
10340    fn vcvtph2uqq_mask(&mut self, op0: Zmm, op1: Mem) {
10341        self.emit(VCVTPH2UQQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10342    }
10343}
10344
10345/// `VCVTPH2UQQ_MASK_ER`.
10346///
10347/// Supported operand variants:
10348///
10349/// ```text
10350/// +---+----------+
10351/// | # | Operands |
10352/// +---+----------+
10353/// | 1 | Zmm, Xmm |
10354/// +---+----------+
10355/// ```
10356pub trait Vcvtph2uqqMaskErEmitter<A, B> {
10357    fn vcvtph2uqq_mask_er(&mut self, op0: A, op1: B);
10358}
10359
10360impl<'a> Vcvtph2uqqMaskErEmitter<Zmm, Xmm> for Assembler<'a> {
10361    fn vcvtph2uqq_mask_er(&mut self, op0: Zmm, op1: Xmm) {
10362        self.emit(VCVTPH2UQQ512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10363    }
10364}
10365
10366/// `VCVTPH2UQQ_MASKZ`.
10367///
10368/// Supported operand variants:
10369///
10370/// ```text
10371/// +---+----------+
10372/// | # | Operands |
10373/// +---+----------+
10374/// | 1 | Xmm, Mem |
10375/// | 2 | Xmm, Xmm |
10376/// | 3 | Ymm, Mem |
10377/// | 4 | Ymm, Xmm |
10378/// | 5 | Zmm, Mem |
10379/// | 6 | Zmm, Xmm |
10380/// +---+----------+
10381/// ```
10382pub trait Vcvtph2uqqMaskzEmitter<A, B> {
10383    fn vcvtph2uqq_maskz(&mut self, op0: A, op1: B);
10384}
10385
10386impl<'a> Vcvtph2uqqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
10387    fn vcvtph2uqq_maskz(&mut self, op0: Xmm, op1: Xmm) {
10388        self.emit(VCVTPH2UQQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10389    }
10390}
10391
10392impl<'a> Vcvtph2uqqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
10393    fn vcvtph2uqq_maskz(&mut self, op0: Xmm, op1: Mem) {
10394        self.emit(VCVTPH2UQQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10395    }
10396}
10397
10398impl<'a> Vcvtph2uqqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
10399    fn vcvtph2uqq_maskz(&mut self, op0: Ymm, op1: Xmm) {
10400        self.emit(VCVTPH2UQQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10401    }
10402}
10403
10404impl<'a> Vcvtph2uqqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
10405    fn vcvtph2uqq_maskz(&mut self, op0: Ymm, op1: Mem) {
10406        self.emit(VCVTPH2UQQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10407    }
10408}
10409
10410impl<'a> Vcvtph2uqqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
10411    fn vcvtph2uqq_maskz(&mut self, op0: Zmm, op1: Xmm) {
10412        self.emit(VCVTPH2UQQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10413    }
10414}
10415
10416impl<'a> Vcvtph2uqqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
10417    fn vcvtph2uqq_maskz(&mut self, op0: Zmm, op1: Mem) {
10418        self.emit(VCVTPH2UQQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10419    }
10420}
10421
10422/// `VCVTPH2UQQ_MASKZ_ER`.
10423///
10424/// Supported operand variants:
10425///
10426/// ```text
10427/// +---+----------+
10428/// | # | Operands |
10429/// +---+----------+
10430/// | 1 | Zmm, Xmm |
10431/// +---+----------+
10432/// ```
10433pub trait Vcvtph2uqqMaskzErEmitter<A, B> {
10434    fn vcvtph2uqq_maskz_er(&mut self, op0: A, op1: B);
10435}
10436
10437impl<'a> Vcvtph2uqqMaskzErEmitter<Zmm, Xmm> for Assembler<'a> {
10438    fn vcvtph2uqq_maskz_er(&mut self, op0: Zmm, op1: Xmm) {
10439        self.emit(VCVTPH2UQQ512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10440    }
10441}
10442
10443/// `VCVTPH2UW`.
10444///
10445/// Supported operand variants:
10446///
10447/// ```text
10448/// +---+----------+
10449/// | # | Operands |
10450/// +---+----------+
10451/// | 1 | Xmm, Mem |
10452/// | 2 | Xmm, Xmm |
10453/// | 3 | Ymm, Mem |
10454/// | 4 | Ymm, Ymm |
10455/// | 5 | Zmm, Mem |
10456/// | 6 | Zmm, Zmm |
10457/// +---+----------+
10458/// ```
10459pub trait Vcvtph2uwEmitter<A, B> {
10460    fn vcvtph2uw(&mut self, op0: A, op1: B);
10461}
10462
10463impl<'a> Vcvtph2uwEmitter<Xmm, Xmm> for Assembler<'a> {
10464    fn vcvtph2uw(&mut self, op0: Xmm, op1: Xmm) {
10465        self.emit(VCVTPH2UW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10466    }
10467}
10468
10469impl<'a> Vcvtph2uwEmitter<Xmm, Mem> for Assembler<'a> {
10470    fn vcvtph2uw(&mut self, op0: Xmm, op1: Mem) {
10471        self.emit(VCVTPH2UW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10472    }
10473}
10474
10475impl<'a> Vcvtph2uwEmitter<Ymm, Ymm> for Assembler<'a> {
10476    fn vcvtph2uw(&mut self, op0: Ymm, op1: Ymm) {
10477        self.emit(VCVTPH2UW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10478    }
10479}
10480
10481impl<'a> Vcvtph2uwEmitter<Ymm, Mem> for Assembler<'a> {
10482    fn vcvtph2uw(&mut self, op0: Ymm, op1: Mem) {
10483        self.emit(VCVTPH2UW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10484    }
10485}
10486
10487impl<'a> Vcvtph2uwEmitter<Zmm, Zmm> for Assembler<'a> {
10488    fn vcvtph2uw(&mut self, op0: Zmm, op1: Zmm) {
10489        self.emit(VCVTPH2UW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10490    }
10491}
10492
10493impl<'a> Vcvtph2uwEmitter<Zmm, Mem> for Assembler<'a> {
10494    fn vcvtph2uw(&mut self, op0: Zmm, op1: Mem) {
10495        self.emit(VCVTPH2UW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10496    }
10497}
10498
10499/// `VCVTPH2UW_ER`.
10500///
10501/// Supported operand variants:
10502///
10503/// ```text
10504/// +---+----------+
10505/// | # | Operands |
10506/// +---+----------+
10507/// | 1 | Zmm, Zmm |
10508/// +---+----------+
10509/// ```
10510pub trait Vcvtph2uwErEmitter<A, B> {
10511    fn vcvtph2uw_er(&mut self, op0: A, op1: B);
10512}
10513
10514impl<'a> Vcvtph2uwErEmitter<Zmm, Zmm> for Assembler<'a> {
10515    fn vcvtph2uw_er(&mut self, op0: Zmm, op1: Zmm) {
10516        self.emit(VCVTPH2UW512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10517    }
10518}
10519
10520/// `VCVTPH2UW_MASK`.
10521///
10522/// Supported operand variants:
10523///
10524/// ```text
10525/// +---+----------+
10526/// | # | Operands |
10527/// +---+----------+
10528/// | 1 | Xmm, Mem |
10529/// | 2 | Xmm, Xmm |
10530/// | 3 | Ymm, Mem |
10531/// | 4 | Ymm, Ymm |
10532/// | 5 | Zmm, Mem |
10533/// | 6 | Zmm, Zmm |
10534/// +---+----------+
10535/// ```
10536pub trait Vcvtph2uwMaskEmitter<A, B> {
10537    fn vcvtph2uw_mask(&mut self, op0: A, op1: B);
10538}
10539
10540impl<'a> Vcvtph2uwMaskEmitter<Xmm, Xmm> for Assembler<'a> {
10541    fn vcvtph2uw_mask(&mut self, op0: Xmm, op1: Xmm) {
10542        self.emit(VCVTPH2UW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10543    }
10544}
10545
10546impl<'a> Vcvtph2uwMaskEmitter<Xmm, Mem> for Assembler<'a> {
10547    fn vcvtph2uw_mask(&mut self, op0: Xmm, op1: Mem) {
10548        self.emit(VCVTPH2UW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10549    }
10550}
10551
10552impl<'a> Vcvtph2uwMaskEmitter<Ymm, Ymm> for Assembler<'a> {
10553    fn vcvtph2uw_mask(&mut self, op0: Ymm, op1: Ymm) {
10554        self.emit(VCVTPH2UW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10555    }
10556}
10557
10558impl<'a> Vcvtph2uwMaskEmitter<Ymm, Mem> for Assembler<'a> {
10559    fn vcvtph2uw_mask(&mut self, op0: Ymm, op1: Mem) {
10560        self.emit(VCVTPH2UW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10561    }
10562}
10563
10564impl<'a> Vcvtph2uwMaskEmitter<Zmm, Zmm> for Assembler<'a> {
10565    fn vcvtph2uw_mask(&mut self, op0: Zmm, op1: Zmm) {
10566        self.emit(VCVTPH2UW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10567    }
10568}
10569
10570impl<'a> Vcvtph2uwMaskEmitter<Zmm, Mem> for Assembler<'a> {
10571    fn vcvtph2uw_mask(&mut self, op0: Zmm, op1: Mem) {
10572        self.emit(VCVTPH2UW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10573    }
10574}
10575
10576/// `VCVTPH2UW_MASK_ER`.
10577///
10578/// Supported operand variants:
10579///
10580/// ```text
10581/// +---+----------+
10582/// | # | Operands |
10583/// +---+----------+
10584/// | 1 | Zmm, Zmm |
10585/// +---+----------+
10586/// ```
10587pub trait Vcvtph2uwMaskErEmitter<A, B> {
10588    fn vcvtph2uw_mask_er(&mut self, op0: A, op1: B);
10589}
10590
10591impl<'a> Vcvtph2uwMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
10592    fn vcvtph2uw_mask_er(&mut self, op0: Zmm, op1: Zmm) {
10593        self.emit(VCVTPH2UW512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10594    }
10595}
10596
10597/// `VCVTPH2UW_MASKZ`.
10598///
10599/// Supported operand variants:
10600///
10601/// ```text
10602/// +---+----------+
10603/// | # | Operands |
10604/// +---+----------+
10605/// | 1 | Xmm, Mem |
10606/// | 2 | Xmm, Xmm |
10607/// | 3 | Ymm, Mem |
10608/// | 4 | Ymm, Ymm |
10609/// | 5 | Zmm, Mem |
10610/// | 6 | Zmm, Zmm |
10611/// +---+----------+
10612/// ```
10613pub trait Vcvtph2uwMaskzEmitter<A, B> {
10614    fn vcvtph2uw_maskz(&mut self, op0: A, op1: B);
10615}
10616
10617impl<'a> Vcvtph2uwMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
10618    fn vcvtph2uw_maskz(&mut self, op0: Xmm, op1: Xmm) {
10619        self.emit(VCVTPH2UW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10620    }
10621}
10622
10623impl<'a> Vcvtph2uwMaskzEmitter<Xmm, Mem> for Assembler<'a> {
10624    fn vcvtph2uw_maskz(&mut self, op0: Xmm, op1: Mem) {
10625        self.emit(VCVTPH2UW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10626    }
10627}
10628
10629impl<'a> Vcvtph2uwMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
10630    fn vcvtph2uw_maskz(&mut self, op0: Ymm, op1: Ymm) {
10631        self.emit(VCVTPH2UW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10632    }
10633}
10634
10635impl<'a> Vcvtph2uwMaskzEmitter<Ymm, Mem> for Assembler<'a> {
10636    fn vcvtph2uw_maskz(&mut self, op0: Ymm, op1: Mem) {
10637        self.emit(VCVTPH2UW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10638    }
10639}
10640
10641impl<'a> Vcvtph2uwMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
10642    fn vcvtph2uw_maskz(&mut self, op0: Zmm, op1: Zmm) {
10643        self.emit(VCVTPH2UW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10644    }
10645}
10646
10647impl<'a> Vcvtph2uwMaskzEmitter<Zmm, Mem> for Assembler<'a> {
10648    fn vcvtph2uw_maskz(&mut self, op0: Zmm, op1: Mem) {
10649        self.emit(VCVTPH2UW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10650    }
10651}
10652
10653/// `VCVTPH2UW_MASKZ_ER`.
10654///
10655/// Supported operand variants:
10656///
10657/// ```text
10658/// +---+----------+
10659/// | # | Operands |
10660/// +---+----------+
10661/// | 1 | Zmm, Zmm |
10662/// +---+----------+
10663/// ```
10664pub trait Vcvtph2uwMaskzErEmitter<A, B> {
10665    fn vcvtph2uw_maskz_er(&mut self, op0: A, op1: B);
10666}
10667
10668impl<'a> Vcvtph2uwMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
10669    fn vcvtph2uw_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
10670        self.emit(VCVTPH2UW512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10671    }
10672}
10673
10674/// `VCVTPH2W`.
10675///
10676/// Supported operand variants:
10677///
10678/// ```text
10679/// +---+----------+
10680/// | # | Operands |
10681/// +---+----------+
10682/// | 1 | Xmm, Mem |
10683/// | 2 | Xmm, Xmm |
10684/// | 3 | Ymm, Mem |
10685/// | 4 | Ymm, Ymm |
10686/// | 5 | Zmm, Mem |
10687/// | 6 | Zmm, Zmm |
10688/// +---+----------+
10689/// ```
10690pub trait Vcvtph2wEmitter<A, B> {
10691    fn vcvtph2w(&mut self, op0: A, op1: B);
10692}
10693
10694impl<'a> Vcvtph2wEmitter<Xmm, Xmm> for Assembler<'a> {
10695    fn vcvtph2w(&mut self, op0: Xmm, op1: Xmm) {
10696        self.emit(VCVTPH2W128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10697    }
10698}
10699
10700impl<'a> Vcvtph2wEmitter<Xmm, Mem> for Assembler<'a> {
10701    fn vcvtph2w(&mut self, op0: Xmm, op1: Mem) {
10702        self.emit(VCVTPH2W128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10703    }
10704}
10705
10706impl<'a> Vcvtph2wEmitter<Ymm, Ymm> for Assembler<'a> {
10707    fn vcvtph2w(&mut self, op0: Ymm, op1: Ymm) {
10708        self.emit(VCVTPH2W256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10709    }
10710}
10711
10712impl<'a> Vcvtph2wEmitter<Ymm, Mem> for Assembler<'a> {
10713    fn vcvtph2w(&mut self, op0: Ymm, op1: Mem) {
10714        self.emit(VCVTPH2W256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10715    }
10716}
10717
10718impl<'a> Vcvtph2wEmitter<Zmm, Zmm> for Assembler<'a> {
10719    fn vcvtph2w(&mut self, op0: Zmm, op1: Zmm) {
10720        self.emit(VCVTPH2W512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10721    }
10722}
10723
10724impl<'a> Vcvtph2wEmitter<Zmm, Mem> for Assembler<'a> {
10725    fn vcvtph2w(&mut self, op0: Zmm, op1: Mem) {
10726        self.emit(VCVTPH2W512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10727    }
10728}
10729
10730/// `VCVTPH2W_ER`.
10731///
10732/// Supported operand variants:
10733///
10734/// ```text
10735/// +---+----------+
10736/// | # | Operands |
10737/// +---+----------+
10738/// | 1 | Zmm, Zmm |
10739/// +---+----------+
10740/// ```
10741pub trait Vcvtph2wErEmitter<A, B> {
10742    fn vcvtph2w_er(&mut self, op0: A, op1: B);
10743}
10744
10745impl<'a> Vcvtph2wErEmitter<Zmm, Zmm> for Assembler<'a> {
10746    fn vcvtph2w_er(&mut self, op0: Zmm, op1: Zmm) {
10747        self.emit(VCVTPH2W512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10748    }
10749}
10750
10751/// `VCVTPH2W_MASK`.
10752///
10753/// Supported operand variants:
10754///
10755/// ```text
10756/// +---+----------+
10757/// | # | Operands |
10758/// +---+----------+
10759/// | 1 | Xmm, Mem |
10760/// | 2 | Xmm, Xmm |
10761/// | 3 | Ymm, Mem |
10762/// | 4 | Ymm, Ymm |
10763/// | 5 | Zmm, Mem |
10764/// | 6 | Zmm, Zmm |
10765/// +---+----------+
10766/// ```
10767pub trait Vcvtph2wMaskEmitter<A, B> {
10768    fn vcvtph2w_mask(&mut self, op0: A, op1: B);
10769}
10770
10771impl<'a> Vcvtph2wMaskEmitter<Xmm, Xmm> for Assembler<'a> {
10772    fn vcvtph2w_mask(&mut self, op0: Xmm, op1: Xmm) {
10773        self.emit(VCVTPH2W128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10774    }
10775}
10776
10777impl<'a> Vcvtph2wMaskEmitter<Xmm, Mem> for Assembler<'a> {
10778    fn vcvtph2w_mask(&mut self, op0: Xmm, op1: Mem) {
10779        self.emit(VCVTPH2W128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10780    }
10781}
10782
10783impl<'a> Vcvtph2wMaskEmitter<Ymm, Ymm> for Assembler<'a> {
10784    fn vcvtph2w_mask(&mut self, op0: Ymm, op1: Ymm) {
10785        self.emit(VCVTPH2W256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10786    }
10787}
10788
10789impl<'a> Vcvtph2wMaskEmitter<Ymm, Mem> for Assembler<'a> {
10790    fn vcvtph2w_mask(&mut self, op0: Ymm, op1: Mem) {
10791        self.emit(VCVTPH2W256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10792    }
10793}
10794
10795impl<'a> Vcvtph2wMaskEmitter<Zmm, Zmm> for Assembler<'a> {
10796    fn vcvtph2w_mask(&mut self, op0: Zmm, op1: Zmm) {
10797        self.emit(VCVTPH2W512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10798    }
10799}
10800
10801impl<'a> Vcvtph2wMaskEmitter<Zmm, Mem> for Assembler<'a> {
10802    fn vcvtph2w_mask(&mut self, op0: Zmm, op1: Mem) {
10803        self.emit(VCVTPH2W512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10804    }
10805}
10806
10807/// `VCVTPH2W_MASK_ER`.
10808///
10809/// Supported operand variants:
10810///
10811/// ```text
10812/// +---+----------+
10813/// | # | Operands |
10814/// +---+----------+
10815/// | 1 | Zmm, Zmm |
10816/// +---+----------+
10817/// ```
10818pub trait Vcvtph2wMaskErEmitter<A, B> {
10819    fn vcvtph2w_mask_er(&mut self, op0: A, op1: B);
10820}
10821
10822impl<'a> Vcvtph2wMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
10823    fn vcvtph2w_mask_er(&mut self, op0: Zmm, op1: Zmm) {
10824        self.emit(VCVTPH2W512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10825    }
10826}
10827
10828/// `VCVTPH2W_MASKZ`.
10829///
10830/// Supported operand variants:
10831///
10832/// ```text
10833/// +---+----------+
10834/// | # | Operands |
10835/// +---+----------+
10836/// | 1 | Xmm, Mem |
10837/// | 2 | Xmm, Xmm |
10838/// | 3 | Ymm, Mem |
10839/// | 4 | Ymm, Ymm |
10840/// | 5 | Zmm, Mem |
10841/// | 6 | Zmm, Zmm |
10842/// +---+----------+
10843/// ```
10844pub trait Vcvtph2wMaskzEmitter<A, B> {
10845    fn vcvtph2w_maskz(&mut self, op0: A, op1: B);
10846}
10847
10848impl<'a> Vcvtph2wMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
10849    fn vcvtph2w_maskz(&mut self, op0: Xmm, op1: Xmm) {
10850        self.emit(VCVTPH2W128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10851    }
10852}
10853
10854impl<'a> Vcvtph2wMaskzEmitter<Xmm, Mem> for Assembler<'a> {
10855    fn vcvtph2w_maskz(&mut self, op0: Xmm, op1: Mem) {
10856        self.emit(VCVTPH2W128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10857    }
10858}
10859
10860impl<'a> Vcvtph2wMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
10861    fn vcvtph2w_maskz(&mut self, op0: Ymm, op1: Ymm) {
10862        self.emit(VCVTPH2W256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10863    }
10864}
10865
10866impl<'a> Vcvtph2wMaskzEmitter<Ymm, Mem> for Assembler<'a> {
10867    fn vcvtph2w_maskz(&mut self, op0: Ymm, op1: Mem) {
10868        self.emit(VCVTPH2W256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10869    }
10870}
10871
10872impl<'a> Vcvtph2wMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
10873    fn vcvtph2w_maskz(&mut self, op0: Zmm, op1: Zmm) {
10874        self.emit(VCVTPH2W512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10875    }
10876}
10877
10878impl<'a> Vcvtph2wMaskzEmitter<Zmm, Mem> for Assembler<'a> {
10879    fn vcvtph2w_maskz(&mut self, op0: Zmm, op1: Mem) {
10880        self.emit(VCVTPH2W512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10881    }
10882}
10883
10884/// `VCVTPH2W_MASKZ_ER`.
10885///
10886/// Supported operand variants:
10887///
10888/// ```text
10889/// +---+----------+
10890/// | # | Operands |
10891/// +---+----------+
10892/// | 1 | Zmm, Zmm |
10893/// +---+----------+
10894/// ```
10895pub trait Vcvtph2wMaskzErEmitter<A, B> {
10896    fn vcvtph2w_maskz_er(&mut self, op0: A, op1: B);
10897}
10898
10899impl<'a> Vcvtph2wMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
10900    fn vcvtph2w_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
10901        self.emit(VCVTPH2W512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10902    }
10903}
10904
10905/// `VCVTPS2PHX` (VCVTPS2PHX). 
10906/// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
10907///
10908///
10909/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
10910///
10911/// Supported operand variants:
10912///
10913/// ```text
10914/// +---+----------+
10915/// | # | Operands |
10916/// +---+----------+
10917/// | 1 | Xmm, Mem |
10918/// | 2 | Xmm, Xmm |
10919/// | 3 | Xmm, Ymm |
10920/// | 4 | Ymm, Mem |
10921/// | 5 | Ymm, Zmm |
10922/// +---+----------+
10923/// ```
10924pub trait Vcvtps2phxEmitter<A, B> {
10925    fn vcvtps2phx(&mut self, op0: A, op1: B);
10926}
10927
10928impl<'a> Vcvtps2phxEmitter<Xmm, Xmm> for Assembler<'a> {
10929    fn vcvtps2phx(&mut self, op0: Xmm, op1: Xmm) {
10930        self.emit(VCVTPS2PHX128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10931    }
10932}
10933
10934impl<'a> Vcvtps2phxEmitter<Xmm, Mem> for Assembler<'a> {
10935    fn vcvtps2phx(&mut self, op0: Xmm, op1: Mem) {
10936        self.emit(VCVTPS2PHX128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10937    }
10938}
10939
10940impl<'a> Vcvtps2phxEmitter<Xmm, Ymm> for Assembler<'a> {
10941    fn vcvtps2phx(&mut self, op0: Xmm, op1: Ymm) {
10942        self.emit(VCVTPS2PHX256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10943    }
10944}
10945
10946impl<'a> Vcvtps2phxEmitter<Ymm, Zmm> for Assembler<'a> {
10947    fn vcvtps2phx(&mut self, op0: Ymm, op1: Zmm) {
10948        self.emit(VCVTPS2PHX512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10949    }
10950}
10951
10952impl<'a> Vcvtps2phxEmitter<Ymm, Mem> for Assembler<'a> {
10953    fn vcvtps2phx(&mut self, op0: Ymm, op1: Mem) {
10954        self.emit(VCVTPS2PHX512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10955    }
10956}
10957
10958/// `VCVTPS2PHX_ER` (VCVTPS2PHX). 
10959/// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
10960///
10961///
10962/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
10963///
10964/// Supported operand variants:
10965///
10966/// ```text
10967/// +---+----------+
10968/// | # | Operands |
10969/// +---+----------+
10970/// | 1 | Ymm, Zmm |
10971/// +---+----------+
10972/// ```
10973pub trait Vcvtps2phxErEmitter<A, B> {
10974    fn vcvtps2phx_er(&mut self, op0: A, op1: B);
10975}
10976
10977impl<'a> Vcvtps2phxErEmitter<Ymm, Zmm> for Assembler<'a> {
10978    fn vcvtps2phx_er(&mut self, op0: Ymm, op1: Zmm) {
10979        self.emit(VCVTPS2PHX512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
10980    }
10981}
10982
10983/// `VCVTPS2PHX_MASK` (VCVTPS2PHX). 
10984/// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
10985///
10986///
10987/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
10988///
10989/// Supported operand variants:
10990///
10991/// ```text
10992/// +---+----------+
10993/// | # | Operands |
10994/// +---+----------+
10995/// | 1 | Xmm, Mem |
10996/// | 2 | Xmm, Xmm |
10997/// | 3 | Xmm, Ymm |
10998/// | 4 | Ymm, Mem |
10999/// | 5 | Ymm, Zmm |
11000/// +---+----------+
11001/// ```
11002pub trait Vcvtps2phxMaskEmitter<A, B> {
11003    fn vcvtps2phx_mask(&mut self, op0: A, op1: B);
11004}
11005
11006impl<'a> Vcvtps2phxMaskEmitter<Xmm, Xmm> for Assembler<'a> {
11007    fn vcvtps2phx_mask(&mut self, op0: Xmm, op1: Xmm) {
11008        self.emit(VCVTPS2PHX128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11009    }
11010}
11011
11012impl<'a> Vcvtps2phxMaskEmitter<Xmm, Mem> for Assembler<'a> {
11013    fn vcvtps2phx_mask(&mut self, op0: Xmm, op1: Mem) {
11014        self.emit(VCVTPS2PHX128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11015    }
11016}
11017
11018impl<'a> Vcvtps2phxMaskEmitter<Xmm, Ymm> for Assembler<'a> {
11019    fn vcvtps2phx_mask(&mut self, op0: Xmm, op1: Ymm) {
11020        self.emit(VCVTPS2PHX256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11021    }
11022}
11023
11024impl<'a> Vcvtps2phxMaskEmitter<Ymm, Zmm> for Assembler<'a> {
11025    fn vcvtps2phx_mask(&mut self, op0: Ymm, op1: Zmm) {
11026        self.emit(VCVTPS2PHX512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11027    }
11028}
11029
11030impl<'a> Vcvtps2phxMaskEmitter<Ymm, Mem> for Assembler<'a> {
11031    fn vcvtps2phx_mask(&mut self, op0: Ymm, op1: Mem) {
11032        self.emit(VCVTPS2PHX512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11033    }
11034}
11035
11036/// `VCVTPS2PHX_MASK_ER` (VCVTPS2PHX). 
11037/// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
11038///
11039///
11040/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
11041///
11042/// Supported operand variants:
11043///
11044/// ```text
11045/// +---+----------+
11046/// | # | Operands |
11047/// +---+----------+
11048/// | 1 | Ymm, Zmm |
11049/// +---+----------+
11050/// ```
11051pub trait Vcvtps2phxMaskErEmitter<A, B> {
11052    fn vcvtps2phx_mask_er(&mut self, op0: A, op1: B);
11053}
11054
11055impl<'a> Vcvtps2phxMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
11056    fn vcvtps2phx_mask_er(&mut self, op0: Ymm, op1: Zmm) {
11057        self.emit(VCVTPS2PHX512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11058    }
11059}
11060
11061/// `VCVTPS2PHX_MASKZ` (VCVTPS2PHX). 
11062/// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
11063///
11064///
11065/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
11066///
11067/// Supported operand variants:
11068///
11069/// ```text
11070/// +---+----------+
11071/// | # | Operands |
11072/// +---+----------+
11073/// | 1 | Xmm, Mem |
11074/// | 2 | Xmm, Xmm |
11075/// | 3 | Xmm, Ymm |
11076/// | 4 | Ymm, Mem |
11077/// | 5 | Ymm, Zmm |
11078/// +---+----------+
11079/// ```
11080pub trait Vcvtps2phxMaskzEmitter<A, B> {
11081    fn vcvtps2phx_maskz(&mut self, op0: A, op1: B);
11082}
11083
11084impl<'a> Vcvtps2phxMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
11085    fn vcvtps2phx_maskz(&mut self, op0: Xmm, op1: Xmm) {
11086        self.emit(VCVTPS2PHX128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11087    }
11088}
11089
11090impl<'a> Vcvtps2phxMaskzEmitter<Xmm, Mem> for Assembler<'a> {
11091    fn vcvtps2phx_maskz(&mut self, op0: Xmm, op1: Mem) {
11092        self.emit(VCVTPS2PHX128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11093    }
11094}
11095
11096impl<'a> Vcvtps2phxMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
11097    fn vcvtps2phx_maskz(&mut self, op0: Xmm, op1: Ymm) {
11098        self.emit(VCVTPS2PHX256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11099    }
11100}
11101
11102impl<'a> Vcvtps2phxMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
11103    fn vcvtps2phx_maskz(&mut self, op0: Ymm, op1: Zmm) {
11104        self.emit(VCVTPS2PHX512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11105    }
11106}
11107
11108impl<'a> Vcvtps2phxMaskzEmitter<Ymm, Mem> for Assembler<'a> {
11109    fn vcvtps2phx_maskz(&mut self, op0: Ymm, op1: Mem) {
11110        self.emit(VCVTPS2PHX512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11111    }
11112}
11113
11114/// `VCVTPS2PHX_MASKZ_ER` (VCVTPS2PHX). 
11115/// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
11116///
11117///
11118/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
11119///
11120/// Supported operand variants:
11121///
11122/// ```text
11123/// +---+----------+
11124/// | # | Operands |
11125/// +---+----------+
11126/// | 1 | Ymm, Zmm |
11127/// +---+----------+
11128/// ```
11129pub trait Vcvtps2phxMaskzErEmitter<A, B> {
11130    fn vcvtps2phx_maskz_er(&mut self, op0: A, op1: B);
11131}
11132
11133impl<'a> Vcvtps2phxMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
11134    fn vcvtps2phx_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
11135        self.emit(VCVTPS2PHX512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11136    }
11137}
11138
11139/// `VCVTQQ2PH`.
11140///
11141/// Supported operand variants:
11142///
11143/// ```text
11144/// +---+----------+
11145/// | # | Operands |
11146/// +---+----------+
11147/// | 1 | Xmm, Mem |
11148/// | 2 | Xmm, Xmm |
11149/// | 3 | Xmm, Ymm |
11150/// | 4 | Xmm, Zmm |
11151/// +---+----------+
11152/// ```
11153pub trait Vcvtqq2phEmitter<A, B> {
11154    fn vcvtqq2ph(&mut self, op0: A, op1: B);
11155}
11156
11157impl<'a> Vcvtqq2phEmitter<Xmm, Xmm> for Assembler<'a> {
11158    fn vcvtqq2ph(&mut self, op0: Xmm, op1: Xmm) {
11159        self.emit(VCVTQQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11160    }
11161}
11162
11163impl<'a> Vcvtqq2phEmitter<Xmm, Mem> for Assembler<'a> {
11164    fn vcvtqq2ph(&mut self, op0: Xmm, op1: Mem) {
11165        self.emit(VCVTQQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11166    }
11167}
11168
11169impl<'a> Vcvtqq2phEmitter<Xmm, Ymm> for Assembler<'a> {
11170    fn vcvtqq2ph(&mut self, op0: Xmm, op1: Ymm) {
11171        self.emit(VCVTQQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11172    }
11173}
11174
11175impl<'a> Vcvtqq2phEmitter<Xmm, Zmm> for Assembler<'a> {
11176    fn vcvtqq2ph(&mut self, op0: Xmm, op1: Zmm) {
11177        self.emit(VCVTQQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11178    }
11179}
11180
11181/// `VCVTQQ2PH_ER`.
11182///
11183/// Supported operand variants:
11184///
11185/// ```text
11186/// +---+----------+
11187/// | # | Operands |
11188/// +---+----------+
11189/// | 1 | Xmm, Zmm |
11190/// +---+----------+
11191/// ```
11192pub trait Vcvtqq2phErEmitter<A, B> {
11193    fn vcvtqq2ph_er(&mut self, op0: A, op1: B);
11194}
11195
11196impl<'a> Vcvtqq2phErEmitter<Xmm, Zmm> for Assembler<'a> {
11197    fn vcvtqq2ph_er(&mut self, op0: Xmm, op1: Zmm) {
11198        self.emit(VCVTQQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11199    }
11200}
11201
11202/// `VCVTQQ2PH_MASK`.
11203///
11204/// Supported operand variants:
11205///
11206/// ```text
11207/// +---+----------+
11208/// | # | Operands |
11209/// +---+----------+
11210/// | 1 | Xmm, Mem |
11211/// | 2 | Xmm, Xmm |
11212/// | 3 | Xmm, Ymm |
11213/// | 4 | Xmm, Zmm |
11214/// +---+----------+
11215/// ```
11216pub trait Vcvtqq2phMaskEmitter<A, B> {
11217    fn vcvtqq2ph_mask(&mut self, op0: A, op1: B);
11218}
11219
11220impl<'a> Vcvtqq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
11221    fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
11222        self.emit(VCVTQQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11223    }
11224}
11225
11226impl<'a> Vcvtqq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
11227    fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
11228        self.emit(VCVTQQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11229    }
11230}
11231
11232impl<'a> Vcvtqq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
11233    fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
11234        self.emit(VCVTQQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11235    }
11236}
11237
11238impl<'a> Vcvtqq2phMaskEmitter<Xmm, Zmm> for Assembler<'a> {
11239    fn vcvtqq2ph_mask(&mut self, op0: Xmm, op1: Zmm) {
11240        self.emit(VCVTQQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11241    }
11242}
11243
11244/// `VCVTQQ2PH_MASK_ER`.
11245///
11246/// Supported operand variants:
11247///
11248/// ```text
11249/// +---+----------+
11250/// | # | Operands |
11251/// +---+----------+
11252/// | 1 | Xmm, Zmm |
11253/// +---+----------+
11254/// ```
11255pub trait Vcvtqq2phMaskErEmitter<A, B> {
11256    fn vcvtqq2ph_mask_er(&mut self, op0: A, op1: B);
11257}
11258
11259impl<'a> Vcvtqq2phMaskErEmitter<Xmm, Zmm> for Assembler<'a> {
11260    fn vcvtqq2ph_mask_er(&mut self, op0: Xmm, op1: Zmm) {
11261        self.emit(VCVTQQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11262    }
11263}
11264
11265/// `VCVTQQ2PH_MASKZ`.
11266///
11267/// Supported operand variants:
11268///
11269/// ```text
11270/// +---+----------+
11271/// | # | Operands |
11272/// +---+----------+
11273/// | 1 | Xmm, Mem |
11274/// | 2 | Xmm, Xmm |
11275/// | 3 | Xmm, Ymm |
11276/// | 4 | Xmm, Zmm |
11277/// +---+----------+
11278/// ```
11279pub trait Vcvtqq2phMaskzEmitter<A, B> {
11280    fn vcvtqq2ph_maskz(&mut self, op0: A, op1: B);
11281}
11282
11283impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
11284    fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
11285        self.emit(VCVTQQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11286    }
11287}
11288
11289impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
11290    fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
11291        self.emit(VCVTQQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11292    }
11293}
11294
11295impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
11296    fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
11297        self.emit(VCVTQQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11298    }
11299}
11300
11301impl<'a> Vcvtqq2phMaskzEmitter<Xmm, Zmm> for Assembler<'a> {
11302    fn vcvtqq2ph_maskz(&mut self, op0: Xmm, op1: Zmm) {
11303        self.emit(VCVTQQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11304    }
11305}
11306
11307/// `VCVTQQ2PH_MASKZ_ER`.
11308///
11309/// Supported operand variants:
11310///
11311/// ```text
11312/// +---+----------+
11313/// | # | Operands |
11314/// +---+----------+
11315/// | 1 | Xmm, Zmm |
11316/// +---+----------+
11317/// ```
11318pub trait Vcvtqq2phMaskzErEmitter<A, B> {
11319    fn vcvtqq2ph_maskz_er(&mut self, op0: A, op1: B);
11320}
11321
11322impl<'a> Vcvtqq2phMaskzErEmitter<Xmm, Zmm> for Assembler<'a> {
11323    fn vcvtqq2ph_maskz_er(&mut self, op0: Xmm, op1: Zmm) {
11324        self.emit(VCVTQQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11325    }
11326}
11327
11328/// `VCVTSD2SH`.
11329///
11330/// Supported operand variants:
11331///
11332/// ```text
11333/// +---+---------------+
11334/// | # | Operands      |
11335/// +---+---------------+
11336/// | 1 | Xmm, Xmm, Mem |
11337/// | 2 | Xmm, Xmm, Xmm |
11338/// +---+---------------+
11339/// ```
11340pub trait Vcvtsd2shEmitter<A, B, C> {
11341    fn vcvtsd2sh(&mut self, op0: A, op1: B, op2: C);
11342}
11343
11344impl<'a> Vcvtsd2shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11345    fn vcvtsd2sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11346        self.emit(VCVTSD2SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11347    }
11348}
11349
11350impl<'a> Vcvtsd2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11351    fn vcvtsd2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11352        self.emit(VCVTSD2SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11353    }
11354}
11355
11356/// `VCVTSD2SH_ER`.
11357///
11358/// Supported operand variants:
11359///
11360/// ```text
11361/// +---+---------------+
11362/// | # | Operands      |
11363/// +---+---------------+
11364/// | 1 | Xmm, Xmm, Xmm |
11365/// +---+---------------+
11366/// ```
11367pub trait Vcvtsd2shErEmitter<A, B, C> {
11368    fn vcvtsd2sh_er(&mut self, op0: A, op1: B, op2: C);
11369}
11370
11371impl<'a> Vcvtsd2shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11372    fn vcvtsd2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11373        self.emit(VCVTSD2SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11374    }
11375}
11376
11377/// `VCVTSD2SH_MASK`.
11378///
11379/// Supported operand variants:
11380///
11381/// ```text
11382/// +---+---------------+
11383/// | # | Operands      |
11384/// +---+---------------+
11385/// | 1 | Xmm, Xmm, Mem |
11386/// | 2 | Xmm, Xmm, Xmm |
11387/// +---+---------------+
11388/// ```
11389pub trait Vcvtsd2shMaskEmitter<A, B, C> {
11390    fn vcvtsd2sh_mask(&mut self, op0: A, op1: B, op2: C);
11391}
11392
11393impl<'a> Vcvtsd2shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11394    fn vcvtsd2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11395        self.emit(VCVTSD2SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11396    }
11397}
11398
11399impl<'a> Vcvtsd2shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11400    fn vcvtsd2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11401        self.emit(VCVTSD2SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11402    }
11403}
11404
11405/// `VCVTSD2SH_MASK_ER`.
11406///
11407/// Supported operand variants:
11408///
11409/// ```text
11410/// +---+---------------+
11411/// | # | Operands      |
11412/// +---+---------------+
11413/// | 1 | Xmm, Xmm, Xmm |
11414/// +---+---------------+
11415/// ```
11416pub trait Vcvtsd2shMaskErEmitter<A, B, C> {
11417    fn vcvtsd2sh_mask_er(&mut self, op0: A, op1: B, op2: C);
11418}
11419
11420impl<'a> Vcvtsd2shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11421    fn vcvtsd2sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11422        self.emit(VCVTSD2SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11423    }
11424}
11425
11426/// `VCVTSD2SH_MASKZ`.
11427///
11428/// Supported operand variants:
11429///
11430/// ```text
11431/// +---+---------------+
11432/// | # | Operands      |
11433/// +---+---------------+
11434/// | 1 | Xmm, Xmm, Mem |
11435/// | 2 | Xmm, Xmm, Xmm |
11436/// +---+---------------+
11437/// ```
11438pub trait Vcvtsd2shMaskzEmitter<A, B, C> {
11439    fn vcvtsd2sh_maskz(&mut self, op0: A, op1: B, op2: C);
11440}
11441
11442impl<'a> Vcvtsd2shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11443    fn vcvtsd2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11444        self.emit(VCVTSD2SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11445    }
11446}
11447
11448impl<'a> Vcvtsd2shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11449    fn vcvtsd2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11450        self.emit(VCVTSD2SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11451    }
11452}
11453
11454/// `VCVTSD2SH_MASKZ_ER`.
11455///
11456/// Supported operand variants:
11457///
11458/// ```text
11459/// +---+---------------+
11460/// | # | Operands      |
11461/// +---+---------------+
11462/// | 1 | Xmm, Xmm, Xmm |
11463/// +---+---------------+
11464/// ```
11465pub trait Vcvtsd2shMaskzErEmitter<A, B, C> {
11466    fn vcvtsd2sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
11467}
11468
11469impl<'a> Vcvtsd2shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11470    fn vcvtsd2sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11471        self.emit(VCVTSD2SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11472    }
11473}
11474
11475/// `VCVTSH2SD`.
11476///
11477/// Supported operand variants:
11478///
11479/// ```text
11480/// +---+---------------+
11481/// | # | Operands      |
11482/// +---+---------------+
11483/// | 1 | Xmm, Xmm, Mem |
11484/// | 2 | Xmm, Xmm, Xmm |
11485/// +---+---------------+
11486/// ```
11487pub trait Vcvtsh2sdEmitter<A, B, C> {
11488    fn vcvtsh2sd(&mut self, op0: A, op1: B, op2: C);
11489}
11490
11491impl<'a> Vcvtsh2sdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11492    fn vcvtsh2sd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11493        self.emit(VCVTSH2SDRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11494    }
11495}
11496
11497impl<'a> Vcvtsh2sdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11498    fn vcvtsh2sd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11499        self.emit(VCVTSH2SDRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11500    }
11501}
11502
11503/// `VCVTSH2SD_MASK`.
11504///
11505/// Supported operand variants:
11506///
11507/// ```text
11508/// +---+---------------+
11509/// | # | Operands      |
11510/// +---+---------------+
11511/// | 1 | Xmm, Xmm, Mem |
11512/// | 2 | Xmm, Xmm, Xmm |
11513/// +---+---------------+
11514/// ```
11515pub trait Vcvtsh2sdMaskEmitter<A, B, C> {
11516    fn vcvtsh2sd_mask(&mut self, op0: A, op1: B, op2: C);
11517}
11518
11519impl<'a> Vcvtsh2sdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11520    fn vcvtsh2sd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11521        self.emit(VCVTSH2SDRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11522    }
11523}
11524
11525impl<'a> Vcvtsh2sdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11526    fn vcvtsh2sd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11527        self.emit(VCVTSH2SDRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11528    }
11529}
11530
11531/// `VCVTSH2SD_MASK_SAE`.
11532///
11533/// Supported operand variants:
11534///
11535/// ```text
11536/// +---+---------------+
11537/// | # | Operands      |
11538/// +---+---------------+
11539/// | 1 | Xmm, Xmm, Xmm |
11540/// +---+---------------+
11541/// ```
11542pub trait Vcvtsh2sdMaskSaeEmitter<A, B, C> {
11543    fn vcvtsh2sd_mask_sae(&mut self, op0: A, op1: B, op2: C);
11544}
11545
11546impl<'a> Vcvtsh2sdMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11547    fn vcvtsh2sd_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11548        self.emit(VCVTSH2SDRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11549    }
11550}
11551
11552/// `VCVTSH2SD_MASKZ`.
11553///
11554/// Supported operand variants:
11555///
11556/// ```text
11557/// +---+---------------+
11558/// | # | Operands      |
11559/// +---+---------------+
11560/// | 1 | Xmm, Xmm, Mem |
11561/// | 2 | Xmm, Xmm, Xmm |
11562/// +---+---------------+
11563/// ```
11564pub trait Vcvtsh2sdMaskzEmitter<A, B, C> {
11565    fn vcvtsh2sd_maskz(&mut self, op0: A, op1: B, op2: C);
11566}
11567
11568impl<'a> Vcvtsh2sdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11569    fn vcvtsh2sd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11570        self.emit(VCVTSH2SDRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11571    }
11572}
11573
11574impl<'a> Vcvtsh2sdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11575    fn vcvtsh2sd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11576        self.emit(VCVTSH2SDRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11577    }
11578}
11579
11580/// `VCVTSH2SD_MASKZ_SAE`.
11581///
11582/// Supported operand variants:
11583///
11584/// ```text
11585/// +---+---------------+
11586/// | # | Operands      |
11587/// +---+---------------+
11588/// | 1 | Xmm, Xmm, Xmm |
11589/// +---+---------------+
11590/// ```
11591pub trait Vcvtsh2sdMaskzSaeEmitter<A, B, C> {
11592    fn vcvtsh2sd_maskz_sae(&mut self, op0: A, op1: B, op2: C);
11593}
11594
11595impl<'a> Vcvtsh2sdMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11596    fn vcvtsh2sd_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11597        self.emit(VCVTSH2SDRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11598    }
11599}
11600
11601/// `VCVTSH2SD_SAE`.
11602///
11603/// Supported operand variants:
11604///
11605/// ```text
11606/// +---+---------------+
11607/// | # | Operands      |
11608/// +---+---------------+
11609/// | 1 | Xmm, Xmm, Xmm |
11610/// +---+---------------+
11611/// ```
11612pub trait Vcvtsh2sdSaeEmitter<A, B, C> {
11613    fn vcvtsh2sd_sae(&mut self, op0: A, op1: B, op2: C);
11614}
11615
11616impl<'a> Vcvtsh2sdSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11617    fn vcvtsh2sd_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11618        self.emit(VCVTSH2SDRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11619    }
11620}
11621
11622/// `VCVTSH2SI`.
11623///
11624/// Supported operand variants:
11625///
11626/// ```text
11627/// +---+----------+
11628/// | # | Operands |
11629/// +---+----------+
11630/// | 1 | Gpd, Mem |
11631/// | 2 | Gpd, Xmm |
11632/// | 3 | Gpq, Mem |
11633/// | 4 | Gpq, Xmm |
11634/// +---+----------+
11635/// ```
11636pub trait Vcvtsh2siEmitter<A, B> {
11637    fn vcvtsh2si(&mut self, op0: A, op1: B);
11638}
11639
11640impl<'a> Vcvtsh2siEmitter<Gpd, Xmm> for Assembler<'a> {
11641    fn vcvtsh2si(&mut self, op0: Gpd, op1: Xmm) {
11642        self.emit(VCVTSH2SI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11643    }
11644}
11645
11646impl<'a> Vcvtsh2siEmitter<Gpd, Mem> for Assembler<'a> {
11647    fn vcvtsh2si(&mut self, op0: Gpd, op1: Mem) {
11648        self.emit(VCVTSH2SI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11649    }
11650}
11651
11652impl<'a> Vcvtsh2siEmitter<Gpq, Xmm> for Assembler<'a> {
11653    fn vcvtsh2si(&mut self, op0: Gpq, op1: Xmm) {
11654        self.emit(VCVTSH2SI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11655    }
11656}
11657
11658impl<'a> Vcvtsh2siEmitter<Gpq, Mem> for Assembler<'a> {
11659    fn vcvtsh2si(&mut self, op0: Gpq, op1: Mem) {
11660        self.emit(VCVTSH2SI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11661    }
11662}
11663
11664/// `VCVTSH2SI_ER`.
11665///
11666/// Supported operand variants:
11667///
11668/// ```text
11669/// +---+----------+
11670/// | # | Operands |
11671/// +---+----------+
11672/// | 1 | Gpd, Xmm |
11673/// | 2 | Gpq, Xmm |
11674/// +---+----------+
11675/// ```
11676pub trait Vcvtsh2siErEmitter<A, B> {
11677    fn vcvtsh2si_er(&mut self, op0: A, op1: B);
11678}
11679
11680impl<'a> Vcvtsh2siErEmitter<Gpd, Xmm> for Assembler<'a> {
11681    fn vcvtsh2si_er(&mut self, op0: Gpd, op1: Xmm) {
11682        self.emit(VCVTSH2SI32RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11683    }
11684}
11685
11686impl<'a> Vcvtsh2siErEmitter<Gpq, Xmm> for Assembler<'a> {
11687    fn vcvtsh2si_er(&mut self, op0: Gpq, op1: Xmm) {
11688        self.emit(VCVTSH2SI64RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11689    }
11690}
11691
11692/// `VCVTSH2SS`.
11693///
11694/// Supported operand variants:
11695///
11696/// ```text
11697/// +---+---------------+
11698/// | # | Operands      |
11699/// +---+---------------+
11700/// | 1 | Xmm, Xmm, Mem |
11701/// | 2 | Xmm, Xmm, Xmm |
11702/// +---+---------------+
11703/// ```
11704pub trait Vcvtsh2ssEmitter<A, B, C> {
11705    fn vcvtsh2ss(&mut self, op0: A, op1: B, op2: C);
11706}
11707
11708impl<'a> Vcvtsh2ssEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11709    fn vcvtsh2ss(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11710        self.emit(VCVTSH2SSRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11711    }
11712}
11713
11714impl<'a> Vcvtsh2ssEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11715    fn vcvtsh2ss(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11716        self.emit(VCVTSH2SSRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11717    }
11718}
11719
11720/// `VCVTSH2SS_MASK`.
11721///
11722/// Supported operand variants:
11723///
11724/// ```text
11725/// +---+---------------+
11726/// | # | Operands      |
11727/// +---+---------------+
11728/// | 1 | Xmm, Xmm, Mem |
11729/// | 2 | Xmm, Xmm, Xmm |
11730/// +---+---------------+
11731/// ```
11732pub trait Vcvtsh2ssMaskEmitter<A, B, C> {
11733    fn vcvtsh2ss_mask(&mut self, op0: A, op1: B, op2: C);
11734}
11735
11736impl<'a> Vcvtsh2ssMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11737    fn vcvtsh2ss_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11738        self.emit(VCVTSH2SSRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11739    }
11740}
11741
11742impl<'a> Vcvtsh2ssMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11743    fn vcvtsh2ss_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11744        self.emit(VCVTSH2SSRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11745    }
11746}
11747
11748/// `VCVTSH2SS_MASK_SAE`.
11749///
11750/// Supported operand variants:
11751///
11752/// ```text
11753/// +---+---------------+
11754/// | # | Operands      |
11755/// +---+---------------+
11756/// | 1 | Xmm, Xmm, Xmm |
11757/// +---+---------------+
11758/// ```
11759pub trait Vcvtsh2ssMaskSaeEmitter<A, B, C> {
11760    fn vcvtsh2ss_mask_sae(&mut self, op0: A, op1: B, op2: C);
11761}
11762
11763impl<'a> Vcvtsh2ssMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11764    fn vcvtsh2ss_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11765        self.emit(VCVTSH2SSRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11766    }
11767}
11768
11769/// `VCVTSH2SS_MASKZ`.
11770///
11771/// Supported operand variants:
11772///
11773/// ```text
11774/// +---+---------------+
11775/// | # | Operands      |
11776/// +---+---------------+
11777/// | 1 | Xmm, Xmm, Mem |
11778/// | 2 | Xmm, Xmm, Xmm |
11779/// +---+---------------+
11780/// ```
11781pub trait Vcvtsh2ssMaskzEmitter<A, B, C> {
11782    fn vcvtsh2ss_maskz(&mut self, op0: A, op1: B, op2: C);
11783}
11784
11785impl<'a> Vcvtsh2ssMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11786    fn vcvtsh2ss_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11787        self.emit(VCVTSH2SSRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11788    }
11789}
11790
11791impl<'a> Vcvtsh2ssMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11792    fn vcvtsh2ss_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11793        self.emit(VCVTSH2SSRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11794    }
11795}
11796
11797/// `VCVTSH2SS_MASKZ_SAE`.
11798///
11799/// Supported operand variants:
11800///
11801/// ```text
11802/// +---+---------------+
11803/// | # | Operands      |
11804/// +---+---------------+
11805/// | 1 | Xmm, Xmm, Xmm |
11806/// +---+---------------+
11807/// ```
11808pub trait Vcvtsh2ssMaskzSaeEmitter<A, B, C> {
11809    fn vcvtsh2ss_maskz_sae(&mut self, op0: A, op1: B, op2: C);
11810}
11811
11812impl<'a> Vcvtsh2ssMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11813    fn vcvtsh2ss_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11814        self.emit(VCVTSH2SSRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11815    }
11816}
11817
11818/// `VCVTSH2SS_SAE`.
11819///
11820/// Supported operand variants:
11821///
11822/// ```text
11823/// +---+---------------+
11824/// | # | Operands      |
11825/// +---+---------------+
11826/// | 1 | Xmm, Xmm, Xmm |
11827/// +---+---------------+
11828/// ```
11829pub trait Vcvtsh2ssSaeEmitter<A, B, C> {
11830    fn vcvtsh2ss_sae(&mut self, op0: A, op1: B, op2: C);
11831}
11832
11833impl<'a> Vcvtsh2ssSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11834    fn vcvtsh2ss_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11835        self.emit(VCVTSH2SSRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11836    }
11837}
11838
11839/// `VCVTSH2USI`.
11840///
11841/// Supported operand variants:
11842///
11843/// ```text
11844/// +---+----------+
11845/// | # | Operands |
11846/// +---+----------+
11847/// | 1 | Gpd, Mem |
11848/// | 2 | Gpd, Xmm |
11849/// | 3 | Gpq, Mem |
11850/// | 4 | Gpq, Xmm |
11851/// +---+----------+
11852/// ```
11853pub trait Vcvtsh2usiEmitter<A, B> {
11854    fn vcvtsh2usi(&mut self, op0: A, op1: B);
11855}
11856
11857impl<'a> Vcvtsh2usiEmitter<Gpd, Xmm> for Assembler<'a> {
11858    fn vcvtsh2usi(&mut self, op0: Gpd, op1: Xmm) {
11859        self.emit(VCVTSH2USI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11860    }
11861}
11862
11863impl<'a> Vcvtsh2usiEmitter<Gpd, Mem> for Assembler<'a> {
11864    fn vcvtsh2usi(&mut self, op0: Gpd, op1: Mem) {
11865        self.emit(VCVTSH2USI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11866    }
11867}
11868
11869impl<'a> Vcvtsh2usiEmitter<Gpq, Xmm> for Assembler<'a> {
11870    fn vcvtsh2usi(&mut self, op0: Gpq, op1: Xmm) {
11871        self.emit(VCVTSH2USI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11872    }
11873}
11874
11875impl<'a> Vcvtsh2usiEmitter<Gpq, Mem> for Assembler<'a> {
11876    fn vcvtsh2usi(&mut self, op0: Gpq, op1: Mem) {
11877        self.emit(VCVTSH2USI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11878    }
11879}
11880
11881/// `VCVTSH2USI_ER`.
11882///
11883/// Supported operand variants:
11884///
11885/// ```text
11886/// +---+----------+
11887/// | # | Operands |
11888/// +---+----------+
11889/// | 1 | Gpd, Xmm |
11890/// | 2 | Gpq, Xmm |
11891/// +---+----------+
11892/// ```
11893pub trait Vcvtsh2usiErEmitter<A, B> {
11894    fn vcvtsh2usi_er(&mut self, op0: A, op1: B);
11895}
11896
11897impl<'a> Vcvtsh2usiErEmitter<Gpd, Xmm> for Assembler<'a> {
11898    fn vcvtsh2usi_er(&mut self, op0: Gpd, op1: Xmm) {
11899        self.emit(VCVTSH2USI32RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11900    }
11901}
11902
11903impl<'a> Vcvtsh2usiErEmitter<Gpq, Xmm> for Assembler<'a> {
11904    fn vcvtsh2usi_er(&mut self, op0: Gpq, op1: Xmm) {
11905        self.emit(VCVTSH2USI64RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
11906    }
11907}
11908
11909/// `VCVTSI2SH`.
11910///
11911/// Supported operand variants:
11912///
11913/// ```text
11914/// +---+---------------+
11915/// | # | Operands      |
11916/// +---+---------------+
11917/// | 1 | Xmm, Xmm, Gpd |
11918/// | 2 | Xmm, Xmm, Gpq |
11919/// | 3 | Xmm, Xmm, Mem |
11920/// +---+---------------+
11921/// ```
11922pub trait Vcvtsi2shEmitter<A, B, C> {
11923    fn vcvtsi2sh(&mut self, op0: A, op1: B, op2: C);
11924}
11925
11926impl<'a> Vcvtsi2shEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
11927    fn vcvtsi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
11928        self.emit(VCVTSI2SH32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11929    }
11930}
11931
11932impl<'a> Vcvtsi2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11933    fn vcvtsi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11934        self.emit(VCVTSI2SH32RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11935    }
11936}
11937
11938impl<'a> Vcvtsi2shEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
11939    fn vcvtsi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
11940        self.emit(VCVTSI2SH64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11941    }
11942}
11943
11944/// `VCVTSI2SH_ER`.
11945///
11946/// Supported operand variants:
11947///
11948/// ```text
11949/// +---+---------------+
11950/// | # | Operands      |
11951/// +---+---------------+
11952/// | 1 | Xmm, Xmm, Gpd |
11953/// | 2 | Xmm, Xmm, Gpq |
11954/// +---+---------------+
11955/// ```
11956pub trait Vcvtsi2shErEmitter<A, B, C> {
11957    fn vcvtsi2sh_er(&mut self, op0: A, op1: B, op2: C);
11958}
11959
11960impl<'a> Vcvtsi2shErEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
11961    fn vcvtsi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
11962        self.emit(VCVTSI2SH32RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11963    }
11964}
11965
11966impl<'a> Vcvtsi2shErEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
11967    fn vcvtsi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
11968        self.emit(VCVTSI2SH64RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11969    }
11970}
11971
11972/// `VCVTSS2SH`.
11973///
11974/// Supported operand variants:
11975///
11976/// ```text
11977/// +---+---------------+
11978/// | # | Operands      |
11979/// +---+---------------+
11980/// | 1 | Xmm, Xmm, Mem |
11981/// | 2 | Xmm, Xmm, Xmm |
11982/// +---+---------------+
11983/// ```
11984pub trait Vcvtss2shEmitter<A, B, C> {
11985    fn vcvtss2sh(&mut self, op0: A, op1: B, op2: C);
11986}
11987
11988impl<'a> Vcvtss2shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11989    fn vcvtss2sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11990        self.emit(VCVTSS2SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11991    }
11992}
11993
11994impl<'a> Vcvtss2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11995    fn vcvtss2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11996        self.emit(VCVTSS2SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11997    }
11998}
11999
12000/// `VCVTSS2SH_ER`.
12001///
12002/// Supported operand variants:
12003///
12004/// ```text
12005/// +---+---------------+
12006/// | # | Operands      |
12007/// +---+---------------+
12008/// | 1 | Xmm, Xmm, Xmm |
12009/// +---+---------------+
12010/// ```
12011pub trait Vcvtss2shErEmitter<A, B, C> {
12012    fn vcvtss2sh_er(&mut self, op0: A, op1: B, op2: C);
12013}
12014
12015impl<'a> Vcvtss2shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12016    fn vcvtss2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12017        self.emit(VCVTSS2SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12018    }
12019}
12020
12021/// `VCVTSS2SH_MASK`.
12022///
12023/// Supported operand variants:
12024///
12025/// ```text
12026/// +---+---------------+
12027/// | # | Operands      |
12028/// +---+---------------+
12029/// | 1 | Xmm, Xmm, Mem |
12030/// | 2 | Xmm, Xmm, Xmm |
12031/// +---+---------------+
12032/// ```
12033pub trait Vcvtss2shMaskEmitter<A, B, C> {
12034    fn vcvtss2sh_mask(&mut self, op0: A, op1: B, op2: C);
12035}
12036
12037impl<'a> Vcvtss2shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12038    fn vcvtss2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12039        self.emit(VCVTSS2SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12040    }
12041}
12042
12043impl<'a> Vcvtss2shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12044    fn vcvtss2sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12045        self.emit(VCVTSS2SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12046    }
12047}
12048
12049/// `VCVTSS2SH_MASK_ER`.
12050///
12051/// Supported operand variants:
12052///
12053/// ```text
12054/// +---+---------------+
12055/// | # | Operands      |
12056/// +---+---------------+
12057/// | 1 | Xmm, Xmm, Xmm |
12058/// +---+---------------+
12059/// ```
12060pub trait Vcvtss2shMaskErEmitter<A, B, C> {
12061    fn vcvtss2sh_mask_er(&mut self, op0: A, op1: B, op2: C);
12062}
12063
12064impl<'a> Vcvtss2shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12065    fn vcvtss2sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12066        self.emit(VCVTSS2SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12067    }
12068}
12069
12070/// `VCVTSS2SH_MASKZ`.
12071///
12072/// Supported operand variants:
12073///
12074/// ```text
12075/// +---+---------------+
12076/// | # | Operands      |
12077/// +---+---------------+
12078/// | 1 | Xmm, Xmm, Mem |
12079/// | 2 | Xmm, Xmm, Xmm |
12080/// +---+---------------+
12081/// ```
12082pub trait Vcvtss2shMaskzEmitter<A, B, C> {
12083    fn vcvtss2sh_maskz(&mut self, op0: A, op1: B, op2: C);
12084}
12085
12086impl<'a> Vcvtss2shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12087    fn vcvtss2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12088        self.emit(VCVTSS2SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12089    }
12090}
12091
12092impl<'a> Vcvtss2shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12093    fn vcvtss2sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12094        self.emit(VCVTSS2SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12095    }
12096}
12097
12098/// `VCVTSS2SH_MASKZ_ER`.
12099///
12100/// Supported operand variants:
12101///
12102/// ```text
12103/// +---+---------------+
12104/// | # | Operands      |
12105/// +---+---------------+
12106/// | 1 | Xmm, Xmm, Xmm |
12107/// +---+---------------+
12108/// ```
12109pub trait Vcvtss2shMaskzErEmitter<A, B, C> {
12110    fn vcvtss2sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
12111}
12112
12113impl<'a> Vcvtss2shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12114    fn vcvtss2sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12115        self.emit(VCVTSS2SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12116    }
12117}
12118
12119/// `VCVTTPH2DQ`.
12120///
12121/// Supported operand variants:
12122///
12123/// ```text
12124/// +---+----------+
12125/// | # | Operands |
12126/// +---+----------+
12127/// | 1 | Xmm, Mem |
12128/// | 2 | Xmm, Xmm |
12129/// | 3 | Ymm, Mem |
12130/// | 4 | Ymm, Xmm |
12131/// | 5 | Zmm, Mem |
12132/// | 6 | Zmm, Ymm |
12133/// +---+----------+
12134/// ```
12135pub trait Vcvttph2dqEmitter<A, B> {
12136    fn vcvttph2dq(&mut self, op0: A, op1: B);
12137}
12138
12139impl<'a> Vcvttph2dqEmitter<Xmm, Xmm> for Assembler<'a> {
12140    fn vcvttph2dq(&mut self, op0: Xmm, op1: Xmm) {
12141        self.emit(VCVTTPH2DQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12142    }
12143}
12144
12145impl<'a> Vcvttph2dqEmitter<Xmm, Mem> for Assembler<'a> {
12146    fn vcvttph2dq(&mut self, op0: Xmm, op1: Mem) {
12147        self.emit(VCVTTPH2DQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12148    }
12149}
12150
12151impl<'a> Vcvttph2dqEmitter<Ymm, Xmm> for Assembler<'a> {
12152    fn vcvttph2dq(&mut self, op0: Ymm, op1: Xmm) {
12153        self.emit(VCVTTPH2DQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12154    }
12155}
12156
12157impl<'a> Vcvttph2dqEmitter<Ymm, Mem> for Assembler<'a> {
12158    fn vcvttph2dq(&mut self, op0: Ymm, op1: Mem) {
12159        self.emit(VCVTTPH2DQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12160    }
12161}
12162
12163impl<'a> Vcvttph2dqEmitter<Zmm, Ymm> for Assembler<'a> {
12164    fn vcvttph2dq(&mut self, op0: Zmm, op1: Ymm) {
12165        self.emit(VCVTTPH2DQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12166    }
12167}
12168
12169impl<'a> Vcvttph2dqEmitter<Zmm, Mem> for Assembler<'a> {
12170    fn vcvttph2dq(&mut self, op0: Zmm, op1: Mem) {
12171        self.emit(VCVTTPH2DQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12172    }
12173}
12174
12175/// `VCVTTPH2DQ_MASK`.
12176///
12177/// Supported operand variants:
12178///
12179/// ```text
12180/// +---+----------+
12181/// | # | Operands |
12182/// +---+----------+
12183/// | 1 | Xmm, Mem |
12184/// | 2 | Xmm, Xmm |
12185/// | 3 | Ymm, Mem |
12186/// | 4 | Ymm, Xmm |
12187/// | 5 | Zmm, Mem |
12188/// | 6 | Zmm, Ymm |
12189/// +---+----------+
12190/// ```
12191pub trait Vcvttph2dqMaskEmitter<A, B> {
12192    fn vcvttph2dq_mask(&mut self, op0: A, op1: B);
12193}
12194
12195impl<'a> Vcvttph2dqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
12196    fn vcvttph2dq_mask(&mut self, op0: Xmm, op1: Xmm) {
12197        self.emit(VCVTTPH2DQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12198    }
12199}
12200
12201impl<'a> Vcvttph2dqMaskEmitter<Xmm, Mem> for Assembler<'a> {
12202    fn vcvttph2dq_mask(&mut self, op0: Xmm, op1: Mem) {
12203        self.emit(VCVTTPH2DQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12204    }
12205}
12206
12207impl<'a> Vcvttph2dqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
12208    fn vcvttph2dq_mask(&mut self, op0: Ymm, op1: Xmm) {
12209        self.emit(VCVTTPH2DQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12210    }
12211}
12212
12213impl<'a> Vcvttph2dqMaskEmitter<Ymm, Mem> for Assembler<'a> {
12214    fn vcvttph2dq_mask(&mut self, op0: Ymm, op1: Mem) {
12215        self.emit(VCVTTPH2DQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12216    }
12217}
12218
12219impl<'a> Vcvttph2dqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
12220    fn vcvttph2dq_mask(&mut self, op0: Zmm, op1: Ymm) {
12221        self.emit(VCVTTPH2DQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12222    }
12223}
12224
12225impl<'a> Vcvttph2dqMaskEmitter<Zmm, Mem> for Assembler<'a> {
12226    fn vcvttph2dq_mask(&mut self, op0: Zmm, op1: Mem) {
12227        self.emit(VCVTTPH2DQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12228    }
12229}
12230
12231/// `VCVTTPH2DQ_MASK_SAE`.
12232///
12233/// Supported operand variants:
12234///
12235/// ```text
12236/// +---+----------+
12237/// | # | Operands |
12238/// +---+----------+
12239/// | 1 | Zmm, Ymm |
12240/// +---+----------+
12241/// ```
12242pub trait Vcvttph2dqMaskSaeEmitter<A, B> {
12243    fn vcvttph2dq_mask_sae(&mut self, op0: A, op1: B);
12244}
12245
12246impl<'a> Vcvttph2dqMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
12247    fn vcvttph2dq_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
12248        self.emit(VCVTTPH2DQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12249    }
12250}
12251
12252/// `VCVTTPH2DQ_MASKZ`.
12253///
12254/// Supported operand variants:
12255///
12256/// ```text
12257/// +---+----------+
12258/// | # | Operands |
12259/// +---+----------+
12260/// | 1 | Xmm, Mem |
12261/// | 2 | Xmm, Xmm |
12262/// | 3 | Ymm, Mem |
12263/// | 4 | Ymm, Xmm |
12264/// | 5 | Zmm, Mem |
12265/// | 6 | Zmm, Ymm |
12266/// +---+----------+
12267/// ```
12268pub trait Vcvttph2dqMaskzEmitter<A, B> {
12269    fn vcvttph2dq_maskz(&mut self, op0: A, op1: B);
12270}
12271
12272impl<'a> Vcvttph2dqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
12273    fn vcvttph2dq_maskz(&mut self, op0: Xmm, op1: Xmm) {
12274        self.emit(VCVTTPH2DQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12275    }
12276}
12277
12278impl<'a> Vcvttph2dqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
12279    fn vcvttph2dq_maskz(&mut self, op0: Xmm, op1: Mem) {
12280        self.emit(VCVTTPH2DQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12281    }
12282}
12283
12284impl<'a> Vcvttph2dqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
12285    fn vcvttph2dq_maskz(&mut self, op0: Ymm, op1: Xmm) {
12286        self.emit(VCVTTPH2DQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12287    }
12288}
12289
12290impl<'a> Vcvttph2dqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
12291    fn vcvttph2dq_maskz(&mut self, op0: Ymm, op1: Mem) {
12292        self.emit(VCVTTPH2DQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12293    }
12294}
12295
12296impl<'a> Vcvttph2dqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
12297    fn vcvttph2dq_maskz(&mut self, op0: Zmm, op1: Ymm) {
12298        self.emit(VCVTTPH2DQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12299    }
12300}
12301
12302impl<'a> Vcvttph2dqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
12303    fn vcvttph2dq_maskz(&mut self, op0: Zmm, op1: Mem) {
12304        self.emit(VCVTTPH2DQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12305    }
12306}
12307
12308/// `VCVTTPH2DQ_MASKZ_SAE`.
12309///
12310/// Supported operand variants:
12311///
12312/// ```text
12313/// +---+----------+
12314/// | # | Operands |
12315/// +---+----------+
12316/// | 1 | Zmm, Ymm |
12317/// +---+----------+
12318/// ```
12319pub trait Vcvttph2dqMaskzSaeEmitter<A, B> {
12320    fn vcvttph2dq_maskz_sae(&mut self, op0: A, op1: B);
12321}
12322
12323impl<'a> Vcvttph2dqMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
12324    fn vcvttph2dq_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
12325        self.emit(VCVTTPH2DQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12326    }
12327}
12328
12329/// `VCVTTPH2DQ_SAE`.
12330///
12331/// Supported operand variants:
12332///
12333/// ```text
12334/// +---+----------+
12335/// | # | Operands |
12336/// +---+----------+
12337/// | 1 | Zmm, Ymm |
12338/// +---+----------+
12339/// ```
12340pub trait Vcvttph2dqSaeEmitter<A, B> {
12341    fn vcvttph2dq_sae(&mut self, op0: A, op1: B);
12342}
12343
12344impl<'a> Vcvttph2dqSaeEmitter<Zmm, Ymm> for Assembler<'a> {
12345    fn vcvttph2dq_sae(&mut self, op0: Zmm, op1: Ymm) {
12346        self.emit(VCVTTPH2DQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12347    }
12348}
12349
12350/// `VCVTTPH2QQ`.
12351///
12352/// Supported operand variants:
12353///
12354/// ```text
12355/// +---+----------+
12356/// | # | Operands |
12357/// +---+----------+
12358/// | 1 | Xmm, Mem |
12359/// | 2 | Xmm, Xmm |
12360/// | 3 | Ymm, Mem |
12361/// | 4 | Ymm, Xmm |
12362/// | 5 | Zmm, Mem |
12363/// | 6 | Zmm, Xmm |
12364/// +---+----------+
12365/// ```
12366pub trait Vcvttph2qqEmitter<A, B> {
12367    fn vcvttph2qq(&mut self, op0: A, op1: B);
12368}
12369
12370impl<'a> Vcvttph2qqEmitter<Xmm, Xmm> for Assembler<'a> {
12371    fn vcvttph2qq(&mut self, op0: Xmm, op1: Xmm) {
12372        self.emit(VCVTTPH2QQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12373    }
12374}
12375
12376impl<'a> Vcvttph2qqEmitter<Xmm, Mem> for Assembler<'a> {
12377    fn vcvttph2qq(&mut self, op0: Xmm, op1: Mem) {
12378        self.emit(VCVTTPH2QQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12379    }
12380}
12381
12382impl<'a> Vcvttph2qqEmitter<Ymm, Xmm> for Assembler<'a> {
12383    fn vcvttph2qq(&mut self, op0: Ymm, op1: Xmm) {
12384        self.emit(VCVTTPH2QQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12385    }
12386}
12387
12388impl<'a> Vcvttph2qqEmitter<Ymm, Mem> for Assembler<'a> {
12389    fn vcvttph2qq(&mut self, op0: Ymm, op1: Mem) {
12390        self.emit(VCVTTPH2QQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12391    }
12392}
12393
12394impl<'a> Vcvttph2qqEmitter<Zmm, Xmm> for Assembler<'a> {
12395    fn vcvttph2qq(&mut self, op0: Zmm, op1: Xmm) {
12396        self.emit(VCVTTPH2QQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12397    }
12398}
12399
12400impl<'a> Vcvttph2qqEmitter<Zmm, Mem> for Assembler<'a> {
12401    fn vcvttph2qq(&mut self, op0: Zmm, op1: Mem) {
12402        self.emit(VCVTTPH2QQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12403    }
12404}
12405
12406/// `VCVTTPH2QQ_MASK`.
12407///
12408/// Supported operand variants:
12409///
12410/// ```text
12411/// +---+----------+
12412/// | # | Operands |
12413/// +---+----------+
12414/// | 1 | Xmm, Mem |
12415/// | 2 | Xmm, Xmm |
12416/// | 3 | Ymm, Mem |
12417/// | 4 | Ymm, Xmm |
12418/// | 5 | Zmm, Mem |
12419/// | 6 | Zmm, Xmm |
12420/// +---+----------+
12421/// ```
12422pub trait Vcvttph2qqMaskEmitter<A, B> {
12423    fn vcvttph2qq_mask(&mut self, op0: A, op1: B);
12424}
12425
12426impl<'a> Vcvttph2qqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
12427    fn vcvttph2qq_mask(&mut self, op0: Xmm, op1: Xmm) {
12428        self.emit(VCVTTPH2QQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12429    }
12430}
12431
12432impl<'a> Vcvttph2qqMaskEmitter<Xmm, Mem> for Assembler<'a> {
12433    fn vcvttph2qq_mask(&mut self, op0: Xmm, op1: Mem) {
12434        self.emit(VCVTTPH2QQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12435    }
12436}
12437
12438impl<'a> Vcvttph2qqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
12439    fn vcvttph2qq_mask(&mut self, op0: Ymm, op1: Xmm) {
12440        self.emit(VCVTTPH2QQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12441    }
12442}
12443
12444impl<'a> Vcvttph2qqMaskEmitter<Ymm, Mem> for Assembler<'a> {
12445    fn vcvttph2qq_mask(&mut self, op0: Ymm, op1: Mem) {
12446        self.emit(VCVTTPH2QQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12447    }
12448}
12449
12450impl<'a> Vcvttph2qqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
12451    fn vcvttph2qq_mask(&mut self, op0: Zmm, op1: Xmm) {
12452        self.emit(VCVTTPH2QQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12453    }
12454}
12455
12456impl<'a> Vcvttph2qqMaskEmitter<Zmm, Mem> for Assembler<'a> {
12457    fn vcvttph2qq_mask(&mut self, op0: Zmm, op1: Mem) {
12458        self.emit(VCVTTPH2QQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12459    }
12460}
12461
12462/// `VCVTTPH2QQ_MASK_SAE`.
12463///
12464/// Supported operand variants:
12465///
12466/// ```text
12467/// +---+----------+
12468/// | # | Operands |
12469/// +---+----------+
12470/// | 1 | Zmm, Xmm |
12471/// +---+----------+
12472/// ```
12473pub trait Vcvttph2qqMaskSaeEmitter<A, B> {
12474    fn vcvttph2qq_mask_sae(&mut self, op0: A, op1: B);
12475}
12476
12477impl<'a> Vcvttph2qqMaskSaeEmitter<Zmm, Xmm> for Assembler<'a> {
12478    fn vcvttph2qq_mask_sae(&mut self, op0: Zmm, op1: Xmm) {
12479        self.emit(VCVTTPH2QQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12480    }
12481}
12482
12483/// `VCVTTPH2QQ_MASKZ`.
12484///
12485/// Supported operand variants:
12486///
12487/// ```text
12488/// +---+----------+
12489/// | # | Operands |
12490/// +---+----------+
12491/// | 1 | Xmm, Mem |
12492/// | 2 | Xmm, Xmm |
12493/// | 3 | Ymm, Mem |
12494/// | 4 | Ymm, Xmm |
12495/// | 5 | Zmm, Mem |
12496/// | 6 | Zmm, Xmm |
12497/// +---+----------+
12498/// ```
12499pub trait Vcvttph2qqMaskzEmitter<A, B> {
12500    fn vcvttph2qq_maskz(&mut self, op0: A, op1: B);
12501}
12502
12503impl<'a> Vcvttph2qqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
12504    fn vcvttph2qq_maskz(&mut self, op0: Xmm, op1: Xmm) {
12505        self.emit(VCVTTPH2QQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12506    }
12507}
12508
12509impl<'a> Vcvttph2qqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
12510    fn vcvttph2qq_maskz(&mut self, op0: Xmm, op1: Mem) {
12511        self.emit(VCVTTPH2QQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12512    }
12513}
12514
12515impl<'a> Vcvttph2qqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
12516    fn vcvttph2qq_maskz(&mut self, op0: Ymm, op1: Xmm) {
12517        self.emit(VCVTTPH2QQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12518    }
12519}
12520
12521impl<'a> Vcvttph2qqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
12522    fn vcvttph2qq_maskz(&mut self, op0: Ymm, op1: Mem) {
12523        self.emit(VCVTTPH2QQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12524    }
12525}
12526
12527impl<'a> Vcvttph2qqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
12528    fn vcvttph2qq_maskz(&mut self, op0: Zmm, op1: Xmm) {
12529        self.emit(VCVTTPH2QQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12530    }
12531}
12532
12533impl<'a> Vcvttph2qqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
12534    fn vcvttph2qq_maskz(&mut self, op0: Zmm, op1: Mem) {
12535        self.emit(VCVTTPH2QQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12536    }
12537}
12538
12539/// `VCVTTPH2QQ_MASKZ_SAE`.
12540///
12541/// Supported operand variants:
12542///
12543/// ```text
12544/// +---+----------+
12545/// | # | Operands |
12546/// +---+----------+
12547/// | 1 | Zmm, Xmm |
12548/// +---+----------+
12549/// ```
12550pub trait Vcvttph2qqMaskzSaeEmitter<A, B> {
12551    fn vcvttph2qq_maskz_sae(&mut self, op0: A, op1: B);
12552}
12553
12554impl<'a> Vcvttph2qqMaskzSaeEmitter<Zmm, Xmm> for Assembler<'a> {
12555    fn vcvttph2qq_maskz_sae(&mut self, op0: Zmm, op1: Xmm) {
12556        self.emit(VCVTTPH2QQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12557    }
12558}
12559
12560/// `VCVTTPH2QQ_SAE`.
12561///
12562/// Supported operand variants:
12563///
12564/// ```text
12565/// +---+----------+
12566/// | # | Operands |
12567/// +---+----------+
12568/// | 1 | Zmm, Xmm |
12569/// +---+----------+
12570/// ```
12571pub trait Vcvttph2qqSaeEmitter<A, B> {
12572    fn vcvttph2qq_sae(&mut self, op0: A, op1: B);
12573}
12574
12575impl<'a> Vcvttph2qqSaeEmitter<Zmm, Xmm> for Assembler<'a> {
12576    fn vcvttph2qq_sae(&mut self, op0: Zmm, op1: Xmm) {
12577        self.emit(VCVTTPH2QQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12578    }
12579}
12580
12581/// `VCVTTPH2UDQ`.
12582///
12583/// Supported operand variants:
12584///
12585/// ```text
12586/// +---+----------+
12587/// | # | Operands |
12588/// +---+----------+
12589/// | 1 | Xmm, Mem |
12590/// | 2 | Xmm, Xmm |
12591/// | 3 | Ymm, Mem |
12592/// | 4 | Ymm, Xmm |
12593/// | 5 | Zmm, Mem |
12594/// | 6 | Zmm, Ymm |
12595/// +---+----------+
12596/// ```
12597pub trait Vcvttph2udqEmitter<A, B> {
12598    fn vcvttph2udq(&mut self, op0: A, op1: B);
12599}
12600
12601impl<'a> Vcvttph2udqEmitter<Xmm, Xmm> for Assembler<'a> {
12602    fn vcvttph2udq(&mut self, op0: Xmm, op1: Xmm) {
12603        self.emit(VCVTTPH2UDQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12604    }
12605}
12606
12607impl<'a> Vcvttph2udqEmitter<Xmm, Mem> for Assembler<'a> {
12608    fn vcvttph2udq(&mut self, op0: Xmm, op1: Mem) {
12609        self.emit(VCVTTPH2UDQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12610    }
12611}
12612
12613impl<'a> Vcvttph2udqEmitter<Ymm, Xmm> for Assembler<'a> {
12614    fn vcvttph2udq(&mut self, op0: Ymm, op1: Xmm) {
12615        self.emit(VCVTTPH2UDQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12616    }
12617}
12618
12619impl<'a> Vcvttph2udqEmitter<Ymm, Mem> for Assembler<'a> {
12620    fn vcvttph2udq(&mut self, op0: Ymm, op1: Mem) {
12621        self.emit(VCVTTPH2UDQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12622    }
12623}
12624
12625impl<'a> Vcvttph2udqEmitter<Zmm, Ymm> for Assembler<'a> {
12626    fn vcvttph2udq(&mut self, op0: Zmm, op1: Ymm) {
12627        self.emit(VCVTTPH2UDQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12628    }
12629}
12630
12631impl<'a> Vcvttph2udqEmitter<Zmm, Mem> for Assembler<'a> {
12632    fn vcvttph2udq(&mut self, op0: Zmm, op1: Mem) {
12633        self.emit(VCVTTPH2UDQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12634    }
12635}
12636
12637/// `VCVTTPH2UDQ_MASK`.
12638///
12639/// Supported operand variants:
12640///
12641/// ```text
12642/// +---+----------+
12643/// | # | Operands |
12644/// +---+----------+
12645/// | 1 | Xmm, Mem |
12646/// | 2 | Xmm, Xmm |
12647/// | 3 | Ymm, Mem |
12648/// | 4 | Ymm, Xmm |
12649/// | 5 | Zmm, Mem |
12650/// | 6 | Zmm, Ymm |
12651/// +---+----------+
12652/// ```
12653pub trait Vcvttph2udqMaskEmitter<A, B> {
12654    fn vcvttph2udq_mask(&mut self, op0: A, op1: B);
12655}
12656
12657impl<'a> Vcvttph2udqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
12658    fn vcvttph2udq_mask(&mut self, op0: Xmm, op1: Xmm) {
12659        self.emit(VCVTTPH2UDQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12660    }
12661}
12662
12663impl<'a> Vcvttph2udqMaskEmitter<Xmm, Mem> for Assembler<'a> {
12664    fn vcvttph2udq_mask(&mut self, op0: Xmm, op1: Mem) {
12665        self.emit(VCVTTPH2UDQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12666    }
12667}
12668
12669impl<'a> Vcvttph2udqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
12670    fn vcvttph2udq_mask(&mut self, op0: Ymm, op1: Xmm) {
12671        self.emit(VCVTTPH2UDQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12672    }
12673}
12674
12675impl<'a> Vcvttph2udqMaskEmitter<Ymm, Mem> for Assembler<'a> {
12676    fn vcvttph2udq_mask(&mut self, op0: Ymm, op1: Mem) {
12677        self.emit(VCVTTPH2UDQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12678    }
12679}
12680
12681impl<'a> Vcvttph2udqMaskEmitter<Zmm, Ymm> for Assembler<'a> {
12682    fn vcvttph2udq_mask(&mut self, op0: Zmm, op1: Ymm) {
12683        self.emit(VCVTTPH2UDQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12684    }
12685}
12686
12687impl<'a> Vcvttph2udqMaskEmitter<Zmm, Mem> for Assembler<'a> {
12688    fn vcvttph2udq_mask(&mut self, op0: Zmm, op1: Mem) {
12689        self.emit(VCVTTPH2UDQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12690    }
12691}
12692
12693/// `VCVTTPH2UDQ_MASK_SAE`.
12694///
12695/// Supported operand variants:
12696///
12697/// ```text
12698/// +---+----------+
12699/// | # | Operands |
12700/// +---+----------+
12701/// | 1 | Zmm, Ymm |
12702/// +---+----------+
12703/// ```
12704pub trait Vcvttph2udqMaskSaeEmitter<A, B> {
12705    fn vcvttph2udq_mask_sae(&mut self, op0: A, op1: B);
12706}
12707
12708impl<'a> Vcvttph2udqMaskSaeEmitter<Zmm, Ymm> for Assembler<'a> {
12709    fn vcvttph2udq_mask_sae(&mut self, op0: Zmm, op1: Ymm) {
12710        self.emit(VCVTTPH2UDQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12711    }
12712}
12713
12714/// `VCVTTPH2UDQ_MASKZ`.
12715///
12716/// Supported operand variants:
12717///
12718/// ```text
12719/// +---+----------+
12720/// | # | Operands |
12721/// +---+----------+
12722/// | 1 | Xmm, Mem |
12723/// | 2 | Xmm, Xmm |
12724/// | 3 | Ymm, Mem |
12725/// | 4 | Ymm, Xmm |
12726/// | 5 | Zmm, Mem |
12727/// | 6 | Zmm, Ymm |
12728/// +---+----------+
12729/// ```
12730pub trait Vcvttph2udqMaskzEmitter<A, B> {
12731    fn vcvttph2udq_maskz(&mut self, op0: A, op1: B);
12732}
12733
12734impl<'a> Vcvttph2udqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
12735    fn vcvttph2udq_maskz(&mut self, op0: Xmm, op1: Xmm) {
12736        self.emit(VCVTTPH2UDQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12737    }
12738}
12739
12740impl<'a> Vcvttph2udqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
12741    fn vcvttph2udq_maskz(&mut self, op0: Xmm, op1: Mem) {
12742        self.emit(VCVTTPH2UDQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12743    }
12744}
12745
12746impl<'a> Vcvttph2udqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
12747    fn vcvttph2udq_maskz(&mut self, op0: Ymm, op1: Xmm) {
12748        self.emit(VCVTTPH2UDQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12749    }
12750}
12751
12752impl<'a> Vcvttph2udqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
12753    fn vcvttph2udq_maskz(&mut self, op0: Ymm, op1: Mem) {
12754        self.emit(VCVTTPH2UDQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12755    }
12756}
12757
12758impl<'a> Vcvttph2udqMaskzEmitter<Zmm, Ymm> for Assembler<'a> {
12759    fn vcvttph2udq_maskz(&mut self, op0: Zmm, op1: Ymm) {
12760        self.emit(VCVTTPH2UDQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12761    }
12762}
12763
12764impl<'a> Vcvttph2udqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
12765    fn vcvttph2udq_maskz(&mut self, op0: Zmm, op1: Mem) {
12766        self.emit(VCVTTPH2UDQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12767    }
12768}
12769
12770/// `VCVTTPH2UDQ_MASKZ_SAE`.
12771///
12772/// Supported operand variants:
12773///
12774/// ```text
12775/// +---+----------+
12776/// | # | Operands |
12777/// +---+----------+
12778/// | 1 | Zmm, Ymm |
12779/// +---+----------+
12780/// ```
12781pub trait Vcvttph2udqMaskzSaeEmitter<A, B> {
12782    fn vcvttph2udq_maskz_sae(&mut self, op0: A, op1: B);
12783}
12784
12785impl<'a> Vcvttph2udqMaskzSaeEmitter<Zmm, Ymm> for Assembler<'a> {
12786    fn vcvttph2udq_maskz_sae(&mut self, op0: Zmm, op1: Ymm) {
12787        self.emit(VCVTTPH2UDQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12788    }
12789}
12790
12791/// `VCVTTPH2UDQ_SAE`.
12792///
12793/// Supported operand variants:
12794///
12795/// ```text
12796/// +---+----------+
12797/// | # | Operands |
12798/// +---+----------+
12799/// | 1 | Zmm, Ymm |
12800/// +---+----------+
12801/// ```
12802pub trait Vcvttph2udqSaeEmitter<A, B> {
12803    fn vcvttph2udq_sae(&mut self, op0: A, op1: B);
12804}
12805
12806impl<'a> Vcvttph2udqSaeEmitter<Zmm, Ymm> for Assembler<'a> {
12807    fn vcvttph2udq_sae(&mut self, op0: Zmm, op1: Ymm) {
12808        self.emit(VCVTTPH2UDQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12809    }
12810}
12811
12812/// `VCVTTPH2UQQ`.
12813///
12814/// Supported operand variants:
12815///
12816/// ```text
12817/// +---+----------+
12818/// | # | Operands |
12819/// +---+----------+
12820/// | 1 | Xmm, Mem |
12821/// | 2 | Xmm, Xmm |
12822/// | 3 | Ymm, Mem |
12823/// | 4 | Ymm, Xmm |
12824/// | 5 | Zmm, Mem |
12825/// | 6 | Zmm, Xmm |
12826/// +---+----------+
12827/// ```
12828pub trait Vcvttph2uqqEmitter<A, B> {
12829    fn vcvttph2uqq(&mut self, op0: A, op1: B);
12830}
12831
12832impl<'a> Vcvttph2uqqEmitter<Xmm, Xmm> for Assembler<'a> {
12833    fn vcvttph2uqq(&mut self, op0: Xmm, op1: Xmm) {
12834        self.emit(VCVTTPH2UQQ128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12835    }
12836}
12837
12838impl<'a> Vcvttph2uqqEmitter<Xmm, Mem> for Assembler<'a> {
12839    fn vcvttph2uqq(&mut self, op0: Xmm, op1: Mem) {
12840        self.emit(VCVTTPH2UQQ128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12841    }
12842}
12843
12844impl<'a> Vcvttph2uqqEmitter<Ymm, Xmm> for Assembler<'a> {
12845    fn vcvttph2uqq(&mut self, op0: Ymm, op1: Xmm) {
12846        self.emit(VCVTTPH2UQQ256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12847    }
12848}
12849
12850impl<'a> Vcvttph2uqqEmitter<Ymm, Mem> for Assembler<'a> {
12851    fn vcvttph2uqq(&mut self, op0: Ymm, op1: Mem) {
12852        self.emit(VCVTTPH2UQQ256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12853    }
12854}
12855
12856impl<'a> Vcvttph2uqqEmitter<Zmm, Xmm> for Assembler<'a> {
12857    fn vcvttph2uqq(&mut self, op0: Zmm, op1: Xmm) {
12858        self.emit(VCVTTPH2UQQ512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12859    }
12860}
12861
12862impl<'a> Vcvttph2uqqEmitter<Zmm, Mem> for Assembler<'a> {
12863    fn vcvttph2uqq(&mut self, op0: Zmm, op1: Mem) {
12864        self.emit(VCVTTPH2UQQ512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12865    }
12866}
12867
12868/// `VCVTTPH2UQQ_MASK`.
12869///
12870/// Supported operand variants:
12871///
12872/// ```text
12873/// +---+----------+
12874/// | # | Operands |
12875/// +---+----------+
12876/// | 1 | Xmm, Mem |
12877/// | 2 | Xmm, Xmm |
12878/// | 3 | Ymm, Mem |
12879/// | 4 | Ymm, Xmm |
12880/// | 5 | Zmm, Mem |
12881/// | 6 | Zmm, Xmm |
12882/// +---+----------+
12883/// ```
12884pub trait Vcvttph2uqqMaskEmitter<A, B> {
12885    fn vcvttph2uqq_mask(&mut self, op0: A, op1: B);
12886}
12887
12888impl<'a> Vcvttph2uqqMaskEmitter<Xmm, Xmm> for Assembler<'a> {
12889    fn vcvttph2uqq_mask(&mut self, op0: Xmm, op1: Xmm) {
12890        self.emit(VCVTTPH2UQQ128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12891    }
12892}
12893
12894impl<'a> Vcvttph2uqqMaskEmitter<Xmm, Mem> for Assembler<'a> {
12895    fn vcvttph2uqq_mask(&mut self, op0: Xmm, op1: Mem) {
12896        self.emit(VCVTTPH2UQQ128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12897    }
12898}
12899
12900impl<'a> Vcvttph2uqqMaskEmitter<Ymm, Xmm> for Assembler<'a> {
12901    fn vcvttph2uqq_mask(&mut self, op0: Ymm, op1: Xmm) {
12902        self.emit(VCVTTPH2UQQ256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12903    }
12904}
12905
12906impl<'a> Vcvttph2uqqMaskEmitter<Ymm, Mem> for Assembler<'a> {
12907    fn vcvttph2uqq_mask(&mut self, op0: Ymm, op1: Mem) {
12908        self.emit(VCVTTPH2UQQ256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12909    }
12910}
12911
12912impl<'a> Vcvttph2uqqMaskEmitter<Zmm, Xmm> for Assembler<'a> {
12913    fn vcvttph2uqq_mask(&mut self, op0: Zmm, op1: Xmm) {
12914        self.emit(VCVTTPH2UQQ512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12915    }
12916}
12917
12918impl<'a> Vcvttph2uqqMaskEmitter<Zmm, Mem> for Assembler<'a> {
12919    fn vcvttph2uqq_mask(&mut self, op0: Zmm, op1: Mem) {
12920        self.emit(VCVTTPH2UQQ512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12921    }
12922}
12923
12924/// `VCVTTPH2UQQ_MASK_SAE`.
12925///
12926/// Supported operand variants:
12927///
12928/// ```text
12929/// +---+----------+
12930/// | # | Operands |
12931/// +---+----------+
12932/// | 1 | Zmm, Xmm |
12933/// +---+----------+
12934/// ```
12935pub trait Vcvttph2uqqMaskSaeEmitter<A, B> {
12936    fn vcvttph2uqq_mask_sae(&mut self, op0: A, op1: B);
12937}
12938
12939impl<'a> Vcvttph2uqqMaskSaeEmitter<Zmm, Xmm> for Assembler<'a> {
12940    fn vcvttph2uqq_mask_sae(&mut self, op0: Zmm, op1: Xmm) {
12941        self.emit(VCVTTPH2UQQ512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12942    }
12943}
12944
12945/// `VCVTTPH2UQQ_MASKZ`.
12946///
12947/// Supported operand variants:
12948///
12949/// ```text
12950/// +---+----------+
12951/// | # | Operands |
12952/// +---+----------+
12953/// | 1 | Xmm, Mem |
12954/// | 2 | Xmm, Xmm |
12955/// | 3 | Ymm, Mem |
12956/// | 4 | Ymm, Xmm |
12957/// | 5 | Zmm, Mem |
12958/// | 6 | Zmm, Xmm |
12959/// +---+----------+
12960/// ```
12961pub trait Vcvttph2uqqMaskzEmitter<A, B> {
12962    fn vcvttph2uqq_maskz(&mut self, op0: A, op1: B);
12963}
12964
12965impl<'a> Vcvttph2uqqMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
12966    fn vcvttph2uqq_maskz(&mut self, op0: Xmm, op1: Xmm) {
12967        self.emit(VCVTTPH2UQQ128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12968    }
12969}
12970
12971impl<'a> Vcvttph2uqqMaskzEmitter<Xmm, Mem> for Assembler<'a> {
12972    fn vcvttph2uqq_maskz(&mut self, op0: Xmm, op1: Mem) {
12973        self.emit(VCVTTPH2UQQ128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12974    }
12975}
12976
12977impl<'a> Vcvttph2uqqMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
12978    fn vcvttph2uqq_maskz(&mut self, op0: Ymm, op1: Xmm) {
12979        self.emit(VCVTTPH2UQQ256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12980    }
12981}
12982
12983impl<'a> Vcvttph2uqqMaskzEmitter<Ymm, Mem> for Assembler<'a> {
12984    fn vcvttph2uqq_maskz(&mut self, op0: Ymm, op1: Mem) {
12985        self.emit(VCVTTPH2UQQ256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12986    }
12987}
12988
12989impl<'a> Vcvttph2uqqMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
12990    fn vcvttph2uqq_maskz(&mut self, op0: Zmm, op1: Xmm) {
12991        self.emit(VCVTTPH2UQQ512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12992    }
12993}
12994
12995impl<'a> Vcvttph2uqqMaskzEmitter<Zmm, Mem> for Assembler<'a> {
12996    fn vcvttph2uqq_maskz(&mut self, op0: Zmm, op1: Mem) {
12997        self.emit(VCVTTPH2UQQ512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
12998    }
12999}
13000
13001/// `VCVTTPH2UQQ_MASKZ_SAE`.
13002///
13003/// Supported operand variants:
13004///
13005/// ```text
13006/// +---+----------+
13007/// | # | Operands |
13008/// +---+----------+
13009/// | 1 | Zmm, Xmm |
13010/// +---+----------+
13011/// ```
13012pub trait Vcvttph2uqqMaskzSaeEmitter<A, B> {
13013    fn vcvttph2uqq_maskz_sae(&mut self, op0: A, op1: B);
13014}
13015
13016impl<'a> Vcvttph2uqqMaskzSaeEmitter<Zmm, Xmm> for Assembler<'a> {
13017    fn vcvttph2uqq_maskz_sae(&mut self, op0: Zmm, op1: Xmm) {
13018        self.emit(VCVTTPH2UQQ512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13019    }
13020}
13021
13022/// `VCVTTPH2UQQ_SAE`.
13023///
13024/// Supported operand variants:
13025///
13026/// ```text
13027/// +---+----------+
13028/// | # | Operands |
13029/// +---+----------+
13030/// | 1 | Zmm, Xmm |
13031/// +---+----------+
13032/// ```
13033pub trait Vcvttph2uqqSaeEmitter<A, B> {
13034    fn vcvttph2uqq_sae(&mut self, op0: A, op1: B);
13035}
13036
13037impl<'a> Vcvttph2uqqSaeEmitter<Zmm, Xmm> for Assembler<'a> {
13038    fn vcvttph2uqq_sae(&mut self, op0: Zmm, op1: Xmm) {
13039        self.emit(VCVTTPH2UQQ512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13040    }
13041}
13042
13043/// `VCVTTPH2UW`.
13044///
13045/// Supported operand variants:
13046///
13047/// ```text
13048/// +---+----------+
13049/// | # | Operands |
13050/// +---+----------+
13051/// | 1 | Xmm, Mem |
13052/// | 2 | Xmm, Xmm |
13053/// | 3 | Ymm, Mem |
13054/// | 4 | Ymm, Ymm |
13055/// | 5 | Zmm, Mem |
13056/// | 6 | Zmm, Zmm |
13057/// +---+----------+
13058/// ```
13059pub trait Vcvttph2uwEmitter<A, B> {
13060    fn vcvttph2uw(&mut self, op0: A, op1: B);
13061}
13062
13063impl<'a> Vcvttph2uwEmitter<Xmm, Xmm> for Assembler<'a> {
13064    fn vcvttph2uw(&mut self, op0: Xmm, op1: Xmm) {
13065        self.emit(VCVTTPH2UW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13066    }
13067}
13068
13069impl<'a> Vcvttph2uwEmitter<Xmm, Mem> for Assembler<'a> {
13070    fn vcvttph2uw(&mut self, op0: Xmm, op1: Mem) {
13071        self.emit(VCVTTPH2UW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13072    }
13073}
13074
13075impl<'a> Vcvttph2uwEmitter<Ymm, Ymm> for Assembler<'a> {
13076    fn vcvttph2uw(&mut self, op0: Ymm, op1: Ymm) {
13077        self.emit(VCVTTPH2UW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13078    }
13079}
13080
13081impl<'a> Vcvttph2uwEmitter<Ymm, Mem> for Assembler<'a> {
13082    fn vcvttph2uw(&mut self, op0: Ymm, op1: Mem) {
13083        self.emit(VCVTTPH2UW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13084    }
13085}
13086
13087impl<'a> Vcvttph2uwEmitter<Zmm, Zmm> for Assembler<'a> {
13088    fn vcvttph2uw(&mut self, op0: Zmm, op1: Zmm) {
13089        self.emit(VCVTTPH2UW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13090    }
13091}
13092
13093impl<'a> Vcvttph2uwEmitter<Zmm, Mem> for Assembler<'a> {
13094    fn vcvttph2uw(&mut self, op0: Zmm, op1: Mem) {
13095        self.emit(VCVTTPH2UW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13096    }
13097}
13098
13099/// `VCVTTPH2UW_MASK`.
13100///
13101/// Supported operand variants:
13102///
13103/// ```text
13104/// +---+----------+
13105/// | # | Operands |
13106/// +---+----------+
13107/// | 1 | Xmm, Mem |
13108/// | 2 | Xmm, Xmm |
13109/// | 3 | Ymm, Mem |
13110/// | 4 | Ymm, Ymm |
13111/// | 5 | Zmm, Mem |
13112/// | 6 | Zmm, Zmm |
13113/// +---+----------+
13114/// ```
13115pub trait Vcvttph2uwMaskEmitter<A, B> {
13116    fn vcvttph2uw_mask(&mut self, op0: A, op1: B);
13117}
13118
13119impl<'a> Vcvttph2uwMaskEmitter<Xmm, Xmm> for Assembler<'a> {
13120    fn vcvttph2uw_mask(&mut self, op0: Xmm, op1: Xmm) {
13121        self.emit(VCVTTPH2UW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13122    }
13123}
13124
13125impl<'a> Vcvttph2uwMaskEmitter<Xmm, Mem> for Assembler<'a> {
13126    fn vcvttph2uw_mask(&mut self, op0: Xmm, op1: Mem) {
13127        self.emit(VCVTTPH2UW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13128    }
13129}
13130
13131impl<'a> Vcvttph2uwMaskEmitter<Ymm, Ymm> for Assembler<'a> {
13132    fn vcvttph2uw_mask(&mut self, op0: Ymm, op1: Ymm) {
13133        self.emit(VCVTTPH2UW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13134    }
13135}
13136
13137impl<'a> Vcvttph2uwMaskEmitter<Ymm, Mem> for Assembler<'a> {
13138    fn vcvttph2uw_mask(&mut self, op0: Ymm, op1: Mem) {
13139        self.emit(VCVTTPH2UW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13140    }
13141}
13142
13143impl<'a> Vcvttph2uwMaskEmitter<Zmm, Zmm> for Assembler<'a> {
13144    fn vcvttph2uw_mask(&mut self, op0: Zmm, op1: Zmm) {
13145        self.emit(VCVTTPH2UW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13146    }
13147}
13148
13149impl<'a> Vcvttph2uwMaskEmitter<Zmm, Mem> for Assembler<'a> {
13150    fn vcvttph2uw_mask(&mut self, op0: Zmm, op1: Mem) {
13151        self.emit(VCVTTPH2UW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13152    }
13153}
13154
13155/// `VCVTTPH2UW_MASK_SAE`.
13156///
13157/// Supported operand variants:
13158///
13159/// ```text
13160/// +---+----------+
13161/// | # | Operands |
13162/// +---+----------+
13163/// | 1 | Zmm, Zmm |
13164/// +---+----------+
13165/// ```
13166pub trait Vcvttph2uwMaskSaeEmitter<A, B> {
13167    fn vcvttph2uw_mask_sae(&mut self, op0: A, op1: B);
13168}
13169
13170impl<'a> Vcvttph2uwMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
13171    fn vcvttph2uw_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
13172        self.emit(VCVTTPH2UW512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13173    }
13174}
13175
13176/// `VCVTTPH2UW_MASKZ`.
13177///
13178/// Supported operand variants:
13179///
13180/// ```text
13181/// +---+----------+
13182/// | # | Operands |
13183/// +---+----------+
13184/// | 1 | Xmm, Mem |
13185/// | 2 | Xmm, Xmm |
13186/// | 3 | Ymm, Mem |
13187/// | 4 | Ymm, Ymm |
13188/// | 5 | Zmm, Mem |
13189/// | 6 | Zmm, Zmm |
13190/// +---+----------+
13191/// ```
13192pub trait Vcvttph2uwMaskzEmitter<A, B> {
13193    fn vcvttph2uw_maskz(&mut self, op0: A, op1: B);
13194}
13195
13196impl<'a> Vcvttph2uwMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
13197    fn vcvttph2uw_maskz(&mut self, op0: Xmm, op1: Xmm) {
13198        self.emit(VCVTTPH2UW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13199    }
13200}
13201
13202impl<'a> Vcvttph2uwMaskzEmitter<Xmm, Mem> for Assembler<'a> {
13203    fn vcvttph2uw_maskz(&mut self, op0: Xmm, op1: Mem) {
13204        self.emit(VCVTTPH2UW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13205    }
13206}
13207
13208impl<'a> Vcvttph2uwMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
13209    fn vcvttph2uw_maskz(&mut self, op0: Ymm, op1: Ymm) {
13210        self.emit(VCVTTPH2UW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13211    }
13212}
13213
13214impl<'a> Vcvttph2uwMaskzEmitter<Ymm, Mem> for Assembler<'a> {
13215    fn vcvttph2uw_maskz(&mut self, op0: Ymm, op1: Mem) {
13216        self.emit(VCVTTPH2UW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13217    }
13218}
13219
13220impl<'a> Vcvttph2uwMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
13221    fn vcvttph2uw_maskz(&mut self, op0: Zmm, op1: Zmm) {
13222        self.emit(VCVTTPH2UW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13223    }
13224}
13225
13226impl<'a> Vcvttph2uwMaskzEmitter<Zmm, Mem> for Assembler<'a> {
13227    fn vcvttph2uw_maskz(&mut self, op0: Zmm, op1: Mem) {
13228        self.emit(VCVTTPH2UW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13229    }
13230}
13231
13232/// `VCVTTPH2UW_MASKZ_SAE`.
13233///
13234/// Supported operand variants:
13235///
13236/// ```text
13237/// +---+----------+
13238/// | # | Operands |
13239/// +---+----------+
13240/// | 1 | Zmm, Zmm |
13241/// +---+----------+
13242/// ```
13243pub trait Vcvttph2uwMaskzSaeEmitter<A, B> {
13244    fn vcvttph2uw_maskz_sae(&mut self, op0: A, op1: B);
13245}
13246
13247impl<'a> Vcvttph2uwMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
13248    fn vcvttph2uw_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
13249        self.emit(VCVTTPH2UW512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13250    }
13251}
13252
13253/// `VCVTTPH2UW_SAE`.
13254///
13255/// Supported operand variants:
13256///
13257/// ```text
13258/// +---+----------+
13259/// | # | Operands |
13260/// +---+----------+
13261/// | 1 | Zmm, Zmm |
13262/// +---+----------+
13263/// ```
13264pub trait Vcvttph2uwSaeEmitter<A, B> {
13265    fn vcvttph2uw_sae(&mut self, op0: A, op1: B);
13266}
13267
13268impl<'a> Vcvttph2uwSaeEmitter<Zmm, Zmm> for Assembler<'a> {
13269    fn vcvttph2uw_sae(&mut self, op0: Zmm, op1: Zmm) {
13270        self.emit(VCVTTPH2UW512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13271    }
13272}
13273
13274/// `VCVTTPH2W`.
13275///
13276/// Supported operand variants:
13277///
13278/// ```text
13279/// +---+----------+
13280/// | # | Operands |
13281/// +---+----------+
13282/// | 1 | Xmm, Mem |
13283/// | 2 | Xmm, Xmm |
13284/// | 3 | Ymm, Mem |
13285/// | 4 | Ymm, Ymm |
13286/// | 5 | Zmm, Mem |
13287/// | 6 | Zmm, Zmm |
13288/// +---+----------+
13289/// ```
13290pub trait Vcvttph2wEmitter<A, B> {
13291    fn vcvttph2w(&mut self, op0: A, op1: B);
13292}
13293
13294impl<'a> Vcvttph2wEmitter<Xmm, Xmm> for Assembler<'a> {
13295    fn vcvttph2w(&mut self, op0: Xmm, op1: Xmm) {
13296        self.emit(VCVTTPH2W128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13297    }
13298}
13299
13300impl<'a> Vcvttph2wEmitter<Xmm, Mem> for Assembler<'a> {
13301    fn vcvttph2w(&mut self, op0: Xmm, op1: Mem) {
13302        self.emit(VCVTTPH2W128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13303    }
13304}
13305
13306impl<'a> Vcvttph2wEmitter<Ymm, Ymm> for Assembler<'a> {
13307    fn vcvttph2w(&mut self, op0: Ymm, op1: Ymm) {
13308        self.emit(VCVTTPH2W256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13309    }
13310}
13311
13312impl<'a> Vcvttph2wEmitter<Ymm, Mem> for Assembler<'a> {
13313    fn vcvttph2w(&mut self, op0: Ymm, op1: Mem) {
13314        self.emit(VCVTTPH2W256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13315    }
13316}
13317
13318impl<'a> Vcvttph2wEmitter<Zmm, Zmm> for Assembler<'a> {
13319    fn vcvttph2w(&mut self, op0: Zmm, op1: Zmm) {
13320        self.emit(VCVTTPH2W512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13321    }
13322}
13323
13324impl<'a> Vcvttph2wEmitter<Zmm, Mem> for Assembler<'a> {
13325    fn vcvttph2w(&mut self, op0: Zmm, op1: Mem) {
13326        self.emit(VCVTTPH2W512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13327    }
13328}
13329
13330/// `VCVTTPH2W_MASK`.
13331///
13332/// Supported operand variants:
13333///
13334/// ```text
13335/// +---+----------+
13336/// | # | Operands |
13337/// +---+----------+
13338/// | 1 | Xmm, Mem |
13339/// | 2 | Xmm, Xmm |
13340/// | 3 | Ymm, Mem |
13341/// | 4 | Ymm, Ymm |
13342/// | 5 | Zmm, Mem |
13343/// | 6 | Zmm, Zmm |
13344/// +---+----------+
13345/// ```
13346pub trait Vcvttph2wMaskEmitter<A, B> {
13347    fn vcvttph2w_mask(&mut self, op0: A, op1: B);
13348}
13349
13350impl<'a> Vcvttph2wMaskEmitter<Xmm, Xmm> for Assembler<'a> {
13351    fn vcvttph2w_mask(&mut self, op0: Xmm, op1: Xmm) {
13352        self.emit(VCVTTPH2W128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13353    }
13354}
13355
13356impl<'a> Vcvttph2wMaskEmitter<Xmm, Mem> for Assembler<'a> {
13357    fn vcvttph2w_mask(&mut self, op0: Xmm, op1: Mem) {
13358        self.emit(VCVTTPH2W128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13359    }
13360}
13361
13362impl<'a> Vcvttph2wMaskEmitter<Ymm, Ymm> for Assembler<'a> {
13363    fn vcvttph2w_mask(&mut self, op0: Ymm, op1: Ymm) {
13364        self.emit(VCVTTPH2W256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13365    }
13366}
13367
13368impl<'a> Vcvttph2wMaskEmitter<Ymm, Mem> for Assembler<'a> {
13369    fn vcvttph2w_mask(&mut self, op0: Ymm, op1: Mem) {
13370        self.emit(VCVTTPH2W256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13371    }
13372}
13373
13374impl<'a> Vcvttph2wMaskEmitter<Zmm, Zmm> for Assembler<'a> {
13375    fn vcvttph2w_mask(&mut self, op0: Zmm, op1: Zmm) {
13376        self.emit(VCVTTPH2W512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13377    }
13378}
13379
13380impl<'a> Vcvttph2wMaskEmitter<Zmm, Mem> for Assembler<'a> {
13381    fn vcvttph2w_mask(&mut self, op0: Zmm, op1: Mem) {
13382        self.emit(VCVTTPH2W512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13383    }
13384}
13385
13386/// `VCVTTPH2W_MASK_SAE`.
13387///
13388/// Supported operand variants:
13389///
13390/// ```text
13391/// +---+----------+
13392/// | # | Operands |
13393/// +---+----------+
13394/// | 1 | Zmm, Zmm |
13395/// +---+----------+
13396/// ```
13397pub trait Vcvttph2wMaskSaeEmitter<A, B> {
13398    fn vcvttph2w_mask_sae(&mut self, op0: A, op1: B);
13399}
13400
13401impl<'a> Vcvttph2wMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
13402    fn vcvttph2w_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
13403        self.emit(VCVTTPH2W512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13404    }
13405}
13406
13407/// `VCVTTPH2W_MASKZ`.
13408///
13409/// Supported operand variants:
13410///
13411/// ```text
13412/// +---+----------+
13413/// | # | Operands |
13414/// +---+----------+
13415/// | 1 | Xmm, Mem |
13416/// | 2 | Xmm, Xmm |
13417/// | 3 | Ymm, Mem |
13418/// | 4 | Ymm, Ymm |
13419/// | 5 | Zmm, Mem |
13420/// | 6 | Zmm, Zmm |
13421/// +---+----------+
13422/// ```
13423pub trait Vcvttph2wMaskzEmitter<A, B> {
13424    fn vcvttph2w_maskz(&mut self, op0: A, op1: B);
13425}
13426
13427impl<'a> Vcvttph2wMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
13428    fn vcvttph2w_maskz(&mut self, op0: Xmm, op1: Xmm) {
13429        self.emit(VCVTTPH2W128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13430    }
13431}
13432
13433impl<'a> Vcvttph2wMaskzEmitter<Xmm, Mem> for Assembler<'a> {
13434    fn vcvttph2w_maskz(&mut self, op0: Xmm, op1: Mem) {
13435        self.emit(VCVTTPH2W128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13436    }
13437}
13438
13439impl<'a> Vcvttph2wMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
13440    fn vcvttph2w_maskz(&mut self, op0: Ymm, op1: Ymm) {
13441        self.emit(VCVTTPH2W256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13442    }
13443}
13444
13445impl<'a> Vcvttph2wMaskzEmitter<Ymm, Mem> for Assembler<'a> {
13446    fn vcvttph2w_maskz(&mut self, op0: Ymm, op1: Mem) {
13447        self.emit(VCVTTPH2W256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13448    }
13449}
13450
13451impl<'a> Vcvttph2wMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
13452    fn vcvttph2w_maskz(&mut self, op0: Zmm, op1: Zmm) {
13453        self.emit(VCVTTPH2W512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13454    }
13455}
13456
13457impl<'a> Vcvttph2wMaskzEmitter<Zmm, Mem> for Assembler<'a> {
13458    fn vcvttph2w_maskz(&mut self, op0: Zmm, op1: Mem) {
13459        self.emit(VCVTTPH2W512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13460    }
13461}
13462
13463/// `VCVTTPH2W_MASKZ_SAE`.
13464///
13465/// Supported operand variants:
13466///
13467/// ```text
13468/// +---+----------+
13469/// | # | Operands |
13470/// +---+----------+
13471/// | 1 | Zmm, Zmm |
13472/// +---+----------+
13473/// ```
13474pub trait Vcvttph2wMaskzSaeEmitter<A, B> {
13475    fn vcvttph2w_maskz_sae(&mut self, op0: A, op1: B);
13476}
13477
13478impl<'a> Vcvttph2wMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
13479    fn vcvttph2w_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
13480        self.emit(VCVTTPH2W512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13481    }
13482}
13483
13484/// `VCVTTPH2W_SAE`.
13485///
13486/// Supported operand variants:
13487///
13488/// ```text
13489/// +---+----------+
13490/// | # | Operands |
13491/// +---+----------+
13492/// | 1 | Zmm, Zmm |
13493/// +---+----------+
13494/// ```
13495pub trait Vcvttph2wSaeEmitter<A, B> {
13496    fn vcvttph2w_sae(&mut self, op0: A, op1: B);
13497}
13498
13499impl<'a> Vcvttph2wSaeEmitter<Zmm, Zmm> for Assembler<'a> {
13500    fn vcvttph2w_sae(&mut self, op0: Zmm, op1: Zmm) {
13501        self.emit(VCVTTPH2W512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13502    }
13503}
13504
13505/// `VCVTTSH2SI`.
13506///
13507/// Supported operand variants:
13508///
13509/// ```text
13510/// +---+----------+
13511/// | # | Operands |
13512/// +---+----------+
13513/// | 1 | Gpd, Mem |
13514/// | 2 | Gpd, Xmm |
13515/// | 3 | Gpq, Mem |
13516/// | 4 | Gpq, Xmm |
13517/// +---+----------+
13518/// ```
13519pub trait Vcvttsh2siEmitter<A, B> {
13520    fn vcvttsh2si(&mut self, op0: A, op1: B);
13521}
13522
13523impl<'a> Vcvttsh2siEmitter<Gpd, Xmm> for Assembler<'a> {
13524    fn vcvttsh2si(&mut self, op0: Gpd, op1: Xmm) {
13525        self.emit(VCVTTSH2SI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13526    }
13527}
13528
13529impl<'a> Vcvttsh2siEmitter<Gpd, Mem> for Assembler<'a> {
13530    fn vcvttsh2si(&mut self, op0: Gpd, op1: Mem) {
13531        self.emit(VCVTTSH2SI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13532    }
13533}
13534
13535impl<'a> Vcvttsh2siEmitter<Gpq, Xmm> for Assembler<'a> {
13536    fn vcvttsh2si(&mut self, op0: Gpq, op1: Xmm) {
13537        self.emit(VCVTTSH2SI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13538    }
13539}
13540
13541impl<'a> Vcvttsh2siEmitter<Gpq, Mem> for Assembler<'a> {
13542    fn vcvttsh2si(&mut self, op0: Gpq, op1: Mem) {
13543        self.emit(VCVTTSH2SI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13544    }
13545}
13546
13547/// `VCVTTSH2SI_SAE`.
13548///
13549/// Supported operand variants:
13550///
13551/// ```text
13552/// +---+----------+
13553/// | # | Operands |
13554/// +---+----------+
13555/// | 1 | Gpd, Xmm |
13556/// | 2 | Gpq, Xmm |
13557/// +---+----------+
13558/// ```
13559pub trait Vcvttsh2siSaeEmitter<A, B> {
13560    fn vcvttsh2si_sae(&mut self, op0: A, op1: B);
13561}
13562
13563impl<'a> Vcvttsh2siSaeEmitter<Gpd, Xmm> for Assembler<'a> {
13564    fn vcvttsh2si_sae(&mut self, op0: Gpd, op1: Xmm) {
13565        self.emit(VCVTTSH2SI32RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13566    }
13567}
13568
13569impl<'a> Vcvttsh2siSaeEmitter<Gpq, Xmm> for Assembler<'a> {
13570    fn vcvttsh2si_sae(&mut self, op0: Gpq, op1: Xmm) {
13571        self.emit(VCVTTSH2SI64RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13572    }
13573}
13574
13575/// `VCVTTSH2USI`.
13576///
13577/// Supported operand variants:
13578///
13579/// ```text
13580/// +---+----------+
13581/// | # | Operands |
13582/// +---+----------+
13583/// | 1 | Gpd, Mem |
13584/// | 2 | Gpd, Xmm |
13585/// | 3 | Gpq, Mem |
13586/// | 4 | Gpq, Xmm |
13587/// +---+----------+
13588/// ```
13589pub trait Vcvttsh2usiEmitter<A, B> {
13590    fn vcvttsh2usi(&mut self, op0: A, op1: B);
13591}
13592
13593impl<'a> Vcvttsh2usiEmitter<Gpd, Xmm> for Assembler<'a> {
13594    fn vcvttsh2usi(&mut self, op0: Gpd, op1: Xmm) {
13595        self.emit(VCVTTSH2USI32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13596    }
13597}
13598
13599impl<'a> Vcvttsh2usiEmitter<Gpd, Mem> for Assembler<'a> {
13600    fn vcvttsh2usi(&mut self, op0: Gpd, op1: Mem) {
13601        self.emit(VCVTTSH2USI32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13602    }
13603}
13604
13605impl<'a> Vcvttsh2usiEmitter<Gpq, Xmm> for Assembler<'a> {
13606    fn vcvttsh2usi(&mut self, op0: Gpq, op1: Xmm) {
13607        self.emit(VCVTTSH2USI64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13608    }
13609}
13610
13611impl<'a> Vcvttsh2usiEmitter<Gpq, Mem> for Assembler<'a> {
13612    fn vcvttsh2usi(&mut self, op0: Gpq, op1: Mem) {
13613        self.emit(VCVTTSH2USI64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13614    }
13615}
13616
13617/// `VCVTTSH2USI_SAE`.
13618///
13619/// Supported operand variants:
13620///
13621/// ```text
13622/// +---+----------+
13623/// | # | Operands |
13624/// +---+----------+
13625/// | 1 | Gpd, Xmm |
13626/// | 2 | Gpq, Xmm |
13627/// +---+----------+
13628/// ```
13629pub trait Vcvttsh2usiSaeEmitter<A, B> {
13630    fn vcvttsh2usi_sae(&mut self, op0: A, op1: B);
13631}
13632
13633impl<'a> Vcvttsh2usiSaeEmitter<Gpd, Xmm> for Assembler<'a> {
13634    fn vcvttsh2usi_sae(&mut self, op0: Gpd, op1: Xmm) {
13635        self.emit(VCVTTSH2USI32RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13636    }
13637}
13638
13639impl<'a> Vcvttsh2usiSaeEmitter<Gpq, Xmm> for Assembler<'a> {
13640    fn vcvttsh2usi_sae(&mut self, op0: Gpq, op1: Xmm) {
13641        self.emit(VCVTTSH2USI64RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13642    }
13643}
13644
13645/// `VCVTUDQ2PH`.
13646///
13647/// Supported operand variants:
13648///
13649/// ```text
13650/// +---+----------+
13651/// | # | Operands |
13652/// +---+----------+
13653/// | 1 | Xmm, Mem |
13654/// | 2 | Xmm, Xmm |
13655/// | 3 | Xmm, Ymm |
13656/// | 4 | Ymm, Mem |
13657/// | 5 | Ymm, Zmm |
13658/// +---+----------+
13659/// ```
13660pub trait Vcvtudq2phEmitter<A, B> {
13661    fn vcvtudq2ph(&mut self, op0: A, op1: B);
13662}
13663
13664impl<'a> Vcvtudq2phEmitter<Xmm, Xmm> for Assembler<'a> {
13665    fn vcvtudq2ph(&mut self, op0: Xmm, op1: Xmm) {
13666        self.emit(VCVTUDQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13667    }
13668}
13669
13670impl<'a> Vcvtudq2phEmitter<Xmm, Mem> for Assembler<'a> {
13671    fn vcvtudq2ph(&mut self, op0: Xmm, op1: Mem) {
13672        self.emit(VCVTUDQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13673    }
13674}
13675
13676impl<'a> Vcvtudq2phEmitter<Xmm, Ymm> for Assembler<'a> {
13677    fn vcvtudq2ph(&mut self, op0: Xmm, op1: Ymm) {
13678        self.emit(VCVTUDQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13679    }
13680}
13681
13682impl<'a> Vcvtudq2phEmitter<Ymm, Zmm> for Assembler<'a> {
13683    fn vcvtudq2ph(&mut self, op0: Ymm, op1: Zmm) {
13684        self.emit(VCVTUDQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13685    }
13686}
13687
13688impl<'a> Vcvtudq2phEmitter<Ymm, Mem> for Assembler<'a> {
13689    fn vcvtudq2ph(&mut self, op0: Ymm, op1: Mem) {
13690        self.emit(VCVTUDQ2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13691    }
13692}
13693
13694/// `VCVTUDQ2PH_ER`.
13695///
13696/// Supported operand variants:
13697///
13698/// ```text
13699/// +---+----------+
13700/// | # | Operands |
13701/// +---+----------+
13702/// | 1 | Ymm, Zmm |
13703/// +---+----------+
13704/// ```
13705pub trait Vcvtudq2phErEmitter<A, B> {
13706    fn vcvtudq2ph_er(&mut self, op0: A, op1: B);
13707}
13708
13709impl<'a> Vcvtudq2phErEmitter<Ymm, Zmm> for Assembler<'a> {
13710    fn vcvtudq2ph_er(&mut self, op0: Ymm, op1: Zmm) {
13711        self.emit(VCVTUDQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13712    }
13713}
13714
13715/// `VCVTUDQ2PH_MASK`.
13716///
13717/// Supported operand variants:
13718///
13719/// ```text
13720/// +---+----------+
13721/// | # | Operands |
13722/// +---+----------+
13723/// | 1 | Xmm, Mem |
13724/// | 2 | Xmm, Xmm |
13725/// | 3 | Xmm, Ymm |
13726/// | 4 | Ymm, Mem |
13727/// | 5 | Ymm, Zmm |
13728/// +---+----------+
13729/// ```
13730pub trait Vcvtudq2phMaskEmitter<A, B> {
13731    fn vcvtudq2ph_mask(&mut self, op0: A, op1: B);
13732}
13733
13734impl<'a> Vcvtudq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
13735    fn vcvtudq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
13736        self.emit(VCVTUDQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13737    }
13738}
13739
13740impl<'a> Vcvtudq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
13741    fn vcvtudq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
13742        self.emit(VCVTUDQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13743    }
13744}
13745
13746impl<'a> Vcvtudq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
13747    fn vcvtudq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
13748        self.emit(VCVTUDQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13749    }
13750}
13751
13752impl<'a> Vcvtudq2phMaskEmitter<Ymm, Zmm> for Assembler<'a> {
13753    fn vcvtudq2ph_mask(&mut self, op0: Ymm, op1: Zmm) {
13754        self.emit(VCVTUDQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13755    }
13756}
13757
13758impl<'a> Vcvtudq2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
13759    fn vcvtudq2ph_mask(&mut self, op0: Ymm, op1: Mem) {
13760        self.emit(VCVTUDQ2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13761    }
13762}
13763
13764/// `VCVTUDQ2PH_MASK_ER`.
13765///
13766/// Supported operand variants:
13767///
13768/// ```text
13769/// +---+----------+
13770/// | # | Operands |
13771/// +---+----------+
13772/// | 1 | Ymm, Zmm |
13773/// +---+----------+
13774/// ```
13775pub trait Vcvtudq2phMaskErEmitter<A, B> {
13776    fn vcvtudq2ph_mask_er(&mut self, op0: A, op1: B);
13777}
13778
13779impl<'a> Vcvtudq2phMaskErEmitter<Ymm, Zmm> for Assembler<'a> {
13780    fn vcvtudq2ph_mask_er(&mut self, op0: Ymm, op1: Zmm) {
13781        self.emit(VCVTUDQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13782    }
13783}
13784
13785/// `VCVTUDQ2PH_MASKZ`.
13786///
13787/// Supported operand variants:
13788///
13789/// ```text
13790/// +---+----------+
13791/// | # | Operands |
13792/// +---+----------+
13793/// | 1 | Xmm, Mem |
13794/// | 2 | Xmm, Xmm |
13795/// | 3 | Xmm, Ymm |
13796/// | 4 | Ymm, Mem |
13797/// | 5 | Ymm, Zmm |
13798/// +---+----------+
13799/// ```
13800pub trait Vcvtudq2phMaskzEmitter<A, B> {
13801    fn vcvtudq2ph_maskz(&mut self, op0: A, op1: B);
13802}
13803
13804impl<'a> Vcvtudq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
13805    fn vcvtudq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
13806        self.emit(VCVTUDQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13807    }
13808}
13809
13810impl<'a> Vcvtudq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
13811    fn vcvtudq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
13812        self.emit(VCVTUDQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13813    }
13814}
13815
13816impl<'a> Vcvtudq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
13817    fn vcvtudq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
13818        self.emit(VCVTUDQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13819    }
13820}
13821
13822impl<'a> Vcvtudq2phMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
13823    fn vcvtudq2ph_maskz(&mut self, op0: Ymm, op1: Zmm) {
13824        self.emit(VCVTUDQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13825    }
13826}
13827
13828impl<'a> Vcvtudq2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
13829    fn vcvtudq2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
13830        self.emit(VCVTUDQ2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13831    }
13832}
13833
13834/// `VCVTUDQ2PH_MASKZ_ER`.
13835///
13836/// Supported operand variants:
13837///
13838/// ```text
13839/// +---+----------+
13840/// | # | Operands |
13841/// +---+----------+
13842/// | 1 | Ymm, Zmm |
13843/// +---+----------+
13844/// ```
13845pub trait Vcvtudq2phMaskzErEmitter<A, B> {
13846    fn vcvtudq2ph_maskz_er(&mut self, op0: A, op1: B);
13847}
13848
13849impl<'a> Vcvtudq2phMaskzErEmitter<Ymm, Zmm> for Assembler<'a> {
13850    fn vcvtudq2ph_maskz_er(&mut self, op0: Ymm, op1: Zmm) {
13851        self.emit(VCVTUDQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13852    }
13853}
13854
13855/// `VCVTUQQ2PH`.
13856///
13857/// Supported operand variants:
13858///
13859/// ```text
13860/// +---+----------+
13861/// | # | Operands |
13862/// +---+----------+
13863/// | 1 | Xmm, Mem |
13864/// | 2 | Xmm, Xmm |
13865/// | 3 | Xmm, Ymm |
13866/// | 4 | Xmm, Zmm |
13867/// +---+----------+
13868/// ```
13869pub trait Vcvtuqq2phEmitter<A, B> {
13870    fn vcvtuqq2ph(&mut self, op0: A, op1: B);
13871}
13872
13873impl<'a> Vcvtuqq2phEmitter<Xmm, Xmm> for Assembler<'a> {
13874    fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Xmm) {
13875        self.emit(VCVTUQQ2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13876    }
13877}
13878
13879impl<'a> Vcvtuqq2phEmitter<Xmm, Mem> for Assembler<'a> {
13880    fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Mem) {
13881        self.emit(VCVTUQQ2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13882    }
13883}
13884
13885impl<'a> Vcvtuqq2phEmitter<Xmm, Ymm> for Assembler<'a> {
13886    fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Ymm) {
13887        self.emit(VCVTUQQ2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13888    }
13889}
13890
13891impl<'a> Vcvtuqq2phEmitter<Xmm, Zmm> for Assembler<'a> {
13892    fn vcvtuqq2ph(&mut self, op0: Xmm, op1: Zmm) {
13893        self.emit(VCVTUQQ2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13894    }
13895}
13896
13897/// `VCVTUQQ2PH_ER`.
13898///
13899/// Supported operand variants:
13900///
13901/// ```text
13902/// +---+----------+
13903/// | # | Operands |
13904/// +---+----------+
13905/// | 1 | Xmm, Zmm |
13906/// +---+----------+
13907/// ```
13908pub trait Vcvtuqq2phErEmitter<A, B> {
13909    fn vcvtuqq2ph_er(&mut self, op0: A, op1: B);
13910}
13911
13912impl<'a> Vcvtuqq2phErEmitter<Xmm, Zmm> for Assembler<'a> {
13913    fn vcvtuqq2ph_er(&mut self, op0: Xmm, op1: Zmm) {
13914        self.emit(VCVTUQQ2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13915    }
13916}
13917
13918/// `VCVTUQQ2PH_MASK`.
13919///
13920/// Supported operand variants:
13921///
13922/// ```text
13923/// +---+----------+
13924/// | # | Operands |
13925/// +---+----------+
13926/// | 1 | Xmm, Mem |
13927/// | 2 | Xmm, Xmm |
13928/// | 3 | Xmm, Ymm |
13929/// | 4 | Xmm, Zmm |
13930/// +---+----------+
13931/// ```
13932pub trait Vcvtuqq2phMaskEmitter<A, B> {
13933    fn vcvtuqq2ph_mask(&mut self, op0: A, op1: B);
13934}
13935
13936impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
13937    fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
13938        self.emit(VCVTUQQ2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13939    }
13940}
13941
13942impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
13943    fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Mem) {
13944        self.emit(VCVTUQQ2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13945    }
13946}
13947
13948impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Ymm> for Assembler<'a> {
13949    fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Ymm) {
13950        self.emit(VCVTUQQ2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13951    }
13952}
13953
13954impl<'a> Vcvtuqq2phMaskEmitter<Xmm, Zmm> for Assembler<'a> {
13955    fn vcvtuqq2ph_mask(&mut self, op0: Xmm, op1: Zmm) {
13956        self.emit(VCVTUQQ2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13957    }
13958}
13959
13960/// `VCVTUQQ2PH_MASK_ER`.
13961///
13962/// Supported operand variants:
13963///
13964/// ```text
13965/// +---+----------+
13966/// | # | Operands |
13967/// +---+----------+
13968/// | 1 | Xmm, Zmm |
13969/// +---+----------+
13970/// ```
13971pub trait Vcvtuqq2phMaskErEmitter<A, B> {
13972    fn vcvtuqq2ph_mask_er(&mut self, op0: A, op1: B);
13973}
13974
13975impl<'a> Vcvtuqq2phMaskErEmitter<Xmm, Zmm> for Assembler<'a> {
13976    fn vcvtuqq2ph_mask_er(&mut self, op0: Xmm, op1: Zmm) {
13977        self.emit(VCVTUQQ2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
13978    }
13979}
13980
13981/// `VCVTUQQ2PH_MASKZ`.
13982///
13983/// Supported operand variants:
13984///
13985/// ```text
13986/// +---+----------+
13987/// | # | Operands |
13988/// +---+----------+
13989/// | 1 | Xmm, Mem |
13990/// | 2 | Xmm, Xmm |
13991/// | 3 | Xmm, Ymm |
13992/// | 4 | Xmm, Zmm |
13993/// +---+----------+
13994/// ```
13995pub trait Vcvtuqq2phMaskzEmitter<A, B> {
13996    fn vcvtuqq2ph_maskz(&mut self, op0: A, op1: B);
13997}
13998
13999impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
14000    fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
14001        self.emit(VCVTUQQ2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14002    }
14003}
14004
14005impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
14006    fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
14007        self.emit(VCVTUQQ2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14008    }
14009}
14010
14011impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
14012    fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Ymm) {
14013        self.emit(VCVTUQQ2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14014    }
14015}
14016
14017impl<'a> Vcvtuqq2phMaskzEmitter<Xmm, Zmm> for Assembler<'a> {
14018    fn vcvtuqq2ph_maskz(&mut self, op0: Xmm, op1: Zmm) {
14019        self.emit(VCVTUQQ2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14020    }
14021}
14022
14023/// `VCVTUQQ2PH_MASKZ_ER`.
14024///
14025/// Supported operand variants:
14026///
14027/// ```text
14028/// +---+----------+
14029/// | # | Operands |
14030/// +---+----------+
14031/// | 1 | Xmm, Zmm |
14032/// +---+----------+
14033/// ```
14034pub trait Vcvtuqq2phMaskzErEmitter<A, B> {
14035    fn vcvtuqq2ph_maskz_er(&mut self, op0: A, op1: B);
14036}
14037
14038impl<'a> Vcvtuqq2phMaskzErEmitter<Xmm, Zmm> for Assembler<'a> {
14039    fn vcvtuqq2ph_maskz_er(&mut self, op0: Xmm, op1: Zmm) {
14040        self.emit(VCVTUQQ2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14041    }
14042}
14043
14044/// `VCVTUSI2SH`.
14045///
14046/// Supported operand variants:
14047///
14048/// ```text
14049/// +---+---------------+
14050/// | # | Operands      |
14051/// +---+---------------+
14052/// | 1 | Xmm, Xmm, Gpd |
14053/// | 2 | Xmm, Xmm, Gpq |
14054/// | 3 | Xmm, Xmm, Mem |
14055/// +---+---------------+
14056/// ```
14057pub trait Vcvtusi2shEmitter<A, B, C> {
14058    fn vcvtusi2sh(&mut self, op0: A, op1: B, op2: C);
14059}
14060
14061impl<'a> Vcvtusi2shEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
14062    fn vcvtusi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
14063        self.emit(VCVTUSI2SH32RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14064    }
14065}
14066
14067impl<'a> Vcvtusi2shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14068    fn vcvtusi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14069        self.emit(VCVTUSI2SH32RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14070    }
14071}
14072
14073impl<'a> Vcvtusi2shEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
14074    fn vcvtusi2sh(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
14075        self.emit(VCVTUSI2SH64RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14076    }
14077}
14078
14079/// `VCVTUSI2SH_ER`.
14080///
14081/// Supported operand variants:
14082///
14083/// ```text
14084/// +---+---------------+
14085/// | # | Operands      |
14086/// +---+---------------+
14087/// | 1 | Xmm, Xmm, Gpd |
14088/// | 2 | Xmm, Xmm, Gpq |
14089/// +---+---------------+
14090/// ```
14091pub trait Vcvtusi2shErEmitter<A, B, C> {
14092    fn vcvtusi2sh_er(&mut self, op0: A, op1: B, op2: C);
14093}
14094
14095impl<'a> Vcvtusi2shErEmitter<Xmm, Xmm, Gpd> for Assembler<'a> {
14096    fn vcvtusi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpd) {
14097        self.emit(VCVTUSI2SH32RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14098    }
14099}
14100
14101impl<'a> Vcvtusi2shErEmitter<Xmm, Xmm, Gpq> for Assembler<'a> {
14102    fn vcvtusi2sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Gpq) {
14103        self.emit(VCVTUSI2SH64RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14104    }
14105}
14106
14107/// `VCVTUW2PH`.
14108///
14109/// Supported operand variants:
14110///
14111/// ```text
14112/// +---+----------+
14113/// | # | Operands |
14114/// +---+----------+
14115/// | 1 | Xmm, Mem |
14116/// | 2 | Xmm, Xmm |
14117/// | 3 | Ymm, Mem |
14118/// | 4 | Ymm, Ymm |
14119/// | 5 | Zmm, Mem |
14120/// | 6 | Zmm, Zmm |
14121/// +---+----------+
14122/// ```
14123pub trait Vcvtuw2phEmitter<A, B> {
14124    fn vcvtuw2ph(&mut self, op0: A, op1: B);
14125}
14126
14127impl<'a> Vcvtuw2phEmitter<Xmm, Xmm> for Assembler<'a> {
14128    fn vcvtuw2ph(&mut self, op0: Xmm, op1: Xmm) {
14129        self.emit(VCVTUW2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14130    }
14131}
14132
14133impl<'a> Vcvtuw2phEmitter<Xmm, Mem> for Assembler<'a> {
14134    fn vcvtuw2ph(&mut self, op0: Xmm, op1: Mem) {
14135        self.emit(VCVTUW2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14136    }
14137}
14138
14139impl<'a> Vcvtuw2phEmitter<Ymm, Ymm> for Assembler<'a> {
14140    fn vcvtuw2ph(&mut self, op0: Ymm, op1: Ymm) {
14141        self.emit(VCVTUW2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14142    }
14143}
14144
14145impl<'a> Vcvtuw2phEmitter<Ymm, Mem> for Assembler<'a> {
14146    fn vcvtuw2ph(&mut self, op0: Ymm, op1: Mem) {
14147        self.emit(VCVTUW2PH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14148    }
14149}
14150
14151impl<'a> Vcvtuw2phEmitter<Zmm, Zmm> for Assembler<'a> {
14152    fn vcvtuw2ph(&mut self, op0: Zmm, op1: Zmm) {
14153        self.emit(VCVTUW2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14154    }
14155}
14156
14157impl<'a> Vcvtuw2phEmitter<Zmm, Mem> for Assembler<'a> {
14158    fn vcvtuw2ph(&mut self, op0: Zmm, op1: Mem) {
14159        self.emit(VCVTUW2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14160    }
14161}
14162
14163/// `VCVTUW2PH_ER`.
14164///
14165/// Supported operand variants:
14166///
14167/// ```text
14168/// +---+----------+
14169/// | # | Operands |
14170/// +---+----------+
14171/// | 1 | Zmm, Zmm |
14172/// +---+----------+
14173/// ```
14174pub trait Vcvtuw2phErEmitter<A, B> {
14175    fn vcvtuw2ph_er(&mut self, op0: A, op1: B);
14176}
14177
14178impl<'a> Vcvtuw2phErEmitter<Zmm, Zmm> for Assembler<'a> {
14179    fn vcvtuw2ph_er(&mut self, op0: Zmm, op1: Zmm) {
14180        self.emit(VCVTUW2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14181    }
14182}
14183
14184/// `VCVTUW2PH_MASK`.
14185///
14186/// Supported operand variants:
14187///
14188/// ```text
14189/// +---+----------+
14190/// | # | Operands |
14191/// +---+----------+
14192/// | 1 | Xmm, Mem |
14193/// | 2 | Xmm, Xmm |
14194/// | 3 | Ymm, Mem |
14195/// | 4 | Ymm, Ymm |
14196/// | 5 | Zmm, Mem |
14197/// | 6 | Zmm, Zmm |
14198/// +---+----------+
14199/// ```
14200pub trait Vcvtuw2phMaskEmitter<A, B> {
14201    fn vcvtuw2ph_mask(&mut self, op0: A, op1: B);
14202}
14203
14204impl<'a> Vcvtuw2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
14205    fn vcvtuw2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
14206        self.emit(VCVTUW2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14207    }
14208}
14209
14210impl<'a> Vcvtuw2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
14211    fn vcvtuw2ph_mask(&mut self, op0: Xmm, op1: Mem) {
14212        self.emit(VCVTUW2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14213    }
14214}
14215
14216impl<'a> Vcvtuw2phMaskEmitter<Ymm, Ymm> for Assembler<'a> {
14217    fn vcvtuw2ph_mask(&mut self, op0: Ymm, op1: Ymm) {
14218        self.emit(VCVTUW2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14219    }
14220}
14221
14222impl<'a> Vcvtuw2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
14223    fn vcvtuw2ph_mask(&mut self, op0: Ymm, op1: Mem) {
14224        self.emit(VCVTUW2PH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14225    }
14226}
14227
14228impl<'a> Vcvtuw2phMaskEmitter<Zmm, Zmm> for Assembler<'a> {
14229    fn vcvtuw2ph_mask(&mut self, op0: Zmm, op1: Zmm) {
14230        self.emit(VCVTUW2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14231    }
14232}
14233
14234impl<'a> Vcvtuw2phMaskEmitter<Zmm, Mem> for Assembler<'a> {
14235    fn vcvtuw2ph_mask(&mut self, op0: Zmm, op1: Mem) {
14236        self.emit(VCVTUW2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14237    }
14238}
14239
14240/// `VCVTUW2PH_MASK_ER`.
14241///
14242/// Supported operand variants:
14243///
14244/// ```text
14245/// +---+----------+
14246/// | # | Operands |
14247/// +---+----------+
14248/// | 1 | Zmm, Zmm |
14249/// +---+----------+
14250/// ```
14251pub trait Vcvtuw2phMaskErEmitter<A, B> {
14252    fn vcvtuw2ph_mask_er(&mut self, op0: A, op1: B);
14253}
14254
14255impl<'a> Vcvtuw2phMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
14256    fn vcvtuw2ph_mask_er(&mut self, op0: Zmm, op1: Zmm) {
14257        self.emit(VCVTUW2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14258    }
14259}
14260
14261/// `VCVTUW2PH_MASKZ`.
14262///
14263/// Supported operand variants:
14264///
14265/// ```text
14266/// +---+----------+
14267/// | # | Operands |
14268/// +---+----------+
14269/// | 1 | Xmm, Mem |
14270/// | 2 | Xmm, Xmm |
14271/// | 3 | Ymm, Mem |
14272/// | 4 | Ymm, Ymm |
14273/// | 5 | Zmm, Mem |
14274/// | 6 | Zmm, Zmm |
14275/// +---+----------+
14276/// ```
14277pub trait Vcvtuw2phMaskzEmitter<A, B> {
14278    fn vcvtuw2ph_maskz(&mut self, op0: A, op1: B);
14279}
14280
14281impl<'a> Vcvtuw2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
14282    fn vcvtuw2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
14283        self.emit(VCVTUW2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14284    }
14285}
14286
14287impl<'a> Vcvtuw2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
14288    fn vcvtuw2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
14289        self.emit(VCVTUW2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14290    }
14291}
14292
14293impl<'a> Vcvtuw2phMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
14294    fn vcvtuw2ph_maskz(&mut self, op0: Ymm, op1: Ymm) {
14295        self.emit(VCVTUW2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14296    }
14297}
14298
14299impl<'a> Vcvtuw2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
14300    fn vcvtuw2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
14301        self.emit(VCVTUW2PH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14302    }
14303}
14304
14305impl<'a> Vcvtuw2phMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
14306    fn vcvtuw2ph_maskz(&mut self, op0: Zmm, op1: Zmm) {
14307        self.emit(VCVTUW2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14308    }
14309}
14310
14311impl<'a> Vcvtuw2phMaskzEmitter<Zmm, Mem> for Assembler<'a> {
14312    fn vcvtuw2ph_maskz(&mut self, op0: Zmm, op1: Mem) {
14313        self.emit(VCVTUW2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14314    }
14315}
14316
14317/// `VCVTUW2PH_MASKZ_ER`.
14318///
14319/// Supported operand variants:
14320///
14321/// ```text
14322/// +---+----------+
14323/// | # | Operands |
14324/// +---+----------+
14325/// | 1 | Zmm, Zmm |
14326/// +---+----------+
14327/// ```
14328pub trait Vcvtuw2phMaskzErEmitter<A, B> {
14329    fn vcvtuw2ph_maskz_er(&mut self, op0: A, op1: B);
14330}
14331
14332impl<'a> Vcvtuw2phMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
14333    fn vcvtuw2ph_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
14334        self.emit(VCVTUW2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14335    }
14336}
14337
14338/// `VCVTW2PH`.
14339///
14340/// Supported operand variants:
14341///
14342/// ```text
14343/// +---+----------+
14344/// | # | Operands |
14345/// +---+----------+
14346/// | 1 | Xmm, Mem |
14347/// | 2 | Xmm, Xmm |
14348/// | 3 | Ymm, Mem |
14349/// | 4 | Ymm, Ymm |
14350/// | 5 | Zmm, Mem |
14351/// | 6 | Zmm, Zmm |
14352/// +---+----------+
14353/// ```
14354pub trait Vcvtw2phEmitter<A, B> {
14355    fn vcvtw2ph(&mut self, op0: A, op1: B);
14356}
14357
14358impl<'a> Vcvtw2phEmitter<Xmm, Xmm> for Assembler<'a> {
14359    fn vcvtw2ph(&mut self, op0: Xmm, op1: Xmm) {
14360        self.emit(VCVTW2PH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14361    }
14362}
14363
14364impl<'a> Vcvtw2phEmitter<Xmm, Mem> for Assembler<'a> {
14365    fn vcvtw2ph(&mut self, op0: Xmm, op1: Mem) {
14366        self.emit(VCVTW2PH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14367    }
14368}
14369
14370impl<'a> Vcvtw2phEmitter<Ymm, Ymm> for Assembler<'a> {
14371    fn vcvtw2ph(&mut self, op0: Ymm, op1: Ymm) {
14372        self.emit(VCVTW2PH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14373    }
14374}
14375
14376impl<'a> Vcvtw2phEmitter<Ymm, Mem> for Assembler<'a> {
14377    fn vcvtw2ph(&mut self, op0: Ymm, op1: Mem) {
14378        self.emit(VCVTW2PH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14379    }
14380}
14381
14382impl<'a> Vcvtw2phEmitter<Zmm, Zmm> for Assembler<'a> {
14383    fn vcvtw2ph(&mut self, op0: Zmm, op1: Zmm) {
14384        self.emit(VCVTW2PH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14385    }
14386}
14387
14388impl<'a> Vcvtw2phEmitter<Zmm, Mem> for Assembler<'a> {
14389    fn vcvtw2ph(&mut self, op0: Zmm, op1: Mem) {
14390        self.emit(VCVTW2PH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14391    }
14392}
14393
14394/// `VCVTW2PH_ER`.
14395///
14396/// Supported operand variants:
14397///
14398/// ```text
14399/// +---+----------+
14400/// | # | Operands |
14401/// +---+----------+
14402/// | 1 | Zmm, Zmm |
14403/// +---+----------+
14404/// ```
14405pub trait Vcvtw2phErEmitter<A, B> {
14406    fn vcvtw2ph_er(&mut self, op0: A, op1: B);
14407}
14408
14409impl<'a> Vcvtw2phErEmitter<Zmm, Zmm> for Assembler<'a> {
14410    fn vcvtw2ph_er(&mut self, op0: Zmm, op1: Zmm) {
14411        self.emit(VCVTW2PH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14412    }
14413}
14414
14415/// `VCVTW2PH_MASK`.
14416///
14417/// Supported operand variants:
14418///
14419/// ```text
14420/// +---+----------+
14421/// | # | Operands |
14422/// +---+----------+
14423/// | 1 | Xmm, Mem |
14424/// | 2 | Xmm, Xmm |
14425/// | 3 | Ymm, Mem |
14426/// | 4 | Ymm, Ymm |
14427/// | 5 | Zmm, Mem |
14428/// | 6 | Zmm, Zmm |
14429/// +---+----------+
14430/// ```
14431pub trait Vcvtw2phMaskEmitter<A, B> {
14432    fn vcvtw2ph_mask(&mut self, op0: A, op1: B);
14433}
14434
14435impl<'a> Vcvtw2phMaskEmitter<Xmm, Xmm> for Assembler<'a> {
14436    fn vcvtw2ph_mask(&mut self, op0: Xmm, op1: Xmm) {
14437        self.emit(VCVTW2PH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14438    }
14439}
14440
14441impl<'a> Vcvtw2phMaskEmitter<Xmm, Mem> for Assembler<'a> {
14442    fn vcvtw2ph_mask(&mut self, op0: Xmm, op1: Mem) {
14443        self.emit(VCVTW2PH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14444    }
14445}
14446
14447impl<'a> Vcvtw2phMaskEmitter<Ymm, Ymm> for Assembler<'a> {
14448    fn vcvtw2ph_mask(&mut self, op0: Ymm, op1: Ymm) {
14449        self.emit(VCVTW2PH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14450    }
14451}
14452
14453impl<'a> Vcvtw2phMaskEmitter<Ymm, Mem> for Assembler<'a> {
14454    fn vcvtw2ph_mask(&mut self, op0: Ymm, op1: Mem) {
14455        self.emit(VCVTW2PH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14456    }
14457}
14458
14459impl<'a> Vcvtw2phMaskEmitter<Zmm, Zmm> for Assembler<'a> {
14460    fn vcvtw2ph_mask(&mut self, op0: Zmm, op1: Zmm) {
14461        self.emit(VCVTW2PH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14462    }
14463}
14464
14465impl<'a> Vcvtw2phMaskEmitter<Zmm, Mem> for Assembler<'a> {
14466    fn vcvtw2ph_mask(&mut self, op0: Zmm, op1: Mem) {
14467        self.emit(VCVTW2PH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14468    }
14469}
14470
14471/// `VCVTW2PH_MASK_ER`.
14472///
14473/// Supported operand variants:
14474///
14475/// ```text
14476/// +---+----------+
14477/// | # | Operands |
14478/// +---+----------+
14479/// | 1 | Zmm, Zmm |
14480/// +---+----------+
14481/// ```
14482pub trait Vcvtw2phMaskErEmitter<A, B> {
14483    fn vcvtw2ph_mask_er(&mut self, op0: A, op1: B);
14484}
14485
14486impl<'a> Vcvtw2phMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
14487    fn vcvtw2ph_mask_er(&mut self, op0: Zmm, op1: Zmm) {
14488        self.emit(VCVTW2PH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14489    }
14490}
14491
14492/// `VCVTW2PH_MASKZ`.
14493///
14494/// Supported operand variants:
14495///
14496/// ```text
14497/// +---+----------+
14498/// | # | Operands |
14499/// +---+----------+
14500/// | 1 | Xmm, Mem |
14501/// | 2 | Xmm, Xmm |
14502/// | 3 | Ymm, Mem |
14503/// | 4 | Ymm, Ymm |
14504/// | 5 | Zmm, Mem |
14505/// | 6 | Zmm, Zmm |
14506/// +---+----------+
14507/// ```
14508pub trait Vcvtw2phMaskzEmitter<A, B> {
14509    fn vcvtw2ph_maskz(&mut self, op0: A, op1: B);
14510}
14511
14512impl<'a> Vcvtw2phMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
14513    fn vcvtw2ph_maskz(&mut self, op0: Xmm, op1: Xmm) {
14514        self.emit(VCVTW2PH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14515    }
14516}
14517
14518impl<'a> Vcvtw2phMaskzEmitter<Xmm, Mem> for Assembler<'a> {
14519    fn vcvtw2ph_maskz(&mut self, op0: Xmm, op1: Mem) {
14520        self.emit(VCVTW2PH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14521    }
14522}
14523
14524impl<'a> Vcvtw2phMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
14525    fn vcvtw2ph_maskz(&mut self, op0: Ymm, op1: Ymm) {
14526        self.emit(VCVTW2PH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14527    }
14528}
14529
14530impl<'a> Vcvtw2phMaskzEmitter<Ymm, Mem> for Assembler<'a> {
14531    fn vcvtw2ph_maskz(&mut self, op0: Ymm, op1: Mem) {
14532        self.emit(VCVTW2PH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14533    }
14534}
14535
14536impl<'a> Vcvtw2phMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
14537    fn vcvtw2ph_maskz(&mut self, op0: Zmm, op1: Zmm) {
14538        self.emit(VCVTW2PH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14539    }
14540}
14541
14542impl<'a> Vcvtw2phMaskzEmitter<Zmm, Mem> for Assembler<'a> {
14543    fn vcvtw2ph_maskz(&mut self, op0: Zmm, op1: Mem) {
14544        self.emit(VCVTW2PH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14545    }
14546}
14547
14548/// `VCVTW2PH_MASKZ_ER`.
14549///
14550/// Supported operand variants:
14551///
14552/// ```text
14553/// +---+----------+
14554/// | # | Operands |
14555/// +---+----------+
14556/// | 1 | Zmm, Zmm |
14557/// +---+----------+
14558/// ```
14559pub trait Vcvtw2phMaskzErEmitter<A, B> {
14560    fn vcvtw2ph_maskz_er(&mut self, op0: A, op1: B);
14561}
14562
14563impl<'a> Vcvtw2phMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
14564    fn vcvtw2ph_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
14565        self.emit(VCVTW2PH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
14566    }
14567}
14568
14569/// `VDIVPH`.
14570///
14571/// Supported operand variants:
14572///
14573/// ```text
14574/// +---+---------------+
14575/// | # | Operands      |
14576/// +---+---------------+
14577/// | 1 | Xmm, Xmm, Mem |
14578/// | 2 | Xmm, Xmm, Xmm |
14579/// | 3 | Ymm, Ymm, Mem |
14580/// | 4 | Ymm, Ymm, Ymm |
14581/// | 5 | Zmm, Zmm, Mem |
14582/// | 6 | Zmm, Zmm, Zmm |
14583/// +---+---------------+
14584/// ```
14585pub trait VdivphEmitter<A, B, C> {
14586    fn vdivph(&mut self, op0: A, op1: B, op2: C);
14587}
14588
14589impl<'a> VdivphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14590    fn vdivph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14591        self.emit(VDIVPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14592    }
14593}
14594
14595impl<'a> VdivphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14596    fn vdivph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14597        self.emit(VDIVPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14598    }
14599}
14600
14601impl<'a> VdivphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
14602    fn vdivph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
14603        self.emit(VDIVPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14604    }
14605}
14606
14607impl<'a> VdivphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
14608    fn vdivph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
14609        self.emit(VDIVPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14610    }
14611}
14612
14613impl<'a> VdivphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
14614    fn vdivph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
14615        self.emit(VDIVPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14616    }
14617}
14618
14619impl<'a> VdivphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
14620    fn vdivph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
14621        self.emit(VDIVPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14622    }
14623}
14624
14625/// `VDIVPH_ER`.
14626///
14627/// Supported operand variants:
14628///
14629/// ```text
14630/// +---+---------------+
14631/// | # | Operands      |
14632/// +---+---------------+
14633/// | 1 | Zmm, Zmm, Zmm |
14634/// +---+---------------+
14635/// ```
14636pub trait VdivphErEmitter<A, B, C> {
14637    fn vdivph_er(&mut self, op0: A, op1: B, op2: C);
14638}
14639
14640impl<'a> VdivphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
14641    fn vdivph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
14642        self.emit(VDIVPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14643    }
14644}
14645
14646/// `VDIVPH_MASK`.
14647///
14648/// Supported operand variants:
14649///
14650/// ```text
14651/// +---+---------------+
14652/// | # | Operands      |
14653/// +---+---------------+
14654/// | 1 | Xmm, Xmm, Mem |
14655/// | 2 | Xmm, Xmm, Xmm |
14656/// | 3 | Ymm, Ymm, Mem |
14657/// | 4 | Ymm, Ymm, Ymm |
14658/// | 5 | Zmm, Zmm, Mem |
14659/// | 6 | Zmm, Zmm, Zmm |
14660/// +---+---------------+
14661/// ```
14662pub trait VdivphMaskEmitter<A, B, C> {
14663    fn vdivph_mask(&mut self, op0: A, op1: B, op2: C);
14664}
14665
14666impl<'a> VdivphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14667    fn vdivph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14668        self.emit(VDIVPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14669    }
14670}
14671
14672impl<'a> VdivphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14673    fn vdivph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14674        self.emit(VDIVPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14675    }
14676}
14677
14678impl<'a> VdivphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
14679    fn vdivph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
14680        self.emit(VDIVPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14681    }
14682}
14683
14684impl<'a> VdivphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
14685    fn vdivph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
14686        self.emit(VDIVPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14687    }
14688}
14689
14690impl<'a> VdivphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
14691    fn vdivph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
14692        self.emit(VDIVPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14693    }
14694}
14695
14696impl<'a> VdivphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
14697    fn vdivph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
14698        self.emit(VDIVPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14699    }
14700}
14701
14702/// `VDIVPH_MASK_ER`.
14703///
14704/// Supported operand variants:
14705///
14706/// ```text
14707/// +---+---------------+
14708/// | # | Operands      |
14709/// +---+---------------+
14710/// | 1 | Zmm, Zmm, Zmm |
14711/// +---+---------------+
14712/// ```
14713pub trait VdivphMaskErEmitter<A, B, C> {
14714    fn vdivph_mask_er(&mut self, op0: A, op1: B, op2: C);
14715}
14716
14717impl<'a> VdivphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
14718    fn vdivph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
14719        self.emit(VDIVPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14720    }
14721}
14722
14723/// `VDIVPH_MASKZ`.
14724///
14725/// Supported operand variants:
14726///
14727/// ```text
14728/// +---+---------------+
14729/// | # | Operands      |
14730/// +---+---------------+
14731/// | 1 | Xmm, Xmm, Mem |
14732/// | 2 | Xmm, Xmm, Xmm |
14733/// | 3 | Ymm, Ymm, Mem |
14734/// | 4 | Ymm, Ymm, Ymm |
14735/// | 5 | Zmm, Zmm, Mem |
14736/// | 6 | Zmm, Zmm, Zmm |
14737/// +---+---------------+
14738/// ```
14739pub trait VdivphMaskzEmitter<A, B, C> {
14740    fn vdivph_maskz(&mut self, op0: A, op1: B, op2: C);
14741}
14742
14743impl<'a> VdivphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14744    fn vdivph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14745        self.emit(VDIVPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14746    }
14747}
14748
14749impl<'a> VdivphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14750    fn vdivph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14751        self.emit(VDIVPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14752    }
14753}
14754
14755impl<'a> VdivphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
14756    fn vdivph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
14757        self.emit(VDIVPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14758    }
14759}
14760
14761impl<'a> VdivphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
14762    fn vdivph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
14763        self.emit(VDIVPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14764    }
14765}
14766
14767impl<'a> VdivphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
14768    fn vdivph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
14769        self.emit(VDIVPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14770    }
14771}
14772
14773impl<'a> VdivphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
14774    fn vdivph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
14775        self.emit(VDIVPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14776    }
14777}
14778
14779/// `VDIVPH_MASKZ_ER`.
14780///
14781/// Supported operand variants:
14782///
14783/// ```text
14784/// +---+---------------+
14785/// | # | Operands      |
14786/// +---+---------------+
14787/// | 1 | Zmm, Zmm, Zmm |
14788/// +---+---------------+
14789/// ```
14790pub trait VdivphMaskzErEmitter<A, B, C> {
14791    fn vdivph_maskz_er(&mut self, op0: A, op1: B, op2: C);
14792}
14793
14794impl<'a> VdivphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
14795    fn vdivph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
14796        self.emit(VDIVPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14797    }
14798}
14799
14800/// `VDIVSH`.
14801///
14802/// Supported operand variants:
14803///
14804/// ```text
14805/// +---+---------------+
14806/// | # | Operands      |
14807/// +---+---------------+
14808/// | 1 | Xmm, Xmm, Mem |
14809/// | 2 | Xmm, Xmm, Xmm |
14810/// +---+---------------+
14811/// ```
14812pub trait VdivshEmitter<A, B, C> {
14813    fn vdivsh(&mut self, op0: A, op1: B, op2: C);
14814}
14815
14816impl<'a> VdivshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14817    fn vdivsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14818        self.emit(VDIVSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14819    }
14820}
14821
14822impl<'a> VdivshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14823    fn vdivsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14824        self.emit(VDIVSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14825    }
14826}
14827
14828/// `VDIVSH_ER`.
14829///
14830/// Supported operand variants:
14831///
14832/// ```text
14833/// +---+---------------+
14834/// | # | Operands      |
14835/// +---+---------------+
14836/// | 1 | Xmm, Xmm, Xmm |
14837/// +---+---------------+
14838/// ```
14839pub trait VdivshErEmitter<A, B, C> {
14840    fn vdivsh_er(&mut self, op0: A, op1: B, op2: C);
14841}
14842
14843impl<'a> VdivshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14844    fn vdivsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14845        self.emit(VDIVSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14846    }
14847}
14848
14849/// `VDIVSH_MASK`.
14850///
14851/// Supported operand variants:
14852///
14853/// ```text
14854/// +---+---------------+
14855/// | # | Operands      |
14856/// +---+---------------+
14857/// | 1 | Xmm, Xmm, Mem |
14858/// | 2 | Xmm, Xmm, Xmm |
14859/// +---+---------------+
14860/// ```
14861pub trait VdivshMaskEmitter<A, B, C> {
14862    fn vdivsh_mask(&mut self, op0: A, op1: B, op2: C);
14863}
14864
14865impl<'a> VdivshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14866    fn vdivsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14867        self.emit(VDIVSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14868    }
14869}
14870
14871impl<'a> VdivshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14872    fn vdivsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14873        self.emit(VDIVSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14874    }
14875}
14876
14877/// `VDIVSH_MASK_ER`.
14878///
14879/// Supported operand variants:
14880///
14881/// ```text
14882/// +---+---------------+
14883/// | # | Operands      |
14884/// +---+---------------+
14885/// | 1 | Xmm, Xmm, Xmm |
14886/// +---+---------------+
14887/// ```
14888pub trait VdivshMaskErEmitter<A, B, C> {
14889    fn vdivsh_mask_er(&mut self, op0: A, op1: B, op2: C);
14890}
14891
14892impl<'a> VdivshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14893    fn vdivsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14894        self.emit(VDIVSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14895    }
14896}
14897
14898/// `VDIVSH_MASKZ`.
14899///
14900/// Supported operand variants:
14901///
14902/// ```text
14903/// +---+---------------+
14904/// | # | Operands      |
14905/// +---+---------------+
14906/// | 1 | Xmm, Xmm, Mem |
14907/// | 2 | Xmm, Xmm, Xmm |
14908/// +---+---------------+
14909/// ```
14910pub trait VdivshMaskzEmitter<A, B, C> {
14911    fn vdivsh_maskz(&mut self, op0: A, op1: B, op2: C);
14912}
14913
14914impl<'a> VdivshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14915    fn vdivsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14916        self.emit(VDIVSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14917    }
14918}
14919
14920impl<'a> VdivshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
14921    fn vdivsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
14922        self.emit(VDIVSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14923    }
14924}
14925
14926/// `VDIVSH_MASKZ_ER`.
14927///
14928/// Supported operand variants:
14929///
14930/// ```text
14931/// +---+---------------+
14932/// | # | Operands      |
14933/// +---+---------------+
14934/// | 1 | Xmm, Xmm, Xmm |
14935/// +---+---------------+
14936/// ```
14937pub trait VdivshMaskzErEmitter<A, B, C> {
14938    fn vdivsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
14939}
14940
14941impl<'a> VdivshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
14942    fn vdivsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
14943        self.emit(VDIVSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
14944    }
14945}
14946
14947/// `VERR` (VERR). 
14948/// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
14949///
14950///
14951/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
14952///
14953/// Supported operand variants:
14954///
14955/// ```text
14956/// +---+----------+
14957/// | # | Operands |
14958/// +---+----------+
14959/// | 1 | Gpd      |
14960/// | 2 | Mem      |
14961/// +---+----------+
14962/// ```
14963pub trait VerrEmitter<A> {
14964    fn verr(&mut self, op0: A);
14965}
14966
14967impl<'a> VerrEmitter<Gpd> for Assembler<'a> {
14968    fn verr(&mut self, op0: Gpd) {
14969        self.emit(VERRR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
14970    }
14971}
14972
14973impl<'a> VerrEmitter<Mem> for Assembler<'a> {
14974    fn verr(&mut self, op0: Mem) {
14975        self.emit(VERRM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
14976    }
14977}
14978
14979/// `VERW` (VERW). 
14980/// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
14981///
14982///
14983/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
14984///
14985/// Supported operand variants:
14986///
14987/// ```text
14988/// +---+----------+
14989/// | # | Operands |
14990/// +---+----------+
14991/// | 1 | Gpd      |
14992/// | 2 | Mem      |
14993/// +---+----------+
14994/// ```
14995pub trait VerwEmitter<A> {
14996    fn verw(&mut self, op0: A);
14997}
14998
14999impl<'a> VerwEmitter<Gpd> for Assembler<'a> {
15000    fn verw(&mut self, op0: Gpd) {
15001        self.emit(VERWR, op0.as_operand(), &NOREG, &NOREG, &NOREG);
15002    }
15003}
15004
15005impl<'a> VerwEmitter<Mem> for Assembler<'a> {
15006    fn verw(&mut self, op0: Mem) {
15007        self.emit(VERWM, op0.as_operand(), &NOREG, &NOREG, &NOREG);
15008    }
15009}
15010
15011/// `VFCMADDCPH`.
15012///
15013/// Supported operand variants:
15014///
15015/// ```text
15016/// +---+---------------+
15017/// | # | Operands      |
15018/// +---+---------------+
15019/// | 1 | Xmm, Xmm, Mem |
15020/// | 2 | Xmm, Xmm, Xmm |
15021/// | 3 | Ymm, Ymm, Mem |
15022/// | 4 | Ymm, Ymm, Ymm |
15023/// | 5 | Zmm, Zmm, Mem |
15024/// | 6 | Zmm, Zmm, Zmm |
15025/// +---+---------------+
15026/// ```
15027pub trait VfcmaddcphEmitter<A, B, C> {
15028    fn vfcmaddcph(&mut self, op0: A, op1: B, op2: C);
15029}
15030
15031impl<'a> VfcmaddcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15032    fn vfcmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15033        self.emit(VFCMADDCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15034    }
15035}
15036
15037impl<'a> VfcmaddcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15038    fn vfcmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15039        self.emit(VFCMADDCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15040    }
15041}
15042
15043impl<'a> VfcmaddcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15044    fn vfcmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15045        self.emit(VFCMADDCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15046    }
15047}
15048
15049impl<'a> VfcmaddcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15050    fn vfcmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15051        self.emit(VFCMADDCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15052    }
15053}
15054
15055impl<'a> VfcmaddcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15056    fn vfcmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15057        self.emit(VFCMADDCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15058    }
15059}
15060
15061impl<'a> VfcmaddcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15062    fn vfcmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15063        self.emit(VFCMADDCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15064    }
15065}
15066
15067/// `VFCMADDCPH_ER`.
15068///
15069/// Supported operand variants:
15070///
15071/// ```text
15072/// +---+---------------+
15073/// | # | Operands      |
15074/// +---+---------------+
15075/// | 1 | Zmm, Zmm, Zmm |
15076/// +---+---------------+
15077/// ```
15078pub trait VfcmaddcphErEmitter<A, B, C> {
15079    fn vfcmaddcph_er(&mut self, op0: A, op1: B, op2: C);
15080}
15081
15082impl<'a> VfcmaddcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15083    fn vfcmaddcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15084        self.emit(VFCMADDCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15085    }
15086}
15087
15088/// `VFCMADDCPH_MASK`.
15089///
15090/// Supported operand variants:
15091///
15092/// ```text
15093/// +---+---------------+
15094/// | # | Operands      |
15095/// +---+---------------+
15096/// | 1 | Xmm, Xmm, Mem |
15097/// | 2 | Xmm, Xmm, Xmm |
15098/// | 3 | Ymm, Ymm, Mem |
15099/// | 4 | Ymm, Ymm, Ymm |
15100/// | 5 | Zmm, Zmm, Mem |
15101/// | 6 | Zmm, Zmm, Zmm |
15102/// +---+---------------+
15103/// ```
15104pub trait VfcmaddcphMaskEmitter<A, B, C> {
15105    fn vfcmaddcph_mask(&mut self, op0: A, op1: B, op2: C);
15106}
15107
15108impl<'a> VfcmaddcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15109    fn vfcmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15110        self.emit(VFCMADDCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15111    }
15112}
15113
15114impl<'a> VfcmaddcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15115    fn vfcmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15116        self.emit(VFCMADDCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15117    }
15118}
15119
15120impl<'a> VfcmaddcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15121    fn vfcmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15122        self.emit(VFCMADDCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15123    }
15124}
15125
15126impl<'a> VfcmaddcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15127    fn vfcmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15128        self.emit(VFCMADDCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15129    }
15130}
15131
15132impl<'a> VfcmaddcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15133    fn vfcmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15134        self.emit(VFCMADDCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15135    }
15136}
15137
15138impl<'a> VfcmaddcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15139    fn vfcmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15140        self.emit(VFCMADDCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15141    }
15142}
15143
15144/// `VFCMADDCPH_MASK_ER`.
15145///
15146/// Supported operand variants:
15147///
15148/// ```text
15149/// +---+---------------+
15150/// | # | Operands      |
15151/// +---+---------------+
15152/// | 1 | Zmm, Zmm, Zmm |
15153/// +---+---------------+
15154/// ```
15155pub trait VfcmaddcphMaskErEmitter<A, B, C> {
15156    fn vfcmaddcph_mask_er(&mut self, op0: A, op1: B, op2: C);
15157}
15158
15159impl<'a> VfcmaddcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15160    fn vfcmaddcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15161        self.emit(VFCMADDCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15162    }
15163}
15164
15165/// `VFCMADDCPH_MASKZ`.
15166///
15167/// Supported operand variants:
15168///
15169/// ```text
15170/// +---+---------------+
15171/// | # | Operands      |
15172/// +---+---------------+
15173/// | 1 | Xmm, Xmm, Mem |
15174/// | 2 | Xmm, Xmm, Xmm |
15175/// | 3 | Ymm, Ymm, Mem |
15176/// | 4 | Ymm, Ymm, Ymm |
15177/// | 5 | Zmm, Zmm, Mem |
15178/// | 6 | Zmm, Zmm, Zmm |
15179/// +---+---------------+
15180/// ```
15181pub trait VfcmaddcphMaskzEmitter<A, B, C> {
15182    fn vfcmaddcph_maskz(&mut self, op0: A, op1: B, op2: C);
15183}
15184
15185impl<'a> VfcmaddcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15186    fn vfcmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15187        self.emit(VFCMADDCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15188    }
15189}
15190
15191impl<'a> VfcmaddcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15192    fn vfcmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15193        self.emit(VFCMADDCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15194    }
15195}
15196
15197impl<'a> VfcmaddcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15198    fn vfcmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15199        self.emit(VFCMADDCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15200    }
15201}
15202
15203impl<'a> VfcmaddcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15204    fn vfcmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15205        self.emit(VFCMADDCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15206    }
15207}
15208
15209impl<'a> VfcmaddcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15210    fn vfcmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15211        self.emit(VFCMADDCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15212    }
15213}
15214
15215impl<'a> VfcmaddcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15216    fn vfcmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15217        self.emit(VFCMADDCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15218    }
15219}
15220
15221/// `VFCMADDCPH_MASKZ_ER`.
15222///
15223/// Supported operand variants:
15224///
15225/// ```text
15226/// +---+---------------+
15227/// | # | Operands      |
15228/// +---+---------------+
15229/// | 1 | Zmm, Zmm, Zmm |
15230/// +---+---------------+
15231/// ```
15232pub trait VfcmaddcphMaskzErEmitter<A, B, C> {
15233    fn vfcmaddcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
15234}
15235
15236impl<'a> VfcmaddcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15237    fn vfcmaddcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15238        self.emit(VFCMADDCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15239    }
15240}
15241
15242/// `VFCMADDCSH`.
15243///
15244/// Supported operand variants:
15245///
15246/// ```text
15247/// +---+---------------+
15248/// | # | Operands      |
15249/// +---+---------------+
15250/// | 1 | Xmm, Xmm, Mem |
15251/// | 2 | Xmm, Xmm, Xmm |
15252/// +---+---------------+
15253/// ```
15254pub trait VfcmaddcshEmitter<A, B, C> {
15255    fn vfcmaddcsh(&mut self, op0: A, op1: B, op2: C);
15256}
15257
15258impl<'a> VfcmaddcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15259    fn vfcmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15260        self.emit(VFCMADDCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15261    }
15262}
15263
15264impl<'a> VfcmaddcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15265    fn vfcmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15266        self.emit(VFCMADDCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15267    }
15268}
15269
15270/// `VFCMADDCSH_ER`.
15271///
15272/// Supported operand variants:
15273///
15274/// ```text
15275/// +---+---------------+
15276/// | # | Operands      |
15277/// +---+---------------+
15278/// | 1 | Xmm, Xmm, Xmm |
15279/// +---+---------------+
15280/// ```
15281pub trait VfcmaddcshErEmitter<A, B, C> {
15282    fn vfcmaddcsh_er(&mut self, op0: A, op1: B, op2: C);
15283}
15284
15285impl<'a> VfcmaddcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15286    fn vfcmaddcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15287        self.emit(VFCMADDCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15288    }
15289}
15290
15291/// `VFCMADDCSH_MASK`.
15292///
15293/// Supported operand variants:
15294///
15295/// ```text
15296/// +---+---------------+
15297/// | # | Operands      |
15298/// +---+---------------+
15299/// | 1 | Xmm, Xmm, Mem |
15300/// | 2 | Xmm, Xmm, Xmm |
15301/// +---+---------------+
15302/// ```
15303pub trait VfcmaddcshMaskEmitter<A, B, C> {
15304    fn vfcmaddcsh_mask(&mut self, op0: A, op1: B, op2: C);
15305}
15306
15307impl<'a> VfcmaddcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15308    fn vfcmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15309        self.emit(VFCMADDCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15310    }
15311}
15312
15313impl<'a> VfcmaddcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15314    fn vfcmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15315        self.emit(VFCMADDCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15316    }
15317}
15318
15319/// `VFCMADDCSH_MASK_ER`.
15320///
15321/// Supported operand variants:
15322///
15323/// ```text
15324/// +---+---------------+
15325/// | # | Operands      |
15326/// +---+---------------+
15327/// | 1 | Xmm, Xmm, Xmm |
15328/// +---+---------------+
15329/// ```
15330pub trait VfcmaddcshMaskErEmitter<A, B, C> {
15331    fn vfcmaddcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
15332}
15333
15334impl<'a> VfcmaddcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15335    fn vfcmaddcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15336        self.emit(VFCMADDCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15337    }
15338}
15339
15340/// `VFCMADDCSH_MASKZ`.
15341///
15342/// Supported operand variants:
15343///
15344/// ```text
15345/// +---+---------------+
15346/// | # | Operands      |
15347/// +---+---------------+
15348/// | 1 | Xmm, Xmm, Mem |
15349/// | 2 | Xmm, Xmm, Xmm |
15350/// +---+---------------+
15351/// ```
15352pub trait VfcmaddcshMaskzEmitter<A, B, C> {
15353    fn vfcmaddcsh_maskz(&mut self, op0: A, op1: B, op2: C);
15354}
15355
15356impl<'a> VfcmaddcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15357    fn vfcmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15358        self.emit(VFCMADDCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15359    }
15360}
15361
15362impl<'a> VfcmaddcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15363    fn vfcmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15364        self.emit(VFCMADDCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15365    }
15366}
15367
15368/// `VFCMADDCSH_MASKZ_ER`.
15369///
15370/// Supported operand variants:
15371///
15372/// ```text
15373/// +---+---------------+
15374/// | # | Operands      |
15375/// +---+---------------+
15376/// | 1 | Xmm, Xmm, Xmm |
15377/// +---+---------------+
15378/// ```
15379pub trait VfcmaddcshMaskzErEmitter<A, B, C> {
15380    fn vfcmaddcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
15381}
15382
15383impl<'a> VfcmaddcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15384    fn vfcmaddcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15385        self.emit(VFCMADDCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15386    }
15387}
15388
15389/// `VFCMULCPH`.
15390///
15391/// Supported operand variants:
15392///
15393/// ```text
15394/// +---+---------------+
15395/// | # | Operands      |
15396/// +---+---------------+
15397/// | 1 | Xmm, Xmm, Mem |
15398/// | 2 | Xmm, Xmm, Xmm |
15399/// | 3 | Ymm, Ymm, Mem |
15400/// | 4 | Ymm, Ymm, Ymm |
15401/// | 5 | Zmm, Zmm, Mem |
15402/// | 6 | Zmm, Zmm, Zmm |
15403/// +---+---------------+
15404/// ```
15405pub trait VfcmulcphEmitter<A, B, C> {
15406    fn vfcmulcph(&mut self, op0: A, op1: B, op2: C);
15407}
15408
15409impl<'a> VfcmulcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15410    fn vfcmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15411        self.emit(VFCMULCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15412    }
15413}
15414
15415impl<'a> VfcmulcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15416    fn vfcmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15417        self.emit(VFCMULCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15418    }
15419}
15420
15421impl<'a> VfcmulcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15422    fn vfcmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15423        self.emit(VFCMULCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15424    }
15425}
15426
15427impl<'a> VfcmulcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15428    fn vfcmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15429        self.emit(VFCMULCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15430    }
15431}
15432
15433impl<'a> VfcmulcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15434    fn vfcmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15435        self.emit(VFCMULCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15436    }
15437}
15438
15439impl<'a> VfcmulcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15440    fn vfcmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15441        self.emit(VFCMULCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15442    }
15443}
15444
15445/// `VFCMULCPH_ER`.
15446///
15447/// Supported operand variants:
15448///
15449/// ```text
15450/// +---+---------------+
15451/// | # | Operands      |
15452/// +---+---------------+
15453/// | 1 | Zmm, Zmm, Zmm |
15454/// +---+---------------+
15455/// ```
15456pub trait VfcmulcphErEmitter<A, B, C> {
15457    fn vfcmulcph_er(&mut self, op0: A, op1: B, op2: C);
15458}
15459
15460impl<'a> VfcmulcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15461    fn vfcmulcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15462        self.emit(VFCMULCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15463    }
15464}
15465
15466/// `VFCMULCPH_MASK`.
15467///
15468/// Supported operand variants:
15469///
15470/// ```text
15471/// +---+---------------+
15472/// | # | Operands      |
15473/// +---+---------------+
15474/// | 1 | Xmm, Xmm, Mem |
15475/// | 2 | Xmm, Xmm, Xmm |
15476/// | 3 | Ymm, Ymm, Mem |
15477/// | 4 | Ymm, Ymm, Ymm |
15478/// | 5 | Zmm, Zmm, Mem |
15479/// | 6 | Zmm, Zmm, Zmm |
15480/// +---+---------------+
15481/// ```
15482pub trait VfcmulcphMaskEmitter<A, B, C> {
15483    fn vfcmulcph_mask(&mut self, op0: A, op1: B, op2: C);
15484}
15485
15486impl<'a> VfcmulcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15487    fn vfcmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15488        self.emit(VFCMULCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15489    }
15490}
15491
15492impl<'a> VfcmulcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15493    fn vfcmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15494        self.emit(VFCMULCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15495    }
15496}
15497
15498impl<'a> VfcmulcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15499    fn vfcmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15500        self.emit(VFCMULCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15501    }
15502}
15503
15504impl<'a> VfcmulcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15505    fn vfcmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15506        self.emit(VFCMULCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15507    }
15508}
15509
15510impl<'a> VfcmulcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15511    fn vfcmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15512        self.emit(VFCMULCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15513    }
15514}
15515
15516impl<'a> VfcmulcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15517    fn vfcmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15518        self.emit(VFCMULCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15519    }
15520}
15521
15522/// `VFCMULCPH_MASK_ER`.
15523///
15524/// Supported operand variants:
15525///
15526/// ```text
15527/// +---+---------------+
15528/// | # | Operands      |
15529/// +---+---------------+
15530/// | 1 | Zmm, Zmm, Zmm |
15531/// +---+---------------+
15532/// ```
15533pub trait VfcmulcphMaskErEmitter<A, B, C> {
15534    fn vfcmulcph_mask_er(&mut self, op0: A, op1: B, op2: C);
15535}
15536
15537impl<'a> VfcmulcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15538    fn vfcmulcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15539        self.emit(VFCMULCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15540    }
15541}
15542
15543/// `VFCMULCPH_MASKZ`.
15544///
15545/// Supported operand variants:
15546///
15547/// ```text
15548/// +---+---------------+
15549/// | # | Operands      |
15550/// +---+---------------+
15551/// | 1 | Xmm, Xmm, Mem |
15552/// | 2 | Xmm, Xmm, Xmm |
15553/// | 3 | Ymm, Ymm, Mem |
15554/// | 4 | Ymm, Ymm, Ymm |
15555/// | 5 | Zmm, Zmm, Mem |
15556/// | 6 | Zmm, Zmm, Zmm |
15557/// +---+---------------+
15558/// ```
15559pub trait VfcmulcphMaskzEmitter<A, B, C> {
15560    fn vfcmulcph_maskz(&mut self, op0: A, op1: B, op2: C);
15561}
15562
15563impl<'a> VfcmulcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15564    fn vfcmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15565        self.emit(VFCMULCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15566    }
15567}
15568
15569impl<'a> VfcmulcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15570    fn vfcmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15571        self.emit(VFCMULCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15572    }
15573}
15574
15575impl<'a> VfcmulcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15576    fn vfcmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15577        self.emit(VFCMULCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15578    }
15579}
15580
15581impl<'a> VfcmulcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15582    fn vfcmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15583        self.emit(VFCMULCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15584    }
15585}
15586
15587impl<'a> VfcmulcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15588    fn vfcmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15589        self.emit(VFCMULCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15590    }
15591}
15592
15593impl<'a> VfcmulcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15594    fn vfcmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15595        self.emit(VFCMULCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15596    }
15597}
15598
15599/// `VFCMULCPH_MASKZ_ER`.
15600///
15601/// Supported operand variants:
15602///
15603/// ```text
15604/// +---+---------------+
15605/// | # | Operands      |
15606/// +---+---------------+
15607/// | 1 | Zmm, Zmm, Zmm |
15608/// +---+---------------+
15609/// ```
15610pub trait VfcmulcphMaskzErEmitter<A, B, C> {
15611    fn vfcmulcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
15612}
15613
15614impl<'a> VfcmulcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15615    fn vfcmulcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15616        self.emit(VFCMULCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15617    }
15618}
15619
15620/// `VFCMULCSH`.
15621///
15622/// Supported operand variants:
15623///
15624/// ```text
15625/// +---+---------------+
15626/// | # | Operands      |
15627/// +---+---------------+
15628/// | 1 | Xmm, Xmm, Mem |
15629/// | 2 | Xmm, Xmm, Xmm |
15630/// +---+---------------+
15631/// ```
15632pub trait VfcmulcshEmitter<A, B, C> {
15633    fn vfcmulcsh(&mut self, op0: A, op1: B, op2: C);
15634}
15635
15636impl<'a> VfcmulcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15637    fn vfcmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15638        self.emit(VFCMULCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15639    }
15640}
15641
15642impl<'a> VfcmulcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15643    fn vfcmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15644        self.emit(VFCMULCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15645    }
15646}
15647
15648/// `VFCMULCSH_ER`.
15649///
15650/// Supported operand variants:
15651///
15652/// ```text
15653/// +---+---------------+
15654/// | # | Operands      |
15655/// +---+---------------+
15656/// | 1 | Xmm, Xmm, Xmm |
15657/// +---+---------------+
15658/// ```
15659pub trait VfcmulcshErEmitter<A, B, C> {
15660    fn vfcmulcsh_er(&mut self, op0: A, op1: B, op2: C);
15661}
15662
15663impl<'a> VfcmulcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15664    fn vfcmulcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15665        self.emit(VFCMULCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15666    }
15667}
15668
15669/// `VFCMULCSH_MASK`.
15670///
15671/// Supported operand variants:
15672///
15673/// ```text
15674/// +---+---------------+
15675/// | # | Operands      |
15676/// +---+---------------+
15677/// | 1 | Xmm, Xmm, Mem |
15678/// | 2 | Xmm, Xmm, Xmm |
15679/// +---+---------------+
15680/// ```
15681pub trait VfcmulcshMaskEmitter<A, B, C> {
15682    fn vfcmulcsh_mask(&mut self, op0: A, op1: B, op2: C);
15683}
15684
15685impl<'a> VfcmulcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15686    fn vfcmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15687        self.emit(VFCMULCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15688    }
15689}
15690
15691impl<'a> VfcmulcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15692    fn vfcmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15693        self.emit(VFCMULCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15694    }
15695}
15696
15697/// `VFCMULCSH_MASK_ER`.
15698///
15699/// Supported operand variants:
15700///
15701/// ```text
15702/// +---+---------------+
15703/// | # | Operands      |
15704/// +---+---------------+
15705/// | 1 | Xmm, Xmm, Xmm |
15706/// +---+---------------+
15707/// ```
15708pub trait VfcmulcshMaskErEmitter<A, B, C> {
15709    fn vfcmulcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
15710}
15711
15712impl<'a> VfcmulcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15713    fn vfcmulcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15714        self.emit(VFCMULCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15715    }
15716}
15717
15718/// `VFCMULCSH_MASKZ`.
15719///
15720/// Supported operand variants:
15721///
15722/// ```text
15723/// +---+---------------+
15724/// | # | Operands      |
15725/// +---+---------------+
15726/// | 1 | Xmm, Xmm, Mem |
15727/// | 2 | Xmm, Xmm, Xmm |
15728/// +---+---------------+
15729/// ```
15730pub trait VfcmulcshMaskzEmitter<A, B, C> {
15731    fn vfcmulcsh_maskz(&mut self, op0: A, op1: B, op2: C);
15732}
15733
15734impl<'a> VfcmulcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15735    fn vfcmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15736        self.emit(VFCMULCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15737    }
15738}
15739
15740impl<'a> VfcmulcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15741    fn vfcmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15742        self.emit(VFCMULCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15743    }
15744}
15745
15746/// `VFCMULCSH_MASKZ_ER`.
15747///
15748/// Supported operand variants:
15749///
15750/// ```text
15751/// +---+---------------+
15752/// | # | Operands      |
15753/// +---+---------------+
15754/// | 1 | Xmm, Xmm, Xmm |
15755/// +---+---------------+
15756/// ```
15757pub trait VfcmulcshMaskzErEmitter<A, B, C> {
15758    fn vfcmulcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
15759}
15760
15761impl<'a> VfcmulcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15762    fn vfcmulcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15763        self.emit(VFCMULCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15764    }
15765}
15766
15767/// `VFMADD132PH`.
15768///
15769/// Supported operand variants:
15770///
15771/// ```text
15772/// +---+---------------+
15773/// | # | Operands      |
15774/// +---+---------------+
15775/// | 1 | Xmm, Xmm, Mem |
15776/// | 2 | Xmm, Xmm, Xmm |
15777/// | 3 | Ymm, Ymm, Mem |
15778/// | 4 | Ymm, Ymm, Ymm |
15779/// | 5 | Zmm, Zmm, Mem |
15780/// | 6 | Zmm, Zmm, Zmm |
15781/// +---+---------------+
15782/// ```
15783pub trait Vfmadd132phEmitter<A, B, C> {
15784    fn vfmadd132ph(&mut self, op0: A, op1: B, op2: C);
15785}
15786
15787impl<'a> Vfmadd132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15788    fn vfmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15789        self.emit(VFMADD132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15790    }
15791}
15792
15793impl<'a> Vfmadd132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15794    fn vfmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15795        self.emit(VFMADD132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15796    }
15797}
15798
15799impl<'a> Vfmadd132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15800    fn vfmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15801        self.emit(VFMADD132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15802    }
15803}
15804
15805impl<'a> Vfmadd132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15806    fn vfmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15807        self.emit(VFMADD132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15808    }
15809}
15810
15811impl<'a> Vfmadd132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15812    fn vfmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15813        self.emit(VFMADD132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15814    }
15815}
15816
15817impl<'a> Vfmadd132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15818    fn vfmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15819        self.emit(VFMADD132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15820    }
15821}
15822
15823/// `VFMADD132PH_ER`.
15824///
15825/// Supported operand variants:
15826///
15827/// ```text
15828/// +---+---------------+
15829/// | # | Operands      |
15830/// +---+---------------+
15831/// | 1 | Zmm, Zmm, Zmm |
15832/// +---+---------------+
15833/// ```
15834pub trait Vfmadd132phErEmitter<A, B, C> {
15835    fn vfmadd132ph_er(&mut self, op0: A, op1: B, op2: C);
15836}
15837
15838impl<'a> Vfmadd132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15839    fn vfmadd132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15840        self.emit(VFMADD132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15841    }
15842}
15843
15844/// `VFMADD132PH_MASK`.
15845///
15846/// Supported operand variants:
15847///
15848/// ```text
15849/// +---+---------------+
15850/// | # | Operands      |
15851/// +---+---------------+
15852/// | 1 | Xmm, Xmm, Mem |
15853/// | 2 | Xmm, Xmm, Xmm |
15854/// | 3 | Ymm, Ymm, Mem |
15855/// | 4 | Ymm, Ymm, Ymm |
15856/// | 5 | Zmm, Zmm, Mem |
15857/// | 6 | Zmm, Zmm, Zmm |
15858/// +---+---------------+
15859/// ```
15860pub trait Vfmadd132phMaskEmitter<A, B, C> {
15861    fn vfmadd132ph_mask(&mut self, op0: A, op1: B, op2: C);
15862}
15863
15864impl<'a> Vfmadd132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15865    fn vfmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15866        self.emit(VFMADD132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15867    }
15868}
15869
15870impl<'a> Vfmadd132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15871    fn vfmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15872        self.emit(VFMADD132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15873    }
15874}
15875
15876impl<'a> Vfmadd132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15877    fn vfmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15878        self.emit(VFMADD132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15879    }
15880}
15881
15882impl<'a> Vfmadd132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15883    fn vfmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15884        self.emit(VFMADD132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15885    }
15886}
15887
15888impl<'a> Vfmadd132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15889    fn vfmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15890        self.emit(VFMADD132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15891    }
15892}
15893
15894impl<'a> Vfmadd132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15895    fn vfmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15896        self.emit(VFMADD132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15897    }
15898}
15899
15900/// `VFMADD132PH_MASK_ER`.
15901///
15902/// Supported operand variants:
15903///
15904/// ```text
15905/// +---+---------------+
15906/// | # | Operands      |
15907/// +---+---------------+
15908/// | 1 | Zmm, Zmm, Zmm |
15909/// +---+---------------+
15910/// ```
15911pub trait Vfmadd132phMaskErEmitter<A, B, C> {
15912    fn vfmadd132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
15913}
15914
15915impl<'a> Vfmadd132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15916    fn vfmadd132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15917        self.emit(VFMADD132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15918    }
15919}
15920
15921/// `VFMADD132PH_MASKZ`.
15922///
15923/// Supported operand variants:
15924///
15925/// ```text
15926/// +---+---------------+
15927/// | # | Operands      |
15928/// +---+---------------+
15929/// | 1 | Xmm, Xmm, Mem |
15930/// | 2 | Xmm, Xmm, Xmm |
15931/// | 3 | Ymm, Ymm, Mem |
15932/// | 4 | Ymm, Ymm, Ymm |
15933/// | 5 | Zmm, Zmm, Mem |
15934/// | 6 | Zmm, Zmm, Zmm |
15935/// +---+---------------+
15936/// ```
15937pub trait Vfmadd132phMaskzEmitter<A, B, C> {
15938    fn vfmadd132ph_maskz(&mut self, op0: A, op1: B, op2: C);
15939}
15940
15941impl<'a> Vfmadd132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
15942    fn vfmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
15943        self.emit(VFMADD132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15944    }
15945}
15946
15947impl<'a> Vfmadd132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
15948    fn vfmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
15949        self.emit(VFMADD132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15950    }
15951}
15952
15953impl<'a> Vfmadd132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
15954    fn vfmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
15955        self.emit(VFMADD132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15956    }
15957}
15958
15959impl<'a> Vfmadd132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
15960    fn vfmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
15961        self.emit(VFMADD132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15962    }
15963}
15964
15965impl<'a> Vfmadd132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15966    fn vfmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15967        self.emit(VFMADD132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15968    }
15969}
15970
15971impl<'a> Vfmadd132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
15972    fn vfmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
15973        self.emit(VFMADD132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15974    }
15975}
15976
15977/// `VFMADD132PH_MASKZ_ER`.
15978///
15979/// Supported operand variants:
15980///
15981/// ```text
15982/// +---+---------------+
15983/// | # | Operands      |
15984/// +---+---------------+
15985/// | 1 | Zmm, Zmm, Zmm |
15986/// +---+---------------+
15987/// ```
15988pub trait Vfmadd132phMaskzErEmitter<A, B, C> {
15989    fn vfmadd132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
15990}
15991
15992impl<'a> Vfmadd132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
15993    fn vfmadd132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
15994        self.emit(VFMADD132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
15995    }
15996}
15997
15998/// `VFMADD132SH`.
15999///
16000/// Supported operand variants:
16001///
16002/// ```text
16003/// +---+---------------+
16004/// | # | Operands      |
16005/// +---+---------------+
16006/// | 1 | Xmm, Xmm, Mem |
16007/// | 2 | Xmm, Xmm, Xmm |
16008/// +---+---------------+
16009/// ```
16010pub trait Vfmadd132shEmitter<A, B, C> {
16011    fn vfmadd132sh(&mut self, op0: A, op1: B, op2: C);
16012}
16013
16014impl<'a> Vfmadd132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16015    fn vfmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16016        self.emit(VFMADD132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16017    }
16018}
16019
16020impl<'a> Vfmadd132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16021    fn vfmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16022        self.emit(VFMADD132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16023    }
16024}
16025
16026/// `VFMADD132SH_ER`.
16027///
16028/// Supported operand variants:
16029///
16030/// ```text
16031/// +---+---------------+
16032/// | # | Operands      |
16033/// +---+---------------+
16034/// | 1 | Xmm, Xmm, Xmm |
16035/// +---+---------------+
16036/// ```
16037pub trait Vfmadd132shErEmitter<A, B, C> {
16038    fn vfmadd132sh_er(&mut self, op0: A, op1: B, op2: C);
16039}
16040
16041impl<'a> Vfmadd132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16042    fn vfmadd132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16043        self.emit(VFMADD132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16044    }
16045}
16046
16047/// `VFMADD132SH_MASK`.
16048///
16049/// Supported operand variants:
16050///
16051/// ```text
16052/// +---+---------------+
16053/// | # | Operands      |
16054/// +---+---------------+
16055/// | 1 | Xmm, Xmm, Mem |
16056/// | 2 | Xmm, Xmm, Xmm |
16057/// +---+---------------+
16058/// ```
16059pub trait Vfmadd132shMaskEmitter<A, B, C> {
16060    fn vfmadd132sh_mask(&mut self, op0: A, op1: B, op2: C);
16061}
16062
16063impl<'a> Vfmadd132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16064    fn vfmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16065        self.emit(VFMADD132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16066    }
16067}
16068
16069impl<'a> Vfmadd132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16070    fn vfmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16071        self.emit(VFMADD132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16072    }
16073}
16074
16075/// `VFMADD132SH_MASK_ER`.
16076///
16077/// Supported operand variants:
16078///
16079/// ```text
16080/// +---+---------------+
16081/// | # | Operands      |
16082/// +---+---------------+
16083/// | 1 | Xmm, Xmm, Xmm |
16084/// +---+---------------+
16085/// ```
16086pub trait Vfmadd132shMaskErEmitter<A, B, C> {
16087    fn vfmadd132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
16088}
16089
16090impl<'a> Vfmadd132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16091    fn vfmadd132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16092        self.emit(VFMADD132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16093    }
16094}
16095
16096/// `VFMADD132SH_MASKZ`.
16097///
16098/// Supported operand variants:
16099///
16100/// ```text
16101/// +---+---------------+
16102/// | # | Operands      |
16103/// +---+---------------+
16104/// | 1 | Xmm, Xmm, Mem |
16105/// | 2 | Xmm, Xmm, Xmm |
16106/// +---+---------------+
16107/// ```
16108pub trait Vfmadd132shMaskzEmitter<A, B, C> {
16109    fn vfmadd132sh_maskz(&mut self, op0: A, op1: B, op2: C);
16110}
16111
16112impl<'a> Vfmadd132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16113    fn vfmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16114        self.emit(VFMADD132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16115    }
16116}
16117
16118impl<'a> Vfmadd132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16119    fn vfmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16120        self.emit(VFMADD132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16121    }
16122}
16123
16124/// `VFMADD132SH_MASKZ_ER`.
16125///
16126/// Supported operand variants:
16127///
16128/// ```text
16129/// +---+---------------+
16130/// | # | Operands      |
16131/// +---+---------------+
16132/// | 1 | Xmm, Xmm, Xmm |
16133/// +---+---------------+
16134/// ```
16135pub trait Vfmadd132shMaskzErEmitter<A, B, C> {
16136    fn vfmadd132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
16137}
16138
16139impl<'a> Vfmadd132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16140    fn vfmadd132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16141        self.emit(VFMADD132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16142    }
16143}
16144
16145/// `VFMADD213PH`.
16146///
16147/// Supported operand variants:
16148///
16149/// ```text
16150/// +---+---------------+
16151/// | # | Operands      |
16152/// +---+---------------+
16153/// | 1 | Xmm, Xmm, Mem |
16154/// | 2 | Xmm, Xmm, Xmm |
16155/// | 3 | Ymm, Ymm, Mem |
16156/// | 4 | Ymm, Ymm, Ymm |
16157/// | 5 | Zmm, Zmm, Mem |
16158/// | 6 | Zmm, Zmm, Zmm |
16159/// +---+---------------+
16160/// ```
16161pub trait Vfmadd213phEmitter<A, B, C> {
16162    fn vfmadd213ph(&mut self, op0: A, op1: B, op2: C);
16163}
16164
16165impl<'a> Vfmadd213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16166    fn vfmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16167        self.emit(VFMADD213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16168    }
16169}
16170
16171impl<'a> Vfmadd213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16172    fn vfmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16173        self.emit(VFMADD213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16174    }
16175}
16176
16177impl<'a> Vfmadd213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16178    fn vfmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16179        self.emit(VFMADD213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16180    }
16181}
16182
16183impl<'a> Vfmadd213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16184    fn vfmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16185        self.emit(VFMADD213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16186    }
16187}
16188
16189impl<'a> Vfmadd213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16190    fn vfmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16191        self.emit(VFMADD213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16192    }
16193}
16194
16195impl<'a> Vfmadd213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16196    fn vfmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16197        self.emit(VFMADD213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16198    }
16199}
16200
16201/// `VFMADD213PH_ER`.
16202///
16203/// Supported operand variants:
16204///
16205/// ```text
16206/// +---+---------------+
16207/// | # | Operands      |
16208/// +---+---------------+
16209/// | 1 | Zmm, Zmm, Zmm |
16210/// +---+---------------+
16211/// ```
16212pub trait Vfmadd213phErEmitter<A, B, C> {
16213    fn vfmadd213ph_er(&mut self, op0: A, op1: B, op2: C);
16214}
16215
16216impl<'a> Vfmadd213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16217    fn vfmadd213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16218        self.emit(VFMADD213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16219    }
16220}
16221
16222/// `VFMADD213PH_MASK`.
16223///
16224/// Supported operand variants:
16225///
16226/// ```text
16227/// +---+---------------+
16228/// | # | Operands      |
16229/// +---+---------------+
16230/// | 1 | Xmm, Xmm, Mem |
16231/// | 2 | Xmm, Xmm, Xmm |
16232/// | 3 | Ymm, Ymm, Mem |
16233/// | 4 | Ymm, Ymm, Ymm |
16234/// | 5 | Zmm, Zmm, Mem |
16235/// | 6 | Zmm, Zmm, Zmm |
16236/// +---+---------------+
16237/// ```
16238pub trait Vfmadd213phMaskEmitter<A, B, C> {
16239    fn vfmadd213ph_mask(&mut self, op0: A, op1: B, op2: C);
16240}
16241
16242impl<'a> Vfmadd213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16243    fn vfmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16244        self.emit(VFMADD213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16245    }
16246}
16247
16248impl<'a> Vfmadd213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16249    fn vfmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16250        self.emit(VFMADD213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16251    }
16252}
16253
16254impl<'a> Vfmadd213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16255    fn vfmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16256        self.emit(VFMADD213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16257    }
16258}
16259
16260impl<'a> Vfmadd213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16261    fn vfmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16262        self.emit(VFMADD213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16263    }
16264}
16265
16266impl<'a> Vfmadd213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16267    fn vfmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16268        self.emit(VFMADD213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16269    }
16270}
16271
16272impl<'a> Vfmadd213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16273    fn vfmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16274        self.emit(VFMADD213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16275    }
16276}
16277
16278/// `VFMADD213PH_MASK_ER`.
16279///
16280/// Supported operand variants:
16281///
16282/// ```text
16283/// +---+---------------+
16284/// | # | Operands      |
16285/// +---+---------------+
16286/// | 1 | Zmm, Zmm, Zmm |
16287/// +---+---------------+
16288/// ```
16289pub trait Vfmadd213phMaskErEmitter<A, B, C> {
16290    fn vfmadd213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
16291}
16292
16293impl<'a> Vfmadd213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16294    fn vfmadd213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16295        self.emit(VFMADD213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16296    }
16297}
16298
16299/// `VFMADD213PH_MASKZ`.
16300///
16301/// Supported operand variants:
16302///
16303/// ```text
16304/// +---+---------------+
16305/// | # | Operands      |
16306/// +---+---------------+
16307/// | 1 | Xmm, Xmm, Mem |
16308/// | 2 | Xmm, Xmm, Xmm |
16309/// | 3 | Ymm, Ymm, Mem |
16310/// | 4 | Ymm, Ymm, Ymm |
16311/// | 5 | Zmm, Zmm, Mem |
16312/// | 6 | Zmm, Zmm, Zmm |
16313/// +---+---------------+
16314/// ```
16315pub trait Vfmadd213phMaskzEmitter<A, B, C> {
16316    fn vfmadd213ph_maskz(&mut self, op0: A, op1: B, op2: C);
16317}
16318
16319impl<'a> Vfmadd213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16320    fn vfmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16321        self.emit(VFMADD213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16322    }
16323}
16324
16325impl<'a> Vfmadd213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16326    fn vfmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16327        self.emit(VFMADD213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16328    }
16329}
16330
16331impl<'a> Vfmadd213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16332    fn vfmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16333        self.emit(VFMADD213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16334    }
16335}
16336
16337impl<'a> Vfmadd213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16338    fn vfmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16339        self.emit(VFMADD213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16340    }
16341}
16342
16343impl<'a> Vfmadd213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16344    fn vfmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16345        self.emit(VFMADD213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16346    }
16347}
16348
16349impl<'a> Vfmadd213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16350    fn vfmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16351        self.emit(VFMADD213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16352    }
16353}
16354
16355/// `VFMADD213PH_MASKZ_ER`.
16356///
16357/// Supported operand variants:
16358///
16359/// ```text
16360/// +---+---------------+
16361/// | # | Operands      |
16362/// +---+---------------+
16363/// | 1 | Zmm, Zmm, Zmm |
16364/// +---+---------------+
16365/// ```
16366pub trait Vfmadd213phMaskzErEmitter<A, B, C> {
16367    fn vfmadd213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
16368}
16369
16370impl<'a> Vfmadd213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16371    fn vfmadd213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16372        self.emit(VFMADD213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16373    }
16374}
16375
16376/// `VFMADD213SH`.
16377///
16378/// Supported operand variants:
16379///
16380/// ```text
16381/// +---+---------------+
16382/// | # | Operands      |
16383/// +---+---------------+
16384/// | 1 | Xmm, Xmm, Mem |
16385/// | 2 | Xmm, Xmm, Xmm |
16386/// +---+---------------+
16387/// ```
16388pub trait Vfmadd213shEmitter<A, B, C> {
16389    fn vfmadd213sh(&mut self, op0: A, op1: B, op2: C);
16390}
16391
16392impl<'a> Vfmadd213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16393    fn vfmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16394        self.emit(VFMADD213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16395    }
16396}
16397
16398impl<'a> Vfmadd213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16399    fn vfmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16400        self.emit(VFMADD213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16401    }
16402}
16403
16404/// `VFMADD213SH_ER`.
16405///
16406/// Supported operand variants:
16407///
16408/// ```text
16409/// +---+---------------+
16410/// | # | Operands      |
16411/// +---+---------------+
16412/// | 1 | Xmm, Xmm, Xmm |
16413/// +---+---------------+
16414/// ```
16415pub trait Vfmadd213shErEmitter<A, B, C> {
16416    fn vfmadd213sh_er(&mut self, op0: A, op1: B, op2: C);
16417}
16418
16419impl<'a> Vfmadd213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16420    fn vfmadd213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16421        self.emit(VFMADD213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16422    }
16423}
16424
16425/// `VFMADD213SH_MASK`.
16426///
16427/// Supported operand variants:
16428///
16429/// ```text
16430/// +---+---------------+
16431/// | # | Operands      |
16432/// +---+---------------+
16433/// | 1 | Xmm, Xmm, Mem |
16434/// | 2 | Xmm, Xmm, Xmm |
16435/// +---+---------------+
16436/// ```
16437pub trait Vfmadd213shMaskEmitter<A, B, C> {
16438    fn vfmadd213sh_mask(&mut self, op0: A, op1: B, op2: C);
16439}
16440
16441impl<'a> Vfmadd213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16442    fn vfmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16443        self.emit(VFMADD213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16444    }
16445}
16446
16447impl<'a> Vfmadd213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16448    fn vfmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16449        self.emit(VFMADD213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16450    }
16451}
16452
16453/// `VFMADD213SH_MASK_ER`.
16454///
16455/// Supported operand variants:
16456///
16457/// ```text
16458/// +---+---------------+
16459/// | # | Operands      |
16460/// +---+---------------+
16461/// | 1 | Xmm, Xmm, Xmm |
16462/// +---+---------------+
16463/// ```
16464pub trait Vfmadd213shMaskErEmitter<A, B, C> {
16465    fn vfmadd213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
16466}
16467
16468impl<'a> Vfmadd213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16469    fn vfmadd213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16470        self.emit(VFMADD213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16471    }
16472}
16473
16474/// `VFMADD213SH_MASKZ`.
16475///
16476/// Supported operand variants:
16477///
16478/// ```text
16479/// +---+---------------+
16480/// | # | Operands      |
16481/// +---+---------------+
16482/// | 1 | Xmm, Xmm, Mem |
16483/// | 2 | Xmm, Xmm, Xmm |
16484/// +---+---------------+
16485/// ```
16486pub trait Vfmadd213shMaskzEmitter<A, B, C> {
16487    fn vfmadd213sh_maskz(&mut self, op0: A, op1: B, op2: C);
16488}
16489
16490impl<'a> Vfmadd213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16491    fn vfmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16492        self.emit(VFMADD213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16493    }
16494}
16495
16496impl<'a> Vfmadd213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16497    fn vfmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16498        self.emit(VFMADD213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16499    }
16500}
16501
16502/// `VFMADD213SH_MASKZ_ER`.
16503///
16504/// Supported operand variants:
16505///
16506/// ```text
16507/// +---+---------------+
16508/// | # | Operands      |
16509/// +---+---------------+
16510/// | 1 | Xmm, Xmm, Xmm |
16511/// +---+---------------+
16512/// ```
16513pub trait Vfmadd213shMaskzErEmitter<A, B, C> {
16514    fn vfmadd213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
16515}
16516
16517impl<'a> Vfmadd213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16518    fn vfmadd213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16519        self.emit(VFMADD213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16520    }
16521}
16522
16523/// `VFMADD231PH`.
16524///
16525/// Supported operand variants:
16526///
16527/// ```text
16528/// +---+---------------+
16529/// | # | Operands      |
16530/// +---+---------------+
16531/// | 1 | Xmm, Xmm, Mem |
16532/// | 2 | Xmm, Xmm, Xmm |
16533/// | 3 | Ymm, Ymm, Mem |
16534/// | 4 | Ymm, Ymm, Ymm |
16535/// | 5 | Zmm, Zmm, Mem |
16536/// | 6 | Zmm, Zmm, Zmm |
16537/// +---+---------------+
16538/// ```
16539pub trait Vfmadd231phEmitter<A, B, C> {
16540    fn vfmadd231ph(&mut self, op0: A, op1: B, op2: C);
16541}
16542
16543impl<'a> Vfmadd231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16544    fn vfmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16545        self.emit(VFMADD231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16546    }
16547}
16548
16549impl<'a> Vfmadd231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16550    fn vfmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16551        self.emit(VFMADD231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16552    }
16553}
16554
16555impl<'a> Vfmadd231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16556    fn vfmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16557        self.emit(VFMADD231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16558    }
16559}
16560
16561impl<'a> Vfmadd231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16562    fn vfmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16563        self.emit(VFMADD231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16564    }
16565}
16566
16567impl<'a> Vfmadd231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16568    fn vfmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16569        self.emit(VFMADD231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16570    }
16571}
16572
16573impl<'a> Vfmadd231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16574    fn vfmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16575        self.emit(VFMADD231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16576    }
16577}
16578
16579/// `VFMADD231PH_ER`.
16580///
16581/// Supported operand variants:
16582///
16583/// ```text
16584/// +---+---------------+
16585/// | # | Operands      |
16586/// +---+---------------+
16587/// | 1 | Zmm, Zmm, Zmm |
16588/// +---+---------------+
16589/// ```
16590pub trait Vfmadd231phErEmitter<A, B, C> {
16591    fn vfmadd231ph_er(&mut self, op0: A, op1: B, op2: C);
16592}
16593
16594impl<'a> Vfmadd231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16595    fn vfmadd231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16596        self.emit(VFMADD231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16597    }
16598}
16599
16600/// `VFMADD231PH_MASK`.
16601///
16602/// Supported operand variants:
16603///
16604/// ```text
16605/// +---+---------------+
16606/// | # | Operands      |
16607/// +---+---------------+
16608/// | 1 | Xmm, Xmm, Mem |
16609/// | 2 | Xmm, Xmm, Xmm |
16610/// | 3 | Ymm, Ymm, Mem |
16611/// | 4 | Ymm, Ymm, Ymm |
16612/// | 5 | Zmm, Zmm, Mem |
16613/// | 6 | Zmm, Zmm, Zmm |
16614/// +---+---------------+
16615/// ```
16616pub trait Vfmadd231phMaskEmitter<A, B, C> {
16617    fn vfmadd231ph_mask(&mut self, op0: A, op1: B, op2: C);
16618}
16619
16620impl<'a> Vfmadd231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16621    fn vfmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16622        self.emit(VFMADD231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16623    }
16624}
16625
16626impl<'a> Vfmadd231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16627    fn vfmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16628        self.emit(VFMADD231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16629    }
16630}
16631
16632impl<'a> Vfmadd231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16633    fn vfmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16634        self.emit(VFMADD231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16635    }
16636}
16637
16638impl<'a> Vfmadd231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16639    fn vfmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16640        self.emit(VFMADD231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16641    }
16642}
16643
16644impl<'a> Vfmadd231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16645    fn vfmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16646        self.emit(VFMADD231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16647    }
16648}
16649
16650impl<'a> Vfmadd231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16651    fn vfmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16652        self.emit(VFMADD231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16653    }
16654}
16655
16656/// `VFMADD231PH_MASK_ER`.
16657///
16658/// Supported operand variants:
16659///
16660/// ```text
16661/// +---+---------------+
16662/// | # | Operands      |
16663/// +---+---------------+
16664/// | 1 | Zmm, Zmm, Zmm |
16665/// +---+---------------+
16666/// ```
16667pub trait Vfmadd231phMaskErEmitter<A, B, C> {
16668    fn vfmadd231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
16669}
16670
16671impl<'a> Vfmadd231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16672    fn vfmadd231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16673        self.emit(VFMADD231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16674    }
16675}
16676
16677/// `VFMADD231PH_MASKZ`.
16678///
16679/// Supported operand variants:
16680///
16681/// ```text
16682/// +---+---------------+
16683/// | # | Operands      |
16684/// +---+---------------+
16685/// | 1 | Xmm, Xmm, Mem |
16686/// | 2 | Xmm, Xmm, Xmm |
16687/// | 3 | Ymm, Ymm, Mem |
16688/// | 4 | Ymm, Ymm, Ymm |
16689/// | 5 | Zmm, Zmm, Mem |
16690/// | 6 | Zmm, Zmm, Zmm |
16691/// +---+---------------+
16692/// ```
16693pub trait Vfmadd231phMaskzEmitter<A, B, C> {
16694    fn vfmadd231ph_maskz(&mut self, op0: A, op1: B, op2: C);
16695}
16696
16697impl<'a> Vfmadd231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16698    fn vfmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16699        self.emit(VFMADD231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16700    }
16701}
16702
16703impl<'a> Vfmadd231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16704    fn vfmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16705        self.emit(VFMADD231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16706    }
16707}
16708
16709impl<'a> Vfmadd231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16710    fn vfmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16711        self.emit(VFMADD231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16712    }
16713}
16714
16715impl<'a> Vfmadd231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16716    fn vfmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16717        self.emit(VFMADD231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16718    }
16719}
16720
16721impl<'a> Vfmadd231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16722    fn vfmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16723        self.emit(VFMADD231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16724    }
16725}
16726
16727impl<'a> Vfmadd231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16728    fn vfmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16729        self.emit(VFMADD231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16730    }
16731}
16732
16733/// `VFMADD231PH_MASKZ_ER`.
16734///
16735/// Supported operand variants:
16736///
16737/// ```text
16738/// +---+---------------+
16739/// | # | Operands      |
16740/// +---+---------------+
16741/// | 1 | Zmm, Zmm, Zmm |
16742/// +---+---------------+
16743/// ```
16744pub trait Vfmadd231phMaskzErEmitter<A, B, C> {
16745    fn vfmadd231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
16746}
16747
16748impl<'a> Vfmadd231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16749    fn vfmadd231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16750        self.emit(VFMADD231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16751    }
16752}
16753
16754/// `VFMADD231SH`.
16755///
16756/// Supported operand variants:
16757///
16758/// ```text
16759/// +---+---------------+
16760/// | # | Operands      |
16761/// +---+---------------+
16762/// | 1 | Xmm, Xmm, Mem |
16763/// | 2 | Xmm, Xmm, Xmm |
16764/// +---+---------------+
16765/// ```
16766pub trait Vfmadd231shEmitter<A, B, C> {
16767    fn vfmadd231sh(&mut self, op0: A, op1: B, op2: C);
16768}
16769
16770impl<'a> Vfmadd231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16771    fn vfmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16772        self.emit(VFMADD231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16773    }
16774}
16775
16776impl<'a> Vfmadd231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16777    fn vfmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16778        self.emit(VFMADD231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16779    }
16780}
16781
16782/// `VFMADD231SH_ER`.
16783///
16784/// Supported operand variants:
16785///
16786/// ```text
16787/// +---+---------------+
16788/// | # | Operands      |
16789/// +---+---------------+
16790/// | 1 | Xmm, Xmm, Xmm |
16791/// +---+---------------+
16792/// ```
16793pub trait Vfmadd231shErEmitter<A, B, C> {
16794    fn vfmadd231sh_er(&mut self, op0: A, op1: B, op2: C);
16795}
16796
16797impl<'a> Vfmadd231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16798    fn vfmadd231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16799        self.emit(VFMADD231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16800    }
16801}
16802
16803/// `VFMADD231SH_MASK`.
16804///
16805/// Supported operand variants:
16806///
16807/// ```text
16808/// +---+---------------+
16809/// | # | Operands      |
16810/// +---+---------------+
16811/// | 1 | Xmm, Xmm, Mem |
16812/// | 2 | Xmm, Xmm, Xmm |
16813/// +---+---------------+
16814/// ```
16815pub trait Vfmadd231shMaskEmitter<A, B, C> {
16816    fn vfmadd231sh_mask(&mut self, op0: A, op1: B, op2: C);
16817}
16818
16819impl<'a> Vfmadd231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16820    fn vfmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16821        self.emit(VFMADD231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16822    }
16823}
16824
16825impl<'a> Vfmadd231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16826    fn vfmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16827        self.emit(VFMADD231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16828    }
16829}
16830
16831/// `VFMADD231SH_MASK_ER`.
16832///
16833/// Supported operand variants:
16834///
16835/// ```text
16836/// +---+---------------+
16837/// | # | Operands      |
16838/// +---+---------------+
16839/// | 1 | Xmm, Xmm, Xmm |
16840/// +---+---------------+
16841/// ```
16842pub trait Vfmadd231shMaskErEmitter<A, B, C> {
16843    fn vfmadd231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
16844}
16845
16846impl<'a> Vfmadd231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16847    fn vfmadd231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16848        self.emit(VFMADD231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16849    }
16850}
16851
16852/// `VFMADD231SH_MASKZ`.
16853///
16854/// Supported operand variants:
16855///
16856/// ```text
16857/// +---+---------------+
16858/// | # | Operands      |
16859/// +---+---------------+
16860/// | 1 | Xmm, Xmm, Mem |
16861/// | 2 | Xmm, Xmm, Xmm |
16862/// +---+---------------+
16863/// ```
16864pub trait Vfmadd231shMaskzEmitter<A, B, C> {
16865    fn vfmadd231sh_maskz(&mut self, op0: A, op1: B, op2: C);
16866}
16867
16868impl<'a> Vfmadd231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16869    fn vfmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16870        self.emit(VFMADD231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16871    }
16872}
16873
16874impl<'a> Vfmadd231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16875    fn vfmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16876        self.emit(VFMADD231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16877    }
16878}
16879
16880/// `VFMADD231SH_MASKZ_ER`.
16881///
16882/// Supported operand variants:
16883///
16884/// ```text
16885/// +---+---------------+
16886/// | # | Operands      |
16887/// +---+---------------+
16888/// | 1 | Xmm, Xmm, Xmm |
16889/// +---+---------------+
16890/// ```
16891pub trait Vfmadd231shMaskzErEmitter<A, B, C> {
16892    fn vfmadd231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
16893}
16894
16895impl<'a> Vfmadd231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16896    fn vfmadd231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16897        self.emit(VFMADD231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16898    }
16899}
16900
16901/// `VFMADDCPH`.
16902///
16903/// Supported operand variants:
16904///
16905/// ```text
16906/// +---+---------------+
16907/// | # | Operands      |
16908/// +---+---------------+
16909/// | 1 | Xmm, Xmm, Mem |
16910/// | 2 | Xmm, Xmm, Xmm |
16911/// | 3 | Ymm, Ymm, Mem |
16912/// | 4 | Ymm, Ymm, Ymm |
16913/// | 5 | Zmm, Zmm, Mem |
16914/// | 6 | Zmm, Zmm, Zmm |
16915/// +---+---------------+
16916/// ```
16917pub trait VfmaddcphEmitter<A, B, C> {
16918    fn vfmaddcph(&mut self, op0: A, op1: B, op2: C);
16919}
16920
16921impl<'a> VfmaddcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16922    fn vfmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
16923        self.emit(VFMADDCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16924    }
16925}
16926
16927impl<'a> VfmaddcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
16928    fn vfmaddcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
16929        self.emit(VFMADDCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16930    }
16931}
16932
16933impl<'a> VfmaddcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
16934    fn vfmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
16935        self.emit(VFMADDCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16936    }
16937}
16938
16939impl<'a> VfmaddcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
16940    fn vfmaddcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
16941        self.emit(VFMADDCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16942    }
16943}
16944
16945impl<'a> VfmaddcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16946    fn vfmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16947        self.emit(VFMADDCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16948    }
16949}
16950
16951impl<'a> VfmaddcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
16952    fn vfmaddcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
16953        self.emit(VFMADDCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16954    }
16955}
16956
16957/// `VFMADDCPH_ER`.
16958///
16959/// Supported operand variants:
16960///
16961/// ```text
16962/// +---+---------------+
16963/// | # | Operands      |
16964/// +---+---------------+
16965/// | 1 | Zmm, Zmm, Zmm |
16966/// +---+---------------+
16967/// ```
16968pub trait VfmaddcphErEmitter<A, B, C> {
16969    fn vfmaddcph_er(&mut self, op0: A, op1: B, op2: C);
16970}
16971
16972impl<'a> VfmaddcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
16973    fn vfmaddcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
16974        self.emit(VFMADDCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
16975    }
16976}
16977
16978/// `VFMADDCPH_MASK`.
16979///
16980/// Supported operand variants:
16981///
16982/// ```text
16983/// +---+---------------+
16984/// | # | Operands      |
16985/// +---+---------------+
16986/// | 1 | Xmm, Xmm, Mem |
16987/// | 2 | Xmm, Xmm, Xmm |
16988/// | 3 | Ymm, Ymm, Mem |
16989/// | 4 | Ymm, Ymm, Ymm |
16990/// | 5 | Zmm, Zmm, Mem |
16991/// | 6 | Zmm, Zmm, Zmm |
16992/// +---+---------------+
16993/// ```
16994pub trait VfmaddcphMaskEmitter<A, B, C> {
16995    fn vfmaddcph_mask(&mut self, op0: A, op1: B, op2: C);
16996}
16997
16998impl<'a> VfmaddcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
16999    fn vfmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17000        self.emit(VFMADDCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17001    }
17002}
17003
17004impl<'a> VfmaddcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17005    fn vfmaddcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17006        self.emit(VFMADDCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17007    }
17008}
17009
17010impl<'a> VfmaddcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17011    fn vfmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17012        self.emit(VFMADDCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17013    }
17014}
17015
17016impl<'a> VfmaddcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17017    fn vfmaddcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17018        self.emit(VFMADDCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17019    }
17020}
17021
17022impl<'a> VfmaddcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17023    fn vfmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17024        self.emit(VFMADDCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17025    }
17026}
17027
17028impl<'a> VfmaddcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17029    fn vfmaddcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17030        self.emit(VFMADDCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17031    }
17032}
17033
17034/// `VFMADDCPH_MASK_ER`.
17035///
17036/// Supported operand variants:
17037///
17038/// ```text
17039/// +---+---------------+
17040/// | # | Operands      |
17041/// +---+---------------+
17042/// | 1 | Zmm, Zmm, Zmm |
17043/// +---+---------------+
17044/// ```
17045pub trait VfmaddcphMaskErEmitter<A, B, C> {
17046    fn vfmaddcph_mask_er(&mut self, op0: A, op1: B, op2: C);
17047}
17048
17049impl<'a> VfmaddcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17050    fn vfmaddcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17051        self.emit(VFMADDCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17052    }
17053}
17054
17055/// `VFMADDCPH_MASKZ`.
17056///
17057/// Supported operand variants:
17058///
17059/// ```text
17060/// +---+---------------+
17061/// | # | Operands      |
17062/// +---+---------------+
17063/// | 1 | Xmm, Xmm, Mem |
17064/// | 2 | Xmm, Xmm, Xmm |
17065/// | 3 | Ymm, Ymm, Mem |
17066/// | 4 | Ymm, Ymm, Ymm |
17067/// | 5 | Zmm, Zmm, Mem |
17068/// | 6 | Zmm, Zmm, Zmm |
17069/// +---+---------------+
17070/// ```
17071pub trait VfmaddcphMaskzEmitter<A, B, C> {
17072    fn vfmaddcph_maskz(&mut self, op0: A, op1: B, op2: C);
17073}
17074
17075impl<'a> VfmaddcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17076    fn vfmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17077        self.emit(VFMADDCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17078    }
17079}
17080
17081impl<'a> VfmaddcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17082    fn vfmaddcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17083        self.emit(VFMADDCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17084    }
17085}
17086
17087impl<'a> VfmaddcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17088    fn vfmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17089        self.emit(VFMADDCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17090    }
17091}
17092
17093impl<'a> VfmaddcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17094    fn vfmaddcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17095        self.emit(VFMADDCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17096    }
17097}
17098
17099impl<'a> VfmaddcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17100    fn vfmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17101        self.emit(VFMADDCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17102    }
17103}
17104
17105impl<'a> VfmaddcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17106    fn vfmaddcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17107        self.emit(VFMADDCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17108    }
17109}
17110
17111/// `VFMADDCPH_MASKZ_ER`.
17112///
17113/// Supported operand variants:
17114///
17115/// ```text
17116/// +---+---------------+
17117/// | # | Operands      |
17118/// +---+---------------+
17119/// | 1 | Zmm, Zmm, Zmm |
17120/// +---+---------------+
17121/// ```
17122pub trait VfmaddcphMaskzErEmitter<A, B, C> {
17123    fn vfmaddcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
17124}
17125
17126impl<'a> VfmaddcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17127    fn vfmaddcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17128        self.emit(VFMADDCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17129    }
17130}
17131
17132/// `VFMADDCSH`.
17133///
17134/// Supported operand variants:
17135///
17136/// ```text
17137/// +---+---------------+
17138/// | # | Operands      |
17139/// +---+---------------+
17140/// | 1 | Xmm, Xmm, Mem |
17141/// | 2 | Xmm, Xmm, Xmm |
17142/// +---+---------------+
17143/// ```
17144pub trait VfmaddcshEmitter<A, B, C> {
17145    fn vfmaddcsh(&mut self, op0: A, op1: B, op2: C);
17146}
17147
17148impl<'a> VfmaddcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17149    fn vfmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17150        self.emit(VFMADDCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17151    }
17152}
17153
17154impl<'a> VfmaddcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17155    fn vfmaddcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17156        self.emit(VFMADDCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17157    }
17158}
17159
17160/// `VFMADDCSH_ER`.
17161///
17162/// Supported operand variants:
17163///
17164/// ```text
17165/// +---+---------------+
17166/// | # | Operands      |
17167/// +---+---------------+
17168/// | 1 | Xmm, Xmm, Xmm |
17169/// +---+---------------+
17170/// ```
17171pub trait VfmaddcshErEmitter<A, B, C> {
17172    fn vfmaddcsh_er(&mut self, op0: A, op1: B, op2: C);
17173}
17174
17175impl<'a> VfmaddcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17176    fn vfmaddcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17177        self.emit(VFMADDCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17178    }
17179}
17180
17181/// `VFMADDCSH_MASK`.
17182///
17183/// Supported operand variants:
17184///
17185/// ```text
17186/// +---+---------------+
17187/// | # | Operands      |
17188/// +---+---------------+
17189/// | 1 | Xmm, Xmm, Mem |
17190/// | 2 | Xmm, Xmm, Xmm |
17191/// +---+---------------+
17192/// ```
17193pub trait VfmaddcshMaskEmitter<A, B, C> {
17194    fn vfmaddcsh_mask(&mut self, op0: A, op1: B, op2: C);
17195}
17196
17197impl<'a> VfmaddcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17198    fn vfmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17199        self.emit(VFMADDCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17200    }
17201}
17202
17203impl<'a> VfmaddcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17204    fn vfmaddcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17205        self.emit(VFMADDCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17206    }
17207}
17208
17209/// `VFMADDCSH_MASK_ER`.
17210///
17211/// Supported operand variants:
17212///
17213/// ```text
17214/// +---+---------------+
17215/// | # | Operands      |
17216/// +---+---------------+
17217/// | 1 | Xmm, Xmm, Xmm |
17218/// +---+---------------+
17219/// ```
17220pub trait VfmaddcshMaskErEmitter<A, B, C> {
17221    fn vfmaddcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
17222}
17223
17224impl<'a> VfmaddcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17225    fn vfmaddcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17226        self.emit(VFMADDCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17227    }
17228}
17229
17230/// `VFMADDCSH_MASKZ`.
17231///
17232/// Supported operand variants:
17233///
17234/// ```text
17235/// +---+---------------+
17236/// | # | Operands      |
17237/// +---+---------------+
17238/// | 1 | Xmm, Xmm, Mem |
17239/// | 2 | Xmm, Xmm, Xmm |
17240/// +---+---------------+
17241/// ```
17242pub trait VfmaddcshMaskzEmitter<A, B, C> {
17243    fn vfmaddcsh_maskz(&mut self, op0: A, op1: B, op2: C);
17244}
17245
17246impl<'a> VfmaddcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17247    fn vfmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17248        self.emit(VFMADDCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17249    }
17250}
17251
17252impl<'a> VfmaddcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17253    fn vfmaddcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17254        self.emit(VFMADDCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17255    }
17256}
17257
17258/// `VFMADDCSH_MASKZ_ER`.
17259///
17260/// Supported operand variants:
17261///
17262/// ```text
17263/// +---+---------------+
17264/// | # | Operands      |
17265/// +---+---------------+
17266/// | 1 | Xmm, Xmm, Xmm |
17267/// +---+---------------+
17268/// ```
17269pub trait VfmaddcshMaskzErEmitter<A, B, C> {
17270    fn vfmaddcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
17271}
17272
17273impl<'a> VfmaddcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17274    fn vfmaddcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17275        self.emit(VFMADDCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17276    }
17277}
17278
17279/// `VFMADDSUB132PH`.
17280///
17281/// Supported operand variants:
17282///
17283/// ```text
17284/// +---+---------------+
17285/// | # | Operands      |
17286/// +---+---------------+
17287/// | 1 | Xmm, Xmm, Mem |
17288/// | 2 | Xmm, Xmm, Xmm |
17289/// | 3 | Ymm, Ymm, Mem |
17290/// | 4 | Ymm, Ymm, Ymm |
17291/// | 5 | Zmm, Zmm, Mem |
17292/// | 6 | Zmm, Zmm, Zmm |
17293/// +---+---------------+
17294/// ```
17295pub trait Vfmaddsub132phEmitter<A, B, C> {
17296    fn vfmaddsub132ph(&mut self, op0: A, op1: B, op2: C);
17297}
17298
17299impl<'a> Vfmaddsub132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17300    fn vfmaddsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17301        self.emit(VFMADDSUB132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17302    }
17303}
17304
17305impl<'a> Vfmaddsub132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17306    fn vfmaddsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17307        self.emit(VFMADDSUB132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17308    }
17309}
17310
17311impl<'a> Vfmaddsub132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17312    fn vfmaddsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17313        self.emit(VFMADDSUB132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17314    }
17315}
17316
17317impl<'a> Vfmaddsub132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17318    fn vfmaddsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17319        self.emit(VFMADDSUB132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17320    }
17321}
17322
17323impl<'a> Vfmaddsub132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17324    fn vfmaddsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17325        self.emit(VFMADDSUB132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17326    }
17327}
17328
17329impl<'a> Vfmaddsub132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17330    fn vfmaddsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17331        self.emit(VFMADDSUB132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17332    }
17333}
17334
17335/// `VFMADDSUB132PH_ER`.
17336///
17337/// Supported operand variants:
17338///
17339/// ```text
17340/// +---+---------------+
17341/// | # | Operands      |
17342/// +---+---------------+
17343/// | 1 | Zmm, Zmm, Zmm |
17344/// +---+---------------+
17345/// ```
17346pub trait Vfmaddsub132phErEmitter<A, B, C> {
17347    fn vfmaddsub132ph_er(&mut self, op0: A, op1: B, op2: C);
17348}
17349
17350impl<'a> Vfmaddsub132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17351    fn vfmaddsub132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17352        self.emit(VFMADDSUB132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17353    }
17354}
17355
17356/// `VFMADDSUB132PH_MASK`.
17357///
17358/// Supported operand variants:
17359///
17360/// ```text
17361/// +---+---------------+
17362/// | # | Operands      |
17363/// +---+---------------+
17364/// | 1 | Xmm, Xmm, Mem |
17365/// | 2 | Xmm, Xmm, Xmm |
17366/// | 3 | Ymm, Ymm, Mem |
17367/// | 4 | Ymm, Ymm, Ymm |
17368/// | 5 | Zmm, Zmm, Mem |
17369/// | 6 | Zmm, Zmm, Zmm |
17370/// +---+---------------+
17371/// ```
17372pub trait Vfmaddsub132phMaskEmitter<A, B, C> {
17373    fn vfmaddsub132ph_mask(&mut self, op0: A, op1: B, op2: C);
17374}
17375
17376impl<'a> Vfmaddsub132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17377    fn vfmaddsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17378        self.emit(VFMADDSUB132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17379    }
17380}
17381
17382impl<'a> Vfmaddsub132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17383    fn vfmaddsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17384        self.emit(VFMADDSUB132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17385    }
17386}
17387
17388impl<'a> Vfmaddsub132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17389    fn vfmaddsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17390        self.emit(VFMADDSUB132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17391    }
17392}
17393
17394impl<'a> Vfmaddsub132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17395    fn vfmaddsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17396        self.emit(VFMADDSUB132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17397    }
17398}
17399
17400impl<'a> Vfmaddsub132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17401    fn vfmaddsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17402        self.emit(VFMADDSUB132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17403    }
17404}
17405
17406impl<'a> Vfmaddsub132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17407    fn vfmaddsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17408        self.emit(VFMADDSUB132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17409    }
17410}
17411
17412/// `VFMADDSUB132PH_MASK_ER`.
17413///
17414/// Supported operand variants:
17415///
17416/// ```text
17417/// +---+---------------+
17418/// | # | Operands      |
17419/// +---+---------------+
17420/// | 1 | Zmm, Zmm, Zmm |
17421/// +---+---------------+
17422/// ```
17423pub trait Vfmaddsub132phMaskErEmitter<A, B, C> {
17424    fn vfmaddsub132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
17425}
17426
17427impl<'a> Vfmaddsub132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17428    fn vfmaddsub132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17429        self.emit(VFMADDSUB132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17430    }
17431}
17432
17433/// `VFMADDSUB132PH_MASKZ`.
17434///
17435/// Supported operand variants:
17436///
17437/// ```text
17438/// +---+---------------+
17439/// | # | Operands      |
17440/// +---+---------------+
17441/// | 1 | Xmm, Xmm, Mem |
17442/// | 2 | Xmm, Xmm, Xmm |
17443/// | 3 | Ymm, Ymm, Mem |
17444/// | 4 | Ymm, Ymm, Ymm |
17445/// | 5 | Zmm, Zmm, Mem |
17446/// | 6 | Zmm, Zmm, Zmm |
17447/// +---+---------------+
17448/// ```
17449pub trait Vfmaddsub132phMaskzEmitter<A, B, C> {
17450    fn vfmaddsub132ph_maskz(&mut self, op0: A, op1: B, op2: C);
17451}
17452
17453impl<'a> Vfmaddsub132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17454    fn vfmaddsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17455        self.emit(VFMADDSUB132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17456    }
17457}
17458
17459impl<'a> Vfmaddsub132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17460    fn vfmaddsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17461        self.emit(VFMADDSUB132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17462    }
17463}
17464
17465impl<'a> Vfmaddsub132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17466    fn vfmaddsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17467        self.emit(VFMADDSUB132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17468    }
17469}
17470
17471impl<'a> Vfmaddsub132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17472    fn vfmaddsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17473        self.emit(VFMADDSUB132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17474    }
17475}
17476
17477impl<'a> Vfmaddsub132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17478    fn vfmaddsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17479        self.emit(VFMADDSUB132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17480    }
17481}
17482
17483impl<'a> Vfmaddsub132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17484    fn vfmaddsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17485        self.emit(VFMADDSUB132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17486    }
17487}
17488
17489/// `VFMADDSUB132PH_MASKZ_ER`.
17490///
17491/// Supported operand variants:
17492///
17493/// ```text
17494/// +---+---------------+
17495/// | # | Operands      |
17496/// +---+---------------+
17497/// | 1 | Zmm, Zmm, Zmm |
17498/// +---+---------------+
17499/// ```
17500pub trait Vfmaddsub132phMaskzErEmitter<A, B, C> {
17501    fn vfmaddsub132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
17502}
17503
17504impl<'a> Vfmaddsub132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17505    fn vfmaddsub132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17506        self.emit(VFMADDSUB132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17507    }
17508}
17509
17510/// `VFMADDSUB213PH`.
17511///
17512/// Supported operand variants:
17513///
17514/// ```text
17515/// +---+---------------+
17516/// | # | Operands      |
17517/// +---+---------------+
17518/// | 1 | Xmm, Xmm, Mem |
17519/// | 2 | Xmm, Xmm, Xmm |
17520/// | 3 | Ymm, Ymm, Mem |
17521/// | 4 | Ymm, Ymm, Ymm |
17522/// | 5 | Zmm, Zmm, Mem |
17523/// | 6 | Zmm, Zmm, Zmm |
17524/// +---+---------------+
17525/// ```
17526pub trait Vfmaddsub213phEmitter<A, B, C> {
17527    fn vfmaddsub213ph(&mut self, op0: A, op1: B, op2: C);
17528}
17529
17530impl<'a> Vfmaddsub213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17531    fn vfmaddsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17532        self.emit(VFMADDSUB213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17533    }
17534}
17535
17536impl<'a> Vfmaddsub213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17537    fn vfmaddsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17538        self.emit(VFMADDSUB213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17539    }
17540}
17541
17542impl<'a> Vfmaddsub213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17543    fn vfmaddsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17544        self.emit(VFMADDSUB213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17545    }
17546}
17547
17548impl<'a> Vfmaddsub213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17549    fn vfmaddsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17550        self.emit(VFMADDSUB213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17551    }
17552}
17553
17554impl<'a> Vfmaddsub213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17555    fn vfmaddsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17556        self.emit(VFMADDSUB213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17557    }
17558}
17559
17560impl<'a> Vfmaddsub213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17561    fn vfmaddsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17562        self.emit(VFMADDSUB213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17563    }
17564}
17565
17566/// `VFMADDSUB213PH_ER`.
17567///
17568/// Supported operand variants:
17569///
17570/// ```text
17571/// +---+---------------+
17572/// | # | Operands      |
17573/// +---+---------------+
17574/// | 1 | Zmm, Zmm, Zmm |
17575/// +---+---------------+
17576/// ```
17577pub trait Vfmaddsub213phErEmitter<A, B, C> {
17578    fn vfmaddsub213ph_er(&mut self, op0: A, op1: B, op2: C);
17579}
17580
17581impl<'a> Vfmaddsub213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17582    fn vfmaddsub213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17583        self.emit(VFMADDSUB213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17584    }
17585}
17586
17587/// `VFMADDSUB213PH_MASK`.
17588///
17589/// Supported operand variants:
17590///
17591/// ```text
17592/// +---+---------------+
17593/// | # | Operands      |
17594/// +---+---------------+
17595/// | 1 | Xmm, Xmm, Mem |
17596/// | 2 | Xmm, Xmm, Xmm |
17597/// | 3 | Ymm, Ymm, Mem |
17598/// | 4 | Ymm, Ymm, Ymm |
17599/// | 5 | Zmm, Zmm, Mem |
17600/// | 6 | Zmm, Zmm, Zmm |
17601/// +---+---------------+
17602/// ```
17603pub trait Vfmaddsub213phMaskEmitter<A, B, C> {
17604    fn vfmaddsub213ph_mask(&mut self, op0: A, op1: B, op2: C);
17605}
17606
17607impl<'a> Vfmaddsub213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17608    fn vfmaddsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17609        self.emit(VFMADDSUB213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17610    }
17611}
17612
17613impl<'a> Vfmaddsub213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17614    fn vfmaddsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17615        self.emit(VFMADDSUB213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17616    }
17617}
17618
17619impl<'a> Vfmaddsub213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17620    fn vfmaddsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17621        self.emit(VFMADDSUB213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17622    }
17623}
17624
17625impl<'a> Vfmaddsub213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17626    fn vfmaddsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17627        self.emit(VFMADDSUB213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17628    }
17629}
17630
17631impl<'a> Vfmaddsub213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17632    fn vfmaddsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17633        self.emit(VFMADDSUB213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17634    }
17635}
17636
17637impl<'a> Vfmaddsub213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17638    fn vfmaddsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17639        self.emit(VFMADDSUB213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17640    }
17641}
17642
17643/// `VFMADDSUB213PH_MASK_ER`.
17644///
17645/// Supported operand variants:
17646///
17647/// ```text
17648/// +---+---------------+
17649/// | # | Operands      |
17650/// +---+---------------+
17651/// | 1 | Zmm, Zmm, Zmm |
17652/// +---+---------------+
17653/// ```
17654pub trait Vfmaddsub213phMaskErEmitter<A, B, C> {
17655    fn vfmaddsub213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
17656}
17657
17658impl<'a> Vfmaddsub213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17659    fn vfmaddsub213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17660        self.emit(VFMADDSUB213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17661    }
17662}
17663
17664/// `VFMADDSUB213PH_MASKZ`.
17665///
17666/// Supported operand variants:
17667///
17668/// ```text
17669/// +---+---------------+
17670/// | # | Operands      |
17671/// +---+---------------+
17672/// | 1 | Xmm, Xmm, Mem |
17673/// | 2 | Xmm, Xmm, Xmm |
17674/// | 3 | Ymm, Ymm, Mem |
17675/// | 4 | Ymm, Ymm, Ymm |
17676/// | 5 | Zmm, Zmm, Mem |
17677/// | 6 | Zmm, Zmm, Zmm |
17678/// +---+---------------+
17679/// ```
17680pub trait Vfmaddsub213phMaskzEmitter<A, B, C> {
17681    fn vfmaddsub213ph_maskz(&mut self, op0: A, op1: B, op2: C);
17682}
17683
17684impl<'a> Vfmaddsub213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17685    fn vfmaddsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17686        self.emit(VFMADDSUB213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17687    }
17688}
17689
17690impl<'a> Vfmaddsub213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17691    fn vfmaddsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17692        self.emit(VFMADDSUB213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17693    }
17694}
17695
17696impl<'a> Vfmaddsub213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17697    fn vfmaddsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17698        self.emit(VFMADDSUB213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17699    }
17700}
17701
17702impl<'a> Vfmaddsub213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17703    fn vfmaddsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17704        self.emit(VFMADDSUB213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17705    }
17706}
17707
17708impl<'a> Vfmaddsub213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17709    fn vfmaddsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17710        self.emit(VFMADDSUB213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17711    }
17712}
17713
17714impl<'a> Vfmaddsub213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17715    fn vfmaddsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17716        self.emit(VFMADDSUB213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17717    }
17718}
17719
17720/// `VFMADDSUB213PH_MASKZ_ER`.
17721///
17722/// Supported operand variants:
17723///
17724/// ```text
17725/// +---+---------------+
17726/// | # | Operands      |
17727/// +---+---------------+
17728/// | 1 | Zmm, Zmm, Zmm |
17729/// +---+---------------+
17730/// ```
17731pub trait Vfmaddsub213phMaskzErEmitter<A, B, C> {
17732    fn vfmaddsub213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
17733}
17734
17735impl<'a> Vfmaddsub213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17736    fn vfmaddsub213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17737        self.emit(VFMADDSUB213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17738    }
17739}
17740
17741/// `VFMADDSUB231PH`.
17742///
17743/// Supported operand variants:
17744///
17745/// ```text
17746/// +---+---------------+
17747/// | # | Operands      |
17748/// +---+---------------+
17749/// | 1 | Xmm, Xmm, Mem |
17750/// | 2 | Xmm, Xmm, Xmm |
17751/// | 3 | Ymm, Ymm, Mem |
17752/// | 4 | Ymm, Ymm, Ymm |
17753/// | 5 | Zmm, Zmm, Mem |
17754/// | 6 | Zmm, Zmm, Zmm |
17755/// +---+---------------+
17756/// ```
17757pub trait Vfmaddsub231phEmitter<A, B, C> {
17758    fn vfmaddsub231ph(&mut self, op0: A, op1: B, op2: C);
17759}
17760
17761impl<'a> Vfmaddsub231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17762    fn vfmaddsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17763        self.emit(VFMADDSUB231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17764    }
17765}
17766
17767impl<'a> Vfmaddsub231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17768    fn vfmaddsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17769        self.emit(VFMADDSUB231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17770    }
17771}
17772
17773impl<'a> Vfmaddsub231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17774    fn vfmaddsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17775        self.emit(VFMADDSUB231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17776    }
17777}
17778
17779impl<'a> Vfmaddsub231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17780    fn vfmaddsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17781        self.emit(VFMADDSUB231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17782    }
17783}
17784
17785impl<'a> Vfmaddsub231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17786    fn vfmaddsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17787        self.emit(VFMADDSUB231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17788    }
17789}
17790
17791impl<'a> Vfmaddsub231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17792    fn vfmaddsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17793        self.emit(VFMADDSUB231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17794    }
17795}
17796
17797/// `VFMADDSUB231PH_ER`.
17798///
17799/// Supported operand variants:
17800///
17801/// ```text
17802/// +---+---------------+
17803/// | # | Operands      |
17804/// +---+---------------+
17805/// | 1 | Zmm, Zmm, Zmm |
17806/// +---+---------------+
17807/// ```
17808pub trait Vfmaddsub231phErEmitter<A, B, C> {
17809    fn vfmaddsub231ph_er(&mut self, op0: A, op1: B, op2: C);
17810}
17811
17812impl<'a> Vfmaddsub231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17813    fn vfmaddsub231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17814        self.emit(VFMADDSUB231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17815    }
17816}
17817
17818/// `VFMADDSUB231PH_MASK`.
17819///
17820/// Supported operand variants:
17821///
17822/// ```text
17823/// +---+---------------+
17824/// | # | Operands      |
17825/// +---+---------------+
17826/// | 1 | Xmm, Xmm, Mem |
17827/// | 2 | Xmm, Xmm, Xmm |
17828/// | 3 | Ymm, Ymm, Mem |
17829/// | 4 | Ymm, Ymm, Ymm |
17830/// | 5 | Zmm, Zmm, Mem |
17831/// | 6 | Zmm, Zmm, Zmm |
17832/// +---+---------------+
17833/// ```
17834pub trait Vfmaddsub231phMaskEmitter<A, B, C> {
17835    fn vfmaddsub231ph_mask(&mut self, op0: A, op1: B, op2: C);
17836}
17837
17838impl<'a> Vfmaddsub231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17839    fn vfmaddsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17840        self.emit(VFMADDSUB231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17841    }
17842}
17843
17844impl<'a> Vfmaddsub231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17845    fn vfmaddsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17846        self.emit(VFMADDSUB231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17847    }
17848}
17849
17850impl<'a> Vfmaddsub231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17851    fn vfmaddsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17852        self.emit(VFMADDSUB231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17853    }
17854}
17855
17856impl<'a> Vfmaddsub231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17857    fn vfmaddsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17858        self.emit(VFMADDSUB231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17859    }
17860}
17861
17862impl<'a> Vfmaddsub231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17863    fn vfmaddsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17864        self.emit(VFMADDSUB231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17865    }
17866}
17867
17868impl<'a> Vfmaddsub231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17869    fn vfmaddsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17870        self.emit(VFMADDSUB231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17871    }
17872}
17873
17874/// `VFMADDSUB231PH_MASK_ER`.
17875///
17876/// Supported operand variants:
17877///
17878/// ```text
17879/// +---+---------------+
17880/// | # | Operands      |
17881/// +---+---------------+
17882/// | 1 | Zmm, Zmm, Zmm |
17883/// +---+---------------+
17884/// ```
17885pub trait Vfmaddsub231phMaskErEmitter<A, B, C> {
17886    fn vfmaddsub231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
17887}
17888
17889impl<'a> Vfmaddsub231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17890    fn vfmaddsub231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17891        self.emit(VFMADDSUB231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17892    }
17893}
17894
17895/// `VFMADDSUB231PH_MASKZ`.
17896///
17897/// Supported operand variants:
17898///
17899/// ```text
17900/// +---+---------------+
17901/// | # | Operands      |
17902/// +---+---------------+
17903/// | 1 | Xmm, Xmm, Mem |
17904/// | 2 | Xmm, Xmm, Xmm |
17905/// | 3 | Ymm, Ymm, Mem |
17906/// | 4 | Ymm, Ymm, Ymm |
17907/// | 5 | Zmm, Zmm, Mem |
17908/// | 6 | Zmm, Zmm, Zmm |
17909/// +---+---------------+
17910/// ```
17911pub trait Vfmaddsub231phMaskzEmitter<A, B, C> {
17912    fn vfmaddsub231ph_maskz(&mut self, op0: A, op1: B, op2: C);
17913}
17914
17915impl<'a> Vfmaddsub231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17916    fn vfmaddsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17917        self.emit(VFMADDSUB231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17918    }
17919}
17920
17921impl<'a> Vfmaddsub231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17922    fn vfmaddsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
17923        self.emit(VFMADDSUB231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17924    }
17925}
17926
17927impl<'a> Vfmaddsub231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
17928    fn vfmaddsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
17929        self.emit(VFMADDSUB231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17930    }
17931}
17932
17933impl<'a> Vfmaddsub231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
17934    fn vfmaddsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
17935        self.emit(VFMADDSUB231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17936    }
17937}
17938
17939impl<'a> Vfmaddsub231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17940    fn vfmaddsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17941        self.emit(VFMADDSUB231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17942    }
17943}
17944
17945impl<'a> Vfmaddsub231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
17946    fn vfmaddsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
17947        self.emit(VFMADDSUB231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17948    }
17949}
17950
17951/// `VFMADDSUB231PH_MASKZ_ER`.
17952///
17953/// Supported operand variants:
17954///
17955/// ```text
17956/// +---+---------------+
17957/// | # | Operands      |
17958/// +---+---------------+
17959/// | 1 | Zmm, Zmm, Zmm |
17960/// +---+---------------+
17961/// ```
17962pub trait Vfmaddsub231phMaskzErEmitter<A, B, C> {
17963    fn vfmaddsub231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
17964}
17965
17966impl<'a> Vfmaddsub231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
17967    fn vfmaddsub231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
17968        self.emit(VFMADDSUB231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17969    }
17970}
17971
17972/// `VFMSUB132PH`.
17973///
17974/// Supported operand variants:
17975///
17976/// ```text
17977/// +---+---------------+
17978/// | # | Operands      |
17979/// +---+---------------+
17980/// | 1 | Xmm, Xmm, Mem |
17981/// | 2 | Xmm, Xmm, Xmm |
17982/// | 3 | Ymm, Ymm, Mem |
17983/// | 4 | Ymm, Ymm, Ymm |
17984/// | 5 | Zmm, Zmm, Mem |
17985/// | 6 | Zmm, Zmm, Zmm |
17986/// +---+---------------+
17987/// ```
17988pub trait Vfmsub132phEmitter<A, B, C> {
17989    fn vfmsub132ph(&mut self, op0: A, op1: B, op2: C);
17990}
17991
17992impl<'a> Vfmsub132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
17993    fn vfmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
17994        self.emit(VFMSUB132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
17995    }
17996}
17997
17998impl<'a> Vfmsub132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
17999    fn vfmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18000        self.emit(VFMSUB132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18001    }
18002}
18003
18004impl<'a> Vfmsub132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18005    fn vfmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18006        self.emit(VFMSUB132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18007    }
18008}
18009
18010impl<'a> Vfmsub132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18011    fn vfmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18012        self.emit(VFMSUB132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18013    }
18014}
18015
18016impl<'a> Vfmsub132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18017    fn vfmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18018        self.emit(VFMSUB132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18019    }
18020}
18021
18022impl<'a> Vfmsub132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18023    fn vfmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18024        self.emit(VFMSUB132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18025    }
18026}
18027
18028/// `VFMSUB132PH_ER`.
18029///
18030/// Supported operand variants:
18031///
18032/// ```text
18033/// +---+---------------+
18034/// | # | Operands      |
18035/// +---+---------------+
18036/// | 1 | Zmm, Zmm, Zmm |
18037/// +---+---------------+
18038/// ```
18039pub trait Vfmsub132phErEmitter<A, B, C> {
18040    fn vfmsub132ph_er(&mut self, op0: A, op1: B, op2: C);
18041}
18042
18043impl<'a> Vfmsub132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18044    fn vfmsub132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18045        self.emit(VFMSUB132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18046    }
18047}
18048
18049/// `VFMSUB132PH_MASK`.
18050///
18051/// Supported operand variants:
18052///
18053/// ```text
18054/// +---+---------------+
18055/// | # | Operands      |
18056/// +---+---------------+
18057/// | 1 | Xmm, Xmm, Mem |
18058/// | 2 | Xmm, Xmm, Xmm |
18059/// | 3 | Ymm, Ymm, Mem |
18060/// | 4 | Ymm, Ymm, Ymm |
18061/// | 5 | Zmm, Zmm, Mem |
18062/// | 6 | Zmm, Zmm, Zmm |
18063/// +---+---------------+
18064/// ```
18065pub trait Vfmsub132phMaskEmitter<A, B, C> {
18066    fn vfmsub132ph_mask(&mut self, op0: A, op1: B, op2: C);
18067}
18068
18069impl<'a> Vfmsub132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18070    fn vfmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18071        self.emit(VFMSUB132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18072    }
18073}
18074
18075impl<'a> Vfmsub132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18076    fn vfmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18077        self.emit(VFMSUB132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18078    }
18079}
18080
18081impl<'a> Vfmsub132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18082    fn vfmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18083        self.emit(VFMSUB132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18084    }
18085}
18086
18087impl<'a> Vfmsub132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18088    fn vfmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18089        self.emit(VFMSUB132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18090    }
18091}
18092
18093impl<'a> Vfmsub132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18094    fn vfmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18095        self.emit(VFMSUB132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18096    }
18097}
18098
18099impl<'a> Vfmsub132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18100    fn vfmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18101        self.emit(VFMSUB132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18102    }
18103}
18104
18105/// `VFMSUB132PH_MASK_ER`.
18106///
18107/// Supported operand variants:
18108///
18109/// ```text
18110/// +---+---------------+
18111/// | # | Operands      |
18112/// +---+---------------+
18113/// | 1 | Zmm, Zmm, Zmm |
18114/// +---+---------------+
18115/// ```
18116pub trait Vfmsub132phMaskErEmitter<A, B, C> {
18117    fn vfmsub132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
18118}
18119
18120impl<'a> Vfmsub132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18121    fn vfmsub132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18122        self.emit(VFMSUB132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18123    }
18124}
18125
18126/// `VFMSUB132PH_MASKZ`.
18127///
18128/// Supported operand variants:
18129///
18130/// ```text
18131/// +---+---------------+
18132/// | # | Operands      |
18133/// +---+---------------+
18134/// | 1 | Xmm, Xmm, Mem |
18135/// | 2 | Xmm, Xmm, Xmm |
18136/// | 3 | Ymm, Ymm, Mem |
18137/// | 4 | Ymm, Ymm, Ymm |
18138/// | 5 | Zmm, Zmm, Mem |
18139/// | 6 | Zmm, Zmm, Zmm |
18140/// +---+---------------+
18141/// ```
18142pub trait Vfmsub132phMaskzEmitter<A, B, C> {
18143    fn vfmsub132ph_maskz(&mut self, op0: A, op1: B, op2: C);
18144}
18145
18146impl<'a> Vfmsub132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18147    fn vfmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18148        self.emit(VFMSUB132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18149    }
18150}
18151
18152impl<'a> Vfmsub132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18153    fn vfmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18154        self.emit(VFMSUB132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18155    }
18156}
18157
18158impl<'a> Vfmsub132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18159    fn vfmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18160        self.emit(VFMSUB132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18161    }
18162}
18163
18164impl<'a> Vfmsub132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18165    fn vfmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18166        self.emit(VFMSUB132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18167    }
18168}
18169
18170impl<'a> Vfmsub132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18171    fn vfmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18172        self.emit(VFMSUB132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18173    }
18174}
18175
18176impl<'a> Vfmsub132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18177    fn vfmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18178        self.emit(VFMSUB132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18179    }
18180}
18181
18182/// `VFMSUB132PH_MASKZ_ER`.
18183///
18184/// Supported operand variants:
18185///
18186/// ```text
18187/// +---+---------------+
18188/// | # | Operands      |
18189/// +---+---------------+
18190/// | 1 | Zmm, Zmm, Zmm |
18191/// +---+---------------+
18192/// ```
18193pub trait Vfmsub132phMaskzErEmitter<A, B, C> {
18194    fn vfmsub132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
18195}
18196
18197impl<'a> Vfmsub132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18198    fn vfmsub132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18199        self.emit(VFMSUB132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18200    }
18201}
18202
18203/// `VFMSUB132SH`.
18204///
18205/// Supported operand variants:
18206///
18207/// ```text
18208/// +---+---------------+
18209/// | # | Operands      |
18210/// +---+---------------+
18211/// | 1 | Xmm, Xmm, Mem |
18212/// | 2 | Xmm, Xmm, Xmm |
18213/// +---+---------------+
18214/// ```
18215pub trait Vfmsub132shEmitter<A, B, C> {
18216    fn vfmsub132sh(&mut self, op0: A, op1: B, op2: C);
18217}
18218
18219impl<'a> Vfmsub132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18220    fn vfmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18221        self.emit(VFMSUB132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18222    }
18223}
18224
18225impl<'a> Vfmsub132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18226    fn vfmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18227        self.emit(VFMSUB132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18228    }
18229}
18230
18231/// `VFMSUB132SH_ER`.
18232///
18233/// Supported operand variants:
18234///
18235/// ```text
18236/// +---+---------------+
18237/// | # | Operands      |
18238/// +---+---------------+
18239/// | 1 | Xmm, Xmm, Xmm |
18240/// +---+---------------+
18241/// ```
18242pub trait Vfmsub132shErEmitter<A, B, C> {
18243    fn vfmsub132sh_er(&mut self, op0: A, op1: B, op2: C);
18244}
18245
18246impl<'a> Vfmsub132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18247    fn vfmsub132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18248        self.emit(VFMSUB132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18249    }
18250}
18251
18252/// `VFMSUB132SH_MASK`.
18253///
18254/// Supported operand variants:
18255///
18256/// ```text
18257/// +---+---------------+
18258/// | # | Operands      |
18259/// +---+---------------+
18260/// | 1 | Xmm, Xmm, Mem |
18261/// | 2 | Xmm, Xmm, Xmm |
18262/// +---+---------------+
18263/// ```
18264pub trait Vfmsub132shMaskEmitter<A, B, C> {
18265    fn vfmsub132sh_mask(&mut self, op0: A, op1: B, op2: C);
18266}
18267
18268impl<'a> Vfmsub132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18269    fn vfmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18270        self.emit(VFMSUB132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18271    }
18272}
18273
18274impl<'a> Vfmsub132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18275    fn vfmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18276        self.emit(VFMSUB132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18277    }
18278}
18279
18280/// `VFMSUB132SH_MASK_ER`.
18281///
18282/// Supported operand variants:
18283///
18284/// ```text
18285/// +---+---------------+
18286/// | # | Operands      |
18287/// +---+---------------+
18288/// | 1 | Xmm, Xmm, Xmm |
18289/// +---+---------------+
18290/// ```
18291pub trait Vfmsub132shMaskErEmitter<A, B, C> {
18292    fn vfmsub132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
18293}
18294
18295impl<'a> Vfmsub132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18296    fn vfmsub132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18297        self.emit(VFMSUB132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18298    }
18299}
18300
18301/// `VFMSUB132SH_MASKZ`.
18302///
18303/// Supported operand variants:
18304///
18305/// ```text
18306/// +---+---------------+
18307/// | # | Operands      |
18308/// +---+---------------+
18309/// | 1 | Xmm, Xmm, Mem |
18310/// | 2 | Xmm, Xmm, Xmm |
18311/// +---+---------------+
18312/// ```
18313pub trait Vfmsub132shMaskzEmitter<A, B, C> {
18314    fn vfmsub132sh_maskz(&mut self, op0: A, op1: B, op2: C);
18315}
18316
18317impl<'a> Vfmsub132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18318    fn vfmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18319        self.emit(VFMSUB132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18320    }
18321}
18322
18323impl<'a> Vfmsub132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18324    fn vfmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18325        self.emit(VFMSUB132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18326    }
18327}
18328
18329/// `VFMSUB132SH_MASKZ_ER`.
18330///
18331/// Supported operand variants:
18332///
18333/// ```text
18334/// +---+---------------+
18335/// | # | Operands      |
18336/// +---+---------------+
18337/// | 1 | Xmm, Xmm, Xmm |
18338/// +---+---------------+
18339/// ```
18340pub trait Vfmsub132shMaskzErEmitter<A, B, C> {
18341    fn vfmsub132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
18342}
18343
18344impl<'a> Vfmsub132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18345    fn vfmsub132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18346        self.emit(VFMSUB132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18347    }
18348}
18349
18350/// `VFMSUB213PH`.
18351///
18352/// Supported operand variants:
18353///
18354/// ```text
18355/// +---+---------------+
18356/// | # | Operands      |
18357/// +---+---------------+
18358/// | 1 | Xmm, Xmm, Mem |
18359/// | 2 | Xmm, Xmm, Xmm |
18360/// | 3 | Ymm, Ymm, Mem |
18361/// | 4 | Ymm, Ymm, Ymm |
18362/// | 5 | Zmm, Zmm, Mem |
18363/// | 6 | Zmm, Zmm, Zmm |
18364/// +---+---------------+
18365/// ```
18366pub trait Vfmsub213phEmitter<A, B, C> {
18367    fn vfmsub213ph(&mut self, op0: A, op1: B, op2: C);
18368}
18369
18370impl<'a> Vfmsub213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18371    fn vfmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18372        self.emit(VFMSUB213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18373    }
18374}
18375
18376impl<'a> Vfmsub213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18377    fn vfmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18378        self.emit(VFMSUB213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18379    }
18380}
18381
18382impl<'a> Vfmsub213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18383    fn vfmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18384        self.emit(VFMSUB213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18385    }
18386}
18387
18388impl<'a> Vfmsub213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18389    fn vfmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18390        self.emit(VFMSUB213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18391    }
18392}
18393
18394impl<'a> Vfmsub213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18395    fn vfmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18396        self.emit(VFMSUB213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18397    }
18398}
18399
18400impl<'a> Vfmsub213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18401    fn vfmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18402        self.emit(VFMSUB213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18403    }
18404}
18405
18406/// `VFMSUB213PH_ER`.
18407///
18408/// Supported operand variants:
18409///
18410/// ```text
18411/// +---+---------------+
18412/// | # | Operands      |
18413/// +---+---------------+
18414/// | 1 | Zmm, Zmm, Zmm |
18415/// +---+---------------+
18416/// ```
18417pub trait Vfmsub213phErEmitter<A, B, C> {
18418    fn vfmsub213ph_er(&mut self, op0: A, op1: B, op2: C);
18419}
18420
18421impl<'a> Vfmsub213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18422    fn vfmsub213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18423        self.emit(VFMSUB213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18424    }
18425}
18426
18427/// `VFMSUB213PH_MASK`.
18428///
18429/// Supported operand variants:
18430///
18431/// ```text
18432/// +---+---------------+
18433/// | # | Operands      |
18434/// +---+---------------+
18435/// | 1 | Xmm, Xmm, Mem |
18436/// | 2 | Xmm, Xmm, Xmm |
18437/// | 3 | Ymm, Ymm, Mem |
18438/// | 4 | Ymm, Ymm, Ymm |
18439/// | 5 | Zmm, Zmm, Mem |
18440/// | 6 | Zmm, Zmm, Zmm |
18441/// +---+---------------+
18442/// ```
18443pub trait Vfmsub213phMaskEmitter<A, B, C> {
18444    fn vfmsub213ph_mask(&mut self, op0: A, op1: B, op2: C);
18445}
18446
18447impl<'a> Vfmsub213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18448    fn vfmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18449        self.emit(VFMSUB213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18450    }
18451}
18452
18453impl<'a> Vfmsub213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18454    fn vfmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18455        self.emit(VFMSUB213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18456    }
18457}
18458
18459impl<'a> Vfmsub213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18460    fn vfmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18461        self.emit(VFMSUB213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18462    }
18463}
18464
18465impl<'a> Vfmsub213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18466    fn vfmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18467        self.emit(VFMSUB213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18468    }
18469}
18470
18471impl<'a> Vfmsub213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18472    fn vfmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18473        self.emit(VFMSUB213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18474    }
18475}
18476
18477impl<'a> Vfmsub213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18478    fn vfmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18479        self.emit(VFMSUB213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18480    }
18481}
18482
18483/// `VFMSUB213PH_MASK_ER`.
18484///
18485/// Supported operand variants:
18486///
18487/// ```text
18488/// +---+---------------+
18489/// | # | Operands      |
18490/// +---+---------------+
18491/// | 1 | Zmm, Zmm, Zmm |
18492/// +---+---------------+
18493/// ```
18494pub trait Vfmsub213phMaskErEmitter<A, B, C> {
18495    fn vfmsub213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
18496}
18497
18498impl<'a> Vfmsub213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18499    fn vfmsub213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18500        self.emit(VFMSUB213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18501    }
18502}
18503
18504/// `VFMSUB213PH_MASKZ`.
18505///
18506/// Supported operand variants:
18507///
18508/// ```text
18509/// +---+---------------+
18510/// | # | Operands      |
18511/// +---+---------------+
18512/// | 1 | Xmm, Xmm, Mem |
18513/// | 2 | Xmm, Xmm, Xmm |
18514/// | 3 | Ymm, Ymm, Mem |
18515/// | 4 | Ymm, Ymm, Ymm |
18516/// | 5 | Zmm, Zmm, Mem |
18517/// | 6 | Zmm, Zmm, Zmm |
18518/// +---+---------------+
18519/// ```
18520pub trait Vfmsub213phMaskzEmitter<A, B, C> {
18521    fn vfmsub213ph_maskz(&mut self, op0: A, op1: B, op2: C);
18522}
18523
18524impl<'a> Vfmsub213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18525    fn vfmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18526        self.emit(VFMSUB213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18527    }
18528}
18529
18530impl<'a> Vfmsub213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18531    fn vfmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18532        self.emit(VFMSUB213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18533    }
18534}
18535
18536impl<'a> Vfmsub213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18537    fn vfmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18538        self.emit(VFMSUB213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18539    }
18540}
18541
18542impl<'a> Vfmsub213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18543    fn vfmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18544        self.emit(VFMSUB213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18545    }
18546}
18547
18548impl<'a> Vfmsub213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18549    fn vfmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18550        self.emit(VFMSUB213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18551    }
18552}
18553
18554impl<'a> Vfmsub213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18555    fn vfmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18556        self.emit(VFMSUB213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18557    }
18558}
18559
18560/// `VFMSUB213PH_MASKZ_ER`.
18561///
18562/// Supported operand variants:
18563///
18564/// ```text
18565/// +---+---------------+
18566/// | # | Operands      |
18567/// +---+---------------+
18568/// | 1 | Zmm, Zmm, Zmm |
18569/// +---+---------------+
18570/// ```
18571pub trait Vfmsub213phMaskzErEmitter<A, B, C> {
18572    fn vfmsub213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
18573}
18574
18575impl<'a> Vfmsub213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18576    fn vfmsub213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18577        self.emit(VFMSUB213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18578    }
18579}
18580
18581/// `VFMSUB213SH`.
18582///
18583/// Supported operand variants:
18584///
18585/// ```text
18586/// +---+---------------+
18587/// | # | Operands      |
18588/// +---+---------------+
18589/// | 1 | Xmm, Xmm, Mem |
18590/// | 2 | Xmm, Xmm, Xmm |
18591/// +---+---------------+
18592/// ```
18593pub trait Vfmsub213shEmitter<A, B, C> {
18594    fn vfmsub213sh(&mut self, op0: A, op1: B, op2: C);
18595}
18596
18597impl<'a> Vfmsub213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18598    fn vfmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18599        self.emit(VFMSUB213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18600    }
18601}
18602
18603impl<'a> Vfmsub213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18604    fn vfmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18605        self.emit(VFMSUB213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18606    }
18607}
18608
18609/// `VFMSUB213SH_ER`.
18610///
18611/// Supported operand variants:
18612///
18613/// ```text
18614/// +---+---------------+
18615/// | # | Operands      |
18616/// +---+---------------+
18617/// | 1 | Xmm, Xmm, Xmm |
18618/// +---+---------------+
18619/// ```
18620pub trait Vfmsub213shErEmitter<A, B, C> {
18621    fn vfmsub213sh_er(&mut self, op0: A, op1: B, op2: C);
18622}
18623
18624impl<'a> Vfmsub213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18625    fn vfmsub213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18626        self.emit(VFMSUB213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18627    }
18628}
18629
18630/// `VFMSUB213SH_MASK`.
18631///
18632/// Supported operand variants:
18633///
18634/// ```text
18635/// +---+---------------+
18636/// | # | Operands      |
18637/// +---+---------------+
18638/// | 1 | Xmm, Xmm, Mem |
18639/// | 2 | Xmm, Xmm, Xmm |
18640/// +---+---------------+
18641/// ```
18642pub trait Vfmsub213shMaskEmitter<A, B, C> {
18643    fn vfmsub213sh_mask(&mut self, op0: A, op1: B, op2: C);
18644}
18645
18646impl<'a> Vfmsub213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18647    fn vfmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18648        self.emit(VFMSUB213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18649    }
18650}
18651
18652impl<'a> Vfmsub213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18653    fn vfmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18654        self.emit(VFMSUB213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18655    }
18656}
18657
18658/// `VFMSUB213SH_MASK_ER`.
18659///
18660/// Supported operand variants:
18661///
18662/// ```text
18663/// +---+---------------+
18664/// | # | Operands      |
18665/// +---+---------------+
18666/// | 1 | Xmm, Xmm, Xmm |
18667/// +---+---------------+
18668/// ```
18669pub trait Vfmsub213shMaskErEmitter<A, B, C> {
18670    fn vfmsub213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
18671}
18672
18673impl<'a> Vfmsub213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18674    fn vfmsub213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18675        self.emit(VFMSUB213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18676    }
18677}
18678
18679/// `VFMSUB213SH_MASKZ`.
18680///
18681/// Supported operand variants:
18682///
18683/// ```text
18684/// +---+---------------+
18685/// | # | Operands      |
18686/// +---+---------------+
18687/// | 1 | Xmm, Xmm, Mem |
18688/// | 2 | Xmm, Xmm, Xmm |
18689/// +---+---------------+
18690/// ```
18691pub trait Vfmsub213shMaskzEmitter<A, B, C> {
18692    fn vfmsub213sh_maskz(&mut self, op0: A, op1: B, op2: C);
18693}
18694
18695impl<'a> Vfmsub213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18696    fn vfmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18697        self.emit(VFMSUB213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18698    }
18699}
18700
18701impl<'a> Vfmsub213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18702    fn vfmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18703        self.emit(VFMSUB213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18704    }
18705}
18706
18707/// `VFMSUB213SH_MASKZ_ER`.
18708///
18709/// Supported operand variants:
18710///
18711/// ```text
18712/// +---+---------------+
18713/// | # | Operands      |
18714/// +---+---------------+
18715/// | 1 | Xmm, Xmm, Xmm |
18716/// +---+---------------+
18717/// ```
18718pub trait Vfmsub213shMaskzErEmitter<A, B, C> {
18719    fn vfmsub213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
18720}
18721
18722impl<'a> Vfmsub213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18723    fn vfmsub213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18724        self.emit(VFMSUB213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18725    }
18726}
18727
18728/// `VFMSUB231PH`.
18729///
18730/// Supported operand variants:
18731///
18732/// ```text
18733/// +---+---------------+
18734/// | # | Operands      |
18735/// +---+---------------+
18736/// | 1 | Xmm, Xmm, Mem |
18737/// | 2 | Xmm, Xmm, Xmm |
18738/// | 3 | Ymm, Ymm, Mem |
18739/// | 4 | Ymm, Ymm, Ymm |
18740/// | 5 | Zmm, Zmm, Mem |
18741/// | 6 | Zmm, Zmm, Zmm |
18742/// +---+---------------+
18743/// ```
18744pub trait Vfmsub231phEmitter<A, B, C> {
18745    fn vfmsub231ph(&mut self, op0: A, op1: B, op2: C);
18746}
18747
18748impl<'a> Vfmsub231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18749    fn vfmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18750        self.emit(VFMSUB231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18751    }
18752}
18753
18754impl<'a> Vfmsub231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18755    fn vfmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18756        self.emit(VFMSUB231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18757    }
18758}
18759
18760impl<'a> Vfmsub231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18761    fn vfmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18762        self.emit(VFMSUB231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18763    }
18764}
18765
18766impl<'a> Vfmsub231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18767    fn vfmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18768        self.emit(VFMSUB231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18769    }
18770}
18771
18772impl<'a> Vfmsub231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18773    fn vfmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18774        self.emit(VFMSUB231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18775    }
18776}
18777
18778impl<'a> Vfmsub231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18779    fn vfmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18780        self.emit(VFMSUB231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18781    }
18782}
18783
18784/// `VFMSUB231PH_ER`.
18785///
18786/// Supported operand variants:
18787///
18788/// ```text
18789/// +---+---------------+
18790/// | # | Operands      |
18791/// +---+---------------+
18792/// | 1 | Zmm, Zmm, Zmm |
18793/// +---+---------------+
18794/// ```
18795pub trait Vfmsub231phErEmitter<A, B, C> {
18796    fn vfmsub231ph_er(&mut self, op0: A, op1: B, op2: C);
18797}
18798
18799impl<'a> Vfmsub231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18800    fn vfmsub231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18801        self.emit(VFMSUB231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18802    }
18803}
18804
18805/// `VFMSUB231PH_MASK`.
18806///
18807/// Supported operand variants:
18808///
18809/// ```text
18810/// +---+---------------+
18811/// | # | Operands      |
18812/// +---+---------------+
18813/// | 1 | Xmm, Xmm, Mem |
18814/// | 2 | Xmm, Xmm, Xmm |
18815/// | 3 | Ymm, Ymm, Mem |
18816/// | 4 | Ymm, Ymm, Ymm |
18817/// | 5 | Zmm, Zmm, Mem |
18818/// | 6 | Zmm, Zmm, Zmm |
18819/// +---+---------------+
18820/// ```
18821pub trait Vfmsub231phMaskEmitter<A, B, C> {
18822    fn vfmsub231ph_mask(&mut self, op0: A, op1: B, op2: C);
18823}
18824
18825impl<'a> Vfmsub231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18826    fn vfmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18827        self.emit(VFMSUB231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18828    }
18829}
18830
18831impl<'a> Vfmsub231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18832    fn vfmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18833        self.emit(VFMSUB231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18834    }
18835}
18836
18837impl<'a> Vfmsub231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18838    fn vfmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18839        self.emit(VFMSUB231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18840    }
18841}
18842
18843impl<'a> Vfmsub231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18844    fn vfmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18845        self.emit(VFMSUB231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18846    }
18847}
18848
18849impl<'a> Vfmsub231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18850    fn vfmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18851        self.emit(VFMSUB231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18852    }
18853}
18854
18855impl<'a> Vfmsub231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18856    fn vfmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18857        self.emit(VFMSUB231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18858    }
18859}
18860
18861/// `VFMSUB231PH_MASK_ER`.
18862///
18863/// Supported operand variants:
18864///
18865/// ```text
18866/// +---+---------------+
18867/// | # | Operands      |
18868/// +---+---------------+
18869/// | 1 | Zmm, Zmm, Zmm |
18870/// +---+---------------+
18871/// ```
18872pub trait Vfmsub231phMaskErEmitter<A, B, C> {
18873    fn vfmsub231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
18874}
18875
18876impl<'a> Vfmsub231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18877    fn vfmsub231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18878        self.emit(VFMSUB231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18879    }
18880}
18881
18882/// `VFMSUB231PH_MASKZ`.
18883///
18884/// Supported operand variants:
18885///
18886/// ```text
18887/// +---+---------------+
18888/// | # | Operands      |
18889/// +---+---------------+
18890/// | 1 | Xmm, Xmm, Mem |
18891/// | 2 | Xmm, Xmm, Xmm |
18892/// | 3 | Ymm, Ymm, Mem |
18893/// | 4 | Ymm, Ymm, Ymm |
18894/// | 5 | Zmm, Zmm, Mem |
18895/// | 6 | Zmm, Zmm, Zmm |
18896/// +---+---------------+
18897/// ```
18898pub trait Vfmsub231phMaskzEmitter<A, B, C> {
18899    fn vfmsub231ph_maskz(&mut self, op0: A, op1: B, op2: C);
18900}
18901
18902impl<'a> Vfmsub231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18903    fn vfmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18904        self.emit(VFMSUB231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18905    }
18906}
18907
18908impl<'a> Vfmsub231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18909    fn vfmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18910        self.emit(VFMSUB231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18911    }
18912}
18913
18914impl<'a> Vfmsub231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
18915    fn vfmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
18916        self.emit(VFMSUB231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18917    }
18918}
18919
18920impl<'a> Vfmsub231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
18921    fn vfmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
18922        self.emit(VFMSUB231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18923    }
18924}
18925
18926impl<'a> Vfmsub231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18927    fn vfmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18928        self.emit(VFMSUB231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18929    }
18930}
18931
18932impl<'a> Vfmsub231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
18933    fn vfmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
18934        self.emit(VFMSUB231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18935    }
18936}
18937
18938/// `VFMSUB231PH_MASKZ_ER`.
18939///
18940/// Supported operand variants:
18941///
18942/// ```text
18943/// +---+---------------+
18944/// | # | Operands      |
18945/// +---+---------------+
18946/// | 1 | Zmm, Zmm, Zmm |
18947/// +---+---------------+
18948/// ```
18949pub trait Vfmsub231phMaskzErEmitter<A, B, C> {
18950    fn vfmsub231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
18951}
18952
18953impl<'a> Vfmsub231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
18954    fn vfmsub231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
18955        self.emit(VFMSUB231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18956    }
18957}
18958
18959/// `VFMSUB231SH`.
18960///
18961/// Supported operand variants:
18962///
18963/// ```text
18964/// +---+---------------+
18965/// | # | Operands      |
18966/// +---+---------------+
18967/// | 1 | Xmm, Xmm, Mem |
18968/// | 2 | Xmm, Xmm, Xmm |
18969/// +---+---------------+
18970/// ```
18971pub trait Vfmsub231shEmitter<A, B, C> {
18972    fn vfmsub231sh(&mut self, op0: A, op1: B, op2: C);
18973}
18974
18975impl<'a> Vfmsub231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
18976    fn vfmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
18977        self.emit(VFMSUB231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18978    }
18979}
18980
18981impl<'a> Vfmsub231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
18982    fn vfmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
18983        self.emit(VFMSUB231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
18984    }
18985}
18986
18987/// `VFMSUB231SH_ER`.
18988///
18989/// Supported operand variants:
18990///
18991/// ```text
18992/// +---+---------------+
18993/// | # | Operands      |
18994/// +---+---------------+
18995/// | 1 | Xmm, Xmm, Xmm |
18996/// +---+---------------+
18997/// ```
18998pub trait Vfmsub231shErEmitter<A, B, C> {
18999    fn vfmsub231sh_er(&mut self, op0: A, op1: B, op2: C);
19000}
19001
19002impl<'a> Vfmsub231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19003    fn vfmsub231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19004        self.emit(VFMSUB231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19005    }
19006}
19007
19008/// `VFMSUB231SH_MASK`.
19009///
19010/// Supported operand variants:
19011///
19012/// ```text
19013/// +---+---------------+
19014/// | # | Operands      |
19015/// +---+---------------+
19016/// | 1 | Xmm, Xmm, Mem |
19017/// | 2 | Xmm, Xmm, Xmm |
19018/// +---+---------------+
19019/// ```
19020pub trait Vfmsub231shMaskEmitter<A, B, C> {
19021    fn vfmsub231sh_mask(&mut self, op0: A, op1: B, op2: C);
19022}
19023
19024impl<'a> Vfmsub231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19025    fn vfmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19026        self.emit(VFMSUB231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19027    }
19028}
19029
19030impl<'a> Vfmsub231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19031    fn vfmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19032        self.emit(VFMSUB231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19033    }
19034}
19035
19036/// `VFMSUB231SH_MASK_ER`.
19037///
19038/// Supported operand variants:
19039///
19040/// ```text
19041/// +---+---------------+
19042/// | # | Operands      |
19043/// +---+---------------+
19044/// | 1 | Xmm, Xmm, Xmm |
19045/// +---+---------------+
19046/// ```
19047pub trait Vfmsub231shMaskErEmitter<A, B, C> {
19048    fn vfmsub231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
19049}
19050
19051impl<'a> Vfmsub231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19052    fn vfmsub231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19053        self.emit(VFMSUB231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19054    }
19055}
19056
19057/// `VFMSUB231SH_MASKZ`.
19058///
19059/// Supported operand variants:
19060///
19061/// ```text
19062/// +---+---------------+
19063/// | # | Operands      |
19064/// +---+---------------+
19065/// | 1 | Xmm, Xmm, Mem |
19066/// | 2 | Xmm, Xmm, Xmm |
19067/// +---+---------------+
19068/// ```
19069pub trait Vfmsub231shMaskzEmitter<A, B, C> {
19070    fn vfmsub231sh_maskz(&mut self, op0: A, op1: B, op2: C);
19071}
19072
19073impl<'a> Vfmsub231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19074    fn vfmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19075        self.emit(VFMSUB231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19076    }
19077}
19078
19079impl<'a> Vfmsub231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19080    fn vfmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19081        self.emit(VFMSUB231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19082    }
19083}
19084
19085/// `VFMSUB231SH_MASKZ_ER`.
19086///
19087/// Supported operand variants:
19088///
19089/// ```text
19090/// +---+---------------+
19091/// | # | Operands      |
19092/// +---+---------------+
19093/// | 1 | Xmm, Xmm, Xmm |
19094/// +---+---------------+
19095/// ```
19096pub trait Vfmsub231shMaskzErEmitter<A, B, C> {
19097    fn vfmsub231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
19098}
19099
19100impl<'a> Vfmsub231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19101    fn vfmsub231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19102        self.emit(VFMSUB231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19103    }
19104}
19105
19106/// `VFMSUBADD132PH`.
19107///
19108/// Supported operand variants:
19109///
19110/// ```text
19111/// +---+---------------+
19112/// | # | Operands      |
19113/// +---+---------------+
19114/// | 1 | Xmm, Xmm, Mem |
19115/// | 2 | Xmm, Xmm, Xmm |
19116/// | 3 | Ymm, Ymm, Mem |
19117/// | 4 | Ymm, Ymm, Ymm |
19118/// | 5 | Zmm, Zmm, Mem |
19119/// | 6 | Zmm, Zmm, Zmm |
19120/// +---+---------------+
19121/// ```
19122pub trait Vfmsubadd132phEmitter<A, B, C> {
19123    fn vfmsubadd132ph(&mut self, op0: A, op1: B, op2: C);
19124}
19125
19126impl<'a> Vfmsubadd132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19127    fn vfmsubadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19128        self.emit(VFMSUBADD132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19129    }
19130}
19131
19132impl<'a> Vfmsubadd132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19133    fn vfmsubadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19134        self.emit(VFMSUBADD132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19135    }
19136}
19137
19138impl<'a> Vfmsubadd132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19139    fn vfmsubadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19140        self.emit(VFMSUBADD132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19141    }
19142}
19143
19144impl<'a> Vfmsubadd132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19145    fn vfmsubadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19146        self.emit(VFMSUBADD132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19147    }
19148}
19149
19150impl<'a> Vfmsubadd132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19151    fn vfmsubadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19152        self.emit(VFMSUBADD132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19153    }
19154}
19155
19156impl<'a> Vfmsubadd132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19157    fn vfmsubadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19158        self.emit(VFMSUBADD132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19159    }
19160}
19161
19162/// `VFMSUBADD132PH_ER`.
19163///
19164/// Supported operand variants:
19165///
19166/// ```text
19167/// +---+---------------+
19168/// | # | Operands      |
19169/// +---+---------------+
19170/// | 1 | Zmm, Zmm, Zmm |
19171/// +---+---------------+
19172/// ```
19173pub trait Vfmsubadd132phErEmitter<A, B, C> {
19174    fn vfmsubadd132ph_er(&mut self, op0: A, op1: B, op2: C);
19175}
19176
19177impl<'a> Vfmsubadd132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19178    fn vfmsubadd132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19179        self.emit(VFMSUBADD132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19180    }
19181}
19182
19183/// `VFMSUBADD132PH_MASK`.
19184///
19185/// Supported operand variants:
19186///
19187/// ```text
19188/// +---+---------------+
19189/// | # | Operands      |
19190/// +---+---------------+
19191/// | 1 | Xmm, Xmm, Mem |
19192/// | 2 | Xmm, Xmm, Xmm |
19193/// | 3 | Ymm, Ymm, Mem |
19194/// | 4 | Ymm, Ymm, Ymm |
19195/// | 5 | Zmm, Zmm, Mem |
19196/// | 6 | Zmm, Zmm, Zmm |
19197/// +---+---------------+
19198/// ```
19199pub trait Vfmsubadd132phMaskEmitter<A, B, C> {
19200    fn vfmsubadd132ph_mask(&mut self, op0: A, op1: B, op2: C);
19201}
19202
19203impl<'a> Vfmsubadd132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19204    fn vfmsubadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19205        self.emit(VFMSUBADD132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19206    }
19207}
19208
19209impl<'a> Vfmsubadd132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19210    fn vfmsubadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19211        self.emit(VFMSUBADD132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19212    }
19213}
19214
19215impl<'a> Vfmsubadd132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19216    fn vfmsubadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19217        self.emit(VFMSUBADD132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19218    }
19219}
19220
19221impl<'a> Vfmsubadd132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19222    fn vfmsubadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19223        self.emit(VFMSUBADD132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19224    }
19225}
19226
19227impl<'a> Vfmsubadd132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19228    fn vfmsubadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19229        self.emit(VFMSUBADD132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19230    }
19231}
19232
19233impl<'a> Vfmsubadd132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19234    fn vfmsubadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19235        self.emit(VFMSUBADD132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19236    }
19237}
19238
19239/// `VFMSUBADD132PH_MASK_ER`.
19240///
19241/// Supported operand variants:
19242///
19243/// ```text
19244/// +---+---------------+
19245/// | # | Operands      |
19246/// +---+---------------+
19247/// | 1 | Zmm, Zmm, Zmm |
19248/// +---+---------------+
19249/// ```
19250pub trait Vfmsubadd132phMaskErEmitter<A, B, C> {
19251    fn vfmsubadd132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
19252}
19253
19254impl<'a> Vfmsubadd132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19255    fn vfmsubadd132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19256        self.emit(VFMSUBADD132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19257    }
19258}
19259
19260/// `VFMSUBADD132PH_MASKZ`.
19261///
19262/// Supported operand variants:
19263///
19264/// ```text
19265/// +---+---------------+
19266/// | # | Operands      |
19267/// +---+---------------+
19268/// | 1 | Xmm, Xmm, Mem |
19269/// | 2 | Xmm, Xmm, Xmm |
19270/// | 3 | Ymm, Ymm, Mem |
19271/// | 4 | Ymm, Ymm, Ymm |
19272/// | 5 | Zmm, Zmm, Mem |
19273/// | 6 | Zmm, Zmm, Zmm |
19274/// +---+---------------+
19275/// ```
19276pub trait Vfmsubadd132phMaskzEmitter<A, B, C> {
19277    fn vfmsubadd132ph_maskz(&mut self, op0: A, op1: B, op2: C);
19278}
19279
19280impl<'a> Vfmsubadd132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19281    fn vfmsubadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19282        self.emit(VFMSUBADD132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19283    }
19284}
19285
19286impl<'a> Vfmsubadd132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19287    fn vfmsubadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19288        self.emit(VFMSUBADD132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19289    }
19290}
19291
19292impl<'a> Vfmsubadd132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19293    fn vfmsubadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19294        self.emit(VFMSUBADD132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19295    }
19296}
19297
19298impl<'a> Vfmsubadd132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19299    fn vfmsubadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19300        self.emit(VFMSUBADD132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19301    }
19302}
19303
19304impl<'a> Vfmsubadd132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19305    fn vfmsubadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19306        self.emit(VFMSUBADD132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19307    }
19308}
19309
19310impl<'a> Vfmsubadd132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19311    fn vfmsubadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19312        self.emit(VFMSUBADD132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19313    }
19314}
19315
19316/// `VFMSUBADD132PH_MASKZ_ER`.
19317///
19318/// Supported operand variants:
19319///
19320/// ```text
19321/// +---+---------------+
19322/// | # | Operands      |
19323/// +---+---------------+
19324/// | 1 | Zmm, Zmm, Zmm |
19325/// +---+---------------+
19326/// ```
19327pub trait Vfmsubadd132phMaskzErEmitter<A, B, C> {
19328    fn vfmsubadd132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
19329}
19330
19331impl<'a> Vfmsubadd132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19332    fn vfmsubadd132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19333        self.emit(VFMSUBADD132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19334    }
19335}
19336
19337/// `VFMSUBADD213PH`.
19338///
19339/// Supported operand variants:
19340///
19341/// ```text
19342/// +---+---------------+
19343/// | # | Operands      |
19344/// +---+---------------+
19345/// | 1 | Xmm, Xmm, Mem |
19346/// | 2 | Xmm, Xmm, Xmm |
19347/// | 3 | Ymm, Ymm, Mem |
19348/// | 4 | Ymm, Ymm, Ymm |
19349/// | 5 | Zmm, Zmm, Mem |
19350/// | 6 | Zmm, Zmm, Zmm |
19351/// +---+---------------+
19352/// ```
19353pub trait Vfmsubadd213phEmitter<A, B, C> {
19354    fn vfmsubadd213ph(&mut self, op0: A, op1: B, op2: C);
19355}
19356
19357impl<'a> Vfmsubadd213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19358    fn vfmsubadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19359        self.emit(VFMSUBADD213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19360    }
19361}
19362
19363impl<'a> Vfmsubadd213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19364    fn vfmsubadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19365        self.emit(VFMSUBADD213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19366    }
19367}
19368
19369impl<'a> Vfmsubadd213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19370    fn vfmsubadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19371        self.emit(VFMSUBADD213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19372    }
19373}
19374
19375impl<'a> Vfmsubadd213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19376    fn vfmsubadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19377        self.emit(VFMSUBADD213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19378    }
19379}
19380
19381impl<'a> Vfmsubadd213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19382    fn vfmsubadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19383        self.emit(VFMSUBADD213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19384    }
19385}
19386
19387impl<'a> Vfmsubadd213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19388    fn vfmsubadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19389        self.emit(VFMSUBADD213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19390    }
19391}
19392
19393/// `VFMSUBADD213PH_ER`.
19394///
19395/// Supported operand variants:
19396///
19397/// ```text
19398/// +---+---------------+
19399/// | # | Operands      |
19400/// +---+---------------+
19401/// | 1 | Zmm, Zmm, Zmm |
19402/// +---+---------------+
19403/// ```
19404pub trait Vfmsubadd213phErEmitter<A, B, C> {
19405    fn vfmsubadd213ph_er(&mut self, op0: A, op1: B, op2: C);
19406}
19407
19408impl<'a> Vfmsubadd213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19409    fn vfmsubadd213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19410        self.emit(VFMSUBADD213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19411    }
19412}
19413
19414/// `VFMSUBADD213PH_MASK`.
19415///
19416/// Supported operand variants:
19417///
19418/// ```text
19419/// +---+---------------+
19420/// | # | Operands      |
19421/// +---+---------------+
19422/// | 1 | Xmm, Xmm, Mem |
19423/// | 2 | Xmm, Xmm, Xmm |
19424/// | 3 | Ymm, Ymm, Mem |
19425/// | 4 | Ymm, Ymm, Ymm |
19426/// | 5 | Zmm, Zmm, Mem |
19427/// | 6 | Zmm, Zmm, Zmm |
19428/// +---+---------------+
19429/// ```
19430pub trait Vfmsubadd213phMaskEmitter<A, B, C> {
19431    fn vfmsubadd213ph_mask(&mut self, op0: A, op1: B, op2: C);
19432}
19433
19434impl<'a> Vfmsubadd213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19435    fn vfmsubadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19436        self.emit(VFMSUBADD213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19437    }
19438}
19439
19440impl<'a> Vfmsubadd213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19441    fn vfmsubadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19442        self.emit(VFMSUBADD213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19443    }
19444}
19445
19446impl<'a> Vfmsubadd213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19447    fn vfmsubadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19448        self.emit(VFMSUBADD213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19449    }
19450}
19451
19452impl<'a> Vfmsubadd213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19453    fn vfmsubadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19454        self.emit(VFMSUBADD213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19455    }
19456}
19457
19458impl<'a> Vfmsubadd213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19459    fn vfmsubadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19460        self.emit(VFMSUBADD213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19461    }
19462}
19463
19464impl<'a> Vfmsubadd213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19465    fn vfmsubadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19466        self.emit(VFMSUBADD213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19467    }
19468}
19469
19470/// `VFMSUBADD213PH_MASK_ER`.
19471///
19472/// Supported operand variants:
19473///
19474/// ```text
19475/// +---+---------------+
19476/// | # | Operands      |
19477/// +---+---------------+
19478/// | 1 | Zmm, Zmm, Zmm |
19479/// +---+---------------+
19480/// ```
19481pub trait Vfmsubadd213phMaskErEmitter<A, B, C> {
19482    fn vfmsubadd213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
19483}
19484
19485impl<'a> Vfmsubadd213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19486    fn vfmsubadd213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19487        self.emit(VFMSUBADD213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19488    }
19489}
19490
19491/// `VFMSUBADD213PH_MASKZ`.
19492///
19493/// Supported operand variants:
19494///
19495/// ```text
19496/// +---+---------------+
19497/// | # | Operands      |
19498/// +---+---------------+
19499/// | 1 | Xmm, Xmm, Mem |
19500/// | 2 | Xmm, Xmm, Xmm |
19501/// | 3 | Ymm, Ymm, Mem |
19502/// | 4 | Ymm, Ymm, Ymm |
19503/// | 5 | Zmm, Zmm, Mem |
19504/// | 6 | Zmm, Zmm, Zmm |
19505/// +---+---------------+
19506/// ```
19507pub trait Vfmsubadd213phMaskzEmitter<A, B, C> {
19508    fn vfmsubadd213ph_maskz(&mut self, op0: A, op1: B, op2: C);
19509}
19510
19511impl<'a> Vfmsubadd213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19512    fn vfmsubadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19513        self.emit(VFMSUBADD213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19514    }
19515}
19516
19517impl<'a> Vfmsubadd213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19518    fn vfmsubadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19519        self.emit(VFMSUBADD213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19520    }
19521}
19522
19523impl<'a> Vfmsubadd213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19524    fn vfmsubadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19525        self.emit(VFMSUBADD213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19526    }
19527}
19528
19529impl<'a> Vfmsubadd213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19530    fn vfmsubadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19531        self.emit(VFMSUBADD213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19532    }
19533}
19534
19535impl<'a> Vfmsubadd213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19536    fn vfmsubadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19537        self.emit(VFMSUBADD213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19538    }
19539}
19540
19541impl<'a> Vfmsubadd213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19542    fn vfmsubadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19543        self.emit(VFMSUBADD213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19544    }
19545}
19546
19547/// `VFMSUBADD213PH_MASKZ_ER`.
19548///
19549/// Supported operand variants:
19550///
19551/// ```text
19552/// +---+---------------+
19553/// | # | Operands      |
19554/// +---+---------------+
19555/// | 1 | Zmm, Zmm, Zmm |
19556/// +---+---------------+
19557/// ```
19558pub trait Vfmsubadd213phMaskzErEmitter<A, B, C> {
19559    fn vfmsubadd213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
19560}
19561
19562impl<'a> Vfmsubadd213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19563    fn vfmsubadd213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19564        self.emit(VFMSUBADD213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19565    }
19566}
19567
19568/// `VFMSUBADD231PH`.
19569///
19570/// Supported operand variants:
19571///
19572/// ```text
19573/// +---+---------------+
19574/// | # | Operands      |
19575/// +---+---------------+
19576/// | 1 | Xmm, Xmm, Mem |
19577/// | 2 | Xmm, Xmm, Xmm |
19578/// | 3 | Ymm, Ymm, Mem |
19579/// | 4 | Ymm, Ymm, Ymm |
19580/// | 5 | Zmm, Zmm, Mem |
19581/// | 6 | Zmm, Zmm, Zmm |
19582/// +---+---------------+
19583/// ```
19584pub trait Vfmsubadd231phEmitter<A, B, C> {
19585    fn vfmsubadd231ph(&mut self, op0: A, op1: B, op2: C);
19586}
19587
19588impl<'a> Vfmsubadd231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19589    fn vfmsubadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19590        self.emit(VFMSUBADD231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19591    }
19592}
19593
19594impl<'a> Vfmsubadd231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19595    fn vfmsubadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19596        self.emit(VFMSUBADD231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19597    }
19598}
19599
19600impl<'a> Vfmsubadd231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19601    fn vfmsubadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19602        self.emit(VFMSUBADD231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19603    }
19604}
19605
19606impl<'a> Vfmsubadd231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19607    fn vfmsubadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19608        self.emit(VFMSUBADD231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19609    }
19610}
19611
19612impl<'a> Vfmsubadd231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19613    fn vfmsubadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19614        self.emit(VFMSUBADD231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19615    }
19616}
19617
19618impl<'a> Vfmsubadd231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19619    fn vfmsubadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19620        self.emit(VFMSUBADD231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19621    }
19622}
19623
19624/// `VFMSUBADD231PH_ER`.
19625///
19626/// Supported operand variants:
19627///
19628/// ```text
19629/// +---+---------------+
19630/// | # | Operands      |
19631/// +---+---------------+
19632/// | 1 | Zmm, Zmm, Zmm |
19633/// +---+---------------+
19634/// ```
19635pub trait Vfmsubadd231phErEmitter<A, B, C> {
19636    fn vfmsubadd231ph_er(&mut self, op0: A, op1: B, op2: C);
19637}
19638
19639impl<'a> Vfmsubadd231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19640    fn vfmsubadd231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19641        self.emit(VFMSUBADD231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19642    }
19643}
19644
19645/// `VFMSUBADD231PH_MASK`.
19646///
19647/// Supported operand variants:
19648///
19649/// ```text
19650/// +---+---------------+
19651/// | # | Operands      |
19652/// +---+---------------+
19653/// | 1 | Xmm, Xmm, Mem |
19654/// | 2 | Xmm, Xmm, Xmm |
19655/// | 3 | Ymm, Ymm, Mem |
19656/// | 4 | Ymm, Ymm, Ymm |
19657/// | 5 | Zmm, Zmm, Mem |
19658/// | 6 | Zmm, Zmm, Zmm |
19659/// +---+---------------+
19660/// ```
19661pub trait Vfmsubadd231phMaskEmitter<A, B, C> {
19662    fn vfmsubadd231ph_mask(&mut self, op0: A, op1: B, op2: C);
19663}
19664
19665impl<'a> Vfmsubadd231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19666    fn vfmsubadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19667        self.emit(VFMSUBADD231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19668    }
19669}
19670
19671impl<'a> Vfmsubadd231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19672    fn vfmsubadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19673        self.emit(VFMSUBADD231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19674    }
19675}
19676
19677impl<'a> Vfmsubadd231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19678    fn vfmsubadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19679        self.emit(VFMSUBADD231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19680    }
19681}
19682
19683impl<'a> Vfmsubadd231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19684    fn vfmsubadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19685        self.emit(VFMSUBADD231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19686    }
19687}
19688
19689impl<'a> Vfmsubadd231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19690    fn vfmsubadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19691        self.emit(VFMSUBADD231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19692    }
19693}
19694
19695impl<'a> Vfmsubadd231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19696    fn vfmsubadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19697        self.emit(VFMSUBADD231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19698    }
19699}
19700
19701/// `VFMSUBADD231PH_MASK_ER`.
19702///
19703/// Supported operand variants:
19704///
19705/// ```text
19706/// +---+---------------+
19707/// | # | Operands      |
19708/// +---+---------------+
19709/// | 1 | Zmm, Zmm, Zmm |
19710/// +---+---------------+
19711/// ```
19712pub trait Vfmsubadd231phMaskErEmitter<A, B, C> {
19713    fn vfmsubadd231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
19714}
19715
19716impl<'a> Vfmsubadd231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19717    fn vfmsubadd231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19718        self.emit(VFMSUBADD231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19719    }
19720}
19721
19722/// `VFMSUBADD231PH_MASKZ`.
19723///
19724/// Supported operand variants:
19725///
19726/// ```text
19727/// +---+---------------+
19728/// | # | Operands      |
19729/// +---+---------------+
19730/// | 1 | Xmm, Xmm, Mem |
19731/// | 2 | Xmm, Xmm, Xmm |
19732/// | 3 | Ymm, Ymm, Mem |
19733/// | 4 | Ymm, Ymm, Ymm |
19734/// | 5 | Zmm, Zmm, Mem |
19735/// | 6 | Zmm, Zmm, Zmm |
19736/// +---+---------------+
19737/// ```
19738pub trait Vfmsubadd231phMaskzEmitter<A, B, C> {
19739    fn vfmsubadd231ph_maskz(&mut self, op0: A, op1: B, op2: C);
19740}
19741
19742impl<'a> Vfmsubadd231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19743    fn vfmsubadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19744        self.emit(VFMSUBADD231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19745    }
19746}
19747
19748impl<'a> Vfmsubadd231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19749    fn vfmsubadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19750        self.emit(VFMSUBADD231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19751    }
19752}
19753
19754impl<'a> Vfmsubadd231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19755    fn vfmsubadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19756        self.emit(VFMSUBADD231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19757    }
19758}
19759
19760impl<'a> Vfmsubadd231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19761    fn vfmsubadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19762        self.emit(VFMSUBADD231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19763    }
19764}
19765
19766impl<'a> Vfmsubadd231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19767    fn vfmsubadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19768        self.emit(VFMSUBADD231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19769    }
19770}
19771
19772impl<'a> Vfmsubadd231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19773    fn vfmsubadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19774        self.emit(VFMSUBADD231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19775    }
19776}
19777
19778/// `VFMSUBADD231PH_MASKZ_ER`.
19779///
19780/// Supported operand variants:
19781///
19782/// ```text
19783/// +---+---------------+
19784/// | # | Operands      |
19785/// +---+---------------+
19786/// | 1 | Zmm, Zmm, Zmm |
19787/// +---+---------------+
19788/// ```
19789pub trait Vfmsubadd231phMaskzErEmitter<A, B, C> {
19790    fn vfmsubadd231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
19791}
19792
19793impl<'a> Vfmsubadd231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19794    fn vfmsubadd231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19795        self.emit(VFMSUBADD231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19796    }
19797}
19798
19799/// `VFMULCPH`.
19800///
19801/// Supported operand variants:
19802///
19803/// ```text
19804/// +---+---------------+
19805/// | # | Operands      |
19806/// +---+---------------+
19807/// | 1 | Xmm, Xmm, Mem |
19808/// | 2 | Xmm, Xmm, Xmm |
19809/// | 3 | Ymm, Ymm, Mem |
19810/// | 4 | Ymm, Ymm, Ymm |
19811/// | 5 | Zmm, Zmm, Mem |
19812/// | 6 | Zmm, Zmm, Zmm |
19813/// +---+---------------+
19814/// ```
19815pub trait VfmulcphEmitter<A, B, C> {
19816    fn vfmulcph(&mut self, op0: A, op1: B, op2: C);
19817}
19818
19819impl<'a> VfmulcphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19820    fn vfmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19821        self.emit(VFMULCPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19822    }
19823}
19824
19825impl<'a> VfmulcphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19826    fn vfmulcph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19827        self.emit(VFMULCPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19828    }
19829}
19830
19831impl<'a> VfmulcphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19832    fn vfmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19833        self.emit(VFMULCPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19834    }
19835}
19836
19837impl<'a> VfmulcphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19838    fn vfmulcph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19839        self.emit(VFMULCPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19840    }
19841}
19842
19843impl<'a> VfmulcphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19844    fn vfmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19845        self.emit(VFMULCPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19846    }
19847}
19848
19849impl<'a> VfmulcphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19850    fn vfmulcph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19851        self.emit(VFMULCPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19852    }
19853}
19854
19855/// `VFMULCPH_ER`.
19856///
19857/// Supported operand variants:
19858///
19859/// ```text
19860/// +---+---------------+
19861/// | # | Operands      |
19862/// +---+---------------+
19863/// | 1 | Zmm, Zmm, Zmm |
19864/// +---+---------------+
19865/// ```
19866pub trait VfmulcphErEmitter<A, B, C> {
19867    fn vfmulcph_er(&mut self, op0: A, op1: B, op2: C);
19868}
19869
19870impl<'a> VfmulcphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19871    fn vfmulcph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19872        self.emit(VFMULCPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19873    }
19874}
19875
19876/// `VFMULCPH_MASK`.
19877///
19878/// Supported operand variants:
19879///
19880/// ```text
19881/// +---+---------------+
19882/// | # | Operands      |
19883/// +---+---------------+
19884/// | 1 | Xmm, Xmm, Mem |
19885/// | 2 | Xmm, Xmm, Xmm |
19886/// | 3 | Ymm, Ymm, Mem |
19887/// | 4 | Ymm, Ymm, Ymm |
19888/// | 5 | Zmm, Zmm, Mem |
19889/// | 6 | Zmm, Zmm, Zmm |
19890/// +---+---------------+
19891/// ```
19892pub trait VfmulcphMaskEmitter<A, B, C> {
19893    fn vfmulcph_mask(&mut self, op0: A, op1: B, op2: C);
19894}
19895
19896impl<'a> VfmulcphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19897    fn vfmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19898        self.emit(VFMULCPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19899    }
19900}
19901
19902impl<'a> VfmulcphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19903    fn vfmulcph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19904        self.emit(VFMULCPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19905    }
19906}
19907
19908impl<'a> VfmulcphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19909    fn vfmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19910        self.emit(VFMULCPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19911    }
19912}
19913
19914impl<'a> VfmulcphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19915    fn vfmulcph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19916        self.emit(VFMULCPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19917    }
19918}
19919
19920impl<'a> VfmulcphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19921    fn vfmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19922        self.emit(VFMULCPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19923    }
19924}
19925
19926impl<'a> VfmulcphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
19927    fn vfmulcph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
19928        self.emit(VFMULCPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19929    }
19930}
19931
19932/// `VFMULCPH_MASK_ER`.
19933///
19934/// Supported operand variants:
19935///
19936/// ```text
19937/// +---+---------------+
19938/// | # | Operands      |
19939/// +---+---------------+
19940/// | 1 | Zmm, Zmm, Zmm |
19941/// +---+---------------+
19942/// ```
19943pub trait VfmulcphMaskErEmitter<A, B, C> {
19944    fn vfmulcph_mask_er(&mut self, op0: A, op1: B, op2: C);
19945}
19946
19947impl<'a> VfmulcphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19948    fn vfmulcph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19949        self.emit(VFMULCPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19950    }
19951}
19952
19953/// `VFMULCPH_MASKZ`.
19954///
19955/// Supported operand variants:
19956///
19957/// ```text
19958/// +---+---------------+
19959/// | # | Operands      |
19960/// +---+---------------+
19961/// | 1 | Xmm, Xmm, Mem |
19962/// | 2 | Xmm, Xmm, Xmm |
19963/// | 3 | Ymm, Ymm, Mem |
19964/// | 4 | Ymm, Ymm, Ymm |
19965/// | 5 | Zmm, Zmm, Mem |
19966/// | 6 | Zmm, Zmm, Zmm |
19967/// +---+---------------+
19968/// ```
19969pub trait VfmulcphMaskzEmitter<A, B, C> {
19970    fn vfmulcph_maskz(&mut self, op0: A, op1: B, op2: C);
19971}
19972
19973impl<'a> VfmulcphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
19974    fn vfmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
19975        self.emit(VFMULCPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19976    }
19977}
19978
19979impl<'a> VfmulcphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
19980    fn vfmulcph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
19981        self.emit(VFMULCPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19982    }
19983}
19984
19985impl<'a> VfmulcphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
19986    fn vfmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
19987        self.emit(VFMULCPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19988    }
19989}
19990
19991impl<'a> VfmulcphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
19992    fn vfmulcph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
19993        self.emit(VFMULCPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
19994    }
19995}
19996
19997impl<'a> VfmulcphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
19998    fn vfmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
19999        self.emit(VFMULCPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20000    }
20001}
20002
20003impl<'a> VfmulcphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20004    fn vfmulcph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20005        self.emit(VFMULCPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20006    }
20007}
20008
20009/// `VFMULCPH_MASKZ_ER`.
20010///
20011/// Supported operand variants:
20012///
20013/// ```text
20014/// +---+---------------+
20015/// | # | Operands      |
20016/// +---+---------------+
20017/// | 1 | Zmm, Zmm, Zmm |
20018/// +---+---------------+
20019/// ```
20020pub trait VfmulcphMaskzErEmitter<A, B, C> {
20021    fn vfmulcph_maskz_er(&mut self, op0: A, op1: B, op2: C);
20022}
20023
20024impl<'a> VfmulcphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20025    fn vfmulcph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20026        self.emit(VFMULCPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20027    }
20028}
20029
20030/// `VFMULCSH`.
20031///
20032/// Supported operand variants:
20033///
20034/// ```text
20035/// +---+---------------+
20036/// | # | Operands      |
20037/// +---+---------------+
20038/// | 1 | Xmm, Xmm, Mem |
20039/// | 2 | Xmm, Xmm, Xmm |
20040/// +---+---------------+
20041/// ```
20042pub trait VfmulcshEmitter<A, B, C> {
20043    fn vfmulcsh(&mut self, op0: A, op1: B, op2: C);
20044}
20045
20046impl<'a> VfmulcshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20047    fn vfmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20048        self.emit(VFMULCSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20049    }
20050}
20051
20052impl<'a> VfmulcshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20053    fn vfmulcsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20054        self.emit(VFMULCSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20055    }
20056}
20057
20058/// `VFMULCSH_ER`.
20059///
20060/// Supported operand variants:
20061///
20062/// ```text
20063/// +---+---------------+
20064/// | # | Operands      |
20065/// +---+---------------+
20066/// | 1 | Xmm, Xmm, Xmm |
20067/// +---+---------------+
20068/// ```
20069pub trait VfmulcshErEmitter<A, B, C> {
20070    fn vfmulcsh_er(&mut self, op0: A, op1: B, op2: C);
20071}
20072
20073impl<'a> VfmulcshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20074    fn vfmulcsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20075        self.emit(VFMULCSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20076    }
20077}
20078
20079/// `VFMULCSH_MASK`.
20080///
20081/// Supported operand variants:
20082///
20083/// ```text
20084/// +---+---------------+
20085/// | # | Operands      |
20086/// +---+---------------+
20087/// | 1 | Xmm, Xmm, Mem |
20088/// | 2 | Xmm, Xmm, Xmm |
20089/// +---+---------------+
20090/// ```
20091pub trait VfmulcshMaskEmitter<A, B, C> {
20092    fn vfmulcsh_mask(&mut self, op0: A, op1: B, op2: C);
20093}
20094
20095impl<'a> VfmulcshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20096    fn vfmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20097        self.emit(VFMULCSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20098    }
20099}
20100
20101impl<'a> VfmulcshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20102    fn vfmulcsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20103        self.emit(VFMULCSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20104    }
20105}
20106
20107/// `VFMULCSH_MASK_ER`.
20108///
20109/// Supported operand variants:
20110///
20111/// ```text
20112/// +---+---------------+
20113/// | # | Operands      |
20114/// +---+---------------+
20115/// | 1 | Xmm, Xmm, Xmm |
20116/// +---+---------------+
20117/// ```
20118pub trait VfmulcshMaskErEmitter<A, B, C> {
20119    fn vfmulcsh_mask_er(&mut self, op0: A, op1: B, op2: C);
20120}
20121
20122impl<'a> VfmulcshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20123    fn vfmulcsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20124        self.emit(VFMULCSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20125    }
20126}
20127
20128/// `VFMULCSH_MASKZ`.
20129///
20130/// Supported operand variants:
20131///
20132/// ```text
20133/// +---+---------------+
20134/// | # | Operands      |
20135/// +---+---------------+
20136/// | 1 | Xmm, Xmm, Mem |
20137/// | 2 | Xmm, Xmm, Xmm |
20138/// +---+---------------+
20139/// ```
20140pub trait VfmulcshMaskzEmitter<A, B, C> {
20141    fn vfmulcsh_maskz(&mut self, op0: A, op1: B, op2: C);
20142}
20143
20144impl<'a> VfmulcshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20145    fn vfmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20146        self.emit(VFMULCSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20147    }
20148}
20149
20150impl<'a> VfmulcshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20151    fn vfmulcsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20152        self.emit(VFMULCSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20153    }
20154}
20155
20156/// `VFMULCSH_MASKZ_ER`.
20157///
20158/// Supported operand variants:
20159///
20160/// ```text
20161/// +---+---------------+
20162/// | # | Operands      |
20163/// +---+---------------+
20164/// | 1 | Xmm, Xmm, Xmm |
20165/// +---+---------------+
20166/// ```
20167pub trait VfmulcshMaskzErEmitter<A, B, C> {
20168    fn vfmulcsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
20169}
20170
20171impl<'a> VfmulcshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20172    fn vfmulcsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20173        self.emit(VFMULCSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20174    }
20175}
20176
20177/// `VFNMADD132PH`.
20178///
20179/// Supported operand variants:
20180///
20181/// ```text
20182/// +---+---------------+
20183/// | # | Operands      |
20184/// +---+---------------+
20185/// | 1 | Xmm, Xmm, Mem |
20186/// | 2 | Xmm, Xmm, Xmm |
20187/// | 3 | Ymm, Ymm, Mem |
20188/// | 4 | Ymm, Ymm, Ymm |
20189/// | 5 | Zmm, Zmm, Mem |
20190/// | 6 | Zmm, Zmm, Zmm |
20191/// +---+---------------+
20192/// ```
20193pub trait Vfnmadd132phEmitter<A, B, C> {
20194    fn vfnmadd132ph(&mut self, op0: A, op1: B, op2: C);
20195}
20196
20197impl<'a> Vfnmadd132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20198    fn vfnmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20199        self.emit(VFNMADD132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20200    }
20201}
20202
20203impl<'a> Vfnmadd132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20204    fn vfnmadd132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20205        self.emit(VFNMADD132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20206    }
20207}
20208
20209impl<'a> Vfnmadd132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20210    fn vfnmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20211        self.emit(VFNMADD132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20212    }
20213}
20214
20215impl<'a> Vfnmadd132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20216    fn vfnmadd132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20217        self.emit(VFNMADD132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20218    }
20219}
20220
20221impl<'a> Vfnmadd132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20222    fn vfnmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20223        self.emit(VFNMADD132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20224    }
20225}
20226
20227impl<'a> Vfnmadd132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20228    fn vfnmadd132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20229        self.emit(VFNMADD132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20230    }
20231}
20232
20233/// `VFNMADD132PH_ER`.
20234///
20235/// Supported operand variants:
20236///
20237/// ```text
20238/// +---+---------------+
20239/// | # | Operands      |
20240/// +---+---------------+
20241/// | 1 | Zmm, Zmm, Zmm |
20242/// +---+---------------+
20243/// ```
20244pub trait Vfnmadd132phErEmitter<A, B, C> {
20245    fn vfnmadd132ph_er(&mut self, op0: A, op1: B, op2: C);
20246}
20247
20248impl<'a> Vfnmadd132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20249    fn vfnmadd132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20250        self.emit(VFNMADD132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20251    }
20252}
20253
20254/// `VFNMADD132PH_MASK`.
20255///
20256/// Supported operand variants:
20257///
20258/// ```text
20259/// +---+---------------+
20260/// | # | Operands      |
20261/// +---+---------------+
20262/// | 1 | Xmm, Xmm, Mem |
20263/// | 2 | Xmm, Xmm, Xmm |
20264/// | 3 | Ymm, Ymm, Mem |
20265/// | 4 | Ymm, Ymm, Ymm |
20266/// | 5 | Zmm, Zmm, Mem |
20267/// | 6 | Zmm, Zmm, Zmm |
20268/// +---+---------------+
20269/// ```
20270pub trait Vfnmadd132phMaskEmitter<A, B, C> {
20271    fn vfnmadd132ph_mask(&mut self, op0: A, op1: B, op2: C);
20272}
20273
20274impl<'a> Vfnmadd132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20275    fn vfnmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20276        self.emit(VFNMADD132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20277    }
20278}
20279
20280impl<'a> Vfnmadd132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20281    fn vfnmadd132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20282        self.emit(VFNMADD132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20283    }
20284}
20285
20286impl<'a> Vfnmadd132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20287    fn vfnmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20288        self.emit(VFNMADD132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20289    }
20290}
20291
20292impl<'a> Vfnmadd132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20293    fn vfnmadd132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20294        self.emit(VFNMADD132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20295    }
20296}
20297
20298impl<'a> Vfnmadd132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20299    fn vfnmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20300        self.emit(VFNMADD132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20301    }
20302}
20303
20304impl<'a> Vfnmadd132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20305    fn vfnmadd132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20306        self.emit(VFNMADD132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20307    }
20308}
20309
20310/// `VFNMADD132PH_MASK_ER`.
20311///
20312/// Supported operand variants:
20313///
20314/// ```text
20315/// +---+---------------+
20316/// | # | Operands      |
20317/// +---+---------------+
20318/// | 1 | Zmm, Zmm, Zmm |
20319/// +---+---------------+
20320/// ```
20321pub trait Vfnmadd132phMaskErEmitter<A, B, C> {
20322    fn vfnmadd132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
20323}
20324
20325impl<'a> Vfnmadd132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20326    fn vfnmadd132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20327        self.emit(VFNMADD132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20328    }
20329}
20330
20331/// `VFNMADD132PH_MASKZ`.
20332///
20333/// Supported operand variants:
20334///
20335/// ```text
20336/// +---+---------------+
20337/// | # | Operands      |
20338/// +---+---------------+
20339/// | 1 | Xmm, Xmm, Mem |
20340/// | 2 | Xmm, Xmm, Xmm |
20341/// | 3 | Ymm, Ymm, Mem |
20342/// | 4 | Ymm, Ymm, Ymm |
20343/// | 5 | Zmm, Zmm, Mem |
20344/// | 6 | Zmm, Zmm, Zmm |
20345/// +---+---------------+
20346/// ```
20347pub trait Vfnmadd132phMaskzEmitter<A, B, C> {
20348    fn vfnmadd132ph_maskz(&mut self, op0: A, op1: B, op2: C);
20349}
20350
20351impl<'a> Vfnmadd132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20352    fn vfnmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20353        self.emit(VFNMADD132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20354    }
20355}
20356
20357impl<'a> Vfnmadd132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20358    fn vfnmadd132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20359        self.emit(VFNMADD132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20360    }
20361}
20362
20363impl<'a> Vfnmadd132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20364    fn vfnmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20365        self.emit(VFNMADD132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20366    }
20367}
20368
20369impl<'a> Vfnmadd132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20370    fn vfnmadd132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20371        self.emit(VFNMADD132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20372    }
20373}
20374
20375impl<'a> Vfnmadd132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20376    fn vfnmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20377        self.emit(VFNMADD132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20378    }
20379}
20380
20381impl<'a> Vfnmadd132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20382    fn vfnmadd132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20383        self.emit(VFNMADD132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20384    }
20385}
20386
20387/// `VFNMADD132PH_MASKZ_ER`.
20388///
20389/// Supported operand variants:
20390///
20391/// ```text
20392/// +---+---------------+
20393/// | # | Operands      |
20394/// +---+---------------+
20395/// | 1 | Zmm, Zmm, Zmm |
20396/// +---+---------------+
20397/// ```
20398pub trait Vfnmadd132phMaskzErEmitter<A, B, C> {
20399    fn vfnmadd132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
20400}
20401
20402impl<'a> Vfnmadd132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20403    fn vfnmadd132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20404        self.emit(VFNMADD132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20405    }
20406}
20407
20408/// `VFNMADD132SH`.
20409///
20410/// Supported operand variants:
20411///
20412/// ```text
20413/// +---+---------------+
20414/// | # | Operands      |
20415/// +---+---------------+
20416/// | 1 | Xmm, Xmm, Mem |
20417/// | 2 | Xmm, Xmm, Xmm |
20418/// +---+---------------+
20419/// ```
20420pub trait Vfnmadd132shEmitter<A, B, C> {
20421    fn vfnmadd132sh(&mut self, op0: A, op1: B, op2: C);
20422}
20423
20424impl<'a> Vfnmadd132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20425    fn vfnmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20426        self.emit(VFNMADD132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20427    }
20428}
20429
20430impl<'a> Vfnmadd132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20431    fn vfnmadd132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20432        self.emit(VFNMADD132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20433    }
20434}
20435
20436/// `VFNMADD132SH_ER`.
20437///
20438/// Supported operand variants:
20439///
20440/// ```text
20441/// +---+---------------+
20442/// | # | Operands      |
20443/// +---+---------------+
20444/// | 1 | Xmm, Xmm, Xmm |
20445/// +---+---------------+
20446/// ```
20447pub trait Vfnmadd132shErEmitter<A, B, C> {
20448    fn vfnmadd132sh_er(&mut self, op0: A, op1: B, op2: C);
20449}
20450
20451impl<'a> Vfnmadd132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20452    fn vfnmadd132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20453        self.emit(VFNMADD132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20454    }
20455}
20456
20457/// `VFNMADD132SH_MASK`.
20458///
20459/// Supported operand variants:
20460///
20461/// ```text
20462/// +---+---------------+
20463/// | # | Operands      |
20464/// +---+---------------+
20465/// | 1 | Xmm, Xmm, Mem |
20466/// | 2 | Xmm, Xmm, Xmm |
20467/// +---+---------------+
20468/// ```
20469pub trait Vfnmadd132shMaskEmitter<A, B, C> {
20470    fn vfnmadd132sh_mask(&mut self, op0: A, op1: B, op2: C);
20471}
20472
20473impl<'a> Vfnmadd132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20474    fn vfnmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20475        self.emit(VFNMADD132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20476    }
20477}
20478
20479impl<'a> Vfnmadd132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20480    fn vfnmadd132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20481        self.emit(VFNMADD132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20482    }
20483}
20484
20485/// `VFNMADD132SH_MASK_ER`.
20486///
20487/// Supported operand variants:
20488///
20489/// ```text
20490/// +---+---------------+
20491/// | # | Operands      |
20492/// +---+---------------+
20493/// | 1 | Xmm, Xmm, Xmm |
20494/// +---+---------------+
20495/// ```
20496pub trait Vfnmadd132shMaskErEmitter<A, B, C> {
20497    fn vfnmadd132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
20498}
20499
20500impl<'a> Vfnmadd132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20501    fn vfnmadd132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20502        self.emit(VFNMADD132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20503    }
20504}
20505
20506/// `VFNMADD132SH_MASKZ`.
20507///
20508/// Supported operand variants:
20509///
20510/// ```text
20511/// +---+---------------+
20512/// | # | Operands      |
20513/// +---+---------------+
20514/// | 1 | Xmm, Xmm, Mem |
20515/// | 2 | Xmm, Xmm, Xmm |
20516/// +---+---------------+
20517/// ```
20518pub trait Vfnmadd132shMaskzEmitter<A, B, C> {
20519    fn vfnmadd132sh_maskz(&mut self, op0: A, op1: B, op2: C);
20520}
20521
20522impl<'a> Vfnmadd132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20523    fn vfnmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20524        self.emit(VFNMADD132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20525    }
20526}
20527
20528impl<'a> Vfnmadd132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20529    fn vfnmadd132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20530        self.emit(VFNMADD132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20531    }
20532}
20533
20534/// `VFNMADD132SH_MASKZ_ER`.
20535///
20536/// Supported operand variants:
20537///
20538/// ```text
20539/// +---+---------------+
20540/// | # | Operands      |
20541/// +---+---------------+
20542/// | 1 | Xmm, Xmm, Xmm |
20543/// +---+---------------+
20544/// ```
20545pub trait Vfnmadd132shMaskzErEmitter<A, B, C> {
20546    fn vfnmadd132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
20547}
20548
20549impl<'a> Vfnmadd132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20550    fn vfnmadd132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20551        self.emit(VFNMADD132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20552    }
20553}
20554
20555/// `VFNMADD213PH`.
20556///
20557/// Supported operand variants:
20558///
20559/// ```text
20560/// +---+---------------+
20561/// | # | Operands      |
20562/// +---+---------------+
20563/// | 1 | Xmm, Xmm, Mem |
20564/// | 2 | Xmm, Xmm, Xmm |
20565/// | 3 | Ymm, Ymm, Mem |
20566/// | 4 | Ymm, Ymm, Ymm |
20567/// | 5 | Zmm, Zmm, Mem |
20568/// | 6 | Zmm, Zmm, Zmm |
20569/// +---+---------------+
20570/// ```
20571pub trait Vfnmadd213phEmitter<A, B, C> {
20572    fn vfnmadd213ph(&mut self, op0: A, op1: B, op2: C);
20573}
20574
20575impl<'a> Vfnmadd213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20576    fn vfnmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20577        self.emit(VFNMADD213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20578    }
20579}
20580
20581impl<'a> Vfnmadd213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20582    fn vfnmadd213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20583        self.emit(VFNMADD213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20584    }
20585}
20586
20587impl<'a> Vfnmadd213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20588    fn vfnmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20589        self.emit(VFNMADD213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20590    }
20591}
20592
20593impl<'a> Vfnmadd213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20594    fn vfnmadd213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20595        self.emit(VFNMADD213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20596    }
20597}
20598
20599impl<'a> Vfnmadd213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20600    fn vfnmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20601        self.emit(VFNMADD213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20602    }
20603}
20604
20605impl<'a> Vfnmadd213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20606    fn vfnmadd213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20607        self.emit(VFNMADD213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20608    }
20609}
20610
20611/// `VFNMADD213PH_ER`.
20612///
20613/// Supported operand variants:
20614///
20615/// ```text
20616/// +---+---------------+
20617/// | # | Operands      |
20618/// +---+---------------+
20619/// | 1 | Zmm, Zmm, Zmm |
20620/// +---+---------------+
20621/// ```
20622pub trait Vfnmadd213phErEmitter<A, B, C> {
20623    fn vfnmadd213ph_er(&mut self, op0: A, op1: B, op2: C);
20624}
20625
20626impl<'a> Vfnmadd213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20627    fn vfnmadd213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20628        self.emit(VFNMADD213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20629    }
20630}
20631
20632/// `VFNMADD213PH_MASK`.
20633///
20634/// Supported operand variants:
20635///
20636/// ```text
20637/// +---+---------------+
20638/// | # | Operands      |
20639/// +---+---------------+
20640/// | 1 | Xmm, Xmm, Mem |
20641/// | 2 | Xmm, Xmm, Xmm |
20642/// | 3 | Ymm, Ymm, Mem |
20643/// | 4 | Ymm, Ymm, Ymm |
20644/// | 5 | Zmm, Zmm, Mem |
20645/// | 6 | Zmm, Zmm, Zmm |
20646/// +---+---------------+
20647/// ```
20648pub trait Vfnmadd213phMaskEmitter<A, B, C> {
20649    fn vfnmadd213ph_mask(&mut self, op0: A, op1: B, op2: C);
20650}
20651
20652impl<'a> Vfnmadd213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20653    fn vfnmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20654        self.emit(VFNMADD213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20655    }
20656}
20657
20658impl<'a> Vfnmadd213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20659    fn vfnmadd213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20660        self.emit(VFNMADD213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20661    }
20662}
20663
20664impl<'a> Vfnmadd213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20665    fn vfnmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20666        self.emit(VFNMADD213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20667    }
20668}
20669
20670impl<'a> Vfnmadd213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20671    fn vfnmadd213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20672        self.emit(VFNMADD213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20673    }
20674}
20675
20676impl<'a> Vfnmadd213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20677    fn vfnmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20678        self.emit(VFNMADD213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20679    }
20680}
20681
20682impl<'a> Vfnmadd213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20683    fn vfnmadd213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20684        self.emit(VFNMADD213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20685    }
20686}
20687
20688/// `VFNMADD213PH_MASK_ER`.
20689///
20690/// Supported operand variants:
20691///
20692/// ```text
20693/// +---+---------------+
20694/// | # | Operands      |
20695/// +---+---------------+
20696/// | 1 | Zmm, Zmm, Zmm |
20697/// +---+---------------+
20698/// ```
20699pub trait Vfnmadd213phMaskErEmitter<A, B, C> {
20700    fn vfnmadd213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
20701}
20702
20703impl<'a> Vfnmadd213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20704    fn vfnmadd213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20705        self.emit(VFNMADD213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20706    }
20707}
20708
20709/// `VFNMADD213PH_MASKZ`.
20710///
20711/// Supported operand variants:
20712///
20713/// ```text
20714/// +---+---------------+
20715/// | # | Operands      |
20716/// +---+---------------+
20717/// | 1 | Xmm, Xmm, Mem |
20718/// | 2 | Xmm, Xmm, Xmm |
20719/// | 3 | Ymm, Ymm, Mem |
20720/// | 4 | Ymm, Ymm, Ymm |
20721/// | 5 | Zmm, Zmm, Mem |
20722/// | 6 | Zmm, Zmm, Zmm |
20723/// +---+---------------+
20724/// ```
20725pub trait Vfnmadd213phMaskzEmitter<A, B, C> {
20726    fn vfnmadd213ph_maskz(&mut self, op0: A, op1: B, op2: C);
20727}
20728
20729impl<'a> Vfnmadd213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20730    fn vfnmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20731        self.emit(VFNMADD213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20732    }
20733}
20734
20735impl<'a> Vfnmadd213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20736    fn vfnmadd213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20737        self.emit(VFNMADD213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20738    }
20739}
20740
20741impl<'a> Vfnmadd213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20742    fn vfnmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20743        self.emit(VFNMADD213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20744    }
20745}
20746
20747impl<'a> Vfnmadd213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20748    fn vfnmadd213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20749        self.emit(VFNMADD213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20750    }
20751}
20752
20753impl<'a> Vfnmadd213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20754    fn vfnmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20755        self.emit(VFNMADD213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20756    }
20757}
20758
20759impl<'a> Vfnmadd213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20760    fn vfnmadd213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20761        self.emit(VFNMADD213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20762    }
20763}
20764
20765/// `VFNMADD213PH_MASKZ_ER`.
20766///
20767/// Supported operand variants:
20768///
20769/// ```text
20770/// +---+---------------+
20771/// | # | Operands      |
20772/// +---+---------------+
20773/// | 1 | Zmm, Zmm, Zmm |
20774/// +---+---------------+
20775/// ```
20776pub trait Vfnmadd213phMaskzErEmitter<A, B, C> {
20777    fn vfnmadd213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
20778}
20779
20780impl<'a> Vfnmadd213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20781    fn vfnmadd213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20782        self.emit(VFNMADD213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20783    }
20784}
20785
20786/// `VFNMADD213SH`.
20787///
20788/// Supported operand variants:
20789///
20790/// ```text
20791/// +---+---------------+
20792/// | # | Operands      |
20793/// +---+---------------+
20794/// | 1 | Xmm, Xmm, Mem |
20795/// | 2 | Xmm, Xmm, Xmm |
20796/// +---+---------------+
20797/// ```
20798pub trait Vfnmadd213shEmitter<A, B, C> {
20799    fn vfnmadd213sh(&mut self, op0: A, op1: B, op2: C);
20800}
20801
20802impl<'a> Vfnmadd213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20803    fn vfnmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20804        self.emit(VFNMADD213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20805    }
20806}
20807
20808impl<'a> Vfnmadd213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20809    fn vfnmadd213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20810        self.emit(VFNMADD213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20811    }
20812}
20813
20814/// `VFNMADD213SH_ER`.
20815///
20816/// Supported operand variants:
20817///
20818/// ```text
20819/// +---+---------------+
20820/// | # | Operands      |
20821/// +---+---------------+
20822/// | 1 | Xmm, Xmm, Xmm |
20823/// +---+---------------+
20824/// ```
20825pub trait Vfnmadd213shErEmitter<A, B, C> {
20826    fn vfnmadd213sh_er(&mut self, op0: A, op1: B, op2: C);
20827}
20828
20829impl<'a> Vfnmadd213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20830    fn vfnmadd213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20831        self.emit(VFNMADD213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20832    }
20833}
20834
20835/// `VFNMADD213SH_MASK`.
20836///
20837/// Supported operand variants:
20838///
20839/// ```text
20840/// +---+---------------+
20841/// | # | Operands      |
20842/// +---+---------------+
20843/// | 1 | Xmm, Xmm, Mem |
20844/// | 2 | Xmm, Xmm, Xmm |
20845/// +---+---------------+
20846/// ```
20847pub trait Vfnmadd213shMaskEmitter<A, B, C> {
20848    fn vfnmadd213sh_mask(&mut self, op0: A, op1: B, op2: C);
20849}
20850
20851impl<'a> Vfnmadd213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20852    fn vfnmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20853        self.emit(VFNMADD213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20854    }
20855}
20856
20857impl<'a> Vfnmadd213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20858    fn vfnmadd213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20859        self.emit(VFNMADD213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20860    }
20861}
20862
20863/// `VFNMADD213SH_MASK_ER`.
20864///
20865/// Supported operand variants:
20866///
20867/// ```text
20868/// +---+---------------+
20869/// | # | Operands      |
20870/// +---+---------------+
20871/// | 1 | Xmm, Xmm, Xmm |
20872/// +---+---------------+
20873/// ```
20874pub trait Vfnmadd213shMaskErEmitter<A, B, C> {
20875    fn vfnmadd213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
20876}
20877
20878impl<'a> Vfnmadd213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20879    fn vfnmadd213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20880        self.emit(VFNMADD213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20881    }
20882}
20883
20884/// `VFNMADD213SH_MASKZ`.
20885///
20886/// Supported operand variants:
20887///
20888/// ```text
20889/// +---+---------------+
20890/// | # | Operands      |
20891/// +---+---------------+
20892/// | 1 | Xmm, Xmm, Mem |
20893/// | 2 | Xmm, Xmm, Xmm |
20894/// +---+---------------+
20895/// ```
20896pub trait Vfnmadd213shMaskzEmitter<A, B, C> {
20897    fn vfnmadd213sh_maskz(&mut self, op0: A, op1: B, op2: C);
20898}
20899
20900impl<'a> Vfnmadd213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20901    fn vfnmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20902        self.emit(VFNMADD213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20903    }
20904}
20905
20906impl<'a> Vfnmadd213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20907    fn vfnmadd213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20908        self.emit(VFNMADD213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20909    }
20910}
20911
20912/// `VFNMADD213SH_MASKZ_ER`.
20913///
20914/// Supported operand variants:
20915///
20916/// ```text
20917/// +---+---------------+
20918/// | # | Operands      |
20919/// +---+---------------+
20920/// | 1 | Xmm, Xmm, Xmm |
20921/// +---+---------------+
20922/// ```
20923pub trait Vfnmadd213shMaskzErEmitter<A, B, C> {
20924    fn vfnmadd213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
20925}
20926
20927impl<'a> Vfnmadd213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20928    fn vfnmadd213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20929        self.emit(VFNMADD213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20930    }
20931}
20932
20933/// `VFNMADD231PH`.
20934///
20935/// Supported operand variants:
20936///
20937/// ```text
20938/// +---+---------------+
20939/// | # | Operands      |
20940/// +---+---------------+
20941/// | 1 | Xmm, Xmm, Mem |
20942/// | 2 | Xmm, Xmm, Xmm |
20943/// | 3 | Ymm, Ymm, Mem |
20944/// | 4 | Ymm, Ymm, Ymm |
20945/// | 5 | Zmm, Zmm, Mem |
20946/// | 6 | Zmm, Zmm, Zmm |
20947/// +---+---------------+
20948/// ```
20949pub trait Vfnmadd231phEmitter<A, B, C> {
20950    fn vfnmadd231ph(&mut self, op0: A, op1: B, op2: C);
20951}
20952
20953impl<'a> Vfnmadd231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
20954    fn vfnmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
20955        self.emit(VFNMADD231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20956    }
20957}
20958
20959impl<'a> Vfnmadd231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
20960    fn vfnmadd231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
20961        self.emit(VFNMADD231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20962    }
20963}
20964
20965impl<'a> Vfnmadd231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
20966    fn vfnmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
20967        self.emit(VFNMADD231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20968    }
20969}
20970
20971impl<'a> Vfnmadd231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
20972    fn vfnmadd231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
20973        self.emit(VFNMADD231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20974    }
20975}
20976
20977impl<'a> Vfnmadd231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
20978    fn vfnmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
20979        self.emit(VFNMADD231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20980    }
20981}
20982
20983impl<'a> Vfnmadd231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
20984    fn vfnmadd231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
20985        self.emit(VFNMADD231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
20986    }
20987}
20988
20989/// `VFNMADD231PH_ER`.
20990///
20991/// Supported operand variants:
20992///
20993/// ```text
20994/// +---+---------------+
20995/// | # | Operands      |
20996/// +---+---------------+
20997/// | 1 | Zmm, Zmm, Zmm |
20998/// +---+---------------+
20999/// ```
21000pub trait Vfnmadd231phErEmitter<A, B, C> {
21001    fn vfnmadd231ph_er(&mut self, op0: A, op1: B, op2: C);
21002}
21003
21004impl<'a> Vfnmadd231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21005    fn vfnmadd231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21006        self.emit(VFNMADD231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21007    }
21008}
21009
21010/// `VFNMADD231PH_MASK`.
21011///
21012/// Supported operand variants:
21013///
21014/// ```text
21015/// +---+---------------+
21016/// | # | Operands      |
21017/// +---+---------------+
21018/// | 1 | Xmm, Xmm, Mem |
21019/// | 2 | Xmm, Xmm, Xmm |
21020/// | 3 | Ymm, Ymm, Mem |
21021/// | 4 | Ymm, Ymm, Ymm |
21022/// | 5 | Zmm, Zmm, Mem |
21023/// | 6 | Zmm, Zmm, Zmm |
21024/// +---+---------------+
21025/// ```
21026pub trait Vfnmadd231phMaskEmitter<A, B, C> {
21027    fn vfnmadd231ph_mask(&mut self, op0: A, op1: B, op2: C);
21028}
21029
21030impl<'a> Vfnmadd231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21031    fn vfnmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21032        self.emit(VFNMADD231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21033    }
21034}
21035
21036impl<'a> Vfnmadd231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21037    fn vfnmadd231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21038        self.emit(VFNMADD231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21039    }
21040}
21041
21042impl<'a> Vfnmadd231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21043    fn vfnmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21044        self.emit(VFNMADD231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21045    }
21046}
21047
21048impl<'a> Vfnmadd231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21049    fn vfnmadd231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21050        self.emit(VFNMADD231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21051    }
21052}
21053
21054impl<'a> Vfnmadd231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21055    fn vfnmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21056        self.emit(VFNMADD231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21057    }
21058}
21059
21060impl<'a> Vfnmadd231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21061    fn vfnmadd231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21062        self.emit(VFNMADD231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21063    }
21064}
21065
21066/// `VFNMADD231PH_MASK_ER`.
21067///
21068/// Supported operand variants:
21069///
21070/// ```text
21071/// +---+---------------+
21072/// | # | Operands      |
21073/// +---+---------------+
21074/// | 1 | Zmm, Zmm, Zmm |
21075/// +---+---------------+
21076/// ```
21077pub trait Vfnmadd231phMaskErEmitter<A, B, C> {
21078    fn vfnmadd231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
21079}
21080
21081impl<'a> Vfnmadd231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21082    fn vfnmadd231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21083        self.emit(VFNMADD231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21084    }
21085}
21086
21087/// `VFNMADD231PH_MASKZ`.
21088///
21089/// Supported operand variants:
21090///
21091/// ```text
21092/// +---+---------------+
21093/// | # | Operands      |
21094/// +---+---------------+
21095/// | 1 | Xmm, Xmm, Mem |
21096/// | 2 | Xmm, Xmm, Xmm |
21097/// | 3 | Ymm, Ymm, Mem |
21098/// | 4 | Ymm, Ymm, Ymm |
21099/// | 5 | Zmm, Zmm, Mem |
21100/// | 6 | Zmm, Zmm, Zmm |
21101/// +---+---------------+
21102/// ```
21103pub trait Vfnmadd231phMaskzEmitter<A, B, C> {
21104    fn vfnmadd231ph_maskz(&mut self, op0: A, op1: B, op2: C);
21105}
21106
21107impl<'a> Vfnmadd231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21108    fn vfnmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21109        self.emit(VFNMADD231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21110    }
21111}
21112
21113impl<'a> Vfnmadd231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21114    fn vfnmadd231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21115        self.emit(VFNMADD231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21116    }
21117}
21118
21119impl<'a> Vfnmadd231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21120    fn vfnmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21121        self.emit(VFNMADD231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21122    }
21123}
21124
21125impl<'a> Vfnmadd231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21126    fn vfnmadd231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21127        self.emit(VFNMADD231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21128    }
21129}
21130
21131impl<'a> Vfnmadd231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21132    fn vfnmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21133        self.emit(VFNMADD231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21134    }
21135}
21136
21137impl<'a> Vfnmadd231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21138    fn vfnmadd231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21139        self.emit(VFNMADD231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21140    }
21141}
21142
21143/// `VFNMADD231PH_MASKZ_ER`.
21144///
21145/// Supported operand variants:
21146///
21147/// ```text
21148/// +---+---------------+
21149/// | # | Operands      |
21150/// +---+---------------+
21151/// | 1 | Zmm, Zmm, Zmm |
21152/// +---+---------------+
21153/// ```
21154pub trait Vfnmadd231phMaskzErEmitter<A, B, C> {
21155    fn vfnmadd231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
21156}
21157
21158impl<'a> Vfnmadd231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21159    fn vfnmadd231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21160        self.emit(VFNMADD231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21161    }
21162}
21163
21164/// `VFNMADD231SH`.
21165///
21166/// Supported operand variants:
21167///
21168/// ```text
21169/// +---+---------------+
21170/// | # | Operands      |
21171/// +---+---------------+
21172/// | 1 | Xmm, Xmm, Mem |
21173/// | 2 | Xmm, Xmm, Xmm |
21174/// +---+---------------+
21175/// ```
21176pub trait Vfnmadd231shEmitter<A, B, C> {
21177    fn vfnmadd231sh(&mut self, op0: A, op1: B, op2: C);
21178}
21179
21180impl<'a> Vfnmadd231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21181    fn vfnmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21182        self.emit(VFNMADD231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21183    }
21184}
21185
21186impl<'a> Vfnmadd231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21187    fn vfnmadd231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21188        self.emit(VFNMADD231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21189    }
21190}
21191
21192/// `VFNMADD231SH_ER`.
21193///
21194/// Supported operand variants:
21195///
21196/// ```text
21197/// +---+---------------+
21198/// | # | Operands      |
21199/// +---+---------------+
21200/// | 1 | Xmm, Xmm, Xmm |
21201/// +---+---------------+
21202/// ```
21203pub trait Vfnmadd231shErEmitter<A, B, C> {
21204    fn vfnmadd231sh_er(&mut self, op0: A, op1: B, op2: C);
21205}
21206
21207impl<'a> Vfnmadd231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21208    fn vfnmadd231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21209        self.emit(VFNMADD231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21210    }
21211}
21212
21213/// `VFNMADD231SH_MASK`.
21214///
21215/// Supported operand variants:
21216///
21217/// ```text
21218/// +---+---------------+
21219/// | # | Operands      |
21220/// +---+---------------+
21221/// | 1 | Xmm, Xmm, Mem |
21222/// | 2 | Xmm, Xmm, Xmm |
21223/// +---+---------------+
21224/// ```
21225pub trait Vfnmadd231shMaskEmitter<A, B, C> {
21226    fn vfnmadd231sh_mask(&mut self, op0: A, op1: B, op2: C);
21227}
21228
21229impl<'a> Vfnmadd231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21230    fn vfnmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21231        self.emit(VFNMADD231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21232    }
21233}
21234
21235impl<'a> Vfnmadd231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21236    fn vfnmadd231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21237        self.emit(VFNMADD231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21238    }
21239}
21240
21241/// `VFNMADD231SH_MASK_ER`.
21242///
21243/// Supported operand variants:
21244///
21245/// ```text
21246/// +---+---------------+
21247/// | # | Operands      |
21248/// +---+---------------+
21249/// | 1 | Xmm, Xmm, Xmm |
21250/// +---+---------------+
21251/// ```
21252pub trait Vfnmadd231shMaskErEmitter<A, B, C> {
21253    fn vfnmadd231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
21254}
21255
21256impl<'a> Vfnmadd231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21257    fn vfnmadd231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21258        self.emit(VFNMADD231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21259    }
21260}
21261
21262/// `VFNMADD231SH_MASKZ`.
21263///
21264/// Supported operand variants:
21265///
21266/// ```text
21267/// +---+---------------+
21268/// | # | Operands      |
21269/// +---+---------------+
21270/// | 1 | Xmm, Xmm, Mem |
21271/// | 2 | Xmm, Xmm, Xmm |
21272/// +---+---------------+
21273/// ```
21274pub trait Vfnmadd231shMaskzEmitter<A, B, C> {
21275    fn vfnmadd231sh_maskz(&mut self, op0: A, op1: B, op2: C);
21276}
21277
21278impl<'a> Vfnmadd231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21279    fn vfnmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21280        self.emit(VFNMADD231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21281    }
21282}
21283
21284impl<'a> Vfnmadd231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21285    fn vfnmadd231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21286        self.emit(VFNMADD231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21287    }
21288}
21289
21290/// `VFNMADD231SH_MASKZ_ER`.
21291///
21292/// Supported operand variants:
21293///
21294/// ```text
21295/// +---+---------------+
21296/// | # | Operands      |
21297/// +---+---------------+
21298/// | 1 | Xmm, Xmm, Xmm |
21299/// +---+---------------+
21300/// ```
21301pub trait Vfnmadd231shMaskzErEmitter<A, B, C> {
21302    fn vfnmadd231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
21303}
21304
21305impl<'a> Vfnmadd231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21306    fn vfnmadd231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21307        self.emit(VFNMADD231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21308    }
21309}
21310
21311/// `VFNMSUB132PH`.
21312///
21313/// Supported operand variants:
21314///
21315/// ```text
21316/// +---+---------------+
21317/// | # | Operands      |
21318/// +---+---------------+
21319/// | 1 | Xmm, Xmm, Mem |
21320/// | 2 | Xmm, Xmm, Xmm |
21321/// | 3 | Ymm, Ymm, Mem |
21322/// | 4 | Ymm, Ymm, Ymm |
21323/// | 5 | Zmm, Zmm, Mem |
21324/// | 6 | Zmm, Zmm, Zmm |
21325/// +---+---------------+
21326/// ```
21327pub trait Vfnmsub132phEmitter<A, B, C> {
21328    fn vfnmsub132ph(&mut self, op0: A, op1: B, op2: C);
21329}
21330
21331impl<'a> Vfnmsub132phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21332    fn vfnmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21333        self.emit(VFNMSUB132PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21334    }
21335}
21336
21337impl<'a> Vfnmsub132phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21338    fn vfnmsub132ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21339        self.emit(VFNMSUB132PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21340    }
21341}
21342
21343impl<'a> Vfnmsub132phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21344    fn vfnmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21345        self.emit(VFNMSUB132PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21346    }
21347}
21348
21349impl<'a> Vfnmsub132phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21350    fn vfnmsub132ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21351        self.emit(VFNMSUB132PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21352    }
21353}
21354
21355impl<'a> Vfnmsub132phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21356    fn vfnmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21357        self.emit(VFNMSUB132PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21358    }
21359}
21360
21361impl<'a> Vfnmsub132phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21362    fn vfnmsub132ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21363        self.emit(VFNMSUB132PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21364    }
21365}
21366
21367/// `VFNMSUB132PH_ER`.
21368///
21369/// Supported operand variants:
21370///
21371/// ```text
21372/// +---+---------------+
21373/// | # | Operands      |
21374/// +---+---------------+
21375/// | 1 | Zmm, Zmm, Zmm |
21376/// +---+---------------+
21377/// ```
21378pub trait Vfnmsub132phErEmitter<A, B, C> {
21379    fn vfnmsub132ph_er(&mut self, op0: A, op1: B, op2: C);
21380}
21381
21382impl<'a> Vfnmsub132phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21383    fn vfnmsub132ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21384        self.emit(VFNMSUB132PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21385    }
21386}
21387
21388/// `VFNMSUB132PH_MASK`.
21389///
21390/// Supported operand variants:
21391///
21392/// ```text
21393/// +---+---------------+
21394/// | # | Operands      |
21395/// +---+---------------+
21396/// | 1 | Xmm, Xmm, Mem |
21397/// | 2 | Xmm, Xmm, Xmm |
21398/// | 3 | Ymm, Ymm, Mem |
21399/// | 4 | Ymm, Ymm, Ymm |
21400/// | 5 | Zmm, Zmm, Mem |
21401/// | 6 | Zmm, Zmm, Zmm |
21402/// +---+---------------+
21403/// ```
21404pub trait Vfnmsub132phMaskEmitter<A, B, C> {
21405    fn vfnmsub132ph_mask(&mut self, op0: A, op1: B, op2: C);
21406}
21407
21408impl<'a> Vfnmsub132phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21409    fn vfnmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21410        self.emit(VFNMSUB132PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21411    }
21412}
21413
21414impl<'a> Vfnmsub132phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21415    fn vfnmsub132ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21416        self.emit(VFNMSUB132PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21417    }
21418}
21419
21420impl<'a> Vfnmsub132phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21421    fn vfnmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21422        self.emit(VFNMSUB132PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21423    }
21424}
21425
21426impl<'a> Vfnmsub132phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21427    fn vfnmsub132ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21428        self.emit(VFNMSUB132PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21429    }
21430}
21431
21432impl<'a> Vfnmsub132phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21433    fn vfnmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21434        self.emit(VFNMSUB132PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21435    }
21436}
21437
21438impl<'a> Vfnmsub132phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21439    fn vfnmsub132ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21440        self.emit(VFNMSUB132PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21441    }
21442}
21443
21444/// `VFNMSUB132PH_MASK_ER`.
21445///
21446/// Supported operand variants:
21447///
21448/// ```text
21449/// +---+---------------+
21450/// | # | Operands      |
21451/// +---+---------------+
21452/// | 1 | Zmm, Zmm, Zmm |
21453/// +---+---------------+
21454/// ```
21455pub trait Vfnmsub132phMaskErEmitter<A, B, C> {
21456    fn vfnmsub132ph_mask_er(&mut self, op0: A, op1: B, op2: C);
21457}
21458
21459impl<'a> Vfnmsub132phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21460    fn vfnmsub132ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21461        self.emit(VFNMSUB132PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21462    }
21463}
21464
21465/// `VFNMSUB132PH_MASKZ`.
21466///
21467/// Supported operand variants:
21468///
21469/// ```text
21470/// +---+---------------+
21471/// | # | Operands      |
21472/// +---+---------------+
21473/// | 1 | Xmm, Xmm, Mem |
21474/// | 2 | Xmm, Xmm, Xmm |
21475/// | 3 | Ymm, Ymm, Mem |
21476/// | 4 | Ymm, Ymm, Ymm |
21477/// | 5 | Zmm, Zmm, Mem |
21478/// | 6 | Zmm, Zmm, Zmm |
21479/// +---+---------------+
21480/// ```
21481pub trait Vfnmsub132phMaskzEmitter<A, B, C> {
21482    fn vfnmsub132ph_maskz(&mut self, op0: A, op1: B, op2: C);
21483}
21484
21485impl<'a> Vfnmsub132phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21486    fn vfnmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21487        self.emit(VFNMSUB132PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21488    }
21489}
21490
21491impl<'a> Vfnmsub132phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21492    fn vfnmsub132ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21493        self.emit(VFNMSUB132PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21494    }
21495}
21496
21497impl<'a> Vfnmsub132phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21498    fn vfnmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21499        self.emit(VFNMSUB132PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21500    }
21501}
21502
21503impl<'a> Vfnmsub132phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21504    fn vfnmsub132ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21505        self.emit(VFNMSUB132PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21506    }
21507}
21508
21509impl<'a> Vfnmsub132phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21510    fn vfnmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21511        self.emit(VFNMSUB132PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21512    }
21513}
21514
21515impl<'a> Vfnmsub132phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21516    fn vfnmsub132ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21517        self.emit(VFNMSUB132PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21518    }
21519}
21520
21521/// `VFNMSUB132PH_MASKZ_ER`.
21522///
21523/// Supported operand variants:
21524///
21525/// ```text
21526/// +---+---------------+
21527/// | # | Operands      |
21528/// +---+---------------+
21529/// | 1 | Zmm, Zmm, Zmm |
21530/// +---+---------------+
21531/// ```
21532pub trait Vfnmsub132phMaskzErEmitter<A, B, C> {
21533    fn vfnmsub132ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
21534}
21535
21536impl<'a> Vfnmsub132phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21537    fn vfnmsub132ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21538        self.emit(VFNMSUB132PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21539    }
21540}
21541
21542/// `VFNMSUB132SH`.
21543///
21544/// Supported operand variants:
21545///
21546/// ```text
21547/// +---+---------------+
21548/// | # | Operands      |
21549/// +---+---------------+
21550/// | 1 | Xmm, Xmm, Mem |
21551/// | 2 | Xmm, Xmm, Xmm |
21552/// +---+---------------+
21553/// ```
21554pub trait Vfnmsub132shEmitter<A, B, C> {
21555    fn vfnmsub132sh(&mut self, op0: A, op1: B, op2: C);
21556}
21557
21558impl<'a> Vfnmsub132shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21559    fn vfnmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21560        self.emit(VFNMSUB132SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21561    }
21562}
21563
21564impl<'a> Vfnmsub132shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21565    fn vfnmsub132sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21566        self.emit(VFNMSUB132SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21567    }
21568}
21569
21570/// `VFNMSUB132SH_ER`.
21571///
21572/// Supported operand variants:
21573///
21574/// ```text
21575/// +---+---------------+
21576/// | # | Operands      |
21577/// +---+---------------+
21578/// | 1 | Xmm, Xmm, Xmm |
21579/// +---+---------------+
21580/// ```
21581pub trait Vfnmsub132shErEmitter<A, B, C> {
21582    fn vfnmsub132sh_er(&mut self, op0: A, op1: B, op2: C);
21583}
21584
21585impl<'a> Vfnmsub132shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21586    fn vfnmsub132sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21587        self.emit(VFNMSUB132SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21588    }
21589}
21590
21591/// `VFNMSUB132SH_MASK`.
21592///
21593/// Supported operand variants:
21594///
21595/// ```text
21596/// +---+---------------+
21597/// | # | Operands      |
21598/// +---+---------------+
21599/// | 1 | Xmm, Xmm, Mem |
21600/// | 2 | Xmm, Xmm, Xmm |
21601/// +---+---------------+
21602/// ```
21603pub trait Vfnmsub132shMaskEmitter<A, B, C> {
21604    fn vfnmsub132sh_mask(&mut self, op0: A, op1: B, op2: C);
21605}
21606
21607impl<'a> Vfnmsub132shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21608    fn vfnmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21609        self.emit(VFNMSUB132SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21610    }
21611}
21612
21613impl<'a> Vfnmsub132shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21614    fn vfnmsub132sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21615        self.emit(VFNMSUB132SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21616    }
21617}
21618
21619/// `VFNMSUB132SH_MASK_ER`.
21620///
21621/// Supported operand variants:
21622///
21623/// ```text
21624/// +---+---------------+
21625/// | # | Operands      |
21626/// +---+---------------+
21627/// | 1 | Xmm, Xmm, Xmm |
21628/// +---+---------------+
21629/// ```
21630pub trait Vfnmsub132shMaskErEmitter<A, B, C> {
21631    fn vfnmsub132sh_mask_er(&mut self, op0: A, op1: B, op2: C);
21632}
21633
21634impl<'a> Vfnmsub132shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21635    fn vfnmsub132sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21636        self.emit(VFNMSUB132SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21637    }
21638}
21639
21640/// `VFNMSUB132SH_MASKZ`.
21641///
21642/// Supported operand variants:
21643///
21644/// ```text
21645/// +---+---------------+
21646/// | # | Operands      |
21647/// +---+---------------+
21648/// | 1 | Xmm, Xmm, Mem |
21649/// | 2 | Xmm, Xmm, Xmm |
21650/// +---+---------------+
21651/// ```
21652pub trait Vfnmsub132shMaskzEmitter<A, B, C> {
21653    fn vfnmsub132sh_maskz(&mut self, op0: A, op1: B, op2: C);
21654}
21655
21656impl<'a> Vfnmsub132shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21657    fn vfnmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21658        self.emit(VFNMSUB132SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21659    }
21660}
21661
21662impl<'a> Vfnmsub132shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21663    fn vfnmsub132sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21664        self.emit(VFNMSUB132SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21665    }
21666}
21667
21668/// `VFNMSUB132SH_MASKZ_ER`.
21669///
21670/// Supported operand variants:
21671///
21672/// ```text
21673/// +---+---------------+
21674/// | # | Operands      |
21675/// +---+---------------+
21676/// | 1 | Xmm, Xmm, Xmm |
21677/// +---+---------------+
21678/// ```
21679pub trait Vfnmsub132shMaskzErEmitter<A, B, C> {
21680    fn vfnmsub132sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
21681}
21682
21683impl<'a> Vfnmsub132shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21684    fn vfnmsub132sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21685        self.emit(VFNMSUB132SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21686    }
21687}
21688
21689/// `VFNMSUB213PH`.
21690///
21691/// Supported operand variants:
21692///
21693/// ```text
21694/// +---+---------------+
21695/// | # | Operands      |
21696/// +---+---------------+
21697/// | 1 | Xmm, Xmm, Mem |
21698/// | 2 | Xmm, Xmm, Xmm |
21699/// | 3 | Ymm, Ymm, Mem |
21700/// | 4 | Ymm, Ymm, Ymm |
21701/// | 5 | Zmm, Zmm, Mem |
21702/// | 6 | Zmm, Zmm, Zmm |
21703/// +---+---------------+
21704/// ```
21705pub trait Vfnmsub213phEmitter<A, B, C> {
21706    fn vfnmsub213ph(&mut self, op0: A, op1: B, op2: C);
21707}
21708
21709impl<'a> Vfnmsub213phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21710    fn vfnmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21711        self.emit(VFNMSUB213PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21712    }
21713}
21714
21715impl<'a> Vfnmsub213phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21716    fn vfnmsub213ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21717        self.emit(VFNMSUB213PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21718    }
21719}
21720
21721impl<'a> Vfnmsub213phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21722    fn vfnmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21723        self.emit(VFNMSUB213PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21724    }
21725}
21726
21727impl<'a> Vfnmsub213phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21728    fn vfnmsub213ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21729        self.emit(VFNMSUB213PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21730    }
21731}
21732
21733impl<'a> Vfnmsub213phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21734    fn vfnmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21735        self.emit(VFNMSUB213PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21736    }
21737}
21738
21739impl<'a> Vfnmsub213phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21740    fn vfnmsub213ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21741        self.emit(VFNMSUB213PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21742    }
21743}
21744
21745/// `VFNMSUB213PH_ER`.
21746///
21747/// Supported operand variants:
21748///
21749/// ```text
21750/// +---+---------------+
21751/// | # | Operands      |
21752/// +---+---------------+
21753/// | 1 | Zmm, Zmm, Zmm |
21754/// +---+---------------+
21755/// ```
21756pub trait Vfnmsub213phErEmitter<A, B, C> {
21757    fn vfnmsub213ph_er(&mut self, op0: A, op1: B, op2: C);
21758}
21759
21760impl<'a> Vfnmsub213phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21761    fn vfnmsub213ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21762        self.emit(VFNMSUB213PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21763    }
21764}
21765
21766/// `VFNMSUB213PH_MASK`.
21767///
21768/// Supported operand variants:
21769///
21770/// ```text
21771/// +---+---------------+
21772/// | # | Operands      |
21773/// +---+---------------+
21774/// | 1 | Xmm, Xmm, Mem |
21775/// | 2 | Xmm, Xmm, Xmm |
21776/// | 3 | Ymm, Ymm, Mem |
21777/// | 4 | Ymm, Ymm, Ymm |
21778/// | 5 | Zmm, Zmm, Mem |
21779/// | 6 | Zmm, Zmm, Zmm |
21780/// +---+---------------+
21781/// ```
21782pub trait Vfnmsub213phMaskEmitter<A, B, C> {
21783    fn vfnmsub213ph_mask(&mut self, op0: A, op1: B, op2: C);
21784}
21785
21786impl<'a> Vfnmsub213phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21787    fn vfnmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21788        self.emit(VFNMSUB213PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21789    }
21790}
21791
21792impl<'a> Vfnmsub213phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21793    fn vfnmsub213ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21794        self.emit(VFNMSUB213PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21795    }
21796}
21797
21798impl<'a> Vfnmsub213phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21799    fn vfnmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21800        self.emit(VFNMSUB213PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21801    }
21802}
21803
21804impl<'a> Vfnmsub213phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21805    fn vfnmsub213ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21806        self.emit(VFNMSUB213PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21807    }
21808}
21809
21810impl<'a> Vfnmsub213phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21811    fn vfnmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21812        self.emit(VFNMSUB213PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21813    }
21814}
21815
21816impl<'a> Vfnmsub213phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21817    fn vfnmsub213ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21818        self.emit(VFNMSUB213PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21819    }
21820}
21821
21822/// `VFNMSUB213PH_MASK_ER`.
21823///
21824/// Supported operand variants:
21825///
21826/// ```text
21827/// +---+---------------+
21828/// | # | Operands      |
21829/// +---+---------------+
21830/// | 1 | Zmm, Zmm, Zmm |
21831/// +---+---------------+
21832/// ```
21833pub trait Vfnmsub213phMaskErEmitter<A, B, C> {
21834    fn vfnmsub213ph_mask_er(&mut self, op0: A, op1: B, op2: C);
21835}
21836
21837impl<'a> Vfnmsub213phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21838    fn vfnmsub213ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21839        self.emit(VFNMSUB213PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21840    }
21841}
21842
21843/// `VFNMSUB213PH_MASKZ`.
21844///
21845/// Supported operand variants:
21846///
21847/// ```text
21848/// +---+---------------+
21849/// | # | Operands      |
21850/// +---+---------------+
21851/// | 1 | Xmm, Xmm, Mem |
21852/// | 2 | Xmm, Xmm, Xmm |
21853/// | 3 | Ymm, Ymm, Mem |
21854/// | 4 | Ymm, Ymm, Ymm |
21855/// | 5 | Zmm, Zmm, Mem |
21856/// | 6 | Zmm, Zmm, Zmm |
21857/// +---+---------------+
21858/// ```
21859pub trait Vfnmsub213phMaskzEmitter<A, B, C> {
21860    fn vfnmsub213ph_maskz(&mut self, op0: A, op1: B, op2: C);
21861}
21862
21863impl<'a> Vfnmsub213phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21864    fn vfnmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21865        self.emit(VFNMSUB213PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21866    }
21867}
21868
21869impl<'a> Vfnmsub213phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21870    fn vfnmsub213ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21871        self.emit(VFNMSUB213PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21872    }
21873}
21874
21875impl<'a> Vfnmsub213phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
21876    fn vfnmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
21877        self.emit(VFNMSUB213PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21878    }
21879}
21880
21881impl<'a> Vfnmsub213phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
21882    fn vfnmsub213ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
21883        self.emit(VFNMSUB213PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21884    }
21885}
21886
21887impl<'a> Vfnmsub213phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21888    fn vfnmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21889        self.emit(VFNMSUB213PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21890    }
21891}
21892
21893impl<'a> Vfnmsub213phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
21894    fn vfnmsub213ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
21895        self.emit(VFNMSUB213PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21896    }
21897}
21898
21899/// `VFNMSUB213PH_MASKZ_ER`.
21900///
21901/// Supported operand variants:
21902///
21903/// ```text
21904/// +---+---------------+
21905/// | # | Operands      |
21906/// +---+---------------+
21907/// | 1 | Zmm, Zmm, Zmm |
21908/// +---+---------------+
21909/// ```
21910pub trait Vfnmsub213phMaskzErEmitter<A, B, C> {
21911    fn vfnmsub213ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
21912}
21913
21914impl<'a> Vfnmsub213phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
21915    fn vfnmsub213ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
21916        self.emit(VFNMSUB213PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21917    }
21918}
21919
21920/// `VFNMSUB213SH`.
21921///
21922/// Supported operand variants:
21923///
21924/// ```text
21925/// +---+---------------+
21926/// | # | Operands      |
21927/// +---+---------------+
21928/// | 1 | Xmm, Xmm, Mem |
21929/// | 2 | Xmm, Xmm, Xmm |
21930/// +---+---------------+
21931/// ```
21932pub trait Vfnmsub213shEmitter<A, B, C> {
21933    fn vfnmsub213sh(&mut self, op0: A, op1: B, op2: C);
21934}
21935
21936impl<'a> Vfnmsub213shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21937    fn vfnmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21938        self.emit(VFNMSUB213SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21939    }
21940}
21941
21942impl<'a> Vfnmsub213shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21943    fn vfnmsub213sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21944        self.emit(VFNMSUB213SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21945    }
21946}
21947
21948/// `VFNMSUB213SH_ER`.
21949///
21950/// Supported operand variants:
21951///
21952/// ```text
21953/// +---+---------------+
21954/// | # | Operands      |
21955/// +---+---------------+
21956/// | 1 | Xmm, Xmm, Xmm |
21957/// +---+---------------+
21958/// ```
21959pub trait Vfnmsub213shErEmitter<A, B, C> {
21960    fn vfnmsub213sh_er(&mut self, op0: A, op1: B, op2: C);
21961}
21962
21963impl<'a> Vfnmsub213shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21964    fn vfnmsub213sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21965        self.emit(VFNMSUB213SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21966    }
21967}
21968
21969/// `VFNMSUB213SH_MASK`.
21970///
21971/// Supported operand variants:
21972///
21973/// ```text
21974/// +---+---------------+
21975/// | # | Operands      |
21976/// +---+---------------+
21977/// | 1 | Xmm, Xmm, Mem |
21978/// | 2 | Xmm, Xmm, Xmm |
21979/// +---+---------------+
21980/// ```
21981pub trait Vfnmsub213shMaskEmitter<A, B, C> {
21982    fn vfnmsub213sh_mask(&mut self, op0: A, op1: B, op2: C);
21983}
21984
21985impl<'a> Vfnmsub213shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
21986    fn vfnmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
21987        self.emit(VFNMSUB213SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21988    }
21989}
21990
21991impl<'a> Vfnmsub213shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
21992    fn vfnmsub213sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
21993        self.emit(VFNMSUB213SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
21994    }
21995}
21996
21997/// `VFNMSUB213SH_MASK_ER`.
21998///
21999/// Supported operand variants:
22000///
22001/// ```text
22002/// +---+---------------+
22003/// | # | Operands      |
22004/// +---+---------------+
22005/// | 1 | Xmm, Xmm, Xmm |
22006/// +---+---------------+
22007/// ```
22008pub trait Vfnmsub213shMaskErEmitter<A, B, C> {
22009    fn vfnmsub213sh_mask_er(&mut self, op0: A, op1: B, op2: C);
22010}
22011
22012impl<'a> Vfnmsub213shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22013    fn vfnmsub213sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22014        self.emit(VFNMSUB213SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22015    }
22016}
22017
22018/// `VFNMSUB213SH_MASKZ`.
22019///
22020/// Supported operand variants:
22021///
22022/// ```text
22023/// +---+---------------+
22024/// | # | Operands      |
22025/// +---+---------------+
22026/// | 1 | Xmm, Xmm, Mem |
22027/// | 2 | Xmm, Xmm, Xmm |
22028/// +---+---------------+
22029/// ```
22030pub trait Vfnmsub213shMaskzEmitter<A, B, C> {
22031    fn vfnmsub213sh_maskz(&mut self, op0: A, op1: B, op2: C);
22032}
22033
22034impl<'a> Vfnmsub213shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22035    fn vfnmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22036        self.emit(VFNMSUB213SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22037    }
22038}
22039
22040impl<'a> Vfnmsub213shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22041    fn vfnmsub213sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22042        self.emit(VFNMSUB213SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22043    }
22044}
22045
22046/// `VFNMSUB213SH_MASKZ_ER`.
22047///
22048/// Supported operand variants:
22049///
22050/// ```text
22051/// +---+---------------+
22052/// | # | Operands      |
22053/// +---+---------------+
22054/// | 1 | Xmm, Xmm, Xmm |
22055/// +---+---------------+
22056/// ```
22057pub trait Vfnmsub213shMaskzErEmitter<A, B, C> {
22058    fn vfnmsub213sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
22059}
22060
22061impl<'a> Vfnmsub213shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22062    fn vfnmsub213sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22063        self.emit(VFNMSUB213SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22064    }
22065}
22066
22067/// `VFNMSUB231PH`.
22068///
22069/// Supported operand variants:
22070///
22071/// ```text
22072/// +---+---------------+
22073/// | # | Operands      |
22074/// +---+---------------+
22075/// | 1 | Xmm, Xmm, Mem |
22076/// | 2 | Xmm, Xmm, Xmm |
22077/// | 3 | Ymm, Ymm, Mem |
22078/// | 4 | Ymm, Ymm, Ymm |
22079/// | 5 | Zmm, Zmm, Mem |
22080/// | 6 | Zmm, Zmm, Zmm |
22081/// +---+---------------+
22082/// ```
22083pub trait Vfnmsub231phEmitter<A, B, C> {
22084    fn vfnmsub231ph(&mut self, op0: A, op1: B, op2: C);
22085}
22086
22087impl<'a> Vfnmsub231phEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22088    fn vfnmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22089        self.emit(VFNMSUB231PH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22090    }
22091}
22092
22093impl<'a> Vfnmsub231phEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22094    fn vfnmsub231ph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22095        self.emit(VFNMSUB231PH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22096    }
22097}
22098
22099impl<'a> Vfnmsub231phEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
22100    fn vfnmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
22101        self.emit(VFNMSUB231PH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22102    }
22103}
22104
22105impl<'a> Vfnmsub231phEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
22106    fn vfnmsub231ph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
22107        self.emit(VFNMSUB231PH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22108    }
22109}
22110
22111impl<'a> Vfnmsub231phEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
22112    fn vfnmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
22113        self.emit(VFNMSUB231PH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22114    }
22115}
22116
22117impl<'a> Vfnmsub231phEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
22118    fn vfnmsub231ph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
22119        self.emit(VFNMSUB231PH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22120    }
22121}
22122
22123/// `VFNMSUB231PH_ER`.
22124///
22125/// Supported operand variants:
22126///
22127/// ```text
22128/// +---+---------------+
22129/// | # | Operands      |
22130/// +---+---------------+
22131/// | 1 | Zmm, Zmm, Zmm |
22132/// +---+---------------+
22133/// ```
22134pub trait Vfnmsub231phErEmitter<A, B, C> {
22135    fn vfnmsub231ph_er(&mut self, op0: A, op1: B, op2: C);
22136}
22137
22138impl<'a> Vfnmsub231phErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
22139    fn vfnmsub231ph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
22140        self.emit(VFNMSUB231PH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22141    }
22142}
22143
22144/// `VFNMSUB231PH_MASK`.
22145///
22146/// Supported operand variants:
22147///
22148/// ```text
22149/// +---+---------------+
22150/// | # | Operands      |
22151/// +---+---------------+
22152/// | 1 | Xmm, Xmm, Mem |
22153/// | 2 | Xmm, Xmm, Xmm |
22154/// | 3 | Ymm, Ymm, Mem |
22155/// | 4 | Ymm, Ymm, Ymm |
22156/// | 5 | Zmm, Zmm, Mem |
22157/// | 6 | Zmm, Zmm, Zmm |
22158/// +---+---------------+
22159/// ```
22160pub trait Vfnmsub231phMaskEmitter<A, B, C> {
22161    fn vfnmsub231ph_mask(&mut self, op0: A, op1: B, op2: C);
22162}
22163
22164impl<'a> Vfnmsub231phMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22165    fn vfnmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22166        self.emit(VFNMSUB231PH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22167    }
22168}
22169
22170impl<'a> Vfnmsub231phMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22171    fn vfnmsub231ph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22172        self.emit(VFNMSUB231PH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22173    }
22174}
22175
22176impl<'a> Vfnmsub231phMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
22177    fn vfnmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
22178        self.emit(VFNMSUB231PH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22179    }
22180}
22181
22182impl<'a> Vfnmsub231phMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
22183    fn vfnmsub231ph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
22184        self.emit(VFNMSUB231PH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22185    }
22186}
22187
22188impl<'a> Vfnmsub231phMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
22189    fn vfnmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
22190        self.emit(VFNMSUB231PH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22191    }
22192}
22193
22194impl<'a> Vfnmsub231phMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
22195    fn vfnmsub231ph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
22196        self.emit(VFNMSUB231PH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22197    }
22198}
22199
22200/// `VFNMSUB231PH_MASK_ER`.
22201///
22202/// Supported operand variants:
22203///
22204/// ```text
22205/// +---+---------------+
22206/// | # | Operands      |
22207/// +---+---------------+
22208/// | 1 | Zmm, Zmm, Zmm |
22209/// +---+---------------+
22210/// ```
22211pub trait Vfnmsub231phMaskErEmitter<A, B, C> {
22212    fn vfnmsub231ph_mask_er(&mut self, op0: A, op1: B, op2: C);
22213}
22214
22215impl<'a> Vfnmsub231phMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
22216    fn vfnmsub231ph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
22217        self.emit(VFNMSUB231PH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22218    }
22219}
22220
22221/// `VFNMSUB231PH_MASKZ`.
22222///
22223/// Supported operand variants:
22224///
22225/// ```text
22226/// +---+---------------+
22227/// | # | Operands      |
22228/// +---+---------------+
22229/// | 1 | Xmm, Xmm, Mem |
22230/// | 2 | Xmm, Xmm, Xmm |
22231/// | 3 | Ymm, Ymm, Mem |
22232/// | 4 | Ymm, Ymm, Ymm |
22233/// | 5 | Zmm, Zmm, Mem |
22234/// | 6 | Zmm, Zmm, Zmm |
22235/// +---+---------------+
22236/// ```
22237pub trait Vfnmsub231phMaskzEmitter<A, B, C> {
22238    fn vfnmsub231ph_maskz(&mut self, op0: A, op1: B, op2: C);
22239}
22240
22241impl<'a> Vfnmsub231phMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22242    fn vfnmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22243        self.emit(VFNMSUB231PH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22244    }
22245}
22246
22247impl<'a> Vfnmsub231phMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22248    fn vfnmsub231ph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22249        self.emit(VFNMSUB231PH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22250    }
22251}
22252
22253impl<'a> Vfnmsub231phMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
22254    fn vfnmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
22255        self.emit(VFNMSUB231PH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22256    }
22257}
22258
22259impl<'a> Vfnmsub231phMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
22260    fn vfnmsub231ph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
22261        self.emit(VFNMSUB231PH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22262    }
22263}
22264
22265impl<'a> Vfnmsub231phMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
22266    fn vfnmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
22267        self.emit(VFNMSUB231PH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22268    }
22269}
22270
22271impl<'a> Vfnmsub231phMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
22272    fn vfnmsub231ph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
22273        self.emit(VFNMSUB231PH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22274    }
22275}
22276
22277/// `VFNMSUB231PH_MASKZ_ER`.
22278///
22279/// Supported operand variants:
22280///
22281/// ```text
22282/// +---+---------------+
22283/// | # | Operands      |
22284/// +---+---------------+
22285/// | 1 | Zmm, Zmm, Zmm |
22286/// +---+---------------+
22287/// ```
22288pub trait Vfnmsub231phMaskzErEmitter<A, B, C> {
22289    fn vfnmsub231ph_maskz_er(&mut self, op0: A, op1: B, op2: C);
22290}
22291
22292impl<'a> Vfnmsub231phMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
22293    fn vfnmsub231ph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
22294        self.emit(VFNMSUB231PH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22295    }
22296}
22297
22298/// `VFNMSUB231SH`.
22299///
22300/// Supported operand variants:
22301///
22302/// ```text
22303/// +---+---------------+
22304/// | # | Operands      |
22305/// +---+---------------+
22306/// | 1 | Xmm, Xmm, Mem |
22307/// | 2 | Xmm, Xmm, Xmm |
22308/// +---+---------------+
22309/// ```
22310pub trait Vfnmsub231shEmitter<A, B, C> {
22311    fn vfnmsub231sh(&mut self, op0: A, op1: B, op2: C);
22312}
22313
22314impl<'a> Vfnmsub231shEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22315    fn vfnmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22316        self.emit(VFNMSUB231SHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22317    }
22318}
22319
22320impl<'a> Vfnmsub231shEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22321    fn vfnmsub231sh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22322        self.emit(VFNMSUB231SHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22323    }
22324}
22325
22326/// `VFNMSUB231SH_ER`.
22327///
22328/// Supported operand variants:
22329///
22330/// ```text
22331/// +---+---------------+
22332/// | # | Operands      |
22333/// +---+---------------+
22334/// | 1 | Xmm, Xmm, Xmm |
22335/// +---+---------------+
22336/// ```
22337pub trait Vfnmsub231shErEmitter<A, B, C> {
22338    fn vfnmsub231sh_er(&mut self, op0: A, op1: B, op2: C);
22339}
22340
22341impl<'a> Vfnmsub231shErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22342    fn vfnmsub231sh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22343        self.emit(VFNMSUB231SHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22344    }
22345}
22346
22347/// `VFNMSUB231SH_MASK`.
22348///
22349/// Supported operand variants:
22350///
22351/// ```text
22352/// +---+---------------+
22353/// | # | Operands      |
22354/// +---+---------------+
22355/// | 1 | Xmm, Xmm, Mem |
22356/// | 2 | Xmm, Xmm, Xmm |
22357/// +---+---------------+
22358/// ```
22359pub trait Vfnmsub231shMaskEmitter<A, B, C> {
22360    fn vfnmsub231sh_mask(&mut self, op0: A, op1: B, op2: C);
22361}
22362
22363impl<'a> Vfnmsub231shMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22364    fn vfnmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22365        self.emit(VFNMSUB231SHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22366    }
22367}
22368
22369impl<'a> Vfnmsub231shMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22370    fn vfnmsub231sh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22371        self.emit(VFNMSUB231SHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22372    }
22373}
22374
22375/// `VFNMSUB231SH_MASK_ER`.
22376///
22377/// Supported operand variants:
22378///
22379/// ```text
22380/// +---+---------------+
22381/// | # | Operands      |
22382/// +---+---------------+
22383/// | 1 | Xmm, Xmm, Xmm |
22384/// +---+---------------+
22385/// ```
22386pub trait Vfnmsub231shMaskErEmitter<A, B, C> {
22387    fn vfnmsub231sh_mask_er(&mut self, op0: A, op1: B, op2: C);
22388}
22389
22390impl<'a> Vfnmsub231shMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22391    fn vfnmsub231sh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22392        self.emit(VFNMSUB231SHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22393    }
22394}
22395
22396/// `VFNMSUB231SH_MASKZ`.
22397///
22398/// Supported operand variants:
22399///
22400/// ```text
22401/// +---+---------------+
22402/// | # | Operands      |
22403/// +---+---------------+
22404/// | 1 | Xmm, Xmm, Mem |
22405/// | 2 | Xmm, Xmm, Xmm |
22406/// +---+---------------+
22407/// ```
22408pub trait Vfnmsub231shMaskzEmitter<A, B, C> {
22409    fn vfnmsub231sh_maskz(&mut self, op0: A, op1: B, op2: C);
22410}
22411
22412impl<'a> Vfnmsub231shMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22413    fn vfnmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22414        self.emit(VFNMSUB231SHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22415    }
22416}
22417
22418impl<'a> Vfnmsub231shMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22419    fn vfnmsub231sh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22420        self.emit(VFNMSUB231SHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22421    }
22422}
22423
22424/// `VFNMSUB231SH_MASKZ_ER`.
22425///
22426/// Supported operand variants:
22427///
22428/// ```text
22429/// +---+---------------+
22430/// | # | Operands      |
22431/// +---+---------------+
22432/// | 1 | Xmm, Xmm, Xmm |
22433/// +---+---------------+
22434/// ```
22435pub trait Vfnmsub231shMaskzErEmitter<A, B, C> {
22436    fn vfnmsub231sh_maskz_er(&mut self, op0: A, op1: B, op2: C);
22437}
22438
22439impl<'a> Vfnmsub231shMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22440    fn vfnmsub231sh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22441        self.emit(VFNMSUB231SHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22442    }
22443}
22444
22445/// `VFPCLASSPH`.
22446///
22447/// Supported operand variants:
22448///
22449/// ```text
22450/// +---+----------------+
22451/// | # | Operands       |
22452/// +---+----------------+
22453/// | 1 | KReg, Mem, Imm |
22454/// | 2 | KReg, Xmm, Imm |
22455/// | 3 | KReg, Ymm, Imm |
22456/// | 4 | KReg, Zmm, Imm |
22457/// +---+----------------+
22458/// ```
22459pub trait VfpclassphEmitter<A, B, C> {
22460    fn vfpclassph(&mut self, op0: A, op1: B, op2: C);
22461}
22462
22463impl<'a> VfpclassphEmitter<KReg, Xmm, Imm> for Assembler<'a> {
22464    fn vfpclassph(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
22465        self.emit(VFPCLASSPH128KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22466    }
22467}
22468
22469impl<'a> VfpclassphEmitter<KReg, Mem, Imm> for Assembler<'a> {
22470    fn vfpclassph(&mut self, op0: KReg, op1: Mem, op2: Imm) {
22471        self.emit(VFPCLASSPH128KMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22472    }
22473}
22474
22475impl<'a> VfpclassphEmitter<KReg, Ymm, Imm> for Assembler<'a> {
22476    fn vfpclassph(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
22477        self.emit(VFPCLASSPH256KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22478    }
22479}
22480
22481impl<'a> VfpclassphEmitter<KReg, Zmm, Imm> for Assembler<'a> {
22482    fn vfpclassph(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
22483        self.emit(VFPCLASSPH512KRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22484    }
22485}
22486
22487/// `VFPCLASSPH_MASK`.
22488///
22489/// Supported operand variants:
22490///
22491/// ```text
22492/// +---+----------------+
22493/// | # | Operands       |
22494/// +---+----------------+
22495/// | 1 | KReg, Mem, Imm |
22496/// | 2 | KReg, Xmm, Imm |
22497/// | 3 | KReg, Ymm, Imm |
22498/// | 4 | KReg, Zmm, Imm |
22499/// +---+----------------+
22500/// ```
22501pub trait VfpclassphMaskEmitter<A, B, C> {
22502    fn vfpclassph_mask(&mut self, op0: A, op1: B, op2: C);
22503}
22504
22505impl<'a> VfpclassphMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
22506    fn vfpclassph_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
22507        self.emit(VFPCLASSPH128KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22508    }
22509}
22510
22511impl<'a> VfpclassphMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
22512    fn vfpclassph_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
22513        self.emit(VFPCLASSPH128KMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22514    }
22515}
22516
22517impl<'a> VfpclassphMaskEmitter<KReg, Ymm, Imm> for Assembler<'a> {
22518    fn vfpclassph_mask(&mut self, op0: KReg, op1: Ymm, op2: Imm) {
22519        self.emit(VFPCLASSPH256KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22520    }
22521}
22522
22523impl<'a> VfpclassphMaskEmitter<KReg, Zmm, Imm> for Assembler<'a> {
22524    fn vfpclassph_mask(&mut self, op0: KReg, op1: Zmm, op2: Imm) {
22525        self.emit(VFPCLASSPH512KRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22526    }
22527}
22528
22529/// `VFPCLASSSH`.
22530///
22531/// Supported operand variants:
22532///
22533/// ```text
22534/// +---+----------------+
22535/// | # | Operands       |
22536/// +---+----------------+
22537/// | 1 | KReg, Mem, Imm |
22538/// | 2 | KReg, Xmm, Imm |
22539/// +---+----------------+
22540/// ```
22541pub trait VfpclassshEmitter<A, B, C> {
22542    fn vfpclasssh(&mut self, op0: A, op1: B, op2: C);
22543}
22544
22545impl<'a> VfpclassshEmitter<KReg, Xmm, Imm> for Assembler<'a> {
22546    fn vfpclasssh(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
22547        self.emit(VFPCLASSSHKRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22548    }
22549}
22550
22551impl<'a> VfpclassshEmitter<KReg, Mem, Imm> for Assembler<'a> {
22552    fn vfpclasssh(&mut self, op0: KReg, op1: Mem, op2: Imm) {
22553        self.emit(VFPCLASSSHKMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22554    }
22555}
22556
22557/// `VFPCLASSSH_MASK`.
22558///
22559/// Supported operand variants:
22560///
22561/// ```text
22562/// +---+----------------+
22563/// | # | Operands       |
22564/// +---+----------------+
22565/// | 1 | KReg, Mem, Imm |
22566/// | 2 | KReg, Xmm, Imm |
22567/// +---+----------------+
22568/// ```
22569pub trait VfpclassshMaskEmitter<A, B, C> {
22570    fn vfpclasssh_mask(&mut self, op0: A, op1: B, op2: C);
22571}
22572
22573impl<'a> VfpclassshMaskEmitter<KReg, Xmm, Imm> for Assembler<'a> {
22574    fn vfpclasssh_mask(&mut self, op0: KReg, op1: Xmm, op2: Imm) {
22575        self.emit(VFPCLASSSHKRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22576    }
22577}
22578
22579impl<'a> VfpclassshMaskEmitter<KReg, Mem, Imm> for Assembler<'a> {
22580    fn vfpclasssh_mask(&mut self, op0: KReg, op1: Mem, op2: Imm) {
22581        self.emit(VFPCLASSSHKMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22582    }
22583}
22584
22585/// `VGETEXPPH`.
22586///
22587/// Supported operand variants:
22588///
22589/// ```text
22590/// +---+----------+
22591/// | # | Operands |
22592/// +---+----------+
22593/// | 1 | Xmm, Mem |
22594/// | 2 | Xmm, Xmm |
22595/// | 3 | Ymm, Mem |
22596/// | 4 | Ymm, Ymm |
22597/// | 5 | Zmm, Mem |
22598/// | 6 | Zmm, Zmm |
22599/// +---+----------+
22600/// ```
22601pub trait VgetexpphEmitter<A, B> {
22602    fn vgetexpph(&mut self, op0: A, op1: B);
22603}
22604
22605impl<'a> VgetexpphEmitter<Xmm, Xmm> for Assembler<'a> {
22606    fn vgetexpph(&mut self, op0: Xmm, op1: Xmm) {
22607        self.emit(VGETEXPPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22608    }
22609}
22610
22611impl<'a> VgetexpphEmitter<Xmm, Mem> for Assembler<'a> {
22612    fn vgetexpph(&mut self, op0: Xmm, op1: Mem) {
22613        self.emit(VGETEXPPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22614    }
22615}
22616
22617impl<'a> VgetexpphEmitter<Ymm, Ymm> for Assembler<'a> {
22618    fn vgetexpph(&mut self, op0: Ymm, op1: Ymm) {
22619        self.emit(VGETEXPPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22620    }
22621}
22622
22623impl<'a> VgetexpphEmitter<Ymm, Mem> for Assembler<'a> {
22624    fn vgetexpph(&mut self, op0: Ymm, op1: Mem) {
22625        self.emit(VGETEXPPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22626    }
22627}
22628
22629impl<'a> VgetexpphEmitter<Zmm, Zmm> for Assembler<'a> {
22630    fn vgetexpph(&mut self, op0: Zmm, op1: Zmm) {
22631        self.emit(VGETEXPPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22632    }
22633}
22634
22635impl<'a> VgetexpphEmitter<Zmm, Mem> for Assembler<'a> {
22636    fn vgetexpph(&mut self, op0: Zmm, op1: Mem) {
22637        self.emit(VGETEXPPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22638    }
22639}
22640
22641/// `VGETEXPPH_MASK`.
22642///
22643/// Supported operand variants:
22644///
22645/// ```text
22646/// +---+----------+
22647/// | # | Operands |
22648/// +---+----------+
22649/// | 1 | Xmm, Mem |
22650/// | 2 | Xmm, Xmm |
22651/// | 3 | Ymm, Mem |
22652/// | 4 | Ymm, Ymm |
22653/// | 5 | Zmm, Mem |
22654/// | 6 | Zmm, Zmm |
22655/// +---+----------+
22656/// ```
22657pub trait VgetexpphMaskEmitter<A, B> {
22658    fn vgetexpph_mask(&mut self, op0: A, op1: B);
22659}
22660
22661impl<'a> VgetexpphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
22662    fn vgetexpph_mask(&mut self, op0: Xmm, op1: Xmm) {
22663        self.emit(VGETEXPPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22664    }
22665}
22666
22667impl<'a> VgetexpphMaskEmitter<Xmm, Mem> for Assembler<'a> {
22668    fn vgetexpph_mask(&mut self, op0: Xmm, op1: Mem) {
22669        self.emit(VGETEXPPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22670    }
22671}
22672
22673impl<'a> VgetexpphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
22674    fn vgetexpph_mask(&mut self, op0: Ymm, op1: Ymm) {
22675        self.emit(VGETEXPPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22676    }
22677}
22678
22679impl<'a> VgetexpphMaskEmitter<Ymm, Mem> for Assembler<'a> {
22680    fn vgetexpph_mask(&mut self, op0: Ymm, op1: Mem) {
22681        self.emit(VGETEXPPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22682    }
22683}
22684
22685impl<'a> VgetexpphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
22686    fn vgetexpph_mask(&mut self, op0: Zmm, op1: Zmm) {
22687        self.emit(VGETEXPPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22688    }
22689}
22690
22691impl<'a> VgetexpphMaskEmitter<Zmm, Mem> for Assembler<'a> {
22692    fn vgetexpph_mask(&mut self, op0: Zmm, op1: Mem) {
22693        self.emit(VGETEXPPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22694    }
22695}
22696
22697/// `VGETEXPPH_MASK_SAE`.
22698///
22699/// Supported operand variants:
22700///
22701/// ```text
22702/// +---+----------+
22703/// | # | Operands |
22704/// +---+----------+
22705/// | 1 | Zmm, Zmm |
22706/// +---+----------+
22707/// ```
22708pub trait VgetexpphMaskSaeEmitter<A, B> {
22709    fn vgetexpph_mask_sae(&mut self, op0: A, op1: B);
22710}
22711
22712impl<'a> VgetexpphMaskSaeEmitter<Zmm, Zmm> for Assembler<'a> {
22713    fn vgetexpph_mask_sae(&mut self, op0: Zmm, op1: Zmm) {
22714        self.emit(VGETEXPPH512RR_MASK_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22715    }
22716}
22717
22718/// `VGETEXPPH_MASKZ`.
22719///
22720/// Supported operand variants:
22721///
22722/// ```text
22723/// +---+----------+
22724/// | # | Operands |
22725/// +---+----------+
22726/// | 1 | Xmm, Mem |
22727/// | 2 | Xmm, Xmm |
22728/// | 3 | Ymm, Mem |
22729/// | 4 | Ymm, Ymm |
22730/// | 5 | Zmm, Mem |
22731/// | 6 | Zmm, Zmm |
22732/// +---+----------+
22733/// ```
22734pub trait VgetexpphMaskzEmitter<A, B> {
22735    fn vgetexpph_maskz(&mut self, op0: A, op1: B);
22736}
22737
22738impl<'a> VgetexpphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
22739    fn vgetexpph_maskz(&mut self, op0: Xmm, op1: Xmm) {
22740        self.emit(VGETEXPPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22741    }
22742}
22743
22744impl<'a> VgetexpphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
22745    fn vgetexpph_maskz(&mut self, op0: Xmm, op1: Mem) {
22746        self.emit(VGETEXPPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22747    }
22748}
22749
22750impl<'a> VgetexpphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
22751    fn vgetexpph_maskz(&mut self, op0: Ymm, op1: Ymm) {
22752        self.emit(VGETEXPPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22753    }
22754}
22755
22756impl<'a> VgetexpphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
22757    fn vgetexpph_maskz(&mut self, op0: Ymm, op1: Mem) {
22758        self.emit(VGETEXPPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22759    }
22760}
22761
22762impl<'a> VgetexpphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
22763    fn vgetexpph_maskz(&mut self, op0: Zmm, op1: Zmm) {
22764        self.emit(VGETEXPPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22765    }
22766}
22767
22768impl<'a> VgetexpphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
22769    fn vgetexpph_maskz(&mut self, op0: Zmm, op1: Mem) {
22770        self.emit(VGETEXPPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22771    }
22772}
22773
22774/// `VGETEXPPH_MASKZ_SAE`.
22775///
22776/// Supported operand variants:
22777///
22778/// ```text
22779/// +---+----------+
22780/// | # | Operands |
22781/// +---+----------+
22782/// | 1 | Zmm, Zmm |
22783/// +---+----------+
22784/// ```
22785pub trait VgetexpphMaskzSaeEmitter<A, B> {
22786    fn vgetexpph_maskz_sae(&mut self, op0: A, op1: B);
22787}
22788
22789impl<'a> VgetexpphMaskzSaeEmitter<Zmm, Zmm> for Assembler<'a> {
22790    fn vgetexpph_maskz_sae(&mut self, op0: Zmm, op1: Zmm) {
22791        self.emit(VGETEXPPH512RR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22792    }
22793}
22794
22795/// `VGETEXPPH_SAE`.
22796///
22797/// Supported operand variants:
22798///
22799/// ```text
22800/// +---+----------+
22801/// | # | Operands |
22802/// +---+----------+
22803/// | 1 | Zmm, Zmm |
22804/// +---+----------+
22805/// ```
22806pub trait VgetexpphSaeEmitter<A, B> {
22807    fn vgetexpph_sae(&mut self, op0: A, op1: B);
22808}
22809
22810impl<'a> VgetexpphSaeEmitter<Zmm, Zmm> for Assembler<'a> {
22811    fn vgetexpph_sae(&mut self, op0: Zmm, op1: Zmm) {
22812        self.emit(VGETEXPPH512RR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
22813    }
22814}
22815
22816/// `VGETEXPSH`.
22817///
22818/// Supported operand variants:
22819///
22820/// ```text
22821/// +---+---------------+
22822/// | # | Operands      |
22823/// +---+---------------+
22824/// | 1 | Xmm, Xmm, Mem |
22825/// | 2 | Xmm, Xmm, Xmm |
22826/// +---+---------------+
22827/// ```
22828pub trait VgetexpshEmitter<A, B, C> {
22829    fn vgetexpsh(&mut self, op0: A, op1: B, op2: C);
22830}
22831
22832impl<'a> VgetexpshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22833    fn vgetexpsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22834        self.emit(VGETEXPSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22835    }
22836}
22837
22838impl<'a> VgetexpshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22839    fn vgetexpsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22840        self.emit(VGETEXPSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22841    }
22842}
22843
22844/// `VGETEXPSH_MASK`.
22845///
22846/// Supported operand variants:
22847///
22848/// ```text
22849/// +---+---------------+
22850/// | # | Operands      |
22851/// +---+---------------+
22852/// | 1 | Xmm, Xmm, Mem |
22853/// | 2 | Xmm, Xmm, Xmm |
22854/// +---+---------------+
22855/// ```
22856pub trait VgetexpshMaskEmitter<A, B, C> {
22857    fn vgetexpsh_mask(&mut self, op0: A, op1: B, op2: C);
22858}
22859
22860impl<'a> VgetexpshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22861    fn vgetexpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22862        self.emit(VGETEXPSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22863    }
22864}
22865
22866impl<'a> VgetexpshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22867    fn vgetexpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22868        self.emit(VGETEXPSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22869    }
22870}
22871
22872/// `VGETEXPSH_MASK_SAE`.
22873///
22874/// Supported operand variants:
22875///
22876/// ```text
22877/// +---+---------------+
22878/// | # | Operands      |
22879/// +---+---------------+
22880/// | 1 | Xmm, Xmm, Xmm |
22881/// +---+---------------+
22882/// ```
22883pub trait VgetexpshMaskSaeEmitter<A, B, C> {
22884    fn vgetexpsh_mask_sae(&mut self, op0: A, op1: B, op2: C);
22885}
22886
22887impl<'a> VgetexpshMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22888    fn vgetexpsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22889        self.emit(VGETEXPSHRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22890    }
22891}
22892
22893/// `VGETEXPSH_MASKZ`.
22894///
22895/// Supported operand variants:
22896///
22897/// ```text
22898/// +---+---------------+
22899/// | # | Operands      |
22900/// +---+---------------+
22901/// | 1 | Xmm, Xmm, Mem |
22902/// | 2 | Xmm, Xmm, Xmm |
22903/// +---+---------------+
22904/// ```
22905pub trait VgetexpshMaskzEmitter<A, B, C> {
22906    fn vgetexpsh_maskz(&mut self, op0: A, op1: B, op2: C);
22907}
22908
22909impl<'a> VgetexpshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22910    fn vgetexpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22911        self.emit(VGETEXPSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22912    }
22913}
22914
22915impl<'a> VgetexpshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
22916    fn vgetexpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
22917        self.emit(VGETEXPSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22918    }
22919}
22920
22921/// `VGETEXPSH_MASKZ_SAE`.
22922///
22923/// Supported operand variants:
22924///
22925/// ```text
22926/// +---+---------------+
22927/// | # | Operands      |
22928/// +---+---------------+
22929/// | 1 | Xmm, Xmm, Xmm |
22930/// +---+---------------+
22931/// ```
22932pub trait VgetexpshMaskzSaeEmitter<A, B, C> {
22933    fn vgetexpsh_maskz_sae(&mut self, op0: A, op1: B, op2: C);
22934}
22935
22936impl<'a> VgetexpshMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22937    fn vgetexpsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22938        self.emit(VGETEXPSHRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22939    }
22940}
22941
22942/// `VGETEXPSH_SAE`.
22943///
22944/// Supported operand variants:
22945///
22946/// ```text
22947/// +---+---------------+
22948/// | # | Operands      |
22949/// +---+---------------+
22950/// | 1 | Xmm, Xmm, Xmm |
22951/// +---+---------------+
22952/// ```
22953pub trait VgetexpshSaeEmitter<A, B, C> {
22954    fn vgetexpsh_sae(&mut self, op0: A, op1: B, op2: C);
22955}
22956
22957impl<'a> VgetexpshSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
22958    fn vgetexpsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
22959        self.emit(VGETEXPSHRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22960    }
22961}
22962
22963/// `VGETMANTPH`.
22964///
22965/// Supported operand variants:
22966///
22967/// ```text
22968/// +---+---------------+
22969/// | # | Operands      |
22970/// +---+---------------+
22971/// | 1 | Xmm, Mem, Imm |
22972/// | 2 | Xmm, Xmm, Imm |
22973/// | 3 | Ymm, Mem, Imm |
22974/// | 4 | Ymm, Ymm, Imm |
22975/// | 5 | Zmm, Mem, Imm |
22976/// | 6 | Zmm, Zmm, Imm |
22977/// +---+---------------+
22978/// ```
22979pub trait VgetmantphEmitter<A, B, C> {
22980    fn vgetmantph(&mut self, op0: A, op1: B, op2: C);
22981}
22982
22983impl<'a> VgetmantphEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
22984    fn vgetmantph(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
22985        self.emit(VGETMANTPH128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22986    }
22987}
22988
22989impl<'a> VgetmantphEmitter<Xmm, Mem, Imm> for Assembler<'a> {
22990    fn vgetmantph(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
22991        self.emit(VGETMANTPH128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22992    }
22993}
22994
22995impl<'a> VgetmantphEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
22996    fn vgetmantph(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
22997        self.emit(VGETMANTPH256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
22998    }
22999}
23000
23001impl<'a> VgetmantphEmitter<Ymm, Mem, Imm> for Assembler<'a> {
23002    fn vgetmantph(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
23003        self.emit(VGETMANTPH256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23004    }
23005}
23006
23007impl<'a> VgetmantphEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
23008    fn vgetmantph(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
23009        self.emit(VGETMANTPH512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23010    }
23011}
23012
23013impl<'a> VgetmantphEmitter<Zmm, Mem, Imm> for Assembler<'a> {
23014    fn vgetmantph(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
23015        self.emit(VGETMANTPH512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23016    }
23017}
23018
23019/// `VGETMANTPH_MASK`.
23020///
23021/// Supported operand variants:
23022///
23023/// ```text
23024/// +---+---------------+
23025/// | # | Operands      |
23026/// +---+---------------+
23027/// | 1 | Xmm, Mem, Imm |
23028/// | 2 | Xmm, Xmm, Imm |
23029/// | 3 | Ymm, Mem, Imm |
23030/// | 4 | Ymm, Ymm, Imm |
23031/// | 5 | Zmm, Mem, Imm |
23032/// | 6 | Zmm, Zmm, Imm |
23033/// +---+---------------+
23034/// ```
23035pub trait VgetmantphMaskEmitter<A, B, C> {
23036    fn vgetmantph_mask(&mut self, op0: A, op1: B, op2: C);
23037}
23038
23039impl<'a> VgetmantphMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
23040    fn vgetmantph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
23041        self.emit(VGETMANTPH128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23042    }
23043}
23044
23045impl<'a> VgetmantphMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
23046    fn vgetmantph_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
23047        self.emit(VGETMANTPH128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23048    }
23049}
23050
23051impl<'a> VgetmantphMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
23052    fn vgetmantph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
23053        self.emit(VGETMANTPH256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23054    }
23055}
23056
23057impl<'a> VgetmantphMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
23058    fn vgetmantph_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
23059        self.emit(VGETMANTPH256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23060    }
23061}
23062
23063impl<'a> VgetmantphMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
23064    fn vgetmantph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
23065        self.emit(VGETMANTPH512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23066    }
23067}
23068
23069impl<'a> VgetmantphMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
23070    fn vgetmantph_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
23071        self.emit(VGETMANTPH512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23072    }
23073}
23074
23075/// `VGETMANTPH_MASK_SAE`.
23076///
23077/// Supported operand variants:
23078///
23079/// ```text
23080/// +---+---------------+
23081/// | # | Operands      |
23082/// +---+---------------+
23083/// | 1 | Zmm, Zmm, Imm |
23084/// +---+---------------+
23085/// ```
23086pub trait VgetmantphMaskSaeEmitter<A, B, C> {
23087    fn vgetmantph_mask_sae(&mut self, op0: A, op1: B, op2: C);
23088}
23089
23090impl<'a> VgetmantphMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
23091    fn vgetmantph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
23092        self.emit(VGETMANTPH512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23093    }
23094}
23095
23096/// `VGETMANTPH_MASKZ`.
23097///
23098/// Supported operand variants:
23099///
23100/// ```text
23101/// +---+---------------+
23102/// | # | Operands      |
23103/// +---+---------------+
23104/// | 1 | Xmm, Mem, Imm |
23105/// | 2 | Xmm, Xmm, Imm |
23106/// | 3 | Ymm, Mem, Imm |
23107/// | 4 | Ymm, Ymm, Imm |
23108/// | 5 | Zmm, Mem, Imm |
23109/// | 6 | Zmm, Zmm, Imm |
23110/// +---+---------------+
23111/// ```
23112pub trait VgetmantphMaskzEmitter<A, B, C> {
23113    fn vgetmantph_maskz(&mut self, op0: A, op1: B, op2: C);
23114}
23115
23116impl<'a> VgetmantphMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
23117    fn vgetmantph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
23118        self.emit(VGETMANTPH128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23119    }
23120}
23121
23122impl<'a> VgetmantphMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
23123    fn vgetmantph_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
23124        self.emit(VGETMANTPH128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23125    }
23126}
23127
23128impl<'a> VgetmantphMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
23129    fn vgetmantph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
23130        self.emit(VGETMANTPH256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23131    }
23132}
23133
23134impl<'a> VgetmantphMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
23135    fn vgetmantph_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
23136        self.emit(VGETMANTPH256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23137    }
23138}
23139
23140impl<'a> VgetmantphMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
23141    fn vgetmantph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
23142        self.emit(VGETMANTPH512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23143    }
23144}
23145
23146impl<'a> VgetmantphMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
23147    fn vgetmantph_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
23148        self.emit(VGETMANTPH512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23149    }
23150}
23151
23152/// `VGETMANTPH_MASKZ_SAE`.
23153///
23154/// Supported operand variants:
23155///
23156/// ```text
23157/// +---+---------------+
23158/// | # | Operands      |
23159/// +---+---------------+
23160/// | 1 | Zmm, Zmm, Imm |
23161/// +---+---------------+
23162/// ```
23163pub trait VgetmantphMaskzSaeEmitter<A, B, C> {
23164    fn vgetmantph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
23165}
23166
23167impl<'a> VgetmantphMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
23168    fn vgetmantph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
23169        self.emit(VGETMANTPH512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23170    }
23171}
23172
23173/// `VGETMANTPH_SAE`.
23174///
23175/// Supported operand variants:
23176///
23177/// ```text
23178/// +---+---------------+
23179/// | # | Operands      |
23180/// +---+---------------+
23181/// | 1 | Zmm, Zmm, Imm |
23182/// +---+---------------+
23183/// ```
23184pub trait VgetmantphSaeEmitter<A, B, C> {
23185    fn vgetmantph_sae(&mut self, op0: A, op1: B, op2: C);
23186}
23187
23188impl<'a> VgetmantphSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
23189    fn vgetmantph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
23190        self.emit(VGETMANTPH512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23191    }
23192}
23193
23194/// `VGETMANTSH`.
23195///
23196/// Supported operand variants:
23197///
23198/// ```text
23199/// +---+--------------------+
23200/// | # | Operands           |
23201/// +---+--------------------+
23202/// | 1 | Xmm, Xmm, Mem, Imm |
23203/// | 2 | Xmm, Xmm, Xmm, Imm |
23204/// +---+--------------------+
23205/// ```
23206pub trait VgetmantshEmitter<A, B, C, D> {
23207    fn vgetmantsh(&mut self, op0: A, op1: B, op2: C, op3: D);
23208}
23209
23210impl<'a> VgetmantshEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23211    fn vgetmantsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23212        self.emit(VGETMANTSHRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23213    }
23214}
23215
23216impl<'a> VgetmantshEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23217    fn vgetmantsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23218        self.emit(VGETMANTSHRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23219    }
23220}
23221
23222/// `VGETMANTSH_MASK`.
23223///
23224/// Supported operand variants:
23225///
23226/// ```text
23227/// +---+--------------------+
23228/// | # | Operands           |
23229/// +---+--------------------+
23230/// | 1 | Xmm, Xmm, Mem, Imm |
23231/// | 2 | Xmm, Xmm, Xmm, Imm |
23232/// +---+--------------------+
23233/// ```
23234pub trait VgetmantshMaskEmitter<A, B, C, D> {
23235    fn vgetmantsh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
23236}
23237
23238impl<'a> VgetmantshMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23239    fn vgetmantsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23240        self.emit(VGETMANTSHRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23241    }
23242}
23243
23244impl<'a> VgetmantshMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23245    fn vgetmantsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23246        self.emit(VGETMANTSHRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23247    }
23248}
23249
23250/// `VGETMANTSH_MASK_SAE`.
23251///
23252/// Supported operand variants:
23253///
23254/// ```text
23255/// +---+--------------------+
23256/// | # | Operands           |
23257/// +---+--------------------+
23258/// | 1 | Xmm, Xmm, Xmm, Imm |
23259/// +---+--------------------+
23260/// ```
23261pub trait VgetmantshMaskSaeEmitter<A, B, C, D> {
23262    fn vgetmantsh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
23263}
23264
23265impl<'a> VgetmantshMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23266    fn vgetmantsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23267        self.emit(VGETMANTSHRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23268    }
23269}
23270
23271/// `VGETMANTSH_MASKZ`.
23272///
23273/// Supported operand variants:
23274///
23275/// ```text
23276/// +---+--------------------+
23277/// | # | Operands           |
23278/// +---+--------------------+
23279/// | 1 | Xmm, Xmm, Mem, Imm |
23280/// | 2 | Xmm, Xmm, Xmm, Imm |
23281/// +---+--------------------+
23282/// ```
23283pub trait VgetmantshMaskzEmitter<A, B, C, D> {
23284    fn vgetmantsh_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
23285}
23286
23287impl<'a> VgetmantshMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23288    fn vgetmantsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23289        self.emit(VGETMANTSHRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23290    }
23291}
23292
23293impl<'a> VgetmantshMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23294    fn vgetmantsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23295        self.emit(VGETMANTSHRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23296    }
23297}
23298
23299/// `VGETMANTSH_MASKZ_SAE`.
23300///
23301/// Supported operand variants:
23302///
23303/// ```text
23304/// +---+--------------------+
23305/// | # | Operands           |
23306/// +---+--------------------+
23307/// | 1 | Xmm, Xmm, Xmm, Imm |
23308/// +---+--------------------+
23309/// ```
23310pub trait VgetmantshMaskzSaeEmitter<A, B, C, D> {
23311    fn vgetmantsh_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
23312}
23313
23314impl<'a> VgetmantshMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23315    fn vgetmantsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23316        self.emit(VGETMANTSHRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23317    }
23318}
23319
23320/// `VGETMANTSH_SAE`.
23321///
23322/// Supported operand variants:
23323///
23324/// ```text
23325/// +---+--------------------+
23326/// | # | Operands           |
23327/// +---+--------------------+
23328/// | 1 | Xmm, Xmm, Xmm, Imm |
23329/// +---+--------------------+
23330/// ```
23331pub trait VgetmantshSaeEmitter<A, B, C, D> {
23332    fn vgetmantsh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
23333}
23334
23335impl<'a> VgetmantshSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23336    fn vgetmantsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23337        self.emit(VGETMANTSHRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23338    }
23339}
23340
23341/// `VGF2P8AFFINEINVQB` (VGF2P8AFFINEINVQB). 
23342/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x8 + x4 + x3 + x + 1.
23343///
23344///
23345/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
23346///
23347/// Supported operand variants:
23348///
23349/// ```text
23350/// +---+--------------------+
23351/// | # | Operands           |
23352/// +---+--------------------+
23353/// | 1 | Xmm, Xmm, Mem, Imm |
23354/// | 2 | Xmm, Xmm, Xmm, Imm |
23355/// | 3 | Ymm, Ymm, Mem, Imm |
23356/// | 4 | Ymm, Ymm, Ymm, Imm |
23357/// | 5 | Zmm, Zmm, Mem, Imm |
23358/// | 6 | Zmm, Zmm, Zmm, Imm |
23359/// +---+--------------------+
23360/// ```
23361pub trait Vgf2p8affineinvqbEmitter<A, B, C, D> {
23362    fn vgf2p8affineinvqb(&mut self, op0: A, op1: B, op2: C, op3: D);
23363}
23364
23365impl<'a> Vgf2p8affineinvqbEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23366    fn vgf2p8affineinvqb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23367        self.emit(VGF2P8AFFINEINVQB128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23368    }
23369}
23370
23371impl<'a> Vgf2p8affineinvqbEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23372    fn vgf2p8affineinvqb(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23373        self.emit(VGF2P8AFFINEINVQB128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23374    }
23375}
23376
23377impl<'a> Vgf2p8affineinvqbEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
23378    fn vgf2p8affineinvqb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
23379        self.emit(VGF2P8AFFINEINVQB256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23380    }
23381}
23382
23383impl<'a> Vgf2p8affineinvqbEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
23384    fn vgf2p8affineinvqb(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
23385        self.emit(VGF2P8AFFINEINVQB256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23386    }
23387}
23388
23389impl<'a> Vgf2p8affineinvqbEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
23390    fn vgf2p8affineinvqb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
23391        self.emit(VGF2P8AFFINEINVQB512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23392    }
23393}
23394
23395impl<'a> Vgf2p8affineinvqbEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
23396    fn vgf2p8affineinvqb(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
23397        self.emit(VGF2P8AFFINEINVQB512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23398    }
23399}
23400
23401/// `VGF2P8AFFINEINVQB_MASK` (VGF2P8AFFINEINVQB). 
23402/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x8 + x4 + x3 + x + 1.
23403///
23404///
23405/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
23406///
23407/// Supported operand variants:
23408///
23409/// ```text
23410/// +---+--------------------+
23411/// | # | Operands           |
23412/// +---+--------------------+
23413/// | 1 | Xmm, Xmm, Mem, Imm |
23414/// | 2 | Xmm, Xmm, Xmm, Imm |
23415/// | 3 | Ymm, Ymm, Mem, Imm |
23416/// | 4 | Ymm, Ymm, Ymm, Imm |
23417/// | 5 | Zmm, Zmm, Mem, Imm |
23418/// | 6 | Zmm, Zmm, Zmm, Imm |
23419/// +---+--------------------+
23420/// ```
23421pub trait Vgf2p8affineinvqbMaskEmitter<A, B, C, D> {
23422    fn vgf2p8affineinvqb_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
23423}
23424
23425impl<'a> Vgf2p8affineinvqbMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23426    fn vgf2p8affineinvqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23427        self.emit(VGF2P8AFFINEINVQB128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23428    }
23429}
23430
23431impl<'a> Vgf2p8affineinvqbMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23432    fn vgf2p8affineinvqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23433        self.emit(VGF2P8AFFINEINVQB128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23434    }
23435}
23436
23437impl<'a> Vgf2p8affineinvqbMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
23438    fn vgf2p8affineinvqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
23439        self.emit(VGF2P8AFFINEINVQB256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23440    }
23441}
23442
23443impl<'a> Vgf2p8affineinvqbMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
23444    fn vgf2p8affineinvqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
23445        self.emit(VGF2P8AFFINEINVQB256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23446    }
23447}
23448
23449impl<'a> Vgf2p8affineinvqbMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
23450    fn vgf2p8affineinvqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
23451        self.emit(VGF2P8AFFINEINVQB512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23452    }
23453}
23454
23455impl<'a> Vgf2p8affineinvqbMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
23456    fn vgf2p8affineinvqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
23457        self.emit(VGF2P8AFFINEINVQB512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23458    }
23459}
23460
23461/// `VGF2P8AFFINEINVQB_MASKZ` (VGF2P8AFFINEINVQB). 
23462/// The AFFINEINVB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x8 + x4 + x3 + x + 1.
23463///
23464///
23465/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
23466///
23467/// Supported operand variants:
23468///
23469/// ```text
23470/// +---+--------------------+
23471/// | # | Operands           |
23472/// +---+--------------------+
23473/// | 1 | Xmm, Xmm, Mem, Imm |
23474/// | 2 | Xmm, Xmm, Xmm, Imm |
23475/// | 3 | Ymm, Ymm, Mem, Imm |
23476/// | 4 | Ymm, Ymm, Ymm, Imm |
23477/// | 5 | Zmm, Zmm, Mem, Imm |
23478/// | 6 | Zmm, Zmm, Zmm, Imm |
23479/// +---+--------------------+
23480/// ```
23481pub trait Vgf2p8affineinvqbMaskzEmitter<A, B, C, D> {
23482    fn vgf2p8affineinvqb_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
23483}
23484
23485impl<'a> Vgf2p8affineinvqbMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23486    fn vgf2p8affineinvqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23487        self.emit(VGF2P8AFFINEINVQB128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23488    }
23489}
23490
23491impl<'a> Vgf2p8affineinvqbMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23492    fn vgf2p8affineinvqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23493        self.emit(VGF2P8AFFINEINVQB128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23494    }
23495}
23496
23497impl<'a> Vgf2p8affineinvqbMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
23498    fn vgf2p8affineinvqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
23499        self.emit(VGF2P8AFFINEINVQB256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23500    }
23501}
23502
23503impl<'a> Vgf2p8affineinvqbMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
23504    fn vgf2p8affineinvqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
23505        self.emit(VGF2P8AFFINEINVQB256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23506    }
23507}
23508
23509impl<'a> Vgf2p8affineinvqbMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
23510    fn vgf2p8affineinvqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
23511        self.emit(VGF2P8AFFINEINVQB512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23512    }
23513}
23514
23515impl<'a> Vgf2p8affineinvqbMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
23516    fn vgf2p8affineinvqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
23517        self.emit(VGF2P8AFFINEINVQB512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23518    }
23519}
23520
23521/// `VGF2P8AFFINEQB` (VGF2P8AFFINEQB). 
23522/// The AFFINEB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
23523///
23524///
23525/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
23526///
23527/// Supported operand variants:
23528///
23529/// ```text
23530/// +---+--------------------+
23531/// | # | Operands           |
23532/// +---+--------------------+
23533/// | 1 | Xmm, Xmm, Mem, Imm |
23534/// | 2 | Xmm, Xmm, Xmm, Imm |
23535/// | 3 | Ymm, Ymm, Mem, Imm |
23536/// | 4 | Ymm, Ymm, Ymm, Imm |
23537/// | 5 | Zmm, Zmm, Mem, Imm |
23538/// | 6 | Zmm, Zmm, Zmm, Imm |
23539/// +---+--------------------+
23540/// ```
23541pub trait Vgf2p8affineqbEmitter<A, B, C, D> {
23542    fn vgf2p8affineqb(&mut self, op0: A, op1: B, op2: C, op3: D);
23543}
23544
23545impl<'a> Vgf2p8affineqbEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23546    fn vgf2p8affineqb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23547        self.emit(VGF2P8AFFINEQB128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23548    }
23549}
23550
23551impl<'a> Vgf2p8affineqbEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23552    fn vgf2p8affineqb(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23553        self.emit(VGF2P8AFFINEQB128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23554    }
23555}
23556
23557impl<'a> Vgf2p8affineqbEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
23558    fn vgf2p8affineqb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
23559        self.emit(VGF2P8AFFINEQB256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23560    }
23561}
23562
23563impl<'a> Vgf2p8affineqbEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
23564    fn vgf2p8affineqb(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
23565        self.emit(VGF2P8AFFINEQB256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23566    }
23567}
23568
23569impl<'a> Vgf2p8affineqbEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
23570    fn vgf2p8affineqb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
23571        self.emit(VGF2P8AFFINEQB512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23572    }
23573}
23574
23575impl<'a> Vgf2p8affineqbEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
23576    fn vgf2p8affineqb(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
23577        self.emit(VGF2P8AFFINEQB512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23578    }
23579}
23580
23581/// `VGF2P8AFFINEQB_MASK` (VGF2P8AFFINEQB). 
23582/// The AFFINEB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
23583///
23584///
23585/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
23586///
23587/// Supported operand variants:
23588///
23589/// ```text
23590/// +---+--------------------+
23591/// | # | Operands           |
23592/// +---+--------------------+
23593/// | 1 | Xmm, Xmm, Mem, Imm |
23594/// | 2 | Xmm, Xmm, Xmm, Imm |
23595/// | 3 | Ymm, Ymm, Mem, Imm |
23596/// | 4 | Ymm, Ymm, Ymm, Imm |
23597/// | 5 | Zmm, Zmm, Mem, Imm |
23598/// | 6 | Zmm, Zmm, Zmm, Imm |
23599/// +---+--------------------+
23600/// ```
23601pub trait Vgf2p8affineqbMaskEmitter<A, B, C, D> {
23602    fn vgf2p8affineqb_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
23603}
23604
23605impl<'a> Vgf2p8affineqbMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23606    fn vgf2p8affineqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23607        self.emit(VGF2P8AFFINEQB128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23608    }
23609}
23610
23611impl<'a> Vgf2p8affineqbMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23612    fn vgf2p8affineqb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23613        self.emit(VGF2P8AFFINEQB128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23614    }
23615}
23616
23617impl<'a> Vgf2p8affineqbMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
23618    fn vgf2p8affineqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
23619        self.emit(VGF2P8AFFINEQB256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23620    }
23621}
23622
23623impl<'a> Vgf2p8affineqbMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
23624    fn vgf2p8affineqb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
23625        self.emit(VGF2P8AFFINEQB256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23626    }
23627}
23628
23629impl<'a> Vgf2p8affineqbMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
23630    fn vgf2p8affineqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
23631        self.emit(VGF2P8AFFINEQB512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23632    }
23633}
23634
23635impl<'a> Vgf2p8affineqbMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
23636    fn vgf2p8affineqb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
23637        self.emit(VGF2P8AFFINEQB512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23638    }
23639}
23640
23641/// `VGF2P8AFFINEQB_MASKZ` (VGF2P8AFFINEQB). 
23642/// The AFFINEB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
23643///
23644///
23645/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
23646///
23647/// Supported operand variants:
23648///
23649/// ```text
23650/// +---+--------------------+
23651/// | # | Operands           |
23652/// +---+--------------------+
23653/// | 1 | Xmm, Xmm, Mem, Imm |
23654/// | 2 | Xmm, Xmm, Xmm, Imm |
23655/// | 3 | Ymm, Ymm, Mem, Imm |
23656/// | 4 | Ymm, Ymm, Ymm, Imm |
23657/// | 5 | Zmm, Zmm, Mem, Imm |
23658/// | 6 | Zmm, Zmm, Zmm, Imm |
23659/// +---+--------------------+
23660/// ```
23661pub trait Vgf2p8affineqbMaskzEmitter<A, B, C, D> {
23662    fn vgf2p8affineqb_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
23663}
23664
23665impl<'a> Vgf2p8affineqbMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
23666    fn vgf2p8affineqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
23667        self.emit(VGF2P8AFFINEQB128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23668    }
23669}
23670
23671impl<'a> Vgf2p8affineqbMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
23672    fn vgf2p8affineqb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
23673        self.emit(VGF2P8AFFINEQB128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23674    }
23675}
23676
23677impl<'a> Vgf2p8affineqbMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
23678    fn vgf2p8affineqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
23679        self.emit(VGF2P8AFFINEQB256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23680    }
23681}
23682
23683impl<'a> Vgf2p8affineqbMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
23684    fn vgf2p8affineqb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
23685        self.emit(VGF2P8AFFINEQB256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23686    }
23687}
23688
23689impl<'a> Vgf2p8affineqbMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
23690    fn vgf2p8affineqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
23691        self.emit(VGF2P8AFFINEQB512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23692    }
23693}
23694
23695impl<'a> Vgf2p8affineqbMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
23696    fn vgf2p8affineqb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
23697        self.emit(VGF2P8AFFINEQB512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
23698    }
23699}
23700
23701/// `VGF2P8MULB` (VGF2P8MULB). 
23702/// The instruction multiplies elements in the finite field GF(28), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(28) is represented in polynomial representation with the reduction polynomial x8 + x4 + x3 + x + 1.
23703///
23704///
23705/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
23706///
23707/// Supported operand variants:
23708///
23709/// ```text
23710/// +---+---------------+
23711/// | # | Operands      |
23712/// +---+---------------+
23713/// | 1 | Xmm, Xmm, Mem |
23714/// | 2 | Xmm, Xmm, Xmm |
23715/// | 3 | Ymm, Ymm, Mem |
23716/// | 4 | Ymm, Ymm, Ymm |
23717/// | 5 | Zmm, Zmm, Mem |
23718/// | 6 | Zmm, Zmm, Zmm |
23719/// +---+---------------+
23720/// ```
23721pub trait Vgf2p8mulbEmitter<A, B, C> {
23722    fn vgf2p8mulb(&mut self, op0: A, op1: B, op2: C);
23723}
23724
23725impl<'a> Vgf2p8mulbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
23726    fn vgf2p8mulb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
23727        self.emit(VGF2P8MULB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23728    }
23729}
23730
23731impl<'a> Vgf2p8mulbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
23732    fn vgf2p8mulb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
23733        self.emit(VGF2P8MULB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23734    }
23735}
23736
23737impl<'a> Vgf2p8mulbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
23738    fn vgf2p8mulb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
23739        self.emit(VGF2P8MULB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23740    }
23741}
23742
23743impl<'a> Vgf2p8mulbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
23744    fn vgf2p8mulb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
23745        self.emit(VGF2P8MULB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23746    }
23747}
23748
23749impl<'a> Vgf2p8mulbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
23750    fn vgf2p8mulb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
23751        self.emit(VGF2P8MULB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23752    }
23753}
23754
23755impl<'a> Vgf2p8mulbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
23756    fn vgf2p8mulb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
23757        self.emit(VGF2P8MULB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23758    }
23759}
23760
23761/// `VGF2P8MULB_MASK` (VGF2P8MULB). 
23762/// The instruction multiplies elements in the finite field GF(28), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(28) is represented in polynomial representation with the reduction polynomial x8 + x4 + x3 + x + 1.
23763///
23764///
23765/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
23766///
23767/// Supported operand variants:
23768///
23769/// ```text
23770/// +---+---------------+
23771/// | # | Operands      |
23772/// +---+---------------+
23773/// | 1 | Xmm, Xmm, Mem |
23774/// | 2 | Xmm, Xmm, Xmm |
23775/// | 3 | Ymm, Ymm, Mem |
23776/// | 4 | Ymm, Ymm, Ymm |
23777/// | 5 | Zmm, Zmm, Mem |
23778/// | 6 | Zmm, Zmm, Zmm |
23779/// +---+---------------+
23780/// ```
23781pub trait Vgf2p8mulbMaskEmitter<A, B, C> {
23782    fn vgf2p8mulb_mask(&mut self, op0: A, op1: B, op2: C);
23783}
23784
23785impl<'a> Vgf2p8mulbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
23786    fn vgf2p8mulb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
23787        self.emit(VGF2P8MULB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23788    }
23789}
23790
23791impl<'a> Vgf2p8mulbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
23792    fn vgf2p8mulb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
23793        self.emit(VGF2P8MULB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23794    }
23795}
23796
23797impl<'a> Vgf2p8mulbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
23798    fn vgf2p8mulb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
23799        self.emit(VGF2P8MULB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23800    }
23801}
23802
23803impl<'a> Vgf2p8mulbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
23804    fn vgf2p8mulb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
23805        self.emit(VGF2P8MULB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23806    }
23807}
23808
23809impl<'a> Vgf2p8mulbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
23810    fn vgf2p8mulb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
23811        self.emit(VGF2P8MULB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23812    }
23813}
23814
23815impl<'a> Vgf2p8mulbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
23816    fn vgf2p8mulb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
23817        self.emit(VGF2P8MULB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23818    }
23819}
23820
23821/// `VGF2P8MULB_MASKZ` (VGF2P8MULB). 
23822/// The instruction multiplies elements in the finite field GF(28), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(28) is represented in polynomial representation with the reduction polynomial x8 + x4 + x3 + x + 1.
23823///
23824///
23825/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
23826///
23827/// Supported operand variants:
23828///
23829/// ```text
23830/// +---+---------------+
23831/// | # | Operands      |
23832/// +---+---------------+
23833/// | 1 | Xmm, Xmm, Mem |
23834/// | 2 | Xmm, Xmm, Xmm |
23835/// | 3 | Ymm, Ymm, Mem |
23836/// | 4 | Ymm, Ymm, Ymm |
23837/// | 5 | Zmm, Zmm, Mem |
23838/// | 6 | Zmm, Zmm, Zmm |
23839/// +---+---------------+
23840/// ```
23841pub trait Vgf2p8mulbMaskzEmitter<A, B, C> {
23842    fn vgf2p8mulb_maskz(&mut self, op0: A, op1: B, op2: C);
23843}
23844
23845impl<'a> Vgf2p8mulbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
23846    fn vgf2p8mulb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
23847        self.emit(VGF2P8MULB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23848    }
23849}
23850
23851impl<'a> Vgf2p8mulbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
23852    fn vgf2p8mulb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
23853        self.emit(VGF2P8MULB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23854    }
23855}
23856
23857impl<'a> Vgf2p8mulbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
23858    fn vgf2p8mulb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
23859        self.emit(VGF2P8MULB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23860    }
23861}
23862
23863impl<'a> Vgf2p8mulbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
23864    fn vgf2p8mulb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
23865        self.emit(VGF2P8MULB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23866    }
23867}
23868
23869impl<'a> Vgf2p8mulbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
23870    fn vgf2p8mulb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
23871        self.emit(VGF2P8MULB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23872    }
23873}
23874
23875impl<'a> Vgf2p8mulbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
23876    fn vgf2p8mulb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
23877        self.emit(VGF2P8MULB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23878    }
23879}
23880
23881/// `VMAXPH`.
23882///
23883/// Supported operand variants:
23884///
23885/// ```text
23886/// +---+---------------+
23887/// | # | Operands      |
23888/// +---+---------------+
23889/// | 1 | Xmm, Xmm, Mem |
23890/// | 2 | Xmm, Xmm, Xmm |
23891/// | 3 | Ymm, Ymm, Mem |
23892/// | 4 | Ymm, Ymm, Ymm |
23893/// | 5 | Zmm, Zmm, Mem |
23894/// | 6 | Zmm, Zmm, Zmm |
23895/// +---+---------------+
23896/// ```
23897pub trait VmaxphEmitter<A, B, C> {
23898    fn vmaxph(&mut self, op0: A, op1: B, op2: C);
23899}
23900
23901impl<'a> VmaxphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
23902    fn vmaxph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
23903        self.emit(VMAXPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23904    }
23905}
23906
23907impl<'a> VmaxphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
23908    fn vmaxph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
23909        self.emit(VMAXPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23910    }
23911}
23912
23913impl<'a> VmaxphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
23914    fn vmaxph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
23915        self.emit(VMAXPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23916    }
23917}
23918
23919impl<'a> VmaxphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
23920    fn vmaxph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
23921        self.emit(VMAXPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23922    }
23923}
23924
23925impl<'a> VmaxphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
23926    fn vmaxph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
23927        self.emit(VMAXPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23928    }
23929}
23930
23931impl<'a> VmaxphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
23932    fn vmaxph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
23933        self.emit(VMAXPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23934    }
23935}
23936
23937/// `VMAXPH_MASK`.
23938///
23939/// Supported operand variants:
23940///
23941/// ```text
23942/// +---+---------------+
23943/// | # | Operands      |
23944/// +---+---------------+
23945/// | 1 | Xmm, Xmm, Mem |
23946/// | 2 | Xmm, Xmm, Xmm |
23947/// | 3 | Ymm, Ymm, Mem |
23948/// | 4 | Ymm, Ymm, Ymm |
23949/// | 5 | Zmm, Zmm, Mem |
23950/// | 6 | Zmm, Zmm, Zmm |
23951/// +---+---------------+
23952/// ```
23953pub trait VmaxphMaskEmitter<A, B, C> {
23954    fn vmaxph_mask(&mut self, op0: A, op1: B, op2: C);
23955}
23956
23957impl<'a> VmaxphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
23958    fn vmaxph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
23959        self.emit(VMAXPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23960    }
23961}
23962
23963impl<'a> VmaxphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
23964    fn vmaxph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
23965        self.emit(VMAXPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23966    }
23967}
23968
23969impl<'a> VmaxphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
23970    fn vmaxph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
23971        self.emit(VMAXPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23972    }
23973}
23974
23975impl<'a> VmaxphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
23976    fn vmaxph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
23977        self.emit(VMAXPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23978    }
23979}
23980
23981impl<'a> VmaxphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
23982    fn vmaxph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
23983        self.emit(VMAXPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23984    }
23985}
23986
23987impl<'a> VmaxphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
23988    fn vmaxph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
23989        self.emit(VMAXPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
23990    }
23991}
23992
23993/// `VMAXPH_MASK_SAE`.
23994///
23995/// Supported operand variants:
23996///
23997/// ```text
23998/// +---+---------------+
23999/// | # | Operands      |
24000/// +---+---------------+
24001/// | 1 | Zmm, Zmm, Zmm |
24002/// +---+---------------+
24003/// ```
24004pub trait VmaxphMaskSaeEmitter<A, B, C> {
24005    fn vmaxph_mask_sae(&mut self, op0: A, op1: B, op2: C);
24006}
24007
24008impl<'a> VmaxphMaskSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24009    fn vmaxph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24010        self.emit(VMAXPH512RRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24011    }
24012}
24013
24014/// `VMAXPH_MASKZ`.
24015///
24016/// Supported operand variants:
24017///
24018/// ```text
24019/// +---+---------------+
24020/// | # | Operands      |
24021/// +---+---------------+
24022/// | 1 | Xmm, Xmm, Mem |
24023/// | 2 | Xmm, Xmm, Xmm |
24024/// | 3 | Ymm, Ymm, Mem |
24025/// | 4 | Ymm, Ymm, Ymm |
24026/// | 5 | Zmm, Zmm, Mem |
24027/// | 6 | Zmm, Zmm, Zmm |
24028/// +---+---------------+
24029/// ```
24030pub trait VmaxphMaskzEmitter<A, B, C> {
24031    fn vmaxph_maskz(&mut self, op0: A, op1: B, op2: C);
24032}
24033
24034impl<'a> VmaxphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24035    fn vmaxph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24036        self.emit(VMAXPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24037    }
24038}
24039
24040impl<'a> VmaxphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24041    fn vmaxph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24042        self.emit(VMAXPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24043    }
24044}
24045
24046impl<'a> VmaxphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
24047    fn vmaxph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
24048        self.emit(VMAXPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24049    }
24050}
24051
24052impl<'a> VmaxphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
24053    fn vmaxph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
24054        self.emit(VMAXPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24055    }
24056}
24057
24058impl<'a> VmaxphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24059    fn vmaxph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24060        self.emit(VMAXPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24061    }
24062}
24063
24064impl<'a> VmaxphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
24065    fn vmaxph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
24066        self.emit(VMAXPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24067    }
24068}
24069
24070/// `VMAXPH_MASKZ_SAE`.
24071///
24072/// Supported operand variants:
24073///
24074/// ```text
24075/// +---+---------------+
24076/// | # | Operands      |
24077/// +---+---------------+
24078/// | 1 | Zmm, Zmm, Zmm |
24079/// +---+---------------+
24080/// ```
24081pub trait VmaxphMaskzSaeEmitter<A, B, C> {
24082    fn vmaxph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
24083}
24084
24085impl<'a> VmaxphMaskzSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24086    fn vmaxph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24087        self.emit(VMAXPH512RRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24088    }
24089}
24090
24091/// `VMAXPH_SAE`.
24092///
24093/// Supported operand variants:
24094///
24095/// ```text
24096/// +---+---------------+
24097/// | # | Operands      |
24098/// +---+---------------+
24099/// | 1 | Zmm, Zmm, Zmm |
24100/// +---+---------------+
24101/// ```
24102pub trait VmaxphSaeEmitter<A, B, C> {
24103    fn vmaxph_sae(&mut self, op0: A, op1: B, op2: C);
24104}
24105
24106impl<'a> VmaxphSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24107    fn vmaxph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24108        self.emit(VMAXPH512RRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24109    }
24110}
24111
24112/// `VMAXSH`.
24113///
24114/// Supported operand variants:
24115///
24116/// ```text
24117/// +---+---------------+
24118/// | # | Operands      |
24119/// +---+---------------+
24120/// | 1 | Xmm, Xmm, Mem |
24121/// | 2 | Xmm, Xmm, Xmm |
24122/// +---+---------------+
24123/// ```
24124pub trait VmaxshEmitter<A, B, C> {
24125    fn vmaxsh(&mut self, op0: A, op1: B, op2: C);
24126}
24127
24128impl<'a> VmaxshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24129    fn vmaxsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24130        self.emit(VMAXSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24131    }
24132}
24133
24134impl<'a> VmaxshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24135    fn vmaxsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24136        self.emit(VMAXSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24137    }
24138}
24139
24140/// `VMAXSH_MASK`.
24141///
24142/// Supported operand variants:
24143///
24144/// ```text
24145/// +---+---------------+
24146/// | # | Operands      |
24147/// +---+---------------+
24148/// | 1 | Xmm, Xmm, Mem |
24149/// | 2 | Xmm, Xmm, Xmm |
24150/// +---+---------------+
24151/// ```
24152pub trait VmaxshMaskEmitter<A, B, C> {
24153    fn vmaxsh_mask(&mut self, op0: A, op1: B, op2: C);
24154}
24155
24156impl<'a> VmaxshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24157    fn vmaxsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24158        self.emit(VMAXSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24159    }
24160}
24161
24162impl<'a> VmaxshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24163    fn vmaxsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24164        self.emit(VMAXSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24165    }
24166}
24167
24168/// `VMAXSH_MASK_SAE`.
24169///
24170/// Supported operand variants:
24171///
24172/// ```text
24173/// +---+---------------+
24174/// | # | Operands      |
24175/// +---+---------------+
24176/// | 1 | Xmm, Xmm, Xmm |
24177/// +---+---------------+
24178/// ```
24179pub trait VmaxshMaskSaeEmitter<A, B, C> {
24180    fn vmaxsh_mask_sae(&mut self, op0: A, op1: B, op2: C);
24181}
24182
24183impl<'a> VmaxshMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24184    fn vmaxsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24185        self.emit(VMAXSHRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24186    }
24187}
24188
24189/// `VMAXSH_MASKZ`.
24190///
24191/// Supported operand variants:
24192///
24193/// ```text
24194/// +---+---------------+
24195/// | # | Operands      |
24196/// +---+---------------+
24197/// | 1 | Xmm, Xmm, Mem |
24198/// | 2 | Xmm, Xmm, Xmm |
24199/// +---+---------------+
24200/// ```
24201pub trait VmaxshMaskzEmitter<A, B, C> {
24202    fn vmaxsh_maskz(&mut self, op0: A, op1: B, op2: C);
24203}
24204
24205impl<'a> VmaxshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24206    fn vmaxsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24207        self.emit(VMAXSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24208    }
24209}
24210
24211impl<'a> VmaxshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24212    fn vmaxsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24213        self.emit(VMAXSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24214    }
24215}
24216
24217/// `VMAXSH_MASKZ_SAE`.
24218///
24219/// Supported operand variants:
24220///
24221/// ```text
24222/// +---+---------------+
24223/// | # | Operands      |
24224/// +---+---------------+
24225/// | 1 | Xmm, Xmm, Xmm |
24226/// +---+---------------+
24227/// ```
24228pub trait VmaxshMaskzSaeEmitter<A, B, C> {
24229    fn vmaxsh_maskz_sae(&mut self, op0: A, op1: B, op2: C);
24230}
24231
24232impl<'a> VmaxshMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24233    fn vmaxsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24234        self.emit(VMAXSHRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24235    }
24236}
24237
24238/// `VMAXSH_SAE`.
24239///
24240/// Supported operand variants:
24241///
24242/// ```text
24243/// +---+---------------+
24244/// | # | Operands      |
24245/// +---+---------------+
24246/// | 1 | Xmm, Xmm, Xmm |
24247/// +---+---------------+
24248/// ```
24249pub trait VmaxshSaeEmitter<A, B, C> {
24250    fn vmaxsh_sae(&mut self, op0: A, op1: B, op2: C);
24251}
24252
24253impl<'a> VmaxshSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24254    fn vmaxsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24255        self.emit(VMAXSHRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24256    }
24257}
24258
24259/// `VMINPH`.
24260///
24261/// Supported operand variants:
24262///
24263/// ```text
24264/// +---+---------------+
24265/// | # | Operands      |
24266/// +---+---------------+
24267/// | 1 | Xmm, Xmm, Mem |
24268/// | 2 | Xmm, Xmm, Xmm |
24269/// | 3 | Ymm, Ymm, Mem |
24270/// | 4 | Ymm, Ymm, Ymm |
24271/// | 5 | Zmm, Zmm, Mem |
24272/// | 6 | Zmm, Zmm, Zmm |
24273/// +---+---------------+
24274/// ```
24275pub trait VminphEmitter<A, B, C> {
24276    fn vminph(&mut self, op0: A, op1: B, op2: C);
24277}
24278
24279impl<'a> VminphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24280    fn vminph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24281        self.emit(VMINPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24282    }
24283}
24284
24285impl<'a> VminphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24286    fn vminph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24287        self.emit(VMINPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24288    }
24289}
24290
24291impl<'a> VminphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
24292    fn vminph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
24293        self.emit(VMINPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24294    }
24295}
24296
24297impl<'a> VminphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
24298    fn vminph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
24299        self.emit(VMINPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24300    }
24301}
24302
24303impl<'a> VminphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24304    fn vminph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24305        self.emit(VMINPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24306    }
24307}
24308
24309impl<'a> VminphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
24310    fn vminph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
24311        self.emit(VMINPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24312    }
24313}
24314
24315/// `VMINPH_MASK`.
24316///
24317/// Supported operand variants:
24318///
24319/// ```text
24320/// +---+---------------+
24321/// | # | Operands      |
24322/// +---+---------------+
24323/// | 1 | Xmm, Xmm, Mem |
24324/// | 2 | Xmm, Xmm, Xmm |
24325/// | 3 | Ymm, Ymm, Mem |
24326/// | 4 | Ymm, Ymm, Ymm |
24327/// | 5 | Zmm, Zmm, Mem |
24328/// | 6 | Zmm, Zmm, Zmm |
24329/// +---+---------------+
24330/// ```
24331pub trait VminphMaskEmitter<A, B, C> {
24332    fn vminph_mask(&mut self, op0: A, op1: B, op2: C);
24333}
24334
24335impl<'a> VminphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24336    fn vminph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24337        self.emit(VMINPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24338    }
24339}
24340
24341impl<'a> VminphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24342    fn vminph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24343        self.emit(VMINPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24344    }
24345}
24346
24347impl<'a> VminphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
24348    fn vminph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
24349        self.emit(VMINPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24350    }
24351}
24352
24353impl<'a> VminphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
24354    fn vminph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
24355        self.emit(VMINPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24356    }
24357}
24358
24359impl<'a> VminphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24360    fn vminph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24361        self.emit(VMINPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24362    }
24363}
24364
24365impl<'a> VminphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
24366    fn vminph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
24367        self.emit(VMINPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24368    }
24369}
24370
24371/// `VMINPH_MASK_SAE`.
24372///
24373/// Supported operand variants:
24374///
24375/// ```text
24376/// +---+---------------+
24377/// | # | Operands      |
24378/// +---+---------------+
24379/// | 1 | Zmm, Zmm, Zmm |
24380/// +---+---------------+
24381/// ```
24382pub trait VminphMaskSaeEmitter<A, B, C> {
24383    fn vminph_mask_sae(&mut self, op0: A, op1: B, op2: C);
24384}
24385
24386impl<'a> VminphMaskSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24387    fn vminph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24388        self.emit(VMINPH512RRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24389    }
24390}
24391
24392/// `VMINPH_MASKZ`.
24393///
24394/// Supported operand variants:
24395///
24396/// ```text
24397/// +---+---------------+
24398/// | # | Operands      |
24399/// +---+---------------+
24400/// | 1 | Xmm, Xmm, Mem |
24401/// | 2 | Xmm, Xmm, Xmm |
24402/// | 3 | Ymm, Ymm, Mem |
24403/// | 4 | Ymm, Ymm, Ymm |
24404/// | 5 | Zmm, Zmm, Mem |
24405/// | 6 | Zmm, Zmm, Zmm |
24406/// +---+---------------+
24407/// ```
24408pub trait VminphMaskzEmitter<A, B, C> {
24409    fn vminph_maskz(&mut self, op0: A, op1: B, op2: C);
24410}
24411
24412impl<'a> VminphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24413    fn vminph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24414        self.emit(VMINPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24415    }
24416}
24417
24418impl<'a> VminphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24419    fn vminph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24420        self.emit(VMINPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24421    }
24422}
24423
24424impl<'a> VminphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
24425    fn vminph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
24426        self.emit(VMINPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24427    }
24428}
24429
24430impl<'a> VminphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
24431    fn vminph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
24432        self.emit(VMINPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24433    }
24434}
24435
24436impl<'a> VminphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24437    fn vminph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24438        self.emit(VMINPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24439    }
24440}
24441
24442impl<'a> VminphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
24443    fn vminph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
24444        self.emit(VMINPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24445    }
24446}
24447
24448/// `VMINPH_MASKZ_SAE`.
24449///
24450/// Supported operand variants:
24451///
24452/// ```text
24453/// +---+---------------+
24454/// | # | Operands      |
24455/// +---+---------------+
24456/// | 1 | Zmm, Zmm, Zmm |
24457/// +---+---------------+
24458/// ```
24459pub trait VminphMaskzSaeEmitter<A, B, C> {
24460    fn vminph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
24461}
24462
24463impl<'a> VminphMaskzSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24464    fn vminph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24465        self.emit(VMINPH512RRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24466    }
24467}
24468
24469/// `VMINPH_SAE`.
24470///
24471/// Supported operand variants:
24472///
24473/// ```text
24474/// +---+---------------+
24475/// | # | Operands      |
24476/// +---+---------------+
24477/// | 1 | Zmm, Zmm, Zmm |
24478/// +---+---------------+
24479/// ```
24480pub trait VminphSaeEmitter<A, B, C> {
24481    fn vminph_sae(&mut self, op0: A, op1: B, op2: C);
24482}
24483
24484impl<'a> VminphSaeEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24485    fn vminph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24486        self.emit(VMINPH512RRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24487    }
24488}
24489
24490/// `VMINSH`.
24491///
24492/// Supported operand variants:
24493///
24494/// ```text
24495/// +---+---------------+
24496/// | # | Operands      |
24497/// +---+---------------+
24498/// | 1 | Xmm, Xmm, Mem |
24499/// | 2 | Xmm, Xmm, Xmm |
24500/// +---+---------------+
24501/// ```
24502pub trait VminshEmitter<A, B, C> {
24503    fn vminsh(&mut self, op0: A, op1: B, op2: C);
24504}
24505
24506impl<'a> VminshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24507    fn vminsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24508        self.emit(VMINSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24509    }
24510}
24511
24512impl<'a> VminshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24513    fn vminsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24514        self.emit(VMINSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24515    }
24516}
24517
24518/// `VMINSH_MASK`.
24519///
24520/// Supported operand variants:
24521///
24522/// ```text
24523/// +---+---------------+
24524/// | # | Operands      |
24525/// +---+---------------+
24526/// | 1 | Xmm, Xmm, Mem |
24527/// | 2 | Xmm, Xmm, Xmm |
24528/// +---+---------------+
24529/// ```
24530pub trait VminshMaskEmitter<A, B, C> {
24531    fn vminsh_mask(&mut self, op0: A, op1: B, op2: C);
24532}
24533
24534impl<'a> VminshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24535    fn vminsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24536        self.emit(VMINSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24537    }
24538}
24539
24540impl<'a> VminshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24541    fn vminsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24542        self.emit(VMINSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24543    }
24544}
24545
24546/// `VMINSH_MASK_SAE`.
24547///
24548/// Supported operand variants:
24549///
24550/// ```text
24551/// +---+---------------+
24552/// | # | Operands      |
24553/// +---+---------------+
24554/// | 1 | Xmm, Xmm, Xmm |
24555/// +---+---------------+
24556/// ```
24557pub trait VminshMaskSaeEmitter<A, B, C> {
24558    fn vminsh_mask_sae(&mut self, op0: A, op1: B, op2: C);
24559}
24560
24561impl<'a> VminshMaskSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24562    fn vminsh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24563        self.emit(VMINSHRRR_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24564    }
24565}
24566
24567/// `VMINSH_MASKZ`.
24568///
24569/// Supported operand variants:
24570///
24571/// ```text
24572/// +---+---------------+
24573/// | # | Operands      |
24574/// +---+---------------+
24575/// | 1 | Xmm, Xmm, Mem |
24576/// | 2 | Xmm, Xmm, Xmm |
24577/// +---+---------------+
24578/// ```
24579pub trait VminshMaskzEmitter<A, B, C> {
24580    fn vminsh_maskz(&mut self, op0: A, op1: B, op2: C);
24581}
24582
24583impl<'a> VminshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24584    fn vminsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24585        self.emit(VMINSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24586    }
24587}
24588
24589impl<'a> VminshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24590    fn vminsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24591        self.emit(VMINSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24592    }
24593}
24594
24595/// `VMINSH_MASKZ_SAE`.
24596///
24597/// Supported operand variants:
24598///
24599/// ```text
24600/// +---+---------------+
24601/// | # | Operands      |
24602/// +---+---------------+
24603/// | 1 | Xmm, Xmm, Xmm |
24604/// +---+---------------+
24605/// ```
24606pub trait VminshMaskzSaeEmitter<A, B, C> {
24607    fn vminsh_maskz_sae(&mut self, op0: A, op1: B, op2: C);
24608}
24609
24610impl<'a> VminshMaskzSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24611    fn vminsh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24612        self.emit(VMINSHRRR_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24613    }
24614}
24615
24616/// `VMINSH_SAE`.
24617///
24618/// Supported operand variants:
24619///
24620/// ```text
24621/// +---+---------------+
24622/// | # | Operands      |
24623/// +---+---------------+
24624/// | 1 | Xmm, Xmm, Xmm |
24625/// +---+---------------+
24626/// ```
24627pub trait VminshSaeEmitter<A, B, C> {
24628    fn vminsh_sae(&mut self, op0: A, op1: B, op2: C);
24629}
24630
24631impl<'a> VminshSaeEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24632    fn vminsh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24633        self.emit(VMINSHRRR_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24634    }
24635}
24636
24637/// `VMOVSH`.
24638///
24639/// Supported operand variants:
24640///
24641/// ```text
24642/// +---+----------+
24643/// | # | Operands |
24644/// +---+----------+
24645/// | 1 | Mem, Xmm |
24646/// | 2 | Xmm, Mem |
24647/// +---+----------+
24648/// ```
24649pub trait VmovshEmitter_2<A, B> {
24650    fn vmovsh_2(&mut self, op0: A, op1: B);
24651}
24652
24653impl<'a> VmovshEmitter_2<Xmm, Mem> for Assembler<'a> {
24654    fn vmovsh_2(&mut self, op0: Xmm, op1: Mem) {
24655        self.emit(VMOVSHRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24656    }
24657}
24658
24659impl<'a> VmovshEmitter_2<Mem, Xmm> for Assembler<'a> {
24660    fn vmovsh_2(&mut self, op0: Mem, op1: Xmm) {
24661        self.emit(VMOVSHMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24662    }
24663}
24664
24665/// `VMOVSH`.
24666///
24667/// Supported operand variants:
24668///
24669/// ```text
24670/// +---+---------------+
24671/// | # | Operands      |
24672/// +---+---------------+
24673/// | 1 | Xmm, Xmm, Xmm |
24674/// +---+---------------+
24675/// ```
24676pub trait VmovshEmitter_3<A, B, C> {
24677    fn vmovsh_3(&mut self, op0: A, op1: B, op2: C);
24678}
24679
24680impl<'a> VmovshEmitter_3<Xmm, Xmm, Xmm> for Assembler<'a> {
24681    fn vmovsh_3(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24682        self.emit(VMOVSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24683    }
24684}
24685
24686/// `VMOVSH_MASK`.
24687///
24688/// Supported operand variants:
24689///
24690/// ```text
24691/// +---+----------+
24692/// | # | Operands |
24693/// +---+----------+
24694/// | 1 | Mem, Xmm |
24695/// | 2 | Xmm, Mem |
24696/// +---+----------+
24697/// ```
24698pub trait VmovshMaskEmitter_2<A, B> {
24699    fn vmovsh_mask_2(&mut self, op0: A, op1: B);
24700}
24701
24702impl<'a> VmovshMaskEmitter_2<Xmm, Mem> for Assembler<'a> {
24703    fn vmovsh_mask_2(&mut self, op0: Xmm, op1: Mem) {
24704        self.emit(VMOVSHRM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24705    }
24706}
24707
24708impl<'a> VmovshMaskEmitter_2<Mem, Xmm> for Assembler<'a> {
24709    fn vmovsh_mask_2(&mut self, op0: Mem, op1: Xmm) {
24710        self.emit(VMOVSHMR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24711    }
24712}
24713
24714/// `VMOVSH_MASK`.
24715///
24716/// Supported operand variants:
24717///
24718/// ```text
24719/// +---+---------------+
24720/// | # | Operands      |
24721/// +---+---------------+
24722/// | 1 | Xmm, Xmm, Xmm |
24723/// +---+---------------+
24724/// ```
24725pub trait VmovshMaskEmitter_3<A, B, C> {
24726    fn vmovsh_mask_3(&mut self, op0: A, op1: B, op2: C);
24727}
24728
24729impl<'a> VmovshMaskEmitter_3<Xmm, Xmm, Xmm> for Assembler<'a> {
24730    fn vmovsh_mask_3(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24731        self.emit(VMOVSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24732    }
24733}
24734
24735/// `VMOVSH_MASKZ`.
24736///
24737/// Supported operand variants:
24738///
24739/// ```text
24740/// +---+----------+
24741/// | # | Operands |
24742/// +---+----------+
24743/// | 1 | Xmm, Mem |
24744/// +---+----------+
24745/// ```
24746pub trait VmovshMaskzEmitter_2<A, B> {
24747    fn vmovsh_maskz_2(&mut self, op0: A, op1: B);
24748}
24749
24750impl<'a> VmovshMaskzEmitter_2<Xmm, Mem> for Assembler<'a> {
24751    fn vmovsh_maskz_2(&mut self, op0: Xmm, op1: Mem) {
24752        self.emit(VMOVSHRM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24753    }
24754}
24755
24756/// `VMOVSH_MASKZ`.
24757///
24758/// Supported operand variants:
24759///
24760/// ```text
24761/// +---+---------------+
24762/// | # | Operands      |
24763/// +---+---------------+
24764/// | 1 | Xmm, Xmm, Xmm |
24765/// +---+---------------+
24766/// ```
24767pub trait VmovshMaskzEmitter_3<A, B, C> {
24768    fn vmovsh_maskz_3(&mut self, op0: A, op1: B, op2: C);
24769}
24770
24771impl<'a> VmovshMaskzEmitter_3<Xmm, Xmm, Xmm> for Assembler<'a> {
24772    fn vmovsh_maskz_3(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24773        self.emit(VMOVSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24774    }
24775}
24776
24777/// `VMOVW_G2X`.
24778///
24779/// Supported operand variants:
24780///
24781/// ```text
24782/// +---+----------+
24783/// | # | Operands |
24784/// +---+----------+
24785/// | 1 | Xmm, Gpd |
24786/// | 2 | Xmm, Mem |
24787/// +---+----------+
24788/// ```
24789pub trait VmovwG2xEmitter<A, B> {
24790    fn vmovw_g2x(&mut self, op0: A, op1: B);
24791}
24792
24793impl<'a> VmovwG2xEmitter<Xmm, Gpd> for Assembler<'a> {
24794    fn vmovw_g2x(&mut self, op0: Xmm, op1: Gpd) {
24795        self.emit(VMOVW_G2XRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24796    }
24797}
24798
24799impl<'a> VmovwG2xEmitter<Xmm, Mem> for Assembler<'a> {
24800    fn vmovw_g2x(&mut self, op0: Xmm, op1: Mem) {
24801        self.emit(VMOVW_G2XRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24802    }
24803}
24804
24805/// `VMOVW_X2G`.
24806///
24807/// Supported operand variants:
24808///
24809/// ```text
24810/// +---+----------+
24811/// | # | Operands |
24812/// +---+----------+
24813/// | 1 | Gpd, Xmm |
24814/// | 2 | Mem, Xmm |
24815/// +---+----------+
24816/// ```
24817pub trait VmovwX2gEmitter<A, B> {
24818    fn vmovw_x2g(&mut self, op0: A, op1: B);
24819}
24820
24821impl<'a> VmovwX2gEmitter<Gpd, Xmm> for Assembler<'a> {
24822    fn vmovw_x2g(&mut self, op0: Gpd, op1: Xmm) {
24823        self.emit(VMOVW_X2GRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24824    }
24825}
24826
24827impl<'a> VmovwX2gEmitter<Mem, Xmm> for Assembler<'a> {
24828    fn vmovw_x2g(&mut self, op0: Mem, op1: Xmm) {
24829        self.emit(VMOVW_X2GMR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
24830    }
24831}
24832
24833/// `VMULPH`.
24834///
24835/// Supported operand variants:
24836///
24837/// ```text
24838/// +---+---------------+
24839/// | # | Operands      |
24840/// +---+---------------+
24841/// | 1 | Xmm, Xmm, Mem |
24842/// | 2 | Xmm, Xmm, Xmm |
24843/// | 3 | Ymm, Ymm, Mem |
24844/// | 4 | Ymm, Ymm, Ymm |
24845/// | 5 | Zmm, Zmm, Mem |
24846/// | 6 | Zmm, Zmm, Zmm |
24847/// +---+---------------+
24848/// ```
24849pub trait VmulphEmitter<A, B, C> {
24850    fn vmulph(&mut self, op0: A, op1: B, op2: C);
24851}
24852
24853impl<'a> VmulphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24854    fn vmulph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24855        self.emit(VMULPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24856    }
24857}
24858
24859impl<'a> VmulphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24860    fn vmulph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24861        self.emit(VMULPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24862    }
24863}
24864
24865impl<'a> VmulphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
24866    fn vmulph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
24867        self.emit(VMULPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24868    }
24869}
24870
24871impl<'a> VmulphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
24872    fn vmulph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
24873        self.emit(VMULPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24874    }
24875}
24876
24877impl<'a> VmulphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24878    fn vmulph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24879        self.emit(VMULPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24880    }
24881}
24882
24883impl<'a> VmulphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
24884    fn vmulph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
24885        self.emit(VMULPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24886    }
24887}
24888
24889/// `VMULPH_ER`.
24890///
24891/// Supported operand variants:
24892///
24893/// ```text
24894/// +---+---------------+
24895/// | # | Operands      |
24896/// +---+---------------+
24897/// | 1 | Zmm, Zmm, Zmm |
24898/// +---+---------------+
24899/// ```
24900pub trait VmulphErEmitter<A, B, C> {
24901    fn vmulph_er(&mut self, op0: A, op1: B, op2: C);
24902}
24903
24904impl<'a> VmulphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24905    fn vmulph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24906        self.emit(VMULPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24907    }
24908}
24909
24910/// `VMULPH_MASK`.
24911///
24912/// Supported operand variants:
24913///
24914/// ```text
24915/// +---+---------------+
24916/// | # | Operands      |
24917/// +---+---------------+
24918/// | 1 | Xmm, Xmm, Mem |
24919/// | 2 | Xmm, Xmm, Xmm |
24920/// | 3 | Ymm, Ymm, Mem |
24921/// | 4 | Ymm, Ymm, Ymm |
24922/// | 5 | Zmm, Zmm, Mem |
24923/// | 6 | Zmm, Zmm, Zmm |
24924/// +---+---------------+
24925/// ```
24926pub trait VmulphMaskEmitter<A, B, C> {
24927    fn vmulph_mask(&mut self, op0: A, op1: B, op2: C);
24928}
24929
24930impl<'a> VmulphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
24931    fn vmulph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
24932        self.emit(VMULPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24933    }
24934}
24935
24936impl<'a> VmulphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
24937    fn vmulph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
24938        self.emit(VMULPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24939    }
24940}
24941
24942impl<'a> VmulphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
24943    fn vmulph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
24944        self.emit(VMULPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24945    }
24946}
24947
24948impl<'a> VmulphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
24949    fn vmulph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
24950        self.emit(VMULPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24951    }
24952}
24953
24954impl<'a> VmulphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24955    fn vmulph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24956        self.emit(VMULPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24957    }
24958}
24959
24960impl<'a> VmulphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
24961    fn vmulph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
24962        self.emit(VMULPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24963    }
24964}
24965
24966/// `VMULPH_MASK_ER`.
24967///
24968/// Supported operand variants:
24969///
24970/// ```text
24971/// +---+---------------+
24972/// | # | Operands      |
24973/// +---+---------------+
24974/// | 1 | Zmm, Zmm, Zmm |
24975/// +---+---------------+
24976/// ```
24977pub trait VmulphMaskErEmitter<A, B, C> {
24978    fn vmulph_mask_er(&mut self, op0: A, op1: B, op2: C);
24979}
24980
24981impl<'a> VmulphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
24982    fn vmulph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
24983        self.emit(VMULPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
24984    }
24985}
24986
24987/// `VMULPH_MASKZ`.
24988///
24989/// Supported operand variants:
24990///
24991/// ```text
24992/// +---+---------------+
24993/// | # | Operands      |
24994/// +---+---------------+
24995/// | 1 | Xmm, Xmm, Mem |
24996/// | 2 | Xmm, Xmm, Xmm |
24997/// | 3 | Ymm, Ymm, Mem |
24998/// | 4 | Ymm, Ymm, Ymm |
24999/// | 5 | Zmm, Zmm, Mem |
25000/// | 6 | Zmm, Zmm, Zmm |
25001/// +---+---------------+
25002/// ```
25003pub trait VmulphMaskzEmitter<A, B, C> {
25004    fn vmulph_maskz(&mut self, op0: A, op1: B, op2: C);
25005}
25006
25007impl<'a> VmulphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25008    fn vmulph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25009        self.emit(VMULPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25010    }
25011}
25012
25013impl<'a> VmulphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25014    fn vmulph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25015        self.emit(VMULPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25016    }
25017}
25018
25019impl<'a> VmulphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25020    fn vmulph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25021        self.emit(VMULPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25022    }
25023}
25024
25025impl<'a> VmulphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25026    fn vmulph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25027        self.emit(VMULPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25028    }
25029}
25030
25031impl<'a> VmulphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
25032    fn vmulph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
25033        self.emit(VMULPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25034    }
25035}
25036
25037impl<'a> VmulphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
25038    fn vmulph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
25039        self.emit(VMULPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25040    }
25041}
25042
25043/// `VMULPH_MASKZ_ER`.
25044///
25045/// Supported operand variants:
25046///
25047/// ```text
25048/// +---+---------------+
25049/// | # | Operands      |
25050/// +---+---------------+
25051/// | 1 | Zmm, Zmm, Zmm |
25052/// +---+---------------+
25053/// ```
25054pub trait VmulphMaskzErEmitter<A, B, C> {
25055    fn vmulph_maskz_er(&mut self, op0: A, op1: B, op2: C);
25056}
25057
25058impl<'a> VmulphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
25059    fn vmulph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
25060        self.emit(VMULPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25061    }
25062}
25063
25064/// `VMULSH`.
25065///
25066/// Supported operand variants:
25067///
25068/// ```text
25069/// +---+---------------+
25070/// | # | Operands      |
25071/// +---+---------------+
25072/// | 1 | Xmm, Xmm, Mem |
25073/// | 2 | Xmm, Xmm, Xmm |
25074/// +---+---------------+
25075/// ```
25076pub trait VmulshEmitter<A, B, C> {
25077    fn vmulsh(&mut self, op0: A, op1: B, op2: C);
25078}
25079
25080impl<'a> VmulshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25081    fn vmulsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25082        self.emit(VMULSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25083    }
25084}
25085
25086impl<'a> VmulshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25087    fn vmulsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25088        self.emit(VMULSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25089    }
25090}
25091
25092/// `VMULSH_ER`.
25093///
25094/// Supported operand variants:
25095///
25096/// ```text
25097/// +---+---------------+
25098/// | # | Operands      |
25099/// +---+---------------+
25100/// | 1 | Xmm, Xmm, Xmm |
25101/// +---+---------------+
25102/// ```
25103pub trait VmulshErEmitter<A, B, C> {
25104    fn vmulsh_er(&mut self, op0: A, op1: B, op2: C);
25105}
25106
25107impl<'a> VmulshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25108    fn vmulsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25109        self.emit(VMULSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25110    }
25111}
25112
25113/// `VMULSH_MASK`.
25114///
25115/// Supported operand variants:
25116///
25117/// ```text
25118/// +---+---------------+
25119/// | # | Operands      |
25120/// +---+---------------+
25121/// | 1 | Xmm, Xmm, Mem |
25122/// | 2 | Xmm, Xmm, Xmm |
25123/// +---+---------------+
25124/// ```
25125pub trait VmulshMaskEmitter<A, B, C> {
25126    fn vmulsh_mask(&mut self, op0: A, op1: B, op2: C);
25127}
25128
25129impl<'a> VmulshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25130    fn vmulsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25131        self.emit(VMULSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25132    }
25133}
25134
25135impl<'a> VmulshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25136    fn vmulsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25137        self.emit(VMULSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25138    }
25139}
25140
25141/// `VMULSH_MASK_ER`.
25142///
25143/// Supported operand variants:
25144///
25145/// ```text
25146/// +---+---------------+
25147/// | # | Operands      |
25148/// +---+---------------+
25149/// | 1 | Xmm, Xmm, Xmm |
25150/// +---+---------------+
25151/// ```
25152pub trait VmulshMaskErEmitter<A, B, C> {
25153    fn vmulsh_mask_er(&mut self, op0: A, op1: B, op2: C);
25154}
25155
25156impl<'a> VmulshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25157    fn vmulsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25158        self.emit(VMULSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25159    }
25160}
25161
25162/// `VMULSH_MASKZ`.
25163///
25164/// Supported operand variants:
25165///
25166/// ```text
25167/// +---+---------------+
25168/// | # | Operands      |
25169/// +---+---------------+
25170/// | 1 | Xmm, Xmm, Mem |
25171/// | 2 | Xmm, Xmm, Xmm |
25172/// +---+---------------+
25173/// ```
25174pub trait VmulshMaskzEmitter<A, B, C> {
25175    fn vmulsh_maskz(&mut self, op0: A, op1: B, op2: C);
25176}
25177
25178impl<'a> VmulshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25179    fn vmulsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25180        self.emit(VMULSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25181    }
25182}
25183
25184impl<'a> VmulshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25185    fn vmulsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25186        self.emit(VMULSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25187    }
25188}
25189
25190/// `VMULSH_MASKZ_ER`.
25191///
25192/// Supported operand variants:
25193///
25194/// ```text
25195/// +---+---------------+
25196/// | # | Operands      |
25197/// +---+---------------+
25198/// | 1 | Xmm, Xmm, Xmm |
25199/// +---+---------------+
25200/// ```
25201pub trait VmulshMaskzErEmitter<A, B, C> {
25202    fn vmulsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
25203}
25204
25205impl<'a> VmulshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25206    fn vmulsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25207        self.emit(VMULSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25208    }
25209}
25210
25211/// `VPCLMULQDQ` (VPCLMULQDQ). 
25212/// Performs a carry-less multiplication of two quadwords, selected from the first source and second source operand according to the value of the immediate byte. Bits 4 and 0 are used to select which 64-bit half of each operand to use according to Table 4-13, other bits of the immediate byte are ignored.
25213///
25214///
25215/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PCLMULQDQ.html).
25216///
25217/// Supported operand variants:
25218///
25219/// ```text
25220/// +---+--------------------+
25221/// | # | Operands           |
25222/// +---+--------------------+
25223/// | 1 | Xmm, Xmm, Mem, Imm |
25224/// | 2 | Xmm, Xmm, Xmm, Imm |
25225/// | 3 | Ymm, Ymm, Mem, Imm |
25226/// | 4 | Ymm, Ymm, Ymm, Imm |
25227/// | 5 | Zmm, Zmm, Mem, Imm |
25228/// | 6 | Zmm, Zmm, Zmm, Imm |
25229/// +---+--------------------+
25230/// ```
25231pub trait VpclmulqdqEmitter<A, B, C, D> {
25232    fn vpclmulqdq(&mut self, op0: A, op1: B, op2: C, op3: D);
25233}
25234
25235impl<'a> VpclmulqdqEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
25236    fn vpclmulqdq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
25237        self.emit(VPCLMULQDQ128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
25238    }
25239}
25240
25241impl<'a> VpclmulqdqEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
25242    fn vpclmulqdq(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
25243        self.emit(VPCLMULQDQ128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
25244    }
25245}
25246
25247impl<'a> VpclmulqdqEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
25248    fn vpclmulqdq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
25249        self.emit(VPCLMULQDQ256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
25250    }
25251}
25252
25253impl<'a> VpclmulqdqEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
25254    fn vpclmulqdq(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
25255        self.emit(VPCLMULQDQ256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
25256    }
25257}
25258
25259impl<'a> VpclmulqdqEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
25260    fn vpclmulqdq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
25261        self.emit(VPCLMULQDQ512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
25262    }
25263}
25264
25265impl<'a> VpclmulqdqEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
25266    fn vpclmulqdq(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
25267        self.emit(VPCLMULQDQ512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
25268    }
25269}
25270
25271/// `VPDPBSSD`.
25272///
25273/// Supported operand variants:
25274///
25275/// ```text
25276/// +---+---------------+
25277/// | # | Operands      |
25278/// +---+---------------+
25279/// | 1 | Xmm, Xmm, Mem |
25280/// | 2 | Xmm, Xmm, Xmm |
25281/// | 3 | Ymm, Ymm, Mem |
25282/// | 4 | Ymm, Ymm, Ymm |
25283/// +---+---------------+
25284/// ```
25285pub trait VpdpbssdEmitter<A, B, C> {
25286    fn vpdpbssd(&mut self, op0: A, op1: B, op2: C);
25287}
25288
25289impl<'a> VpdpbssdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25290    fn vpdpbssd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25291        self.emit(VPDPBSSD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25292    }
25293}
25294
25295impl<'a> VpdpbssdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25296    fn vpdpbssd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25297        self.emit(VPDPBSSD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25298    }
25299}
25300
25301impl<'a> VpdpbssdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25302    fn vpdpbssd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25303        self.emit(VPDPBSSD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25304    }
25305}
25306
25307impl<'a> VpdpbssdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25308    fn vpdpbssd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25309        self.emit(VPDPBSSD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25310    }
25311}
25312
25313/// `VPDPBSSDS`.
25314///
25315/// Supported operand variants:
25316///
25317/// ```text
25318/// +---+---------------+
25319/// | # | Operands      |
25320/// +---+---------------+
25321/// | 1 | Xmm, Xmm, Mem |
25322/// | 2 | Xmm, Xmm, Xmm |
25323/// | 3 | Ymm, Ymm, Mem |
25324/// | 4 | Ymm, Ymm, Ymm |
25325/// +---+---------------+
25326/// ```
25327pub trait VpdpbssdsEmitter<A, B, C> {
25328    fn vpdpbssds(&mut self, op0: A, op1: B, op2: C);
25329}
25330
25331impl<'a> VpdpbssdsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25332    fn vpdpbssds(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25333        self.emit(VPDPBSSDS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25334    }
25335}
25336
25337impl<'a> VpdpbssdsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25338    fn vpdpbssds(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25339        self.emit(VPDPBSSDS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25340    }
25341}
25342
25343impl<'a> VpdpbssdsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25344    fn vpdpbssds(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25345        self.emit(VPDPBSSDS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25346    }
25347}
25348
25349impl<'a> VpdpbssdsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25350    fn vpdpbssds(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25351        self.emit(VPDPBSSDS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25352    }
25353}
25354
25355/// `VPDPBSUD`.
25356///
25357/// Supported operand variants:
25358///
25359/// ```text
25360/// +---+---------------+
25361/// | # | Operands      |
25362/// +---+---------------+
25363/// | 1 | Xmm, Xmm, Mem |
25364/// | 2 | Xmm, Xmm, Xmm |
25365/// | 3 | Ymm, Ymm, Mem |
25366/// | 4 | Ymm, Ymm, Ymm |
25367/// +---+---------------+
25368/// ```
25369pub trait VpdpbsudEmitter<A, B, C> {
25370    fn vpdpbsud(&mut self, op0: A, op1: B, op2: C);
25371}
25372
25373impl<'a> VpdpbsudEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25374    fn vpdpbsud(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25375        self.emit(VPDPBSUD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25376    }
25377}
25378
25379impl<'a> VpdpbsudEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25380    fn vpdpbsud(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25381        self.emit(VPDPBSUD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25382    }
25383}
25384
25385impl<'a> VpdpbsudEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25386    fn vpdpbsud(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25387        self.emit(VPDPBSUD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25388    }
25389}
25390
25391impl<'a> VpdpbsudEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25392    fn vpdpbsud(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25393        self.emit(VPDPBSUD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25394    }
25395}
25396
25397/// `VPDPBSUDS`.
25398///
25399/// Supported operand variants:
25400///
25401/// ```text
25402/// +---+---------------+
25403/// | # | Operands      |
25404/// +---+---------------+
25405/// | 1 | Xmm, Xmm, Mem |
25406/// | 2 | Xmm, Xmm, Xmm |
25407/// | 3 | Ymm, Ymm, Mem |
25408/// | 4 | Ymm, Ymm, Ymm |
25409/// +---+---------------+
25410/// ```
25411pub trait VpdpbsudsEmitter<A, B, C> {
25412    fn vpdpbsuds(&mut self, op0: A, op1: B, op2: C);
25413}
25414
25415impl<'a> VpdpbsudsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25416    fn vpdpbsuds(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25417        self.emit(VPDPBSUDS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25418    }
25419}
25420
25421impl<'a> VpdpbsudsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25422    fn vpdpbsuds(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25423        self.emit(VPDPBSUDS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25424    }
25425}
25426
25427impl<'a> VpdpbsudsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25428    fn vpdpbsuds(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25429        self.emit(VPDPBSUDS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25430    }
25431}
25432
25433impl<'a> VpdpbsudsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25434    fn vpdpbsuds(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25435        self.emit(VPDPBSUDS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25436    }
25437}
25438
25439/// `VPDPBUUD`.
25440///
25441/// Supported operand variants:
25442///
25443/// ```text
25444/// +---+---------------+
25445/// | # | Operands      |
25446/// +---+---------------+
25447/// | 1 | Xmm, Xmm, Mem |
25448/// | 2 | Xmm, Xmm, Xmm |
25449/// | 3 | Ymm, Ymm, Mem |
25450/// | 4 | Ymm, Ymm, Ymm |
25451/// +---+---------------+
25452/// ```
25453pub trait VpdpbuudEmitter<A, B, C> {
25454    fn vpdpbuud(&mut self, op0: A, op1: B, op2: C);
25455}
25456
25457impl<'a> VpdpbuudEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25458    fn vpdpbuud(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25459        self.emit(VPDPBUUD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25460    }
25461}
25462
25463impl<'a> VpdpbuudEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25464    fn vpdpbuud(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25465        self.emit(VPDPBUUD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25466    }
25467}
25468
25469impl<'a> VpdpbuudEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25470    fn vpdpbuud(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25471        self.emit(VPDPBUUD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25472    }
25473}
25474
25475impl<'a> VpdpbuudEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25476    fn vpdpbuud(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25477        self.emit(VPDPBUUD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25478    }
25479}
25480
25481/// `VPDPBUUDS`.
25482///
25483/// Supported operand variants:
25484///
25485/// ```text
25486/// +---+---------------+
25487/// | # | Operands      |
25488/// +---+---------------+
25489/// | 1 | Xmm, Xmm, Mem |
25490/// | 2 | Xmm, Xmm, Xmm |
25491/// | 3 | Ymm, Ymm, Mem |
25492/// | 4 | Ymm, Ymm, Ymm |
25493/// +---+---------------+
25494/// ```
25495pub trait VpdpbuudsEmitter<A, B, C> {
25496    fn vpdpbuuds(&mut self, op0: A, op1: B, op2: C);
25497}
25498
25499impl<'a> VpdpbuudsEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25500    fn vpdpbuuds(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25501        self.emit(VPDPBUUDS128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25502    }
25503}
25504
25505impl<'a> VpdpbuudsEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25506    fn vpdpbuuds(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25507        self.emit(VPDPBUUDS128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25508    }
25509}
25510
25511impl<'a> VpdpbuudsEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
25512    fn vpdpbuuds(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
25513        self.emit(VPDPBUUDS256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25514    }
25515}
25516
25517impl<'a> VpdpbuudsEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
25518    fn vpdpbuuds(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
25519        self.emit(VPDPBUUDS256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25520    }
25521}
25522
25523/// `VRCPPH`.
25524///
25525/// Supported operand variants:
25526///
25527/// ```text
25528/// +---+----------+
25529/// | # | Operands |
25530/// +---+----------+
25531/// | 1 | Xmm, Mem |
25532/// | 2 | Xmm, Xmm |
25533/// | 3 | Ymm, Mem |
25534/// | 4 | Ymm, Ymm |
25535/// | 5 | Zmm, Mem |
25536/// | 6 | Zmm, Zmm |
25537/// +---+----------+
25538/// ```
25539pub trait VrcpphEmitter<A, B> {
25540    fn vrcpph(&mut self, op0: A, op1: B);
25541}
25542
25543impl<'a> VrcpphEmitter<Xmm, Xmm> for Assembler<'a> {
25544    fn vrcpph(&mut self, op0: Xmm, op1: Xmm) {
25545        self.emit(VRCPPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25546    }
25547}
25548
25549impl<'a> VrcpphEmitter<Xmm, Mem> for Assembler<'a> {
25550    fn vrcpph(&mut self, op0: Xmm, op1: Mem) {
25551        self.emit(VRCPPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25552    }
25553}
25554
25555impl<'a> VrcpphEmitter<Ymm, Ymm> for Assembler<'a> {
25556    fn vrcpph(&mut self, op0: Ymm, op1: Ymm) {
25557        self.emit(VRCPPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25558    }
25559}
25560
25561impl<'a> VrcpphEmitter<Ymm, Mem> for Assembler<'a> {
25562    fn vrcpph(&mut self, op0: Ymm, op1: Mem) {
25563        self.emit(VRCPPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25564    }
25565}
25566
25567impl<'a> VrcpphEmitter<Zmm, Zmm> for Assembler<'a> {
25568    fn vrcpph(&mut self, op0: Zmm, op1: Zmm) {
25569        self.emit(VRCPPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25570    }
25571}
25572
25573impl<'a> VrcpphEmitter<Zmm, Mem> for Assembler<'a> {
25574    fn vrcpph(&mut self, op0: Zmm, op1: Mem) {
25575        self.emit(VRCPPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25576    }
25577}
25578
25579/// `VRCPPH_MASK`.
25580///
25581/// Supported operand variants:
25582///
25583/// ```text
25584/// +---+----------+
25585/// | # | Operands |
25586/// +---+----------+
25587/// | 1 | Xmm, Mem |
25588/// | 2 | Xmm, Xmm |
25589/// | 3 | Ymm, Mem |
25590/// | 4 | Ymm, Ymm |
25591/// | 5 | Zmm, Mem |
25592/// | 6 | Zmm, Zmm |
25593/// +---+----------+
25594/// ```
25595pub trait VrcpphMaskEmitter<A, B> {
25596    fn vrcpph_mask(&mut self, op0: A, op1: B);
25597}
25598
25599impl<'a> VrcpphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
25600    fn vrcpph_mask(&mut self, op0: Xmm, op1: Xmm) {
25601        self.emit(VRCPPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25602    }
25603}
25604
25605impl<'a> VrcpphMaskEmitter<Xmm, Mem> for Assembler<'a> {
25606    fn vrcpph_mask(&mut self, op0: Xmm, op1: Mem) {
25607        self.emit(VRCPPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25608    }
25609}
25610
25611impl<'a> VrcpphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
25612    fn vrcpph_mask(&mut self, op0: Ymm, op1: Ymm) {
25613        self.emit(VRCPPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25614    }
25615}
25616
25617impl<'a> VrcpphMaskEmitter<Ymm, Mem> for Assembler<'a> {
25618    fn vrcpph_mask(&mut self, op0: Ymm, op1: Mem) {
25619        self.emit(VRCPPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25620    }
25621}
25622
25623impl<'a> VrcpphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
25624    fn vrcpph_mask(&mut self, op0: Zmm, op1: Zmm) {
25625        self.emit(VRCPPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25626    }
25627}
25628
25629impl<'a> VrcpphMaskEmitter<Zmm, Mem> for Assembler<'a> {
25630    fn vrcpph_mask(&mut self, op0: Zmm, op1: Mem) {
25631        self.emit(VRCPPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25632    }
25633}
25634
25635/// `VRCPPH_MASKZ`.
25636///
25637/// Supported operand variants:
25638///
25639/// ```text
25640/// +---+----------+
25641/// | # | Operands |
25642/// +---+----------+
25643/// | 1 | Xmm, Mem |
25644/// | 2 | Xmm, Xmm |
25645/// | 3 | Ymm, Mem |
25646/// | 4 | Ymm, Ymm |
25647/// | 5 | Zmm, Mem |
25648/// | 6 | Zmm, Zmm |
25649/// +---+----------+
25650/// ```
25651pub trait VrcpphMaskzEmitter<A, B> {
25652    fn vrcpph_maskz(&mut self, op0: A, op1: B);
25653}
25654
25655impl<'a> VrcpphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
25656    fn vrcpph_maskz(&mut self, op0: Xmm, op1: Xmm) {
25657        self.emit(VRCPPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25658    }
25659}
25660
25661impl<'a> VrcpphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
25662    fn vrcpph_maskz(&mut self, op0: Xmm, op1: Mem) {
25663        self.emit(VRCPPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25664    }
25665}
25666
25667impl<'a> VrcpphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
25668    fn vrcpph_maskz(&mut self, op0: Ymm, op1: Ymm) {
25669        self.emit(VRCPPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25670    }
25671}
25672
25673impl<'a> VrcpphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
25674    fn vrcpph_maskz(&mut self, op0: Ymm, op1: Mem) {
25675        self.emit(VRCPPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25676    }
25677}
25678
25679impl<'a> VrcpphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
25680    fn vrcpph_maskz(&mut self, op0: Zmm, op1: Zmm) {
25681        self.emit(VRCPPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25682    }
25683}
25684
25685impl<'a> VrcpphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
25686    fn vrcpph_maskz(&mut self, op0: Zmm, op1: Mem) {
25687        self.emit(VRCPPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
25688    }
25689}
25690
25691/// `VRCPSH`.
25692///
25693/// Supported operand variants:
25694///
25695/// ```text
25696/// +---+---------------+
25697/// | # | Operands      |
25698/// +---+---------------+
25699/// | 1 | Xmm, Xmm, Mem |
25700/// | 2 | Xmm, Xmm, Xmm |
25701/// +---+---------------+
25702/// ```
25703pub trait VrcpshEmitter<A, B, C> {
25704    fn vrcpsh(&mut self, op0: A, op1: B, op2: C);
25705}
25706
25707impl<'a> VrcpshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25708    fn vrcpsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25709        self.emit(VRCPSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25710    }
25711}
25712
25713impl<'a> VrcpshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25714    fn vrcpsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25715        self.emit(VRCPSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25716    }
25717}
25718
25719/// `VRCPSH_MASK`.
25720///
25721/// Supported operand variants:
25722///
25723/// ```text
25724/// +---+---------------+
25725/// | # | Operands      |
25726/// +---+---------------+
25727/// | 1 | Xmm, Xmm, Mem |
25728/// | 2 | Xmm, Xmm, Xmm |
25729/// +---+---------------+
25730/// ```
25731pub trait VrcpshMaskEmitter<A, B, C> {
25732    fn vrcpsh_mask(&mut self, op0: A, op1: B, op2: C);
25733}
25734
25735impl<'a> VrcpshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25736    fn vrcpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25737        self.emit(VRCPSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25738    }
25739}
25740
25741impl<'a> VrcpshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25742    fn vrcpsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25743        self.emit(VRCPSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25744    }
25745}
25746
25747/// `VRCPSH_MASKZ`.
25748///
25749/// Supported operand variants:
25750///
25751/// ```text
25752/// +---+---------------+
25753/// | # | Operands      |
25754/// +---+---------------+
25755/// | 1 | Xmm, Xmm, Mem |
25756/// | 2 | Xmm, Xmm, Xmm |
25757/// +---+---------------+
25758/// ```
25759pub trait VrcpshMaskzEmitter<A, B, C> {
25760    fn vrcpsh_maskz(&mut self, op0: A, op1: B, op2: C);
25761}
25762
25763impl<'a> VrcpshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
25764    fn vrcpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
25765        self.emit(VRCPSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25766    }
25767}
25768
25769impl<'a> VrcpshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
25770    fn vrcpsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
25771        self.emit(VRCPSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25772    }
25773}
25774
25775/// `VREDUCEPH`.
25776///
25777/// Supported operand variants:
25778///
25779/// ```text
25780/// +---+---------------+
25781/// | # | Operands      |
25782/// +---+---------------+
25783/// | 1 | Xmm, Mem, Imm |
25784/// | 2 | Xmm, Xmm, Imm |
25785/// | 3 | Ymm, Mem, Imm |
25786/// | 4 | Ymm, Ymm, Imm |
25787/// | 5 | Zmm, Mem, Imm |
25788/// | 6 | Zmm, Zmm, Imm |
25789/// +---+---------------+
25790/// ```
25791pub trait VreducephEmitter<A, B, C> {
25792    fn vreduceph(&mut self, op0: A, op1: B, op2: C);
25793}
25794
25795impl<'a> VreducephEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
25796    fn vreduceph(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
25797        self.emit(VREDUCEPH128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25798    }
25799}
25800
25801impl<'a> VreducephEmitter<Xmm, Mem, Imm> for Assembler<'a> {
25802    fn vreduceph(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
25803        self.emit(VREDUCEPH128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25804    }
25805}
25806
25807impl<'a> VreducephEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
25808    fn vreduceph(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
25809        self.emit(VREDUCEPH256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25810    }
25811}
25812
25813impl<'a> VreducephEmitter<Ymm, Mem, Imm> for Assembler<'a> {
25814    fn vreduceph(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
25815        self.emit(VREDUCEPH256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25816    }
25817}
25818
25819impl<'a> VreducephEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
25820    fn vreduceph(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
25821        self.emit(VREDUCEPH512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25822    }
25823}
25824
25825impl<'a> VreducephEmitter<Zmm, Mem, Imm> for Assembler<'a> {
25826    fn vreduceph(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
25827        self.emit(VREDUCEPH512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25828    }
25829}
25830
25831/// `VREDUCEPH_MASK`.
25832///
25833/// Supported operand variants:
25834///
25835/// ```text
25836/// +---+---------------+
25837/// | # | Operands      |
25838/// +---+---------------+
25839/// | 1 | Xmm, Mem, Imm |
25840/// | 2 | Xmm, Xmm, Imm |
25841/// | 3 | Ymm, Mem, Imm |
25842/// | 4 | Ymm, Ymm, Imm |
25843/// | 5 | Zmm, Mem, Imm |
25844/// | 6 | Zmm, Zmm, Imm |
25845/// +---+---------------+
25846/// ```
25847pub trait VreducephMaskEmitter<A, B, C> {
25848    fn vreduceph_mask(&mut self, op0: A, op1: B, op2: C);
25849}
25850
25851impl<'a> VreducephMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
25852    fn vreduceph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
25853        self.emit(VREDUCEPH128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25854    }
25855}
25856
25857impl<'a> VreducephMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
25858    fn vreduceph_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
25859        self.emit(VREDUCEPH128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25860    }
25861}
25862
25863impl<'a> VreducephMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
25864    fn vreduceph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
25865        self.emit(VREDUCEPH256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25866    }
25867}
25868
25869impl<'a> VreducephMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
25870    fn vreduceph_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
25871        self.emit(VREDUCEPH256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25872    }
25873}
25874
25875impl<'a> VreducephMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
25876    fn vreduceph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
25877        self.emit(VREDUCEPH512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25878    }
25879}
25880
25881impl<'a> VreducephMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
25882    fn vreduceph_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
25883        self.emit(VREDUCEPH512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25884    }
25885}
25886
25887/// `VREDUCEPH_MASK_SAE`.
25888///
25889/// Supported operand variants:
25890///
25891/// ```text
25892/// +---+---------------+
25893/// | # | Operands      |
25894/// +---+---------------+
25895/// | 1 | Zmm, Zmm, Imm |
25896/// +---+---------------+
25897/// ```
25898pub trait VreducephMaskSaeEmitter<A, B, C> {
25899    fn vreduceph_mask_sae(&mut self, op0: A, op1: B, op2: C);
25900}
25901
25902impl<'a> VreducephMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
25903    fn vreduceph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
25904        self.emit(VREDUCEPH512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25905    }
25906}
25907
25908/// `VREDUCEPH_MASKZ`.
25909///
25910/// Supported operand variants:
25911///
25912/// ```text
25913/// +---+---------------+
25914/// | # | Operands      |
25915/// +---+---------------+
25916/// | 1 | Xmm, Mem, Imm |
25917/// | 2 | Xmm, Xmm, Imm |
25918/// | 3 | Ymm, Mem, Imm |
25919/// | 4 | Ymm, Ymm, Imm |
25920/// | 5 | Zmm, Mem, Imm |
25921/// | 6 | Zmm, Zmm, Imm |
25922/// +---+---------------+
25923/// ```
25924pub trait VreducephMaskzEmitter<A, B, C> {
25925    fn vreduceph_maskz(&mut self, op0: A, op1: B, op2: C);
25926}
25927
25928impl<'a> VreducephMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
25929    fn vreduceph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
25930        self.emit(VREDUCEPH128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25931    }
25932}
25933
25934impl<'a> VreducephMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
25935    fn vreduceph_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
25936        self.emit(VREDUCEPH128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25937    }
25938}
25939
25940impl<'a> VreducephMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
25941    fn vreduceph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
25942        self.emit(VREDUCEPH256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25943    }
25944}
25945
25946impl<'a> VreducephMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
25947    fn vreduceph_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
25948        self.emit(VREDUCEPH256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25949    }
25950}
25951
25952impl<'a> VreducephMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
25953    fn vreduceph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
25954        self.emit(VREDUCEPH512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25955    }
25956}
25957
25958impl<'a> VreducephMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
25959    fn vreduceph_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
25960        self.emit(VREDUCEPH512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25961    }
25962}
25963
25964/// `VREDUCEPH_MASKZ_SAE`.
25965///
25966/// Supported operand variants:
25967///
25968/// ```text
25969/// +---+---------------+
25970/// | # | Operands      |
25971/// +---+---------------+
25972/// | 1 | Zmm, Zmm, Imm |
25973/// +---+---------------+
25974/// ```
25975pub trait VreducephMaskzSaeEmitter<A, B, C> {
25976    fn vreduceph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
25977}
25978
25979impl<'a> VreducephMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
25980    fn vreduceph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
25981        self.emit(VREDUCEPH512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
25982    }
25983}
25984
25985/// `VREDUCEPH_SAE`.
25986///
25987/// Supported operand variants:
25988///
25989/// ```text
25990/// +---+---------------+
25991/// | # | Operands      |
25992/// +---+---------------+
25993/// | 1 | Zmm, Zmm, Imm |
25994/// +---+---------------+
25995/// ```
25996pub trait VreducephSaeEmitter<A, B, C> {
25997    fn vreduceph_sae(&mut self, op0: A, op1: B, op2: C);
25998}
25999
26000impl<'a> VreducephSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26001    fn vreduceph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26002        self.emit(VREDUCEPH512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26003    }
26004}
26005
26006/// `VREDUCESH`.
26007///
26008/// Supported operand variants:
26009///
26010/// ```text
26011/// +---+--------------------+
26012/// | # | Operands           |
26013/// +---+--------------------+
26014/// | 1 | Xmm, Xmm, Mem, Imm |
26015/// | 2 | Xmm, Xmm, Xmm, Imm |
26016/// +---+--------------------+
26017/// ```
26018pub trait VreduceshEmitter<A, B, C, D> {
26019    fn vreducesh(&mut self, op0: A, op1: B, op2: C, op3: D);
26020}
26021
26022impl<'a> VreduceshEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26023    fn vreducesh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26024        self.emit(VREDUCESHRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26025    }
26026}
26027
26028impl<'a> VreduceshEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
26029    fn vreducesh(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
26030        self.emit(VREDUCESHRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26031    }
26032}
26033
26034/// `VREDUCESH_MASK`.
26035///
26036/// Supported operand variants:
26037///
26038/// ```text
26039/// +---+--------------------+
26040/// | # | Operands           |
26041/// +---+--------------------+
26042/// | 1 | Xmm, Xmm, Mem, Imm |
26043/// | 2 | Xmm, Xmm, Xmm, Imm |
26044/// +---+--------------------+
26045/// ```
26046pub trait VreduceshMaskEmitter<A, B, C, D> {
26047    fn vreducesh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
26048}
26049
26050impl<'a> VreduceshMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26051    fn vreducesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26052        self.emit(VREDUCESHRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26053    }
26054}
26055
26056impl<'a> VreduceshMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
26057    fn vreducesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
26058        self.emit(VREDUCESHRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26059    }
26060}
26061
26062/// `VREDUCESH_MASK_SAE`.
26063///
26064/// Supported operand variants:
26065///
26066/// ```text
26067/// +---+--------------------+
26068/// | # | Operands           |
26069/// +---+--------------------+
26070/// | 1 | Xmm, Xmm, Xmm, Imm |
26071/// +---+--------------------+
26072/// ```
26073pub trait VreduceshMaskSaeEmitter<A, B, C, D> {
26074    fn vreducesh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
26075}
26076
26077impl<'a> VreduceshMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26078    fn vreducesh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26079        self.emit(VREDUCESHRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26080    }
26081}
26082
26083/// `VREDUCESH_MASKZ`.
26084///
26085/// Supported operand variants:
26086///
26087/// ```text
26088/// +---+--------------------+
26089/// | # | Operands           |
26090/// +---+--------------------+
26091/// | 1 | Xmm, Xmm, Mem, Imm |
26092/// | 2 | Xmm, Xmm, Xmm, Imm |
26093/// +---+--------------------+
26094/// ```
26095pub trait VreduceshMaskzEmitter<A, B, C, D> {
26096    fn vreducesh_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
26097}
26098
26099impl<'a> VreduceshMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26100    fn vreducesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26101        self.emit(VREDUCESHRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26102    }
26103}
26104
26105impl<'a> VreduceshMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
26106    fn vreducesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
26107        self.emit(VREDUCESHRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26108    }
26109}
26110
26111/// `VREDUCESH_MASKZ_SAE`.
26112///
26113/// Supported operand variants:
26114///
26115/// ```text
26116/// +---+--------------------+
26117/// | # | Operands           |
26118/// +---+--------------------+
26119/// | 1 | Xmm, Xmm, Xmm, Imm |
26120/// +---+--------------------+
26121/// ```
26122pub trait VreduceshMaskzSaeEmitter<A, B, C, D> {
26123    fn vreducesh_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
26124}
26125
26126impl<'a> VreduceshMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26127    fn vreducesh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26128        self.emit(VREDUCESHRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26129    }
26130}
26131
26132/// `VREDUCESH_SAE`.
26133///
26134/// Supported operand variants:
26135///
26136/// ```text
26137/// +---+--------------------+
26138/// | # | Operands           |
26139/// +---+--------------------+
26140/// | 1 | Xmm, Xmm, Xmm, Imm |
26141/// +---+--------------------+
26142/// ```
26143pub trait VreduceshSaeEmitter<A, B, C, D> {
26144    fn vreducesh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
26145}
26146
26147impl<'a> VreduceshSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26148    fn vreducesh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26149        self.emit(VREDUCESHRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26150    }
26151}
26152
26153/// `VRNDSCALEPH`.
26154///
26155/// Supported operand variants:
26156///
26157/// ```text
26158/// +---+---------------+
26159/// | # | Operands      |
26160/// +---+---------------+
26161/// | 1 | Xmm, Mem, Imm |
26162/// | 2 | Xmm, Xmm, Imm |
26163/// | 3 | Ymm, Mem, Imm |
26164/// | 4 | Ymm, Ymm, Imm |
26165/// | 5 | Zmm, Mem, Imm |
26166/// | 6 | Zmm, Zmm, Imm |
26167/// +---+---------------+
26168/// ```
26169pub trait VrndscalephEmitter<A, B, C> {
26170    fn vrndscaleph(&mut self, op0: A, op1: B, op2: C);
26171}
26172
26173impl<'a> VrndscalephEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
26174    fn vrndscaleph(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
26175        self.emit(VRNDSCALEPH128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26176    }
26177}
26178
26179impl<'a> VrndscalephEmitter<Xmm, Mem, Imm> for Assembler<'a> {
26180    fn vrndscaleph(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
26181        self.emit(VRNDSCALEPH128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26182    }
26183}
26184
26185impl<'a> VrndscalephEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
26186    fn vrndscaleph(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
26187        self.emit(VRNDSCALEPH256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26188    }
26189}
26190
26191impl<'a> VrndscalephEmitter<Ymm, Mem, Imm> for Assembler<'a> {
26192    fn vrndscaleph(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
26193        self.emit(VRNDSCALEPH256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26194    }
26195}
26196
26197impl<'a> VrndscalephEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26198    fn vrndscaleph(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26199        self.emit(VRNDSCALEPH512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26200    }
26201}
26202
26203impl<'a> VrndscalephEmitter<Zmm, Mem, Imm> for Assembler<'a> {
26204    fn vrndscaleph(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
26205        self.emit(VRNDSCALEPH512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26206    }
26207}
26208
26209/// `VRNDSCALEPH_MASK`.
26210///
26211/// Supported operand variants:
26212///
26213/// ```text
26214/// +---+---------------+
26215/// | # | Operands      |
26216/// +---+---------------+
26217/// | 1 | Xmm, Mem, Imm |
26218/// | 2 | Xmm, Xmm, Imm |
26219/// | 3 | Ymm, Mem, Imm |
26220/// | 4 | Ymm, Ymm, Imm |
26221/// | 5 | Zmm, Mem, Imm |
26222/// | 6 | Zmm, Zmm, Imm |
26223/// +---+---------------+
26224/// ```
26225pub trait VrndscalephMaskEmitter<A, B, C> {
26226    fn vrndscaleph_mask(&mut self, op0: A, op1: B, op2: C);
26227}
26228
26229impl<'a> VrndscalephMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
26230    fn vrndscaleph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
26231        self.emit(VRNDSCALEPH128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26232    }
26233}
26234
26235impl<'a> VrndscalephMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
26236    fn vrndscaleph_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
26237        self.emit(VRNDSCALEPH128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26238    }
26239}
26240
26241impl<'a> VrndscalephMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
26242    fn vrndscaleph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
26243        self.emit(VRNDSCALEPH256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26244    }
26245}
26246
26247impl<'a> VrndscalephMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
26248    fn vrndscaleph_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
26249        self.emit(VRNDSCALEPH256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26250    }
26251}
26252
26253impl<'a> VrndscalephMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26254    fn vrndscaleph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26255        self.emit(VRNDSCALEPH512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26256    }
26257}
26258
26259impl<'a> VrndscalephMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
26260    fn vrndscaleph_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
26261        self.emit(VRNDSCALEPH512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26262    }
26263}
26264
26265/// `VRNDSCALEPH_MASK_SAE`.
26266///
26267/// Supported operand variants:
26268///
26269/// ```text
26270/// +---+---------------+
26271/// | # | Operands      |
26272/// +---+---------------+
26273/// | 1 | Zmm, Zmm, Imm |
26274/// +---+---------------+
26275/// ```
26276pub trait VrndscalephMaskSaeEmitter<A, B, C> {
26277    fn vrndscaleph_mask_sae(&mut self, op0: A, op1: B, op2: C);
26278}
26279
26280impl<'a> VrndscalephMaskSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26281    fn vrndscaleph_mask_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26282        self.emit(VRNDSCALEPH512RRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26283    }
26284}
26285
26286/// `VRNDSCALEPH_MASKZ`.
26287///
26288/// Supported operand variants:
26289///
26290/// ```text
26291/// +---+---------------+
26292/// | # | Operands      |
26293/// +---+---------------+
26294/// | 1 | Xmm, Mem, Imm |
26295/// | 2 | Xmm, Xmm, Imm |
26296/// | 3 | Ymm, Mem, Imm |
26297/// | 4 | Ymm, Ymm, Imm |
26298/// | 5 | Zmm, Mem, Imm |
26299/// | 6 | Zmm, Zmm, Imm |
26300/// +---+---------------+
26301/// ```
26302pub trait VrndscalephMaskzEmitter<A, B, C> {
26303    fn vrndscaleph_maskz(&mut self, op0: A, op1: B, op2: C);
26304}
26305
26306impl<'a> VrndscalephMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
26307    fn vrndscaleph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
26308        self.emit(VRNDSCALEPH128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26309    }
26310}
26311
26312impl<'a> VrndscalephMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
26313    fn vrndscaleph_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
26314        self.emit(VRNDSCALEPH128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26315    }
26316}
26317
26318impl<'a> VrndscalephMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
26319    fn vrndscaleph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
26320        self.emit(VRNDSCALEPH256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26321    }
26322}
26323
26324impl<'a> VrndscalephMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
26325    fn vrndscaleph_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
26326        self.emit(VRNDSCALEPH256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26327    }
26328}
26329
26330impl<'a> VrndscalephMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26331    fn vrndscaleph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26332        self.emit(VRNDSCALEPH512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26333    }
26334}
26335
26336impl<'a> VrndscalephMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
26337    fn vrndscaleph_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
26338        self.emit(VRNDSCALEPH512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26339    }
26340}
26341
26342/// `VRNDSCALEPH_MASKZ_SAE`.
26343///
26344/// Supported operand variants:
26345///
26346/// ```text
26347/// +---+---------------+
26348/// | # | Operands      |
26349/// +---+---------------+
26350/// | 1 | Zmm, Zmm, Imm |
26351/// +---+---------------+
26352/// ```
26353pub trait VrndscalephMaskzSaeEmitter<A, B, C> {
26354    fn vrndscaleph_maskz_sae(&mut self, op0: A, op1: B, op2: C);
26355}
26356
26357impl<'a> VrndscalephMaskzSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26358    fn vrndscaleph_maskz_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26359        self.emit(VRNDSCALEPH512RRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26360    }
26361}
26362
26363/// `VRNDSCALEPH_SAE`.
26364///
26365/// Supported operand variants:
26366///
26367/// ```text
26368/// +---+---------------+
26369/// | # | Operands      |
26370/// +---+---------------+
26371/// | 1 | Zmm, Zmm, Imm |
26372/// +---+---------------+
26373/// ```
26374pub trait VrndscalephSaeEmitter<A, B, C> {
26375    fn vrndscaleph_sae(&mut self, op0: A, op1: B, op2: C);
26376}
26377
26378impl<'a> VrndscalephSaeEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
26379    fn vrndscaleph_sae(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
26380        self.emit(VRNDSCALEPH512RRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26381    }
26382}
26383
26384/// `VRNDSCALESH`.
26385///
26386/// Supported operand variants:
26387///
26388/// ```text
26389/// +---+--------------------+
26390/// | # | Operands           |
26391/// +---+--------------------+
26392/// | 1 | Xmm, Xmm, Mem, Imm |
26393/// | 2 | Xmm, Xmm, Xmm, Imm |
26394/// +---+--------------------+
26395/// ```
26396pub trait VrndscaleshEmitter<A, B, C, D> {
26397    fn vrndscalesh(&mut self, op0: A, op1: B, op2: C, op3: D);
26398}
26399
26400impl<'a> VrndscaleshEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26401    fn vrndscalesh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26402        self.emit(VRNDSCALESHRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26403    }
26404}
26405
26406impl<'a> VrndscaleshEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
26407    fn vrndscalesh(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
26408        self.emit(VRNDSCALESHRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26409    }
26410}
26411
26412/// `VRNDSCALESH_MASK`.
26413///
26414/// Supported operand variants:
26415///
26416/// ```text
26417/// +---+--------------------+
26418/// | # | Operands           |
26419/// +---+--------------------+
26420/// | 1 | Xmm, Xmm, Mem, Imm |
26421/// | 2 | Xmm, Xmm, Xmm, Imm |
26422/// +---+--------------------+
26423/// ```
26424pub trait VrndscaleshMaskEmitter<A, B, C, D> {
26425    fn vrndscalesh_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
26426}
26427
26428impl<'a> VrndscaleshMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26429    fn vrndscalesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26430        self.emit(VRNDSCALESHRRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26431    }
26432}
26433
26434impl<'a> VrndscaleshMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
26435    fn vrndscalesh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
26436        self.emit(VRNDSCALESHRRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26437    }
26438}
26439
26440/// `VRNDSCALESH_MASK_SAE`.
26441///
26442/// Supported operand variants:
26443///
26444/// ```text
26445/// +---+--------------------+
26446/// | # | Operands           |
26447/// +---+--------------------+
26448/// | 1 | Xmm, Xmm, Xmm, Imm |
26449/// +---+--------------------+
26450/// ```
26451pub trait VrndscaleshMaskSaeEmitter<A, B, C, D> {
26452    fn vrndscalesh_mask_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
26453}
26454
26455impl<'a> VrndscaleshMaskSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26456    fn vrndscalesh_mask_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26457        self.emit(VRNDSCALESHRRRI_MASK_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26458    }
26459}
26460
26461/// `VRNDSCALESH_MASKZ`.
26462///
26463/// Supported operand variants:
26464///
26465/// ```text
26466/// +---+--------------------+
26467/// | # | Operands           |
26468/// +---+--------------------+
26469/// | 1 | Xmm, Xmm, Mem, Imm |
26470/// | 2 | Xmm, Xmm, Xmm, Imm |
26471/// +---+--------------------+
26472/// ```
26473pub trait VrndscaleshMaskzEmitter<A, B, C, D> {
26474    fn vrndscalesh_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
26475}
26476
26477impl<'a> VrndscaleshMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26478    fn vrndscalesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26479        self.emit(VRNDSCALESHRRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26480    }
26481}
26482
26483impl<'a> VrndscaleshMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
26484    fn vrndscalesh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
26485        self.emit(VRNDSCALESHRRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26486    }
26487}
26488
26489/// `VRNDSCALESH_MASKZ_SAE`.
26490///
26491/// Supported operand variants:
26492///
26493/// ```text
26494/// +---+--------------------+
26495/// | # | Operands           |
26496/// +---+--------------------+
26497/// | 1 | Xmm, Xmm, Xmm, Imm |
26498/// +---+--------------------+
26499/// ```
26500pub trait VrndscaleshMaskzSaeEmitter<A, B, C, D> {
26501    fn vrndscalesh_maskz_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
26502}
26503
26504impl<'a> VrndscaleshMaskzSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26505    fn vrndscalesh_maskz_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26506        self.emit(VRNDSCALESHRRRI_MASKZ_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26507    }
26508}
26509
26510/// `VRNDSCALESH_SAE`.
26511///
26512/// Supported operand variants:
26513///
26514/// ```text
26515/// +---+--------------------+
26516/// | # | Operands           |
26517/// +---+--------------------+
26518/// | 1 | Xmm, Xmm, Xmm, Imm |
26519/// +---+--------------------+
26520/// ```
26521pub trait VrndscaleshSaeEmitter<A, B, C, D> {
26522    fn vrndscalesh_sae(&mut self, op0: A, op1: B, op2: C, op3: D);
26523}
26524
26525impl<'a> VrndscaleshSaeEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
26526    fn vrndscalesh_sae(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
26527        self.emit(VRNDSCALESHRRRI_SAE, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
26528    }
26529}
26530
26531/// `VRSQRTPH`.
26532///
26533/// Supported operand variants:
26534///
26535/// ```text
26536/// +---+----------+
26537/// | # | Operands |
26538/// +---+----------+
26539/// | 1 | Xmm, Mem |
26540/// | 2 | Xmm, Xmm |
26541/// | 3 | Ymm, Mem |
26542/// | 4 | Ymm, Ymm |
26543/// | 5 | Zmm, Mem |
26544/// | 6 | Zmm, Zmm |
26545/// +---+----------+
26546/// ```
26547pub trait VrsqrtphEmitter<A, B> {
26548    fn vrsqrtph(&mut self, op0: A, op1: B);
26549}
26550
26551impl<'a> VrsqrtphEmitter<Xmm, Xmm> for Assembler<'a> {
26552    fn vrsqrtph(&mut self, op0: Xmm, op1: Xmm) {
26553        self.emit(VRSQRTPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26554    }
26555}
26556
26557impl<'a> VrsqrtphEmitter<Xmm, Mem> for Assembler<'a> {
26558    fn vrsqrtph(&mut self, op0: Xmm, op1: Mem) {
26559        self.emit(VRSQRTPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26560    }
26561}
26562
26563impl<'a> VrsqrtphEmitter<Ymm, Ymm> for Assembler<'a> {
26564    fn vrsqrtph(&mut self, op0: Ymm, op1: Ymm) {
26565        self.emit(VRSQRTPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26566    }
26567}
26568
26569impl<'a> VrsqrtphEmitter<Ymm, Mem> for Assembler<'a> {
26570    fn vrsqrtph(&mut self, op0: Ymm, op1: Mem) {
26571        self.emit(VRSQRTPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26572    }
26573}
26574
26575impl<'a> VrsqrtphEmitter<Zmm, Zmm> for Assembler<'a> {
26576    fn vrsqrtph(&mut self, op0: Zmm, op1: Zmm) {
26577        self.emit(VRSQRTPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26578    }
26579}
26580
26581impl<'a> VrsqrtphEmitter<Zmm, Mem> for Assembler<'a> {
26582    fn vrsqrtph(&mut self, op0: Zmm, op1: Mem) {
26583        self.emit(VRSQRTPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26584    }
26585}
26586
26587/// `VRSQRTPH_MASK`.
26588///
26589/// Supported operand variants:
26590///
26591/// ```text
26592/// +---+----------+
26593/// | # | Operands |
26594/// +---+----------+
26595/// | 1 | Xmm, Mem |
26596/// | 2 | Xmm, Xmm |
26597/// | 3 | Ymm, Mem |
26598/// | 4 | Ymm, Ymm |
26599/// | 5 | Zmm, Mem |
26600/// | 6 | Zmm, Zmm |
26601/// +---+----------+
26602/// ```
26603pub trait VrsqrtphMaskEmitter<A, B> {
26604    fn vrsqrtph_mask(&mut self, op0: A, op1: B);
26605}
26606
26607impl<'a> VrsqrtphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
26608    fn vrsqrtph_mask(&mut self, op0: Xmm, op1: Xmm) {
26609        self.emit(VRSQRTPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26610    }
26611}
26612
26613impl<'a> VrsqrtphMaskEmitter<Xmm, Mem> for Assembler<'a> {
26614    fn vrsqrtph_mask(&mut self, op0: Xmm, op1: Mem) {
26615        self.emit(VRSQRTPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26616    }
26617}
26618
26619impl<'a> VrsqrtphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
26620    fn vrsqrtph_mask(&mut self, op0: Ymm, op1: Ymm) {
26621        self.emit(VRSQRTPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26622    }
26623}
26624
26625impl<'a> VrsqrtphMaskEmitter<Ymm, Mem> for Assembler<'a> {
26626    fn vrsqrtph_mask(&mut self, op0: Ymm, op1: Mem) {
26627        self.emit(VRSQRTPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26628    }
26629}
26630
26631impl<'a> VrsqrtphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
26632    fn vrsqrtph_mask(&mut self, op0: Zmm, op1: Zmm) {
26633        self.emit(VRSQRTPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26634    }
26635}
26636
26637impl<'a> VrsqrtphMaskEmitter<Zmm, Mem> for Assembler<'a> {
26638    fn vrsqrtph_mask(&mut self, op0: Zmm, op1: Mem) {
26639        self.emit(VRSQRTPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26640    }
26641}
26642
26643/// `VRSQRTPH_MASKZ`.
26644///
26645/// Supported operand variants:
26646///
26647/// ```text
26648/// +---+----------+
26649/// | # | Operands |
26650/// +---+----------+
26651/// | 1 | Xmm, Mem |
26652/// | 2 | Xmm, Xmm |
26653/// | 3 | Ymm, Mem |
26654/// | 4 | Ymm, Ymm |
26655/// | 5 | Zmm, Mem |
26656/// | 6 | Zmm, Zmm |
26657/// +---+----------+
26658/// ```
26659pub trait VrsqrtphMaskzEmitter<A, B> {
26660    fn vrsqrtph_maskz(&mut self, op0: A, op1: B);
26661}
26662
26663impl<'a> VrsqrtphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
26664    fn vrsqrtph_maskz(&mut self, op0: Xmm, op1: Xmm) {
26665        self.emit(VRSQRTPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26666    }
26667}
26668
26669impl<'a> VrsqrtphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
26670    fn vrsqrtph_maskz(&mut self, op0: Xmm, op1: Mem) {
26671        self.emit(VRSQRTPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26672    }
26673}
26674
26675impl<'a> VrsqrtphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
26676    fn vrsqrtph_maskz(&mut self, op0: Ymm, op1: Ymm) {
26677        self.emit(VRSQRTPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26678    }
26679}
26680
26681impl<'a> VrsqrtphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
26682    fn vrsqrtph_maskz(&mut self, op0: Ymm, op1: Mem) {
26683        self.emit(VRSQRTPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26684    }
26685}
26686
26687impl<'a> VrsqrtphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
26688    fn vrsqrtph_maskz(&mut self, op0: Zmm, op1: Zmm) {
26689        self.emit(VRSQRTPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26690    }
26691}
26692
26693impl<'a> VrsqrtphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
26694    fn vrsqrtph_maskz(&mut self, op0: Zmm, op1: Mem) {
26695        self.emit(VRSQRTPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
26696    }
26697}
26698
26699/// `VRSQRTSH`.
26700///
26701/// Supported operand variants:
26702///
26703/// ```text
26704/// +---+---------------+
26705/// | # | Operands      |
26706/// +---+---------------+
26707/// | 1 | Xmm, Xmm, Mem |
26708/// | 2 | Xmm, Xmm, Xmm |
26709/// +---+---------------+
26710/// ```
26711pub trait VrsqrtshEmitter<A, B, C> {
26712    fn vrsqrtsh(&mut self, op0: A, op1: B, op2: C);
26713}
26714
26715impl<'a> VrsqrtshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
26716    fn vrsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
26717        self.emit(VRSQRTSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26718    }
26719}
26720
26721impl<'a> VrsqrtshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
26722    fn vrsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
26723        self.emit(VRSQRTSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26724    }
26725}
26726
26727/// `VRSQRTSH_MASK`.
26728///
26729/// Supported operand variants:
26730///
26731/// ```text
26732/// +---+---------------+
26733/// | # | Operands      |
26734/// +---+---------------+
26735/// | 1 | Xmm, Xmm, Mem |
26736/// | 2 | Xmm, Xmm, Xmm |
26737/// +---+---------------+
26738/// ```
26739pub trait VrsqrtshMaskEmitter<A, B, C> {
26740    fn vrsqrtsh_mask(&mut self, op0: A, op1: B, op2: C);
26741}
26742
26743impl<'a> VrsqrtshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
26744    fn vrsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
26745        self.emit(VRSQRTSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26746    }
26747}
26748
26749impl<'a> VrsqrtshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
26750    fn vrsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
26751        self.emit(VRSQRTSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26752    }
26753}
26754
26755/// `VRSQRTSH_MASKZ`.
26756///
26757/// Supported operand variants:
26758///
26759/// ```text
26760/// +---+---------------+
26761/// | # | Operands      |
26762/// +---+---------------+
26763/// | 1 | Xmm, Xmm, Mem |
26764/// | 2 | Xmm, Xmm, Xmm |
26765/// +---+---------------+
26766/// ```
26767pub trait VrsqrtshMaskzEmitter<A, B, C> {
26768    fn vrsqrtsh_maskz(&mut self, op0: A, op1: B, op2: C);
26769}
26770
26771impl<'a> VrsqrtshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
26772    fn vrsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
26773        self.emit(VRSQRTSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26774    }
26775}
26776
26777impl<'a> VrsqrtshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
26778    fn vrsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
26779        self.emit(VRSQRTSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26780    }
26781}
26782
26783/// `VSCALEFPH`.
26784///
26785/// Supported operand variants:
26786///
26787/// ```text
26788/// +---+---------------+
26789/// | # | Operands      |
26790/// +---+---------------+
26791/// | 1 | Xmm, Xmm, Mem |
26792/// | 2 | Xmm, Xmm, Xmm |
26793/// | 3 | Ymm, Ymm, Mem |
26794/// | 4 | Ymm, Ymm, Ymm |
26795/// | 5 | Zmm, Zmm, Mem |
26796/// | 6 | Zmm, Zmm, Zmm |
26797/// +---+---------------+
26798/// ```
26799pub trait VscalefphEmitter<A, B, C> {
26800    fn vscalefph(&mut self, op0: A, op1: B, op2: C);
26801}
26802
26803impl<'a> VscalefphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
26804    fn vscalefph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
26805        self.emit(VSCALEFPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26806    }
26807}
26808
26809impl<'a> VscalefphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
26810    fn vscalefph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
26811        self.emit(VSCALEFPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26812    }
26813}
26814
26815impl<'a> VscalefphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
26816    fn vscalefph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
26817        self.emit(VSCALEFPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26818    }
26819}
26820
26821impl<'a> VscalefphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
26822    fn vscalefph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
26823        self.emit(VSCALEFPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26824    }
26825}
26826
26827impl<'a> VscalefphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
26828    fn vscalefph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
26829        self.emit(VSCALEFPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26830    }
26831}
26832
26833impl<'a> VscalefphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
26834    fn vscalefph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
26835        self.emit(VSCALEFPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26836    }
26837}
26838
26839/// `VSCALEFPH_ER`.
26840///
26841/// Supported operand variants:
26842///
26843/// ```text
26844/// +---+---------------+
26845/// | # | Operands      |
26846/// +---+---------------+
26847/// | 1 | Zmm, Zmm, Zmm |
26848/// +---+---------------+
26849/// ```
26850pub trait VscalefphErEmitter<A, B, C> {
26851    fn vscalefph_er(&mut self, op0: A, op1: B, op2: C);
26852}
26853
26854impl<'a> VscalefphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
26855    fn vscalefph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
26856        self.emit(VSCALEFPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26857    }
26858}
26859
26860/// `VSCALEFPH_MASK`.
26861///
26862/// Supported operand variants:
26863///
26864/// ```text
26865/// +---+---------------+
26866/// | # | Operands      |
26867/// +---+---------------+
26868/// | 1 | Xmm, Xmm, Mem |
26869/// | 2 | Xmm, Xmm, Xmm |
26870/// | 3 | Ymm, Ymm, Mem |
26871/// | 4 | Ymm, Ymm, Ymm |
26872/// | 5 | Zmm, Zmm, Mem |
26873/// | 6 | Zmm, Zmm, Zmm |
26874/// +---+---------------+
26875/// ```
26876pub trait VscalefphMaskEmitter<A, B, C> {
26877    fn vscalefph_mask(&mut self, op0: A, op1: B, op2: C);
26878}
26879
26880impl<'a> VscalefphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
26881    fn vscalefph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
26882        self.emit(VSCALEFPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26883    }
26884}
26885
26886impl<'a> VscalefphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
26887    fn vscalefph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
26888        self.emit(VSCALEFPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26889    }
26890}
26891
26892impl<'a> VscalefphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
26893    fn vscalefph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
26894        self.emit(VSCALEFPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26895    }
26896}
26897
26898impl<'a> VscalefphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
26899    fn vscalefph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
26900        self.emit(VSCALEFPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26901    }
26902}
26903
26904impl<'a> VscalefphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
26905    fn vscalefph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
26906        self.emit(VSCALEFPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26907    }
26908}
26909
26910impl<'a> VscalefphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
26911    fn vscalefph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
26912        self.emit(VSCALEFPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26913    }
26914}
26915
26916/// `VSCALEFPH_MASK_ER`.
26917///
26918/// Supported operand variants:
26919///
26920/// ```text
26921/// +---+---------------+
26922/// | # | Operands      |
26923/// +---+---------------+
26924/// | 1 | Zmm, Zmm, Zmm |
26925/// +---+---------------+
26926/// ```
26927pub trait VscalefphMaskErEmitter<A, B, C> {
26928    fn vscalefph_mask_er(&mut self, op0: A, op1: B, op2: C);
26929}
26930
26931impl<'a> VscalefphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
26932    fn vscalefph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
26933        self.emit(VSCALEFPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26934    }
26935}
26936
26937/// `VSCALEFPH_MASKZ`.
26938///
26939/// Supported operand variants:
26940///
26941/// ```text
26942/// +---+---------------+
26943/// | # | Operands      |
26944/// +---+---------------+
26945/// | 1 | Xmm, Xmm, Mem |
26946/// | 2 | Xmm, Xmm, Xmm |
26947/// | 3 | Ymm, Ymm, Mem |
26948/// | 4 | Ymm, Ymm, Ymm |
26949/// | 5 | Zmm, Zmm, Mem |
26950/// | 6 | Zmm, Zmm, Zmm |
26951/// +---+---------------+
26952/// ```
26953pub trait VscalefphMaskzEmitter<A, B, C> {
26954    fn vscalefph_maskz(&mut self, op0: A, op1: B, op2: C);
26955}
26956
26957impl<'a> VscalefphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
26958    fn vscalefph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
26959        self.emit(VSCALEFPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26960    }
26961}
26962
26963impl<'a> VscalefphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
26964    fn vscalefph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
26965        self.emit(VSCALEFPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26966    }
26967}
26968
26969impl<'a> VscalefphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
26970    fn vscalefph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
26971        self.emit(VSCALEFPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26972    }
26973}
26974
26975impl<'a> VscalefphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
26976    fn vscalefph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
26977        self.emit(VSCALEFPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26978    }
26979}
26980
26981impl<'a> VscalefphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
26982    fn vscalefph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
26983        self.emit(VSCALEFPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26984    }
26985}
26986
26987impl<'a> VscalefphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
26988    fn vscalefph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
26989        self.emit(VSCALEFPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
26990    }
26991}
26992
26993/// `VSCALEFPH_MASKZ_ER`.
26994///
26995/// Supported operand variants:
26996///
26997/// ```text
26998/// +---+---------------+
26999/// | # | Operands      |
27000/// +---+---------------+
27001/// | 1 | Zmm, Zmm, Zmm |
27002/// +---+---------------+
27003/// ```
27004pub trait VscalefphMaskzErEmitter<A, B, C> {
27005    fn vscalefph_maskz_er(&mut self, op0: A, op1: B, op2: C);
27006}
27007
27008impl<'a> VscalefphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27009    fn vscalefph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27010        self.emit(VSCALEFPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27011    }
27012}
27013
27014/// `VSCALEFSH`.
27015///
27016/// Supported operand variants:
27017///
27018/// ```text
27019/// +---+---------------+
27020/// | # | Operands      |
27021/// +---+---------------+
27022/// | 1 | Xmm, Xmm, Mem |
27023/// | 2 | Xmm, Xmm, Xmm |
27024/// +---+---------------+
27025/// ```
27026pub trait VscalefshEmitter<A, B, C> {
27027    fn vscalefsh(&mut self, op0: A, op1: B, op2: C);
27028}
27029
27030impl<'a> VscalefshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27031    fn vscalefsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27032        self.emit(VSCALEFSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27033    }
27034}
27035
27036impl<'a> VscalefshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27037    fn vscalefsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27038        self.emit(VSCALEFSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27039    }
27040}
27041
27042/// `VSCALEFSH_ER`.
27043///
27044/// Supported operand variants:
27045///
27046/// ```text
27047/// +---+---------------+
27048/// | # | Operands      |
27049/// +---+---------------+
27050/// | 1 | Xmm, Xmm, Xmm |
27051/// +---+---------------+
27052/// ```
27053pub trait VscalefshErEmitter<A, B, C> {
27054    fn vscalefsh_er(&mut self, op0: A, op1: B, op2: C);
27055}
27056
27057impl<'a> VscalefshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27058    fn vscalefsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27059        self.emit(VSCALEFSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27060    }
27061}
27062
27063/// `VSCALEFSH_MASK`.
27064///
27065/// Supported operand variants:
27066///
27067/// ```text
27068/// +---+---------------+
27069/// | # | Operands      |
27070/// +---+---------------+
27071/// | 1 | Xmm, Xmm, Mem |
27072/// | 2 | Xmm, Xmm, Xmm |
27073/// +---+---------------+
27074/// ```
27075pub trait VscalefshMaskEmitter<A, B, C> {
27076    fn vscalefsh_mask(&mut self, op0: A, op1: B, op2: C);
27077}
27078
27079impl<'a> VscalefshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27080    fn vscalefsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27081        self.emit(VSCALEFSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27082    }
27083}
27084
27085impl<'a> VscalefshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27086    fn vscalefsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27087        self.emit(VSCALEFSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27088    }
27089}
27090
27091/// `VSCALEFSH_MASK_ER`.
27092///
27093/// Supported operand variants:
27094///
27095/// ```text
27096/// +---+---------------+
27097/// | # | Operands      |
27098/// +---+---------------+
27099/// | 1 | Xmm, Xmm, Xmm |
27100/// +---+---------------+
27101/// ```
27102pub trait VscalefshMaskErEmitter<A, B, C> {
27103    fn vscalefsh_mask_er(&mut self, op0: A, op1: B, op2: C);
27104}
27105
27106impl<'a> VscalefshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27107    fn vscalefsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27108        self.emit(VSCALEFSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27109    }
27110}
27111
27112/// `VSCALEFSH_MASKZ`.
27113///
27114/// Supported operand variants:
27115///
27116/// ```text
27117/// +---+---------------+
27118/// | # | Operands      |
27119/// +---+---------------+
27120/// | 1 | Xmm, Xmm, Mem |
27121/// | 2 | Xmm, Xmm, Xmm |
27122/// +---+---------------+
27123/// ```
27124pub trait VscalefshMaskzEmitter<A, B, C> {
27125    fn vscalefsh_maskz(&mut self, op0: A, op1: B, op2: C);
27126}
27127
27128impl<'a> VscalefshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27129    fn vscalefsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27130        self.emit(VSCALEFSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27131    }
27132}
27133
27134impl<'a> VscalefshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27135    fn vscalefsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27136        self.emit(VSCALEFSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27137    }
27138}
27139
27140/// `VSCALEFSH_MASKZ_ER`.
27141///
27142/// Supported operand variants:
27143///
27144/// ```text
27145/// +---+---------------+
27146/// | # | Operands      |
27147/// +---+---------------+
27148/// | 1 | Xmm, Xmm, Xmm |
27149/// +---+---------------+
27150/// ```
27151pub trait VscalefshMaskzErEmitter<A, B, C> {
27152    fn vscalefsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
27153}
27154
27155impl<'a> VscalefshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27156    fn vscalefsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27157        self.emit(VSCALEFSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27158    }
27159}
27160
27161/// `VSM4KEY4`.
27162///
27163/// Supported operand variants:
27164///
27165/// ```text
27166/// +---+---------------+
27167/// | # | Operands      |
27168/// +---+---------------+
27169/// | 1 | Xmm, Xmm, Mem |
27170/// | 2 | Xmm, Xmm, Xmm |
27171/// | 3 | Ymm, Ymm, Mem |
27172/// | 4 | Ymm, Ymm, Ymm |
27173/// | 5 | Zmm, Zmm, Mem |
27174/// | 6 | Zmm, Zmm, Zmm |
27175/// +---+---------------+
27176/// ```
27177pub trait Vsm4key4Emitter<A, B, C> {
27178    fn vsm4key4(&mut self, op0: A, op1: B, op2: C);
27179}
27180
27181impl<'a> Vsm4key4Emitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27182    fn vsm4key4(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27183        self.emit(VSM4KEY4_128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27184    }
27185}
27186
27187impl<'a> Vsm4key4Emitter<Xmm, Xmm, Mem> for Assembler<'a> {
27188    fn vsm4key4(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27189        self.emit(VSM4KEY4_128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27190    }
27191}
27192
27193impl<'a> Vsm4key4Emitter<Ymm, Ymm, Ymm> for Assembler<'a> {
27194    fn vsm4key4(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
27195        self.emit(VSM4KEY4_256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27196    }
27197}
27198
27199impl<'a> Vsm4key4Emitter<Ymm, Ymm, Mem> for Assembler<'a> {
27200    fn vsm4key4(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
27201        self.emit(VSM4KEY4_256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27202    }
27203}
27204
27205impl<'a> Vsm4key4Emitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27206    fn vsm4key4(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27207        self.emit(VSM4KEY4_512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27208    }
27209}
27210
27211impl<'a> Vsm4key4Emitter<Zmm, Zmm, Mem> for Assembler<'a> {
27212    fn vsm4key4(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
27213        self.emit(VSM4KEY4_512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27214    }
27215}
27216
27217/// `VSM4RNDS4`.
27218///
27219/// Supported operand variants:
27220///
27221/// ```text
27222/// +---+---------------+
27223/// | # | Operands      |
27224/// +---+---------------+
27225/// | 1 | Xmm, Xmm, Mem |
27226/// | 2 | Xmm, Xmm, Xmm |
27227/// | 3 | Ymm, Ymm, Mem |
27228/// | 4 | Ymm, Ymm, Ymm |
27229/// | 5 | Zmm, Zmm, Mem |
27230/// | 6 | Zmm, Zmm, Zmm |
27231/// +---+---------------+
27232/// ```
27233pub trait Vsm4rnds4Emitter<A, B, C> {
27234    fn vsm4rnds4(&mut self, op0: A, op1: B, op2: C);
27235}
27236
27237impl<'a> Vsm4rnds4Emitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27238    fn vsm4rnds4(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27239        self.emit(VSM4RNDS4_128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27240    }
27241}
27242
27243impl<'a> Vsm4rnds4Emitter<Xmm, Xmm, Mem> for Assembler<'a> {
27244    fn vsm4rnds4(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27245        self.emit(VSM4RNDS4_128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27246    }
27247}
27248
27249impl<'a> Vsm4rnds4Emitter<Ymm, Ymm, Ymm> for Assembler<'a> {
27250    fn vsm4rnds4(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
27251        self.emit(VSM4RNDS4_256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27252    }
27253}
27254
27255impl<'a> Vsm4rnds4Emitter<Ymm, Ymm, Mem> for Assembler<'a> {
27256    fn vsm4rnds4(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
27257        self.emit(VSM4RNDS4_256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27258    }
27259}
27260
27261impl<'a> Vsm4rnds4Emitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27262    fn vsm4rnds4(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27263        self.emit(VSM4RNDS4_512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27264    }
27265}
27266
27267impl<'a> Vsm4rnds4Emitter<Zmm, Zmm, Mem> for Assembler<'a> {
27268    fn vsm4rnds4(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
27269        self.emit(VSM4RNDS4_512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27270    }
27271}
27272
27273/// `VSQRTPH`.
27274///
27275/// Supported operand variants:
27276///
27277/// ```text
27278/// +---+----------+
27279/// | # | Operands |
27280/// +---+----------+
27281/// | 1 | Xmm, Mem |
27282/// | 2 | Xmm, Xmm |
27283/// | 3 | Ymm, Mem |
27284/// | 4 | Ymm, Ymm |
27285/// | 5 | Zmm, Mem |
27286/// | 6 | Zmm, Zmm |
27287/// +---+----------+
27288/// ```
27289pub trait VsqrtphEmitter<A, B> {
27290    fn vsqrtph(&mut self, op0: A, op1: B);
27291}
27292
27293impl<'a> VsqrtphEmitter<Xmm, Xmm> for Assembler<'a> {
27294    fn vsqrtph(&mut self, op0: Xmm, op1: Xmm) {
27295        self.emit(VSQRTPH128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27296    }
27297}
27298
27299impl<'a> VsqrtphEmitter<Xmm, Mem> for Assembler<'a> {
27300    fn vsqrtph(&mut self, op0: Xmm, op1: Mem) {
27301        self.emit(VSQRTPH128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27302    }
27303}
27304
27305impl<'a> VsqrtphEmitter<Ymm, Ymm> for Assembler<'a> {
27306    fn vsqrtph(&mut self, op0: Ymm, op1: Ymm) {
27307        self.emit(VSQRTPH256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27308    }
27309}
27310
27311impl<'a> VsqrtphEmitter<Ymm, Mem> for Assembler<'a> {
27312    fn vsqrtph(&mut self, op0: Ymm, op1: Mem) {
27313        self.emit(VSQRTPH256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27314    }
27315}
27316
27317impl<'a> VsqrtphEmitter<Zmm, Zmm> for Assembler<'a> {
27318    fn vsqrtph(&mut self, op0: Zmm, op1: Zmm) {
27319        self.emit(VSQRTPH512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27320    }
27321}
27322
27323impl<'a> VsqrtphEmitter<Zmm, Mem> for Assembler<'a> {
27324    fn vsqrtph(&mut self, op0: Zmm, op1: Mem) {
27325        self.emit(VSQRTPH512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27326    }
27327}
27328
27329/// `VSQRTPH_ER`.
27330///
27331/// Supported operand variants:
27332///
27333/// ```text
27334/// +---+----------+
27335/// | # | Operands |
27336/// +---+----------+
27337/// | 1 | Zmm, Zmm |
27338/// +---+----------+
27339/// ```
27340pub trait VsqrtphErEmitter<A, B> {
27341    fn vsqrtph_er(&mut self, op0: A, op1: B);
27342}
27343
27344impl<'a> VsqrtphErEmitter<Zmm, Zmm> for Assembler<'a> {
27345    fn vsqrtph_er(&mut self, op0: Zmm, op1: Zmm) {
27346        self.emit(VSQRTPH512RR_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27347    }
27348}
27349
27350/// `VSQRTPH_MASK`.
27351///
27352/// Supported operand variants:
27353///
27354/// ```text
27355/// +---+----------+
27356/// | # | Operands |
27357/// +---+----------+
27358/// | 1 | Xmm, Mem |
27359/// | 2 | Xmm, Xmm |
27360/// | 3 | Ymm, Mem |
27361/// | 4 | Ymm, Ymm |
27362/// | 5 | Zmm, Mem |
27363/// | 6 | Zmm, Zmm |
27364/// +---+----------+
27365/// ```
27366pub trait VsqrtphMaskEmitter<A, B> {
27367    fn vsqrtph_mask(&mut self, op0: A, op1: B);
27368}
27369
27370impl<'a> VsqrtphMaskEmitter<Xmm, Xmm> for Assembler<'a> {
27371    fn vsqrtph_mask(&mut self, op0: Xmm, op1: Xmm) {
27372        self.emit(VSQRTPH128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27373    }
27374}
27375
27376impl<'a> VsqrtphMaskEmitter<Xmm, Mem> for Assembler<'a> {
27377    fn vsqrtph_mask(&mut self, op0: Xmm, op1: Mem) {
27378        self.emit(VSQRTPH128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27379    }
27380}
27381
27382impl<'a> VsqrtphMaskEmitter<Ymm, Ymm> for Assembler<'a> {
27383    fn vsqrtph_mask(&mut self, op0: Ymm, op1: Ymm) {
27384        self.emit(VSQRTPH256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27385    }
27386}
27387
27388impl<'a> VsqrtphMaskEmitter<Ymm, Mem> for Assembler<'a> {
27389    fn vsqrtph_mask(&mut self, op0: Ymm, op1: Mem) {
27390        self.emit(VSQRTPH256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27391    }
27392}
27393
27394impl<'a> VsqrtphMaskEmitter<Zmm, Zmm> for Assembler<'a> {
27395    fn vsqrtph_mask(&mut self, op0: Zmm, op1: Zmm) {
27396        self.emit(VSQRTPH512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27397    }
27398}
27399
27400impl<'a> VsqrtphMaskEmitter<Zmm, Mem> for Assembler<'a> {
27401    fn vsqrtph_mask(&mut self, op0: Zmm, op1: Mem) {
27402        self.emit(VSQRTPH512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27403    }
27404}
27405
27406/// `VSQRTPH_MASK_ER`.
27407///
27408/// Supported operand variants:
27409///
27410/// ```text
27411/// +---+----------+
27412/// | # | Operands |
27413/// +---+----------+
27414/// | 1 | Zmm, Zmm |
27415/// +---+----------+
27416/// ```
27417pub trait VsqrtphMaskErEmitter<A, B> {
27418    fn vsqrtph_mask_er(&mut self, op0: A, op1: B);
27419}
27420
27421impl<'a> VsqrtphMaskErEmitter<Zmm, Zmm> for Assembler<'a> {
27422    fn vsqrtph_mask_er(&mut self, op0: Zmm, op1: Zmm) {
27423        self.emit(VSQRTPH512RR_MASK_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27424    }
27425}
27426
27427/// `VSQRTPH_MASKZ`.
27428///
27429/// Supported operand variants:
27430///
27431/// ```text
27432/// +---+----------+
27433/// | # | Operands |
27434/// +---+----------+
27435/// | 1 | Xmm, Mem |
27436/// | 2 | Xmm, Xmm |
27437/// | 3 | Ymm, Mem |
27438/// | 4 | Ymm, Ymm |
27439/// | 5 | Zmm, Mem |
27440/// | 6 | Zmm, Zmm |
27441/// +---+----------+
27442/// ```
27443pub trait VsqrtphMaskzEmitter<A, B> {
27444    fn vsqrtph_maskz(&mut self, op0: A, op1: B);
27445}
27446
27447impl<'a> VsqrtphMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
27448    fn vsqrtph_maskz(&mut self, op0: Xmm, op1: Xmm) {
27449        self.emit(VSQRTPH128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27450    }
27451}
27452
27453impl<'a> VsqrtphMaskzEmitter<Xmm, Mem> for Assembler<'a> {
27454    fn vsqrtph_maskz(&mut self, op0: Xmm, op1: Mem) {
27455        self.emit(VSQRTPH128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27456    }
27457}
27458
27459impl<'a> VsqrtphMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
27460    fn vsqrtph_maskz(&mut self, op0: Ymm, op1: Ymm) {
27461        self.emit(VSQRTPH256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27462    }
27463}
27464
27465impl<'a> VsqrtphMaskzEmitter<Ymm, Mem> for Assembler<'a> {
27466    fn vsqrtph_maskz(&mut self, op0: Ymm, op1: Mem) {
27467        self.emit(VSQRTPH256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27468    }
27469}
27470
27471impl<'a> VsqrtphMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
27472    fn vsqrtph_maskz(&mut self, op0: Zmm, op1: Zmm) {
27473        self.emit(VSQRTPH512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27474    }
27475}
27476
27477impl<'a> VsqrtphMaskzEmitter<Zmm, Mem> for Assembler<'a> {
27478    fn vsqrtph_maskz(&mut self, op0: Zmm, op1: Mem) {
27479        self.emit(VSQRTPH512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27480    }
27481}
27482
27483/// `VSQRTPH_MASKZ_ER`.
27484///
27485/// Supported operand variants:
27486///
27487/// ```text
27488/// +---+----------+
27489/// | # | Operands |
27490/// +---+----------+
27491/// | 1 | Zmm, Zmm |
27492/// +---+----------+
27493/// ```
27494pub trait VsqrtphMaskzErEmitter<A, B> {
27495    fn vsqrtph_maskz_er(&mut self, op0: A, op1: B);
27496}
27497
27498impl<'a> VsqrtphMaskzErEmitter<Zmm, Zmm> for Assembler<'a> {
27499    fn vsqrtph_maskz_er(&mut self, op0: Zmm, op1: Zmm) {
27500        self.emit(VSQRTPH512RR_MASKZ_ER, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
27501    }
27502}
27503
27504/// `VSQRTSH`.
27505///
27506/// Supported operand variants:
27507///
27508/// ```text
27509/// +---+---------------+
27510/// | # | Operands      |
27511/// +---+---------------+
27512/// | 1 | Xmm, Xmm, Mem |
27513/// | 2 | Xmm, Xmm, Xmm |
27514/// +---+---------------+
27515/// ```
27516pub trait VsqrtshEmitter<A, B, C> {
27517    fn vsqrtsh(&mut self, op0: A, op1: B, op2: C);
27518}
27519
27520impl<'a> VsqrtshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27521    fn vsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27522        self.emit(VSQRTSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27523    }
27524}
27525
27526impl<'a> VsqrtshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27527    fn vsqrtsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27528        self.emit(VSQRTSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27529    }
27530}
27531
27532/// `VSQRTSH_ER`.
27533///
27534/// Supported operand variants:
27535///
27536/// ```text
27537/// +---+---------------+
27538/// | # | Operands      |
27539/// +---+---------------+
27540/// | 1 | Xmm, Xmm, Xmm |
27541/// +---+---------------+
27542/// ```
27543pub trait VsqrtshErEmitter<A, B, C> {
27544    fn vsqrtsh_er(&mut self, op0: A, op1: B, op2: C);
27545}
27546
27547impl<'a> VsqrtshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27548    fn vsqrtsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27549        self.emit(VSQRTSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27550    }
27551}
27552
27553/// `VSQRTSH_MASK`.
27554///
27555/// Supported operand variants:
27556///
27557/// ```text
27558/// +---+---------------+
27559/// | # | Operands      |
27560/// +---+---------------+
27561/// | 1 | Xmm, Xmm, Mem |
27562/// | 2 | Xmm, Xmm, Xmm |
27563/// +---+---------------+
27564/// ```
27565pub trait VsqrtshMaskEmitter<A, B, C> {
27566    fn vsqrtsh_mask(&mut self, op0: A, op1: B, op2: C);
27567}
27568
27569impl<'a> VsqrtshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27570    fn vsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27571        self.emit(VSQRTSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27572    }
27573}
27574
27575impl<'a> VsqrtshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27576    fn vsqrtsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27577        self.emit(VSQRTSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27578    }
27579}
27580
27581/// `VSQRTSH_MASK_ER`.
27582///
27583/// Supported operand variants:
27584///
27585/// ```text
27586/// +---+---------------+
27587/// | # | Operands      |
27588/// +---+---------------+
27589/// | 1 | Xmm, Xmm, Xmm |
27590/// +---+---------------+
27591/// ```
27592pub trait VsqrtshMaskErEmitter<A, B, C> {
27593    fn vsqrtsh_mask_er(&mut self, op0: A, op1: B, op2: C);
27594}
27595
27596impl<'a> VsqrtshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27597    fn vsqrtsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27598        self.emit(VSQRTSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27599    }
27600}
27601
27602/// `VSQRTSH_MASKZ`.
27603///
27604/// Supported operand variants:
27605///
27606/// ```text
27607/// +---+---------------+
27608/// | # | Operands      |
27609/// +---+---------------+
27610/// | 1 | Xmm, Xmm, Mem |
27611/// | 2 | Xmm, Xmm, Xmm |
27612/// +---+---------------+
27613/// ```
27614pub trait VsqrtshMaskzEmitter<A, B, C> {
27615    fn vsqrtsh_maskz(&mut self, op0: A, op1: B, op2: C);
27616}
27617
27618impl<'a> VsqrtshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27619    fn vsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27620        self.emit(VSQRTSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27621    }
27622}
27623
27624impl<'a> VsqrtshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27625    fn vsqrtsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27626        self.emit(VSQRTSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27627    }
27628}
27629
27630/// `VSQRTSH_MASKZ_ER`.
27631///
27632/// Supported operand variants:
27633///
27634/// ```text
27635/// +---+---------------+
27636/// | # | Operands      |
27637/// +---+---------------+
27638/// | 1 | Xmm, Xmm, Xmm |
27639/// +---+---------------+
27640/// ```
27641pub trait VsqrtshMaskzErEmitter<A, B, C> {
27642    fn vsqrtsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
27643}
27644
27645impl<'a> VsqrtshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27646    fn vsqrtsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27647        self.emit(VSQRTSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27648    }
27649}
27650
27651/// `VSUBPH`.
27652///
27653/// Supported operand variants:
27654///
27655/// ```text
27656/// +---+---------------+
27657/// | # | Operands      |
27658/// +---+---------------+
27659/// | 1 | Xmm, Xmm, Mem |
27660/// | 2 | Xmm, Xmm, Xmm |
27661/// | 3 | Ymm, Ymm, Mem |
27662/// | 4 | Ymm, Ymm, Ymm |
27663/// | 5 | Zmm, Zmm, Mem |
27664/// | 6 | Zmm, Zmm, Zmm |
27665/// +---+---------------+
27666/// ```
27667pub trait VsubphEmitter<A, B, C> {
27668    fn vsubph(&mut self, op0: A, op1: B, op2: C);
27669}
27670
27671impl<'a> VsubphEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27672    fn vsubph(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27673        self.emit(VSUBPH128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27674    }
27675}
27676
27677impl<'a> VsubphEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27678    fn vsubph(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27679        self.emit(VSUBPH128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27680    }
27681}
27682
27683impl<'a> VsubphEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
27684    fn vsubph(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
27685        self.emit(VSUBPH256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27686    }
27687}
27688
27689impl<'a> VsubphEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
27690    fn vsubph(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
27691        self.emit(VSUBPH256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27692    }
27693}
27694
27695impl<'a> VsubphEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27696    fn vsubph(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27697        self.emit(VSUBPH512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27698    }
27699}
27700
27701impl<'a> VsubphEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
27702    fn vsubph(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
27703        self.emit(VSUBPH512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27704    }
27705}
27706
27707/// `VSUBPH_ER`.
27708///
27709/// Supported operand variants:
27710///
27711/// ```text
27712/// +---+---------------+
27713/// | # | Operands      |
27714/// +---+---------------+
27715/// | 1 | Zmm, Zmm, Zmm |
27716/// +---+---------------+
27717/// ```
27718pub trait VsubphErEmitter<A, B, C> {
27719    fn vsubph_er(&mut self, op0: A, op1: B, op2: C);
27720}
27721
27722impl<'a> VsubphErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27723    fn vsubph_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27724        self.emit(VSUBPH512RRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27725    }
27726}
27727
27728/// `VSUBPH_MASK`.
27729///
27730/// Supported operand variants:
27731///
27732/// ```text
27733/// +---+---------------+
27734/// | # | Operands      |
27735/// +---+---------------+
27736/// | 1 | Xmm, Xmm, Mem |
27737/// | 2 | Xmm, Xmm, Xmm |
27738/// | 3 | Ymm, Ymm, Mem |
27739/// | 4 | Ymm, Ymm, Ymm |
27740/// | 5 | Zmm, Zmm, Mem |
27741/// | 6 | Zmm, Zmm, Zmm |
27742/// +---+---------------+
27743/// ```
27744pub trait VsubphMaskEmitter<A, B, C> {
27745    fn vsubph_mask(&mut self, op0: A, op1: B, op2: C);
27746}
27747
27748impl<'a> VsubphMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27749    fn vsubph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27750        self.emit(VSUBPH128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27751    }
27752}
27753
27754impl<'a> VsubphMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27755    fn vsubph_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27756        self.emit(VSUBPH128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27757    }
27758}
27759
27760impl<'a> VsubphMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
27761    fn vsubph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
27762        self.emit(VSUBPH256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27763    }
27764}
27765
27766impl<'a> VsubphMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
27767    fn vsubph_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
27768        self.emit(VSUBPH256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27769    }
27770}
27771
27772impl<'a> VsubphMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27773    fn vsubph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27774        self.emit(VSUBPH512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27775    }
27776}
27777
27778impl<'a> VsubphMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
27779    fn vsubph_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
27780        self.emit(VSUBPH512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27781    }
27782}
27783
27784/// `VSUBPH_MASK_ER`.
27785///
27786/// Supported operand variants:
27787///
27788/// ```text
27789/// +---+---------------+
27790/// | # | Operands      |
27791/// +---+---------------+
27792/// | 1 | Zmm, Zmm, Zmm |
27793/// +---+---------------+
27794/// ```
27795pub trait VsubphMaskErEmitter<A, B, C> {
27796    fn vsubph_mask_er(&mut self, op0: A, op1: B, op2: C);
27797}
27798
27799impl<'a> VsubphMaskErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27800    fn vsubph_mask_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27801        self.emit(VSUBPH512RRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27802    }
27803}
27804
27805/// `VSUBPH_MASKZ`.
27806///
27807/// Supported operand variants:
27808///
27809/// ```text
27810/// +---+---------------+
27811/// | # | Operands      |
27812/// +---+---------------+
27813/// | 1 | Xmm, Xmm, Mem |
27814/// | 2 | Xmm, Xmm, Xmm |
27815/// | 3 | Ymm, Ymm, Mem |
27816/// | 4 | Ymm, Ymm, Ymm |
27817/// | 5 | Zmm, Zmm, Mem |
27818/// | 6 | Zmm, Zmm, Zmm |
27819/// +---+---------------+
27820/// ```
27821pub trait VsubphMaskzEmitter<A, B, C> {
27822    fn vsubph_maskz(&mut self, op0: A, op1: B, op2: C);
27823}
27824
27825impl<'a> VsubphMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27826    fn vsubph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27827        self.emit(VSUBPH128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27828    }
27829}
27830
27831impl<'a> VsubphMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27832    fn vsubph_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27833        self.emit(VSUBPH128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27834    }
27835}
27836
27837impl<'a> VsubphMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
27838    fn vsubph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
27839        self.emit(VSUBPH256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27840    }
27841}
27842
27843impl<'a> VsubphMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
27844    fn vsubph_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
27845        self.emit(VSUBPH256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27846    }
27847}
27848
27849impl<'a> VsubphMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27850    fn vsubph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27851        self.emit(VSUBPH512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27852    }
27853}
27854
27855impl<'a> VsubphMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
27856    fn vsubph_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
27857        self.emit(VSUBPH512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27858    }
27859}
27860
27861/// `VSUBPH_MASKZ_ER`.
27862///
27863/// Supported operand variants:
27864///
27865/// ```text
27866/// +---+---------------+
27867/// | # | Operands      |
27868/// +---+---------------+
27869/// | 1 | Zmm, Zmm, Zmm |
27870/// +---+---------------+
27871/// ```
27872pub trait VsubphMaskzErEmitter<A, B, C> {
27873    fn vsubph_maskz_er(&mut self, op0: A, op1: B, op2: C);
27874}
27875
27876impl<'a> VsubphMaskzErEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
27877    fn vsubph_maskz_er(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
27878        self.emit(VSUBPH512RRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27879    }
27880}
27881
27882/// `VSUBSH`.
27883///
27884/// Supported operand variants:
27885///
27886/// ```text
27887/// +---+---------------+
27888/// | # | Operands      |
27889/// +---+---------------+
27890/// | 1 | Xmm, Xmm, Mem |
27891/// | 2 | Xmm, Xmm, Xmm |
27892/// +---+---------------+
27893/// ```
27894pub trait VsubshEmitter<A, B, C> {
27895    fn vsubsh(&mut self, op0: A, op1: B, op2: C);
27896}
27897
27898impl<'a> VsubshEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27899    fn vsubsh(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27900        self.emit(VSUBSHRRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27901    }
27902}
27903
27904impl<'a> VsubshEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27905    fn vsubsh(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27906        self.emit(VSUBSHRRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27907    }
27908}
27909
27910/// `VSUBSH_ER`.
27911///
27912/// Supported operand variants:
27913///
27914/// ```text
27915/// +---+---------------+
27916/// | # | Operands      |
27917/// +---+---------------+
27918/// | 1 | Xmm, Xmm, Xmm |
27919/// +---+---------------+
27920/// ```
27921pub trait VsubshErEmitter<A, B, C> {
27922    fn vsubsh_er(&mut self, op0: A, op1: B, op2: C);
27923}
27924
27925impl<'a> VsubshErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27926    fn vsubsh_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27927        self.emit(VSUBSHRRR_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27928    }
27929}
27930
27931/// `VSUBSH_MASK`.
27932///
27933/// Supported operand variants:
27934///
27935/// ```text
27936/// +---+---------------+
27937/// | # | Operands      |
27938/// +---+---------------+
27939/// | 1 | Xmm, Xmm, Mem |
27940/// | 2 | Xmm, Xmm, Xmm |
27941/// +---+---------------+
27942/// ```
27943pub trait VsubshMaskEmitter<A, B, C> {
27944    fn vsubsh_mask(&mut self, op0: A, op1: B, op2: C);
27945}
27946
27947impl<'a> VsubshMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27948    fn vsubsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27949        self.emit(VSUBSHRRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27950    }
27951}
27952
27953impl<'a> VsubshMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
27954    fn vsubsh_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
27955        self.emit(VSUBSHRRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27956    }
27957}
27958
27959/// `VSUBSH_MASK_ER`.
27960///
27961/// Supported operand variants:
27962///
27963/// ```text
27964/// +---+---------------+
27965/// | # | Operands      |
27966/// +---+---------------+
27967/// | 1 | Xmm, Xmm, Xmm |
27968/// +---+---------------+
27969/// ```
27970pub trait VsubshMaskErEmitter<A, B, C> {
27971    fn vsubsh_mask_er(&mut self, op0: A, op1: B, op2: C);
27972}
27973
27974impl<'a> VsubshMaskErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27975    fn vsubsh_mask_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27976        self.emit(VSUBSHRRR_MASK_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27977    }
27978}
27979
27980/// `VSUBSH_MASKZ`.
27981///
27982/// Supported operand variants:
27983///
27984/// ```text
27985/// +---+---------------+
27986/// | # | Operands      |
27987/// +---+---------------+
27988/// | 1 | Xmm, Xmm, Mem |
27989/// | 2 | Xmm, Xmm, Xmm |
27990/// +---+---------------+
27991/// ```
27992pub trait VsubshMaskzEmitter<A, B, C> {
27993    fn vsubsh_maskz(&mut self, op0: A, op1: B, op2: C);
27994}
27995
27996impl<'a> VsubshMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
27997    fn vsubsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
27998        self.emit(VSUBSHRRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
27999    }
28000}
28001
28002impl<'a> VsubshMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
28003    fn vsubsh_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
28004        self.emit(VSUBSHRRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
28005    }
28006}
28007
28008/// `VSUBSH_MASKZ_ER`.
28009///
28010/// Supported operand variants:
28011///
28012/// ```text
28013/// +---+---------------+
28014/// | # | Operands      |
28015/// +---+---------------+
28016/// | 1 | Xmm, Xmm, Xmm |
28017/// +---+---------------+
28018/// ```
28019pub trait VsubshMaskzErEmitter<A, B, C> {
28020    fn vsubsh_maskz_er(&mut self, op0: A, op1: B, op2: C);
28021}
28022
28023impl<'a> VsubshMaskzErEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
28024    fn vsubsh_maskz_er(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
28025        self.emit(VSUBSHRRR_MASKZ_ER, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
28026    }
28027}
28028
28029/// `VUCOMISH`.
28030///
28031/// Supported operand variants:
28032///
28033/// ```text
28034/// +---+----------+
28035/// | # | Operands |
28036/// +---+----------+
28037/// | 1 | Xmm, Mem |
28038/// | 2 | Xmm, Xmm |
28039/// +---+----------+
28040/// ```
28041pub trait VucomishEmitter<A, B> {
28042    fn vucomish(&mut self, op0: A, op1: B);
28043}
28044
28045impl<'a> VucomishEmitter<Xmm, Xmm> for Assembler<'a> {
28046    fn vucomish(&mut self, op0: Xmm, op1: Xmm) {
28047        self.emit(VUCOMISHRR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28048    }
28049}
28050
28051impl<'a> VucomishEmitter<Xmm, Mem> for Assembler<'a> {
28052    fn vucomish(&mut self, op0: Xmm, op1: Mem) {
28053        self.emit(VUCOMISHRM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28054    }
28055}
28056
28057/// `VUCOMISH_SAE`.
28058///
28059/// Supported operand variants:
28060///
28061/// ```text
28062/// +---+----------+
28063/// | # | Operands |
28064/// +---+----------+
28065/// | 1 | Xmm, Xmm |
28066/// +---+----------+
28067/// ```
28068pub trait VucomishSaeEmitter<A, B> {
28069    fn vucomish_sae(&mut self, op0: A, op1: B);
28070}
28071
28072impl<'a> VucomishSaeEmitter<Xmm, Xmm> for Assembler<'a> {
28073    fn vucomish_sae(&mut self, op0: Xmm, op1: Xmm) {
28074        self.emit(VUCOMISHRR_SAE, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28075    }
28076}
28077
28078/// `XCHG` (XCHG). 
28079/// Exchanges the contents of the destination (first) and source (second) operands. The operands can be two general-purpose registers or a register and a memory location. If a memory operand is referenced, the processor’s locking protocol is automatically implemented for the duration of the exchange operation, regardless of the presence or absence of the LOCK prefix or of the value of the IOPL. (See the LOCK prefix description in this chapter for more information on the locking protocol.)
28080///
28081///
28082/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XCHG.html).
28083///
28084/// Supported operand variants:
28085///
28086/// ```text
28087/// +---+--------------+
28088/// | # | Operands     |
28089/// +---+--------------+
28090/// | 1 | GpbLo, GpbLo |
28091/// | 2 | Gpd, Gpd     |
28092/// | 3 | Gpq, Gpq     |
28093/// | 4 | Gpw, Gpw     |
28094/// | 5 | Mem, GpbLo   |
28095/// | 6 | Mem, Gpd     |
28096/// | 7 | Mem, Gpq     |
28097/// | 8 | Mem, Gpw     |
28098/// +---+--------------+
28099/// ```
28100pub trait XchgEmitter<A, B> {
28101    fn xchg(&mut self, op0: A, op1: B);
28102}
28103
28104impl<'a> XchgEmitter<GpbLo, GpbLo> for Assembler<'a> {
28105    fn xchg(&mut self, op0: GpbLo, op1: GpbLo) {
28106        self.emit(XCHG8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28107    }
28108}
28109
28110impl<'a> XchgEmitter<Mem, GpbLo> for Assembler<'a> {
28111    fn xchg(&mut self, op0: Mem, op1: GpbLo) {
28112        self.emit(XCHG8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28113    }
28114}
28115
28116impl<'a> XchgEmitter<Gpw, Gpw> for Assembler<'a> {
28117    fn xchg(&mut self, op0: Gpw, op1: Gpw) {
28118        self.emit(XCHG16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28119    }
28120}
28121
28122impl<'a> XchgEmitter<Mem, Gpw> for Assembler<'a> {
28123    fn xchg(&mut self, op0: Mem, op1: Gpw) {
28124        self.emit(XCHG16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28125    }
28126}
28127
28128impl<'a> XchgEmitter<Gpd, Gpd> for Assembler<'a> {
28129    fn xchg(&mut self, op0: Gpd, op1: Gpd) {
28130        self.emit(XCHG32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28131    }
28132}
28133
28134impl<'a> XchgEmitter<Mem, Gpd> for Assembler<'a> {
28135    fn xchg(&mut self, op0: Mem, op1: Gpd) {
28136        self.emit(XCHG32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28137    }
28138}
28139
28140impl<'a> XchgEmitter<Gpq, Gpq> for Assembler<'a> {
28141    fn xchg(&mut self, op0: Gpq, op1: Gpq) {
28142        self.emit(XCHG64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28143    }
28144}
28145
28146impl<'a> XchgEmitter<Mem, Gpq> for Assembler<'a> {
28147    fn xchg(&mut self, op0: Mem, op1: Gpq) {
28148        self.emit(XCHG64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28149    }
28150}
28151
28152/// `XLATB` (XLATB). 
28153/// Locates a byte entry in a table in memory, using the contents of the AL register as a table index, then copies the contents of the table entry back into the AL register. The index in the AL register is treated as an unsigned integer. The XLAT and XLATB instructions get the base address of the table in memory from either the DS:EBX or the DS:BX registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). (The DS segment may be overridden with a segment override prefix.)
28154///
28155///
28156/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XLAT%3AXLATB.html).
28157///
28158/// Supported operand variants:
28159///
28160/// ```text
28161/// +---+----------+
28162/// | # | Operands |
28163/// +---+----------+
28164/// | 1 | (none)   |
28165/// +---+----------+
28166/// ```
28167pub trait XlatbEmitter {
28168    fn xlatb(&mut self);
28169}
28170
28171impl<'a> XlatbEmitter for Assembler<'a> {
28172    fn xlatb(&mut self) {
28173        self.emit(XLATB, &NOREG, &NOREG, &NOREG, &NOREG);
28174    }
28175}
28176
/// `XOR` (XOR). 
/// Performs a bitwise exclusive OR (XOR) operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is 1 if the corresponding bits of the operands are different; each bit is 0 if the corresponding bits are the same.
///
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XOR.html).
///
/// Supported operand variants:
///
/// ```text
/// +----+--------------+
/// | #  | Operands     |
/// +----+--------------+
/// | 1  | GpbLo, GpbLo |
/// | 2  | GpbLo, Imm   |
/// | 3  | GpbLo, Mem   |
/// | 4  | Gpd, Gpd     |
/// | 5  | Gpd, Imm     |
/// | 6  | Gpd, Mem     |
/// | 7  | Gpq, Gpq     |
/// | 8  | Gpq, Imm     |
/// | 9  | Gpq, Mem     |
/// | 10 | Gpw, Gpw     |
/// | 11 | Gpw, Imm     |
/// | 12 | Gpw, Mem     |
/// | 13 | Mem, GpbLo   |
/// | 14 | Mem, Gpd     |
/// | 15 | Mem, Gpq     |
/// | 16 | Mem, Gpw     |
/// | 17 | Mem, Imm     |
/// +----+--------------+
/// ```
pub trait XorEmitter<A, B> {
    /// Emits `XOR op0, op1`.
    fn xor(&mut self, op0: A, op1: B);
}
28211
28212impl<'a> XorEmitter<GpbLo, GpbLo> for Assembler<'a> {
28213    fn xor(&mut self, op0: GpbLo, op1: GpbLo) {
28214        self.emit(XOR8RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28215    }
28216}
28217
28218impl<'a> XorEmitter<Mem, GpbLo> for Assembler<'a> {
28219    fn xor(&mut self, op0: Mem, op1: GpbLo) {
28220        self.emit(XOR8MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28221    }
28222}
28223
28224impl<'a> XorEmitter<Gpw, Gpw> for Assembler<'a> {
28225    fn xor(&mut self, op0: Gpw, op1: Gpw) {
28226        self.emit(XOR16RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28227    }
28228}
28229
28230impl<'a> XorEmitter<Mem, Gpw> for Assembler<'a> {
28231    fn xor(&mut self, op0: Mem, op1: Gpw) {
28232        self.emit(XOR16MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28233    }
28234}
28235
28236impl<'a> XorEmitter<Gpd, Gpd> for Assembler<'a> {
28237    fn xor(&mut self, op0: Gpd, op1: Gpd) {
28238        self.emit(XOR32RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28239    }
28240}
28241
28242impl<'a> XorEmitter<Mem, Gpd> for Assembler<'a> {
28243    fn xor(&mut self, op0: Mem, op1: Gpd) {
28244        self.emit(XOR32MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28245    }
28246}
28247
28248impl<'a> XorEmitter<Gpq, Gpq> for Assembler<'a> {
28249    fn xor(&mut self, op0: Gpq, op1: Gpq) {
28250        self.emit(XOR64RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28251    }
28252}
28253
28254impl<'a> XorEmitter<Mem, Gpq> for Assembler<'a> {
28255    fn xor(&mut self, op0: Mem, op1: Gpq) {
28256        self.emit(XOR64MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28257    }
28258}
28259
28260impl<'a> XorEmitter<GpbLo, Mem> for Assembler<'a> {
28261    fn xor(&mut self, op0: GpbLo, op1: Mem) {
28262        self.emit(XOR8RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28263    }
28264}
28265
28266impl<'a> XorEmitter<Gpw, Mem> for Assembler<'a> {
28267    fn xor(&mut self, op0: Gpw, op1: Mem) {
28268        self.emit(XOR16RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28269    }
28270}
28271
28272impl<'a> XorEmitter<Gpd, Mem> for Assembler<'a> {
28273    fn xor(&mut self, op0: Gpd, op1: Mem) {
28274        self.emit(XOR32RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28275    }
28276}
28277
28278impl<'a> XorEmitter<Gpq, Mem> for Assembler<'a> {
28279    fn xor(&mut self, op0: Gpq, op1: Mem) {
28280        self.emit(XOR64RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28281    }
28282}
28283
28284impl<'a> XorEmitter<GpbLo, Imm> for Assembler<'a> {
28285    fn xor(&mut self, op0: GpbLo, op1: Imm) {
28286        self.emit(XOR8RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28287    }
28288}
28289
28290impl<'a> XorEmitter<Gpw, Imm> for Assembler<'a> {
28291    fn xor(&mut self, op0: Gpw, op1: Imm) {
28292        self.emit(XOR16RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28293    }
28294}
28295
28296impl<'a> XorEmitter<Gpd, Imm> for Assembler<'a> {
28297    fn xor(&mut self, op0: Gpd, op1: Imm) {
28298        self.emit(XOR32RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28299    }
28300}
28301
28302impl<'a> XorEmitter<Gpq, Imm> for Assembler<'a> {
28303    fn xor(&mut self, op0: Gpq, op1: Imm) {
28304        self.emit(XOR64RI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28305    }
28306}
28307
28308impl<'a> XorEmitter<Mem, Imm> for Assembler<'a> {
28309    fn xor(&mut self, op0: Mem, op1: Imm) {
28310        self.emit(XOR8MI, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
28311    }
28312}
28313
28314
28315impl<'a> Assembler<'a> {
28316    /// `AADD`.
28317    ///
28318    /// Supported operand variants:
28319    ///
28320    /// ```text
28321    /// +---+----------+
28322    /// | # | Operands |
28323    /// +---+----------+
28324    /// | 1 | Mem, Gpd |
28325    /// | 2 | Mem, Gpq |
28326    /// +---+----------+
28327    /// ```
28328    #[inline]
28329    pub fn aadd<A, B>(&mut self, op0: A, op1: B)
28330    where Assembler<'a>: AaddEmitter<A, B> {
28331        <Self as AaddEmitter<A, B>>::aadd(self, op0, op1);
28332    }
28333    /// `AAND`.
28334    ///
28335    /// Supported operand variants:
28336    ///
28337    /// ```text
28338    /// +---+----------+
28339    /// | # | Operands |
28340    /// +---+----------+
28341    /// | 1 | Mem, Gpd |
28342    /// | 2 | Mem, Gpq |
28343    /// +---+----------+
28344    /// ```
28345    #[inline]
28346    pub fn aand<A, B>(&mut self, op0: A, op1: B)
28347    where Assembler<'a>: AandEmitter<A, B> {
28348        <Self as AandEmitter<A, B>>::aand(self, op0, op1);
28349    }
28350    /// `ADC` (ADC). 
28351    /// Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
28352    ///
28353    ///
28354    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADC.html).
28355    ///
28356    /// Supported operand variants:
28357    ///
28358    /// ```text
28359    /// +----+--------------+
28360    /// | #  | Operands     |
28361    /// +----+--------------+
28362    /// | 1  | GpbLo, GpbLo |
28363    /// | 2  | GpbLo, Imm   |
28364    /// | 3  | GpbLo, Mem   |
28365    /// | 4  | Gpd, Gpd     |
28366    /// | 5  | Gpd, Imm     |
28367    /// | 6  | Gpd, Mem     |
28368    /// | 7  | Gpq, Gpq     |
28369    /// | 8  | Gpq, Imm     |
28370    /// | 9  | Gpq, Mem     |
28371    /// | 10 | Gpw, Gpw     |
28372    /// | 11 | Gpw, Imm     |
28373    /// | 12 | Gpw, Mem     |
28374    /// | 13 | Mem, GpbLo   |
28375    /// | 14 | Mem, Gpd     |
28376    /// | 15 | Mem, Gpq     |
28377    /// | 16 | Mem, Gpw     |
28378    /// | 17 | Mem, Imm     |
28379    /// +----+--------------+
28380    /// ```
28381    #[inline]
28382    pub fn adc<A, B>(&mut self, op0: A, op1: B)
28383    where Assembler<'a>: AdcEmitter<A, B> {
28384        <Self as AdcEmitter<A, B>>::adc(self, op0, op1);
28385    }
28386    /// `ADD` (ADD). 
28387    /// Adds the destination operand (first operand) and the source operand (second operand) and then stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
28388    ///
28389    ///
28390    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ADD.html).
28391    ///
28392    /// Supported operand variants:
28393    ///
28394    /// ```text
28395    /// +----+--------------+
28396    /// | #  | Operands     |
28397    /// +----+--------------+
28398    /// | 1  | GpbLo, GpbLo |
28399    /// | 2  | GpbLo, Imm   |
28400    /// | 3  | GpbLo, Mem   |
28401    /// | 4  | Gpd, Gpd     |
28402    /// | 5  | Gpd, Imm     |
28403    /// | 6  | Gpd, Mem     |
28404    /// | 7  | Gpq, Gpq     |
28405    /// | 8  | Gpq, Imm     |
28406    /// | 9  | Gpq, Mem     |
28407    /// | 10 | Gpw, Gpw     |
28408    /// | 11 | Gpw, Imm     |
28409    /// | 12 | Gpw, Mem     |
28410    /// | 13 | Mem, GpbLo   |
28411    /// | 14 | Mem, Gpd     |
28412    /// | 15 | Mem, Gpq     |
28413    /// | 16 | Mem, Gpw     |
28414    /// | 17 | Mem, Imm     |
28415    /// +----+--------------+
28416    /// ```
28417    #[inline]
28418    pub fn add<A, B>(&mut self, op0: A, op1: B)
28419    where Assembler<'a>: AddEmitter<A, B> {
28420        <Self as AddEmitter<A, B>>::add(self, op0, op1);
28421    }
28422    /// `AND` (AND). 
28423    /// Performs a bitwise AND operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is set to 1 if both corresponding bits of the first and second operands are 1; otherwise, it is set to 0.
28424    ///
28425    ///
28426    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AND.html).
28427    ///
28428    /// Supported operand variants:
28429    ///
28430    /// ```text
28431    /// +----+--------------+
28432    /// | #  | Operands     |
28433    /// +----+--------------+
28434    /// | 1  | GpbLo, GpbLo |
28435    /// | 2  | GpbLo, Imm   |
28436    /// | 3  | GpbLo, Mem   |
28437    /// | 4  | Gpd, Gpd     |
28438    /// | 5  | Gpd, Imm     |
28439    /// | 6  | Gpd, Mem     |
28440    /// | 7  | Gpq, Gpq     |
28441    /// | 8  | Gpq, Imm     |
28442    /// | 9  | Gpq, Mem     |
28443    /// | 10 | Gpw, Gpw     |
28444    /// | 11 | Gpw, Imm     |
28445    /// | 12 | Gpw, Mem     |
28446    /// | 13 | Mem, GpbLo   |
28447    /// | 14 | Mem, Gpd     |
28448    /// | 15 | Mem, Gpq     |
28449    /// | 16 | Mem, Gpw     |
28450    /// | 17 | Mem, Imm     |
28451    /// +----+--------------+
28452    /// ```
28453    #[inline]
28454    pub fn and<A, B>(&mut self, op0: A, op1: B)
28455    where Assembler<'a>: AndEmitter<A, B> {
28456        <Self as AndEmitter<A, B>>::and(self, op0, op1);
28457    }
28458    /// `AOR`.
28459    ///
28460    /// Supported operand variants:
28461    ///
28462    /// ```text
28463    /// +---+----------+
28464    /// | # | Operands |
28465    /// +---+----------+
28466    /// | 1 | Mem, Gpd |
28467    /// | 2 | Mem, Gpq |
28468    /// +---+----------+
28469    /// ```
28470    #[inline]
28471    pub fn aor<A, B>(&mut self, op0: A, op1: B)
28472    where Assembler<'a>: AorEmitter<A, B> {
28473        <Self as AorEmitter<A, B>>::aor(self, op0, op1);
28474    }
28475    /// `AXOR`.
28476    ///
28477    /// Supported operand variants:
28478    ///
28479    /// ```text
28480    /// +---+----------+
28481    /// | # | Operands |
28482    /// +---+----------+
28483    /// | 1 | Mem, Gpd |
28484    /// | 2 | Mem, Gpq |
28485    /// +---+----------+
28486    /// ```
28487    #[inline]
28488    pub fn axor<A, B>(&mut self, op0: A, op1: B)
28489    where Assembler<'a>: AxorEmitter<A, B> {
28490        <Self as AxorEmitter<A, B>>::axor(self, op0, op1);
28491    }
28492    /// `BSF` (BSF). 
28493    /// Searches the source operand (second operand) for the least significant set bit (1 bit). If a least significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content of the source operand is 0, the content of the destination operand is undefined.
28494    ///
28495    ///
28496    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSF.html).
28497    ///
28498    /// Supported operand variants:
28499    ///
28500    /// ```text
28501    /// +---+----------+
28502    /// | # | Operands |
28503    /// +---+----------+
28504    /// | 1 | Gpd, Gpd |
28505    /// | 2 | Gpd, Mem |
28506    /// | 3 | Gpq, Gpq |
28507    /// | 4 | Gpq, Mem |
28508    /// | 5 | Gpw, Gpw |
28509    /// | 6 | Gpw, Mem |
28510    /// +---+----------+
28511    /// ```
28512    #[inline]
28513    pub fn bsf<A, B>(&mut self, op0: A, op1: B)
28514    where Assembler<'a>: BsfEmitter<A, B> {
28515        <Self as BsfEmitter<A, B>>::bsf(self, op0, op1);
28516    }
28517    /// `BSR` (BSR). 
28518    /// Searches the source operand (second operand) for the most significant set bit (1 bit). If a most significant 1 bit is found, its bit index is stored in the destination operand (first operand). The source operand can be a register or a memory location; the destination operand is a register. The bit index is an unsigned offset from bit 0 of the source operand. If the content source operand is 0, the content of the destination operand is undefined.
28519    ///
28520    ///
28521    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BSR.html).
28522    ///
28523    /// Supported operand variants:
28524    ///
28525    /// ```text
28526    /// +---+----------+
28527    /// | # | Operands |
28528    /// +---+----------+
28529    /// | 1 | Gpd, Gpd |
28530    /// | 2 | Gpd, Mem |
28531    /// | 3 | Gpq, Gpq |
28532    /// | 4 | Gpq, Mem |
28533    /// | 5 | Gpw, Gpw |
28534    /// | 6 | Gpw, Mem |
28535    /// +---+----------+
28536    /// ```
28537    #[inline]
28538    pub fn bsr<A, B>(&mut self, op0: A, op1: B)
28539    where Assembler<'a>: BsrEmitter<A, B> {
28540        <Self as BsrEmitter<A, B>>::bsr(self, op0, op1);
28541    }
28542    /// `BT` (BT). 
28543    /// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset (specified by the second operand) and stores the value of the bit in the CF flag. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
28544    ///
28545    ///
28546    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BT.html).
28547    ///
28548    /// Supported operand variants:
28549    ///
28550    /// ```text
28551    /// +----+----------+
28552    /// | #  | Operands |
28553    /// +----+----------+
28554    /// | 1  | Gpd, Gpd |
28555    /// | 2  | Gpd, Imm |
28556    /// | 3  | Gpq, Gpq |
28557    /// | 4  | Gpq, Imm |
28558    /// | 5  | Gpw, Gpw |
28559    /// | 6  | Gpw, Imm |
28560    /// | 7  | Mem, Gpd |
28561    /// | 8  | Mem, Gpq |
28562    /// | 9  | Mem, Gpw |
28563    /// | 10 | Mem, Imm |
28564    /// +----+----------+
28565    /// ```
28566    #[inline]
28567    pub fn bt<A, B>(&mut self, op0: A, op1: B)
28568    where Assembler<'a>: BtEmitter<A, B> {
28569        <Self as BtEmitter<A, B>>::bt(self, op0, op1);
28570    }
28571    /// `BTC` (BTC). 
28572    /// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
28573    ///
28574    ///
28575    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTC.html).
28576    ///
28577    /// Supported operand variants:
28578    ///
28579    /// ```text
28580    /// +----+----------+
28581    /// | #  | Operands |
28582    /// +----+----------+
28583    /// | 1  | Gpd, Gpd |
28584    /// | 2  | Gpd, Imm |
28585    /// | 3  | Gpq, Gpq |
28586    /// | 4  | Gpq, Imm |
28587    /// | 5  | Gpw, Gpw |
28588    /// | 6  | Gpw, Imm |
28589    /// | 7  | Mem, Gpd |
28590    /// | 8  | Mem, Gpq |
28591    /// | 9  | Mem, Gpw |
28592    /// | 10 | Mem, Imm |
28593    /// +----+----------+
28594    /// ```
28595    #[inline]
28596    pub fn btc<A, B>(&mut self, op0: A, op1: B)
28597    where Assembler<'a>: BtcEmitter<A, B> {
28598        <Self as BtcEmitter<A, B>>::btc(self, op0, op1);
28599    }
28600    /// `BTR` (BTR). 
28601    /// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and clears the selected bit in the bit string to 0. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
28602    ///
28603    ///
28604    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTR.html).
28605    ///
28606    /// Supported operand variants:
28607    ///
28608    /// ```text
28609    /// +----+----------+
28610    /// | #  | Operands |
28611    /// +----+----------+
28612    /// | 1  | Gpd, Gpd |
28613    /// | 2  | Gpd, Imm |
28614    /// | 3  | Gpq, Gpq |
28615    /// | 4  | Gpq, Imm |
28616    /// | 5  | Gpw, Gpw |
28617    /// | 6  | Gpw, Imm |
28618    /// | 7  | Mem, Gpd |
28619    /// | 8  | Mem, Gpq |
28620    /// | 9  | Mem, Gpw |
28621    /// | 10 | Mem, Imm |
28622    /// +----+----------+
28623    /// ```
28624    #[inline]
28625    pub fn btr<A, B>(&mut self, op0: A, op1: B)
28626    where Assembler<'a>: BtrEmitter<A, B> {
28627        <Self as BtrEmitter<A, B>>::btr(self, op0, op1);
28628    }
28629    /// `BTS` (BTS). 
28630    /// Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and sets the selected bit in the bit string to 1. The bit base operand can be a register or a memory location; the bit offset operand can be a register or an immediate value
28631    ///
28632    ///
28633    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/BTS.html).
28634    ///
28635    /// Supported operand variants:
28636    ///
28637    /// ```text
28638    /// +----+----------+
28639    /// | #  | Operands |
28640    /// +----+----------+
28641    /// | 1  | Gpd, Gpd |
28642    /// | 2  | Gpd, Imm |
28643    /// | 3  | Gpq, Gpq |
28644    /// | 4  | Gpq, Imm |
28645    /// | 5  | Gpw, Gpw |
28646    /// | 6  | Gpw, Imm |
28647    /// | 7  | Mem, Gpd |
28648    /// | 8  | Mem, Gpq |
28649    /// | 9  | Mem, Gpw |
28650    /// | 10 | Mem, Imm |
28651    /// +----+----------+
28652    /// ```
28653    #[inline]
28654    pub fn bts<A, B>(&mut self, op0: A, op1: B)
28655    where Assembler<'a>: BtsEmitter<A, B> {
28656        <Self as BtsEmitter<A, B>>::bts(self, op0, op1);
28657    }
28658    /// `CALL` (CALL). 
28659    /// Saves procedure linking information on the stack and branches to the called procedure specified using the target operand. The target operand specifies the address of the first instruction in the called procedure. The operand can be an immediate value, a general-purpose register, or a memory location.
28660    ///
28661    ///
28662    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CALL.html).
28663    ///
28664    /// Supported operand variants:
28665    ///
28666    /// ```text
28667    /// +---+----------+
28668    /// | # | Operands |
28669    /// +---+----------+
28670    /// | 1 | Gpq      |
28671    /// | 2 | Imm      |
28672    /// | 3 | Label    |
28673    /// | 4 | Mem      |
28674    /// | 5 | Sym      |
28675    /// +---+----------+
28676    /// ```
28677    #[inline]
28678    pub fn call<A>(&mut self, op0: A)
28679    where Assembler<'a>: CallEmitter<A> {
28680        <Self as CallEmitter<A>>::call(self, op0);
28681    }
28682    /// `CALLF`.
28683    ///
28684    /// Supported operand variants:
28685    ///
28686    /// ```text
28687    /// +---+----------+
28688    /// | # | Operands |
28689    /// +---+----------+
28690    /// | 1 | Mem      |
28691    /// +---+----------+
28692    /// ```
28693    #[inline]
28694    pub fn callf<A>(&mut self, op0: A)
28695    where Assembler<'a>: CallfEmitter<A> {
28696        <Self as CallfEmitter<A>>::callf(self, op0);
28697    }
28698    /// `CBW`.
28699    ///
28700    /// Supported operand variants:
28701    ///
28702    /// ```text
28703    /// +---+----------+
28704    /// | # | Operands |
28705    /// +---+----------+
28706    /// | 1 | (none)   |
28707    /// +---+----------+
28708    /// ```
28709    #[inline]
28710    pub fn cbw(&mut self)
28711    where Assembler<'a>: CbwEmitter {
28712        <Self as CbwEmitter>::cbw(self);
28713    }
28714    /// `CDQ`.
28715    ///
28716    /// Supported operand variants:
28717    ///
28718    /// ```text
28719    /// +---+----------+
28720    /// | # | Operands |
28721    /// +---+----------+
28722    /// | 1 | (none)   |
28723    /// +---+----------+
28724    /// ```
28725    #[inline]
28726    pub fn cdq(&mut self)
28727    where Assembler<'a>: CdqEmitter {
28728        <Self as CdqEmitter>::cdq(self);
28729    }
28730    /// `CDQE`.
28731    ///
28732    /// Supported operand variants:
28733    ///
28734    /// ```text
28735    /// +---+----------+
28736    /// | # | Operands |
28737    /// +---+----------+
28738    /// | 1 | (none)   |
28739    /// +---+----------+
28740    /// ```
28741    #[inline]
28742    pub fn cdqe(&mut self)
28743    where Assembler<'a>: CdqeEmitter {
28744        <Self as CdqeEmitter>::cdqe(self);
28745    }
28746    /// `CLC` (CLC). 
28747    /// Clears the CF flag in the EFLAGS register. Operation is the same in all modes.
28748    ///
28749    ///
28750    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLC.html).
28751    ///
28752    /// Supported operand variants:
28753    ///
28754    /// ```text
28755    /// +---+----------+
28756    /// | # | Operands |
28757    /// +---+----------+
28758    /// | 1 | (none)   |
28759    /// +---+----------+
28760    /// ```
28761    #[inline]
28762    pub fn clc(&mut self)
28763    where Assembler<'a>: ClcEmitter {
28764        <Self as ClcEmitter>::clc(self);
28765    }
28766    /// `CLD` (CLD). 
28767    /// Clears the DF flag in the EFLAGS register. When the DF flag is set to 0, string operations increment the index registers (ESI and/or EDI). Operation is the same in all modes.
28768    ///
28769    ///
28770    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLD.html).
28771    ///
28772    /// Supported operand variants:
28773    ///
28774    /// ```text
28775    /// +---+----------+
28776    /// | # | Operands |
28777    /// +---+----------+
28778    /// | 1 | (none)   |
28779    /// +---+----------+
28780    /// ```
28781    #[inline]
28782    pub fn cld(&mut self)
28783    where Assembler<'a>: CldEmitter {
28784        <Self as CldEmitter>::cld(self);
28785    }
28786    /// `CLFLUSH` (CLFLUSH). 
28787    /// Invalidates from every level of the cache hierarchy in the cache coherence domain the cache line that contains the linear address specified with the memory operand. If that cache line contains modified data at any level of the cache hierarchy, that data is written back to memory. The source operand is a byte memory location.
28788    ///
28789    ///
28790    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLFLUSH.html).
28791    ///
28792    /// Supported operand variants:
28793    ///
28794    /// ```text
28795    /// +---+----------+
28796    /// | # | Operands |
28797    /// +---+----------+
28798    /// | 1 | Mem      |
28799    /// +---+----------+
28800    /// ```
28801    #[inline]
28802    pub fn clflush<A>(&mut self, op0: A)
28803    where Assembler<'a>: ClflushEmitter<A> {
28804        <Self as ClflushEmitter<A>>::clflush(self, op0);
28805    }
28806    /// `CLI` (CLI). 
28807    /// In most cases, CLI clears the IF flag in the EFLAGS register and no other flags are affected. Clearing the IF flag causes the processor to ignore maskable external interrupts. The IF flag and the CLI and STI instruction have no effect on the generation of exceptions and NMI interrupts.
28808    ///
28809    ///
28810    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLI.html).
28811    ///
28812    /// Supported operand variants:
28813    ///
28814    /// ```text
28815    /// +---+----------+
28816    /// | # | Operands |
28817    /// +---+----------+
28818    /// | 1 | (none)   |
28819    /// +---+----------+
28820    /// ```
28821    #[inline]
28822    pub fn cli(&mut self)
28823    where Assembler<'a>: CliEmitter {
28824        <Self as CliEmitter>::cli(self);
28825    }
28826    /// `CLTS` (CLTS). 
28827    /// Clears the task-switched (TS) flag in the CR0 register. This instruction is intended for use in operating-system procedures. It is a privileged instruction that can only be executed at a CPL of 0. It is allowed to be executed in real-address mode to allow initialization for protected mode.
28828    ///
28829    ///
28830    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CLTS.html).
28831    ///
28832    /// Supported operand variants:
28833    ///
28834    /// ```text
28835    /// +---+----------+
28836    /// | # | Operands |
28837    /// +---+----------+
28838    /// | 1 | (none)   |
28839    /// +---+----------+
28840    /// ```
28841    #[inline]
28842    pub fn clts(&mut self)
28843    where Assembler<'a>: CltsEmitter {
28844        <Self as CltsEmitter>::clts(self);
28845    }
28846    /// `CMC` (CMC). 
28847    /// Complements the CF flag in the EFLAGS register. CMC operation is the same in non-64-bit modes and 64-bit mode.
28848    ///
28849    ///
28850    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMC.html).
28851    ///
28852    /// Supported operand variants:
28853    ///
28854    /// ```text
28855    /// +---+----------+
28856    /// | # | Operands |
28857    /// +---+----------+
28858    /// | 1 | (none)   |
28859    /// +---+----------+
28860    /// ```
28861    #[inline]
28862    pub fn cmc(&mut self)
28863    where Assembler<'a>: CmcEmitter {
28864        <Self as CmcEmitter>::cmc(self);
28865    }
28866    /// `CMP` (CMP). 
28867    /// Compares the first source operand with the second source operand and sets the status flags in the EFLAGS register according to the results. The comparison is performed by subtracting the second operand from the first operand and then setting the status flags in the same manner as the SUB instruction. When an immediate value is used as an operand, it is sign-extended to the length of the first operand.
28868    ///
28869    ///
28870    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMP.html).
28871    ///
28872    /// Supported operand variants:
28873    ///
28874    /// ```text
28875    /// +----+--------------+
28876    /// | #  | Operands     |
28877    /// +----+--------------+
28878    /// | 1  | GpbLo, GpbLo |
28879    /// | 2  | GpbLo, Imm   |
28880    /// | 3  | GpbLo, Mem   |
28881    /// | 4  | Gpd, Gpd     |
28882    /// | 5  | Gpd, Imm     |
28883    /// | 6  | Gpd, Mem     |
28884    /// | 7  | Gpq, Gpq     |
28885    /// | 8  | Gpq, Imm     |
28886    /// | 9  | Gpq, Mem     |
28887    /// | 10 | Gpw, Gpw     |
28888    /// | 11 | Gpw, Imm     |
28889    /// | 12 | Gpw, Mem     |
28890    /// | 13 | Mem, GpbLo   |
28891    /// | 14 | Mem, Gpd     |
28892    /// | 15 | Mem, Gpq     |
28893    /// | 16 | Mem, Gpw     |
28894    /// | 17 | Mem, Imm     |
28895    /// +----+--------------+
28896    /// ```
28897    #[inline]
28898    pub fn cmp<A, B>(&mut self, op0: A, op1: B)
28899    where Assembler<'a>: CmpEmitter<A, B> {
28900        <Self as CmpEmitter<A, B>>::cmp(self, op0, op1);
28901    }
28902    /// `CMPS` (CMPS). 
28903    /// Compares the byte, word, doubleword, or quadword specified with the first source operand with the byte, word, doubleword, or quadword specified with the second source operand and sets the status flags in the EFLAGS register according to the results.
28904    ///
28905    ///
28906    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/CMPS%3ACMPSB%3ACMPSW%3ACMPSD%3ACMPSQ.html).
28907    ///
28908    /// Supported operand variants:
28909    ///
28910    /// ```text
28911    /// +---+----------+
28912    /// | # | Operands |
28913    /// +---+----------+
28914    /// | 1 | (none)   |
28915    /// +---+----------+
28916    /// ```
28917    #[inline]
28918    pub fn cmps(&mut self)
28919    where Assembler<'a>: CmpsEmitter {
28920        <Self as CmpsEmitter>::cmps(self);
28921    }
28922    /// `CQO`.
28923    ///
28924    /// Supported operand variants:
28925    ///
28926    /// ```text
28927    /// +---+----------+
28928    /// | # | Operands |
28929    /// +---+----------+
28930    /// | 1 | (none)   |
28931    /// +---+----------+
28932    /// ```
28933    #[inline]
28934    pub fn cqo(&mut self)
28935    where Assembler<'a>: CqoEmitter {
28936        <Self as CqoEmitter>::cqo(self);
28937    }
28938    /// `CWD`.
28939    ///
28940    /// Supported operand variants:
28941    ///
28942    /// ```text
28943    /// +---+----------+
28944    /// | # | Operands |
28945    /// +---+----------+
28946    /// | 1 | (none)   |
28947    /// +---+----------+
28948    /// ```
28949    #[inline]
28950    pub fn cwd(&mut self)
28951    where Assembler<'a>: CwdEmitter {
28952        <Self as CwdEmitter>::cwd(self);
28953    }
28954    /// `CWDE`.
28955    ///
28956    /// Supported operand variants:
28957    ///
28958    /// ```text
28959    /// +---+----------+
28960    /// | # | Operands |
28961    /// +---+----------+
28962    /// | 1 | (none)   |
28963    /// +---+----------+
28964    /// ```
28965    #[inline]
28966    pub fn cwde(&mut self)
28967    where Assembler<'a>: CwdeEmitter {
28968        <Self as CwdeEmitter>::cwde(self);
28969    }
28970    /// `C_EX`.
28971    ///
28972    /// Supported operand variants:
28973    ///
28974    /// ```text
28975    /// +---+----------+
28976    /// | # | Operands |
28977    /// +---+----------+
28978    /// | 1 | (none)   |
28979    /// +---+----------+
28980    /// ```
28981    #[inline]
28982    pub fn c_ex(&mut self)
28983    where Assembler<'a>: CExEmitter {
28984        <Self as CExEmitter>::c_ex(self);
28985    }
28986    /// `C_SEP`.
28987    ///
28988    /// Supported operand variants:
28989    ///
28990    /// ```text
28991    /// +---+----------+
28992    /// | # | Operands |
28993    /// +---+----------+
28994    /// | 1 | (none)   |
28995    /// +---+----------+
28996    /// ```
28997    #[inline]
28998    pub fn c_sep(&mut self)
28999    where Assembler<'a>: CSepEmitter {
29000        <Self as CSepEmitter>::c_sep(self);
29001    }
29002    /// `DEC` (DEC). 
29003    /// Subtracts 1 from the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (To perform a decrement operation that updates the CF flag, use a SUB instruction with an immediate operand of 1.)
29004    ///
29005    ///
29006    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DEC.html).
29007    ///
29008    /// Supported operand variants:
29009    ///
29010    /// ```text
29011    /// +---+----------+
29012    /// | # | Operands |
29013    /// +---+----------+
29014    /// | 1 | GpbLo    |
29015    /// | 2 | Gpd      |
29016    /// | 3 | Gpq      |
29017    /// | 4 | Gpw      |
29018    /// | 5 | Mem      |
29019    /// +---+----------+
29020    /// ```
29021    #[inline]
29022    pub fn dec<A>(&mut self, op0: A)
29023    where Assembler<'a>: DecEmitter<A> {
29024        <Self as DecEmitter<A>>::dec(self, op0);
29025    }
29026    /// `DIV` (DIV). 
29027    /// Divides unsigned the value in the AX, DX:AX, EDX:EAX, or RDX:RAX registers (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, EDX:EAX, or RDX:RAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor). Division using 64-bit operand is available only in 64-bit mode.
29028    ///
29029    ///
29030    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/DIV.html).
29031    ///
29032    /// Supported operand variants:
29033    ///
29034    /// ```text
29035    /// +---+----------+
29036    /// | # | Operands |
29037    /// +---+----------+
29038    /// | 1 | GpbLo    |
29039    /// | 2 | Gpd      |
29040    /// | 3 | Gpq      |
29041    /// | 4 | Gpw      |
29042    /// | 5 | Mem      |
29043    /// +---+----------+
29044    /// ```
29045    #[inline]
29046    pub fn div<A>(&mut self, op0: A)
29047    where Assembler<'a>: DivEmitter<A> {
29048        <Self as DivEmitter<A>>::div(self, op0);
29049    }
29050    /// `ENTER` (ENTER). 
29051    /// Creates a stack frame (comprising of space for dynamic storage and 1-32 frame pointer storage) for a procedure. The first operand (imm16) specifies the size of the dynamic storage in the stack frame (that is, the number of bytes of dynamically allocated on the stack for the procedure). The second operand (imm8) gives the lexical nesting level (0 to 31) of the procedure. The nesting level (imm8 mod 32) and the OperandSize attribute determine the size in bytes of the storage space for frame pointers.
29052    ///
29053    ///
29054    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/ENTER.html).
29055    ///
29056    /// Supported operand variants:
29057    ///
29058    /// ```text
29059    /// +---+----------+
29060    /// | # | Operands |
29061    /// +---+----------+
29062    /// | 1 | Imm      |
29063    /// +---+----------+
29064    /// ```
29065    #[inline]
29066    pub fn enter<A>(&mut self, op0: A)
29067    where Assembler<'a>: EnterEmitter<A> {
29068        <Self as EnterEmitter<A>>::enter(self, op0);
29069    }
29070    /// `FWAIT` (FWAIT). 
29071    /// Causes the processor to check for and handle pending, unmasked, floating-point exceptions before proceeding. (FWAIT is an alternate mnemonic for WAIT.)
29072    ///
29073    ///
29074    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/WAIT%3AFWAIT.html).
29075    ///
29076    /// Supported operand variants:
29077    ///
29078    /// ```text
29079    /// +---+----------+
29080    /// | # | Operands |
29081    /// +---+----------+
29082    /// | 1 | (none)   |
29083    /// +---+----------+
29084    /// ```
29085    #[inline]
29086    pub fn fwait(&mut self)
29087    where Assembler<'a>: FwaitEmitter {
29088        <Self as FwaitEmitter>::fwait(self);
29089    }
29090    /// `HLT` (HLT). 
29091    /// Stops instruction execution and places the processor in a HALT state. An enabled interrupt (including NMI and SMI), a debug exception, the BINIT# signal, the INIT# signal, or the RESET# signal will resume execution. If an interrupt (including NMI) is used to resume execution after a HLT instruction, the saved instruction pointer (CS:EIP) points to the instruction following the HLT instruction.
29092    ///
29093    ///
29094    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/HLT.html).
29095    ///
29096    /// Supported operand variants:
29097    ///
29098    /// ```text
29099    /// +---+----------+
29100    /// | # | Operands |
29101    /// +---+----------+
29102    /// | 1 | (none)   |
29103    /// +---+----------+
29104    /// ```
29105    #[inline]
29106    pub fn hlt(&mut self)
29107    where Assembler<'a>: HltEmitter {
29108        <Self as HltEmitter>::hlt(self);
29109    }
29110    /// `IDIV` (IDIV). 
29111    /// Divides the (signed) value in the AX, DX:AX, or EDX:EAX (dividend) by the source operand (divisor) and stores the result in the AX (AH:AL), DX:AX, or EDX:EAX registers. The source operand can be a general-purpose register or a memory location. The action of this instruction depends on the operand size (dividend/divisor).
29112    ///
29113    ///
29114    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IDIV.html).
29115    ///
29116    /// Supported operand variants:
29117    ///
29118    /// ```text
29119    /// +---+----------+
29120    /// | # | Operands |
29121    /// +---+----------+
29122    /// | 1 | GpbLo    |
29123    /// | 2 | Gpd      |
29124    /// | 3 | Gpq      |
29125    /// | 4 | Gpw      |
29126    /// | 5 | Mem      |
29127    /// +---+----------+
29128    /// ```
29129    #[inline]
29130    pub fn idiv<A>(&mut self, op0: A)
29131    where Assembler<'a>: IdivEmitter<A> {
29132        <Self as IdivEmitter<A>>::idiv(self, op0);
29133    }
29134    /// `IMUL` (IMUL). 
29135    /// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
29136    ///
29137    ///
29138    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
29139    ///
29140    /// Supported operand variants:
29141    ///
29142    /// ```text
29143    /// +---+----------+
29144    /// | # | Operands |
29145    /// +---+----------+
29146    /// | 1 | GpbLo    |
29147    /// | 2 | Gpd      |
29148    /// | 3 | Gpq      |
29149    /// | 4 | Gpw      |
29150    /// | 5 | Mem      |
29151    /// +---+----------+
29152    /// ```
29153    #[inline]
29154    pub fn imul_1<A>(&mut self, op0: A)
29155    where Assembler<'a>: ImulEmitter_1<A> {
29156        <Self as ImulEmitter_1<A>>::imul_1(self, op0);
29157    }
29158    /// `IMUL` (IMUL). 
29159    /// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
29160    ///
29161    ///
29162    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
29163    ///
29164    /// Supported operand variants:
29165    ///
29166    /// ```text
29167    /// +---+----------+
29168    /// | # | Operands |
29169    /// +---+----------+
29170    /// | 1 | Gpd, Gpd |
29171    /// | 2 | Gpd, Mem |
29172    /// | 3 | Gpq, Gpq |
29173    /// | 4 | Gpq, Mem |
29174    /// | 5 | Gpw, Gpw |
29175    /// | 6 | Gpw, Mem |
29176    /// +---+----------+
29177    /// ```
29178    #[inline]
29179    pub fn imul_2<A, B>(&mut self, op0: A, op1: B)
29180    where Assembler<'a>: ImulEmitter_2<A, B> {
29181        <Self as ImulEmitter_2<A, B>>::imul_2(self, op0, op1);
29182    }
29183    /// `IMUL` (IMUL). 
29184    /// Performs a signed multiplication of two operands. This instruction has three forms, depending on the number of operands.
29185    ///
29186    ///
29187    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IMUL.html).
29188    ///
29189    /// Supported operand variants:
29190    ///
29191    /// ```text
29192    /// +---+---------------+
29193    /// | # | Operands      |
29194    /// +---+---------------+
29195    /// | 1 | Gpd, Gpd, Imm |
29196    /// | 2 | Gpd, Mem, Imm |
29197    /// | 3 | Gpq, Gpq, Imm |
29198    /// | 4 | Gpq, Mem, Imm |
29199    /// | 5 | Gpw, Gpw, Imm |
29200    /// | 6 | Gpw, Mem, Imm |
29201    /// +---+---------------+
29202    /// ```
29203    #[inline]
29204    pub fn imul_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
29205    where Assembler<'a>: ImulEmitter_3<A, B, C> {
29206        <Self as ImulEmitter_3<A, B, C>>::imul_3(self, op0, op1, op2);
29207    }
29208    /// `IN` (IN). 
29209    /// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
29210    ///
29211    ///
29212    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
29213    ///
29214    /// Supported operand variants:
29215    ///
29216    /// ```text
29217    /// +---+----------+
29218    /// | # | Operands |
29219    /// +---+----------+
29220    /// | 1 | (none)   |
29221    /// +---+----------+
29222    /// ```
29223    #[inline]
29224    pub fn r#in(&mut self)
29225    where Assembler<'a>: InEmitter {
29226        <Self as InEmitter>::r#in(self);
29227    }
29228    /// `IN` (IN). 
29229    /// Copies the value from the I/O port specified with the second operand (source operand) to the destination operand (first operand). The source operand can be a byte-immediate or the DX register; the destination operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively). Using the DX register as a source operand allows I/O port addresses from 0 to 65,535 to be accessed; using a byte immediate allows I/O port addresses 0 to 255 to be accessed.
29230    ///
29231    ///
29232    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IN.html).
29233    ///
29234    /// Supported operand variants:
29235    ///
29236    /// ```text
29237    /// +---+------------+
29238    /// | # | Operands   |
29239    /// +---+------------+
29240    /// | 1 | GpbLo, Imm |
29241    /// | 2 | Gpd, Imm   |
29242    /// | 3 | Gpq, Imm   |
29243    /// | 4 | Gpw, Imm   |
29244    /// +---+------------+
29245    /// ```
29246    #[inline]
29247    pub fn r#in_2<A, B>(&mut self, op0: A, op1: B)
29248    where Assembler<'a>: InEmitter_2<A, B> {
29249        <Self as InEmitter_2<A, B>>::r#in_2(self, op0, op1);
29250    }
29251    /// `INC` (INC). 
29252    /// Adds 1 to the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (Use a ADD instruction with an immediate operand of 1 to perform an increment operation that does updates the CF flag.)
29253    ///
29254    ///
29255    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INC.html).
29256    ///
29257    /// Supported operand variants:
29258    ///
29259    /// ```text
29260    /// +---+----------+
29261    /// | # | Operands |
29262    /// +---+----------+
29263    /// | 1 | GpbLo    |
29264    /// | 2 | Gpd      |
29265    /// | 3 | Gpq      |
29266    /// | 4 | Gpw      |
29267    /// | 5 | Mem      |
29268    /// +---+----------+
29269    /// ```
29270    #[inline]
29271    pub fn inc<A>(&mut self, op0: A)
29272    where Assembler<'a>: IncEmitter<A> {
29273        <Self as IncEmitter<A>>::inc(self, op0);
29274    }
29275    /// `INS` (INS). 
29276    /// Copies the data from the I/O port specified with the source operand (second operand) to the destination operand (first operand). The source operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The destination operand is a memory location, the address of which is read from either the ES:DI, ES:EDI or the RDI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The ES segment cannot be overridden with a segment override prefix.) The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
29277    ///
29278    ///
29279    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INS%3AINSB%3AINSW%3AINSD.html).
29280    ///
29281    /// Supported operand variants:
29282    ///
29283    /// ```text
29284    /// +---+----------+
29285    /// | # | Operands |
29286    /// +---+----------+
29287    /// | 1 | (none)   |
29288    /// +---+----------+
29289    /// ```
29290    #[inline]
29291    pub fn ins(&mut self)
29292    where Assembler<'a>: InsEmitter {
29293        <Self as InsEmitter>::ins(self);
29294    }
29295    /// `INT` (INT). 
29296    /// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned intermediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
29297    ///
29298    ///
29299    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
29300    ///
29301    /// Supported operand variants:
29302    ///
29303    /// ```text
29304    /// +---+----------+
29305    /// | # | Operands |
29306    /// +---+----------+
29307    /// | 1 | Imm      |
29308    /// +---+----------+
29309    /// ```
29310    #[inline]
29311    pub fn int<A>(&mut self, op0: A)
29312    where Assembler<'a>: IntEmitter<A> {
29313        <Self as IntEmitter<A>>::int(self, op0);
29314    }
29315    /// `INT1` (INT1). 
29316    /// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned intermediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
29317    ///
29318    ///
29319    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
29320    ///
29321    /// Supported operand variants:
29322    ///
29323    /// ```text
29324    /// +---+----------+
29325    /// | # | Operands |
29326    /// +---+----------+
29327    /// | 1 | (none)   |
29328    /// +---+----------+
29329    /// ```
29330    #[inline]
29331    pub fn int1(&mut self)
29332    where Assembler<'a>: Int1Emitter {
29333        <Self as Int1Emitter>::int1(self);
29334    }
29335    /// `INT3` (INT3). 
29336    /// The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand (see the section titled “Interrupts and Exceptions” in Chapter 6 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The destination operand specifies a vector from 0 to 255, encoded as an 8-bit unsigned intermediate value. Each vector provides an index to a gate descriptor in the IDT. The first 32 vectors are reserved by Intel for system use. Some of these vectors are used for internally generated exceptions.
29337    ///
29338    ///
29339    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/INTn%3AINTO%3AINT3%3AINT1.html).
29340    ///
29341    /// Supported operand variants:
29342    ///
29343    /// ```text
29344    /// +---+----------+
29345    /// | # | Operands |
29346    /// +---+----------+
29347    /// | 1 | (none)   |
29348    /// +---+----------+
29349    /// ```
29350    #[inline]
29351    pub fn int3(&mut self)
29352    where Assembler<'a>: Int3Emitter {
29353        <Self as Int3Emitter>::int3(self);
29354    }
29355    /// `IRET` (IRET). 
29356    /// Returns program control from an exception or interrupt handler to a program or procedure that was interrupted by an exception, an external interrupt, or a software-generated interrupt. These instructions are also used to perform a return from a nested task. (A nested task is created when a CALL instruction is used to initiate a task switch or when an interrupt or exception causes a task switch to an interrupt or exception handler.) See the section titled “Task Linking” in Chapter 8 of the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 3A.
29357    ///
29358    ///
29359    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/IRET%3AIRETD%3AIRETQ.html).
29360    ///
29361    /// Supported operand variants:
29362    ///
29363    /// ```text
29364    /// +---+----------+
29365    /// | # | Operands |
29366    /// +---+----------+
29367    /// | 1 | (none)   |
29368    /// +---+----------+
29369    /// ```
29370    #[inline]
29371    pub fn iret(&mut self)
29372    where Assembler<'a>: IretEmitter {
29373        <Self as IretEmitter>::iret(self);
29374    }
29375    /// `JA` (JA). 
29376    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29377    ///
29378    ///
29379    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29380    ///
29381    /// Supported operand variants:
29382    ///
29383    /// ```text
29384    /// +---+----------+
29385    /// | # | Operands |
29386    /// +---+----------+
29387    /// | 1 | Imm      |
29388    /// | 2 | Label    |
29389    /// | 3 | Sym      |
29390    /// +---+----------+
29391    /// ```
29392    #[inline]
29393    pub fn ja<A>(&mut self, op0: A)
29394    where Assembler<'a>: JaEmitter<A> {
29395        <Self as JaEmitter<A>>::ja(self, op0);
29396    }
29397    /// `JBE` (JBE). 
29398    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29399    ///
29400    ///
29401    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29402    ///
29403    /// Supported operand variants:
29404    ///
29405    /// ```text
29406    /// +---+----------+
29407    /// | # | Operands |
29408    /// +---+----------+
29409    /// | 1 | Imm      |
29410    /// | 2 | Label    |
29411    /// | 3 | Sym      |
29412    /// +---+----------+
29413    /// ```
29414    #[inline]
29415    pub fn jbe<A>(&mut self, op0: A)
29416    where Assembler<'a>: JbeEmitter<A> {
29417        <Self as JbeEmitter<A>>::jbe(self, op0);
29418    }
29419    /// `JC` (JC). 
29420    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29421    ///
29422    ///
29423    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29424    ///
29425    /// Supported operand variants:
29426    ///
29427    /// ```text
29428    /// +---+----------+
29429    /// | # | Operands |
29430    /// +---+----------+
29431    /// | 1 | Imm      |
29432    /// | 2 | Label    |
29433    /// | 3 | Sym      |
29434    /// +---+----------+
29435    /// ```
29436    #[inline]
29437    pub fn jc<A>(&mut self, op0: A)
29438    where Assembler<'a>: JcEmitter<A> {
29439        <Self as JcEmitter<A>>::jc(self, op0);
29440    }
29441    /// `JCXZ` (JCXZ). 
29442    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29443    ///
29444    ///
29445    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29446    ///
29447    /// Supported operand variants:
29448    ///
29449    /// ```text
29450    /// +---+----------+
29451    /// | # | Operands |
29452    /// +---+----------+
29453    /// | 1 | Imm      |
29454    /// | 2 | Label    |
29455    /// | 3 | Sym      |
29456    /// +---+----------+
29457    /// ```
29458    #[inline]
29459    pub fn jcxz<A>(&mut self, op0: A)
29460    where Assembler<'a>: JcxzEmitter<A> {
29461        <Self as JcxzEmitter<A>>::jcxz(self, op0);
29462    }
29463    /// `JG` (JG). 
29464    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29465    ///
29466    ///
29467    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29468    ///
29469    /// Supported operand variants:
29470    ///
29471    /// ```text
29472    /// +---+----------+
29473    /// | # | Operands |
29474    /// +---+----------+
29475    /// | 1 | Imm      |
29476    /// | 2 | Label    |
29477    /// | 3 | Sym      |
29478    /// +---+----------+
29479    /// ```
29480    #[inline]
29481    pub fn jg<A>(&mut self, op0: A)
29482    where Assembler<'a>: JgEmitter<A> {
29483        <Self as JgEmitter<A>>::jg(self, op0);
29484    }
29485    /// `JGE` (JGE). 
29486    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29487    ///
29488    ///
29489    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29490    ///
29491    /// Supported operand variants:
29492    ///
29493    /// ```text
29494    /// +---+----------+
29495    /// | # | Operands |
29496    /// +---+----------+
29497    /// | 1 | Imm      |
29498    /// | 2 | Label    |
29499    /// | 3 | Sym      |
29500    /// +---+----------+
29501    /// ```
29502    #[inline]
29503    pub fn jge<A>(&mut self, op0: A)
29504    where Assembler<'a>: JgeEmitter<A> {
29505        <Self as JgeEmitter<A>>::jge(self, op0);
29506    }
29507    /// `JL` (JL). 
29508    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29509    ///
29510    ///
29511    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29512    ///
29513    /// Supported operand variants:
29514    ///
29515    /// ```text
29516    /// +---+----------+
29517    /// | # | Operands |
29518    /// +---+----------+
29519    /// | 1 | Imm      |
29520    /// | 2 | Label    |
29521    /// | 3 | Sym      |
29522    /// +---+----------+
29523    /// ```
29524    #[inline]
29525    pub fn jl<A>(&mut self, op0: A)
29526    where Assembler<'a>: JlEmitter<A> {
29527        <Self as JlEmitter<A>>::jl(self, op0);
29528    }
29529    /// `JLE` (JLE). 
29530    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29531    ///
29532    ///
29533    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29534    ///
29535    /// Supported operand variants:
29536    ///
29537    /// ```text
29538    /// +---+----------+
29539    /// | # | Operands |
29540    /// +---+----------+
29541    /// | 1 | Imm      |
29542    /// | 2 | Label    |
29543    /// | 3 | Sym      |
29544    /// +---+----------+
29545    /// ```
29546    #[inline]
29547    pub fn jle<A>(&mut self, op0: A)
29548    where Assembler<'a>: JleEmitter<A> {
29549        <Self as JleEmitter<A>>::jle(self, op0);
29550    }
29551    /// `JMP` (JMP). 
29552    /// Transfers program control to a different point in the instruction stream without recording return information. The destination (target) operand specifies the address of the instruction being jumped to. This operand can be an immediate value, a general-purpose register, or a memory location.
29553    ///
29554    ///
29555    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/JMP.html).
29556    ///
29557    /// Supported operand variants:
29558    ///
29559    /// ```text
29560    /// +---+----------+
29561    /// | # | Operands |
29562    /// +---+----------+
29563    /// | 1 | Gpq      |
29564    /// | 2 | Imm      |
29565    /// | 3 | Label    |
29566    /// | 4 | Mem      |
29567    /// | 5 | Sym      |
29568    /// +---+----------+
29569    /// ```
29570    #[inline]
29571    pub fn jmp<A>(&mut self, op0: A)
29572    where Assembler<'a>: JmpEmitter<A> {
29573        <Self as JmpEmitter<A>>::jmp(self, op0);
29574    }
29575    /// `JMPF`.
29576    ///
29577    /// Supported operand variants:
29578    ///
29579    /// ```text
29580    /// +---+----------+
29581    /// | # | Operands |
29582    /// +---+----------+
29583    /// | 1 | Mem      |
29584    /// +---+----------+
29585    /// ```
29586    #[inline]
29587    pub fn jmpf<A>(&mut self, op0: A)
29588    where Assembler<'a>: JmpfEmitter<A> {
29589        <Self as JmpfEmitter<A>>::jmpf(self, op0);
29590    }
29591    /// `JNC` (JNC). 
29592    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29593    ///
29594    ///
29595    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29596    ///
29597    /// Supported operand variants:
29598    ///
29599    /// ```text
29600    /// +---+----------+
29601    /// | # | Operands |
29602    /// +---+----------+
29603    /// | 1 | Imm      |
29604    /// | 2 | Label    |
29605    /// | 3 | Sym      |
29606    /// +---+----------+
29607    /// ```
29608    #[inline]
29609    pub fn jnc<A>(&mut self, op0: A)
29610    where Assembler<'a>: JncEmitter<A> {
29611        <Self as JncEmitter<A>>::jnc(self, op0);
29612    }
29613    /// `JNO` (JNO). 
29614    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29615    ///
29616    ///
29617    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29618    ///
29619    /// Supported operand variants:
29620    ///
29621    /// ```text
29622    /// +---+----------+
29623    /// | # | Operands |
29624    /// +---+----------+
29625    /// | 1 | Imm      |
29626    /// | 2 | Label    |
29627    /// | 3 | Sym      |
29628    /// +---+----------+
29629    /// ```
29630    #[inline]
29631    pub fn jno<A>(&mut self, op0: A)
29632    where Assembler<'a>: JnoEmitter<A> {
29633        <Self as JnoEmitter<A>>::jno(self, op0);
29634    }
29635    /// `JNP` (JNP). 
29636    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29637    ///
29638    ///
29639    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29640    ///
29641    /// Supported operand variants:
29642    ///
29643    /// ```text
29644    /// +---+----------+
29645    /// | # | Operands |
29646    /// +---+----------+
29647    /// | 1 | Imm      |
29648    /// | 2 | Label    |
29649    /// | 3 | Sym      |
29650    /// +---+----------+
29651    /// ```
29652    #[inline]
29653    pub fn jnp<A>(&mut self, op0: A)
29654    where Assembler<'a>: JnpEmitter<A> {
29655        <Self as JnpEmitter<A>>::jnp(self, op0);
29656    }
29657    /// `JNS` (JNS). 
29658    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29659    ///
29660    ///
29661    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29662    ///
29663    /// Supported operand variants:
29664    ///
29665    /// ```text
29666    /// +---+----------+
29667    /// | # | Operands |
29668    /// +---+----------+
29669    /// | 1 | Imm      |
29670    /// | 2 | Label    |
29671    /// | 3 | Sym      |
29672    /// +---+----------+
29673    /// ```
29674    #[inline]
29675    pub fn jns<A>(&mut self, op0: A)
29676    where Assembler<'a>: JnsEmitter<A> {
29677        <Self as JnsEmitter<A>>::jns(self, op0);
29678    }
29679    /// `JNZ` (JNZ). 
29680    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29681    ///
29682    ///
29683    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29684    ///
29685    /// Supported operand variants:
29686    ///
29687    /// ```text
29688    /// +---+----------+
29689    /// | # | Operands |
29690    /// +---+----------+
29691    /// | 1 | Imm      |
29692    /// | 2 | Label    |
29693    /// | 3 | Sym      |
29694    /// +---+----------+
29695    /// ```
29696    #[inline]
29697    pub fn jnz<A>(&mut self, op0: A)
29698    where Assembler<'a>: JnzEmitter<A> {
29699        <Self as JnzEmitter<A>>::jnz(self, op0);
29700    }
29701    /// `JO` (JO). 
29702    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29703    ///
29704    ///
29705    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29706    ///
29707    /// Supported operand variants:
29708    ///
29709    /// ```text
29710    /// +---+----------+
29711    /// | # | Operands |
29712    /// +---+----------+
29713    /// | 1 | Imm      |
29714    /// | 2 | Label    |
29715    /// | 3 | Sym      |
29716    /// +---+----------+
29717    /// ```
29718    #[inline]
29719    pub fn jo<A>(&mut self, op0: A)
29720    where Assembler<'a>: JoEmitter<A> {
29721        <Self as JoEmitter<A>>::jo(self, op0);
29722    }
29723    /// `JP` (JP). 
29724    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29725    ///
29726    ///
29727    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29728    ///
29729    /// Supported operand variants:
29730    ///
29731    /// ```text
29732    /// +---+----------+
29733    /// | # | Operands |
29734    /// +---+----------+
29735    /// | 1 | Imm      |
29736    /// | 2 | Label    |
29737    /// | 3 | Sym      |
29738    /// +---+----------+
29739    /// ```
29740    #[inline]
29741    pub fn jp<A>(&mut self, op0: A)
29742    where Assembler<'a>: JpEmitter<A> {
29743        <Self as JpEmitter<A>>::jp(self, op0);
29744    }
29745    /// `JS` (JS). 
29746    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29747    ///
29748    ///
29749    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29750    ///
29751    /// Supported operand variants:
29752    ///
29753    /// ```text
29754    /// +---+----------+
29755    /// | # | Operands |
29756    /// +---+----------+
29757    /// | 1 | Imm      |
29758    /// | 2 | Label    |
29759    /// | 3 | Sym      |
29760    /// +---+----------+
29761    /// ```
29762    #[inline]
29763    pub fn js<A>(&mut self, op0: A)
29764    where Assembler<'a>: JsEmitter<A> {
29765        <Self as JsEmitter<A>>::js(self, op0);
29766    }
29767    /// `JZ` (JZ). 
29768    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29769    ///
29770    ///
29771    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29772    ///
29773    /// Supported operand variants:
29774    ///
29775    /// ```text
29776    /// +---+----------+
29777    /// | # | Operands |
29778    /// +---+----------+
29779    /// | 1 | Imm      |
29780    /// | 2 | Label    |
29781    /// | 3 | Sym      |
29782    /// +---+----------+
29783    /// ```
29784    #[inline]
29785    pub fn jz<A>(&mut self, op0: A)
29786    where Assembler<'a>: JzEmitter<A> {
29787        <Self as JzEmitter<A>>::jz(self, op0);
29788    }
29789    /// `JCC` (JO). 
29790    /// Checks the state of one or more of the status flags in the EFLAGS register (CF, OF, PF, SF, and ZF) and, if the flags are in the specified state (condition), performs a jump to the target instruction specified by the destination operand. A condition code (cc) is associated with each instruction to indicate the condition being tested for. If the condition is not satisfied, the jump is not performed and execution continues with the instruction following the Jcc instruction.
29791    ///
29792    ///
29793    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/Jcc.html).
29794    ///
29795    /// Supported operand variants:
29796    ///
29797    /// ```text
29798    /// +---+----------+
29799    /// | # | Operands |
29800    /// +---+----------+
29801    /// | 1 | Imm      |
29802    /// | 2 | Label    |
29803    /// | 3 | Sym      |
29804    /// +---+----------+
29805    /// ```
29806    #[inline]
29807    pub fn jcc<A>(&mut self, op0: A)
29808    where Assembler<'a>: JccEmitter<A> {
29809        <Self as JccEmitter<A>>::jcc(self, op0);
29810    }
29811    /// `LAHF`.
29812    ///
29813    /// Supported operand variants:
29814    ///
29815    /// ```text
29816    /// +---+----------+
29817    /// | # | Operands |
29818    /// +---+----------+
29819    /// | 1 | (none)   |
29820    /// +---+----------+
29821    /// ```
29822    #[inline]
29823    pub fn lahf(&mut self)
29824    where Assembler<'a>: LahfEmitter {
29825        <Self as LahfEmitter>::lahf(self);
29826    }
29827    /// `LAR` (LAR). 
29828    /// Loads the access rights from the segment descriptor specified by the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the flag register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. If the source operand is a memory address, only 16 bits of data are accessed. The destination operand is a general-purpose register.
29829    ///
29830    ///
29831    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LAR.html).
29832    ///
29833    /// Supported operand variants:
29834    ///
29835    /// ```text
29836    /// +---+----------+
29837    /// | # | Operands |
29838    /// +---+----------+
29839    /// | 1 | Gpd, Gpw |
29840    /// | 2 | Gpd, Mem |
29841    /// | 3 | Gpq, Gpw |
29842    /// | 4 | Gpq, Mem |
29843    /// | 5 | Gpw, Gpw |
29844    /// | 6 | Gpw, Mem |
29845    /// +---+----------+
29846    /// ```
29847    #[inline]
29848    pub fn lar<A, B>(&mut self, op0: A, op1: B)
29849    where Assembler<'a>: LarEmitter<A, B> {
29850        <Self as LarEmitter<A, B>>::lar(self, op0, op1);
29851    }
29852    /// `LDTILECFG`.
29853    ///
29854    /// Supported operand variants:
29855    ///
29856    /// ```text
29857    /// +---+----------+
29858    /// | # | Operands |
29859    /// +---+----------+
29860    /// | 1 | Mem      |
29861    /// +---+----------+
29862    /// ```
29863    #[inline]
29864    pub fn ldtilecfg<A>(&mut self, op0: A)
29865    where Assembler<'a>: LdtilecfgEmitter<A> {
29866        <Self as LdtilecfgEmitter<A>>::ldtilecfg(self, op0);
29867    }
29868    /// `LEA` (LEA). 
29869    /// Computes the effective address of the second operand (the source operand) and stores it in the first operand (destination operand). The source operand is a memory address (offset part) specified with one of the processors addressing modes; the destination operand is a general-purpose register. The address-size and operand-size attributes affect the action performed by this instruction, as shown in the following table. The operand-size attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the attribute of the code segment.
29870    ///
29871    ///
29872    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEA.html).
29873    ///
29874    /// Supported operand variants:
29875    ///
29876    /// ```text
29877    /// +---+----------+
29878    /// | # | Operands |
29879    /// +---+----------+
29880    /// | 1 | Gpd, Mem |
29881    /// | 2 | Gpq, Mem |
29882    /// | 3 | Gpw, Mem |
29883    /// +---+----------+
29884    /// ```
29885    #[inline]
29886    pub fn lea<A, B>(&mut self, op0: A, op1: B)
29887    where Assembler<'a>: LeaEmitter<A, B> {
29888        <Self as LeaEmitter<A, B>>::lea(self, op0, op1);
29889    }
29890    /// `LEAVE` (LEAVE). 
29891    /// Releases the stack frame set up by an earlier ENTER instruction. The LEAVE instruction copies the frame pointer (in the EBP register) into the stack pointer register (ESP), which releases the stack space allocated to the stack frame. The old frame pointer (the frame pointer for the calling procedure that was saved by the ENTER instruction) is then popped from the stack into the EBP register, restoring the calling procedure’s stack frame.
29892    ///
29893    ///
29894    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LEAVE.html).
29895    ///
29896    /// Supported operand variants:
29897    ///
29898    /// ```text
29899    /// +---+----------+
29900    /// | # | Operands |
29901    /// +---+----------+
29902    /// | 1 | (none)   |
29903    /// +---+----------+
29904    /// ```
29905    #[inline]
29906    pub fn leave(&mut self)
29907    where Assembler<'a>: LeaveEmitter {
29908        <Self as LeaveEmitter>::leave(self);
29909    }
29910    /// `LFS` (LFS). 
29911    /// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
29912    ///
29913    ///
29914    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
29915    ///
29916    /// Supported operand variants:
29917    ///
29918    /// ```text
29919    /// +---+----------+
29920    /// | # | Operands |
29921    /// +---+----------+
29922    /// | 1 | Gpd, Mem |
29923    /// | 2 | Gpq, Mem |
29924    /// | 3 | Gpw, Mem |
29925    /// +---+----------+
29926    /// ```
29927    #[inline]
29928    pub fn lfs<A, B>(&mut self, op0: A, op1: B)
29929    where Assembler<'a>: LfsEmitter<A, B> {
29930        <Self as LfsEmitter<A, B>>::lfs(self, op0, op1);
29931    }
29932    /// `LGDT` (LGDT). 
29933    /// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
29934    ///
29935    ///
29936    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
29937    ///
29938    /// Supported operand variants:
29939    ///
29940    /// ```text
29941    /// +---+----------+
29942    /// | # | Operands |
29943    /// +---+----------+
29944    /// | 1 | Mem      |
29945    /// +---+----------+
29946    /// ```
29947    #[inline]
29948    pub fn lgdt<A>(&mut self, op0: A)
29949    where Assembler<'a>: LgdtEmitter<A> {
29950        <Self as LgdtEmitter<A>>::lgdt(self, op0);
29951    }
29952    /// `LGS` (LGS). 
29953    /// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
29954    ///
29955    ///
29956    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
29957    ///
29958    /// Supported operand variants:
29959    ///
29960    /// ```text
29961    /// +---+----------+
29962    /// | # | Operands |
29963    /// +---+----------+
29964    /// | 1 | Gpd, Mem |
29965    /// | 2 | Gpq, Mem |
29966    /// | 3 | Gpw, Mem |
29967    /// +---+----------+
29968    /// ```
29969    #[inline]
29970    pub fn lgs<A, B>(&mut self, op0: A, op1: B)
29971    where Assembler<'a>: LgsEmitter<A, B> {
29972        <Self as LgsEmitter<A, B>>::lgs(self, op0, op1);
29973    }
29974    /// `LIDT` (LIDT). 
29975    /// Loads the values in the source operand into the global descriptor table register (GDTR) or the interrupt descriptor table register (IDTR). The source operand specifies a 6-byte memory location that contains the base address (a linear address) and the limit (size of table in bytes) of the global descriptor table (GDT) or the interrupt descriptor table (IDT). If operand-size attribute is 32 bits, a 16-bit limit (lower 2 bytes of the 6-byte data operand) and a 32-bit base address (upper 4 bytes of the data operand) are loaded into the register. If the operand-size attribute is 16 bits, a 16-bit limit (lower 2 bytes) and a 24-bit base address (third, fourth, and fifth byte) are loaded. Here, the high-order byte of the operand is not used and the high-order byte of the base address in the GDTR or IDTR is filled with zeros.
29976    ///
29977    ///
29978    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LGDT%3ALIDT.html).
29979    ///
29980    /// Supported operand variants:
29981    ///
29982    /// ```text
29983    /// +---+----------+
29984    /// | # | Operands |
29985    /// +---+----------+
29986    /// | 1 | Mem      |
29987    /// +---+----------+
29988    /// ```
29989    #[inline]
29990    pub fn lidt<A>(&mut self, op0: A)
29991    where Assembler<'a>: LidtEmitter<A> {
29992        <Self as LidtEmitter<A>>::lidt(self, op0);
29993    }
29994    /// `LLDT` (LLDT). 
29995    /// Loads the source operand into the segment selector field of the local descriptor table register (LDTR). The source operand (a general-purpose register or a memory location) contains a segment selector that points to a local descriptor table (LDT). After the segment selector is loaded in the LDTR, the processor uses the segment selector to locate the segment descriptor for the LDT in the global descriptor table (GDT). It then loads the segment limit and base address for the LDT from the segment descriptor into the LDTR. The segment registers DS, ES, SS, FS, GS, and CS are not affected by this instruction, nor is the LDTR field in the task state segment (TSS) for the current task.
29996    ///
29997    ///
29998    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LLDT.html).
29999    ///
30000    /// Supported operand variants:
30001    ///
30002    /// ```text
30003    /// +---+----------+
30004    /// | # | Operands |
30005    /// +---+----------+
30006    /// | 1 | Gpd      |
30007    /// | 2 | Mem      |
30008    /// +---+----------+
30009    /// ```
30010    #[inline]
30011    pub fn lldt<A>(&mut self, op0: A)
30012    where Assembler<'a>: LldtEmitter<A> {
30013        <Self as LldtEmitter<A>>::lldt(self, op0);
30014    }
30015    /// `LMSW` (LMSW). 
30016    /// Loads the source operand into the machine status word, bits 0 through 15 of register CR0. The source operand can be a 16-bit general-purpose register or a memory location. Only the low-order 4 bits of the source operand (which contains the PE, MP, EM, and TS flags) are loaded into CR0. The PG, CD, NW, AM, WP, NE, and ET flags of CR0 are not affected. The operand-size attribute has no effect on this instruction.
30017    ///
30018    ///
30019    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LMSW.html).
30020    ///
30021    /// Supported operand variants:
30022    ///
30023    /// ```text
30024    /// +---+----------+
30025    /// | # | Operands |
30026    /// +---+----------+
30027    /// | 1 | Gpd      |
30028    /// | 2 | Mem      |
30029    /// +---+----------+
30030    /// ```
30031    #[inline]
30032    pub fn lmsw<A>(&mut self, op0: A)
30033    where Assembler<'a>: LmswEmitter<A> {
30034        <Self as LmswEmitter<A>>::lmsw(self, op0);
30035    }
30036    /// `LODS` (LODS). 
30037    /// Loads a byte, word, or doubleword from the source operand into the AL, AX, or EAX register, respectively. The source operand is a memory location, the address of which is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The DS segment may be overridden with a segment override prefix.
30038    ///
30039    ///
30040    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LODS%3ALODSB%3ALODSW%3ALODSD%3ALODSQ.html).
30041    ///
30042    /// Supported operand variants:
30043    ///
30044    /// ```text
30045    /// +---+----------+
30046    /// | # | Operands |
30047    /// +---+----------+
30048    /// | 1 | (none)   |
30049    /// +---+----------+
30050    /// ```
30051    #[inline]
30052    pub fn lods(&mut self)
30053    where Assembler<'a>: LodsEmitter {
30054        <Self as LodsEmitter>::lods(self);
30055    }
30056    /// `LOOP` (LOOP). 
30057    /// Performs a loop operation using the RCX, ECX or CX register as a counter (depending on whether address size is 64 bits, 32 bits, or 16 bits). Note that the LOOP instruction ignores REX.W; but 64-bit address size can be over-ridden using a 67H prefix.
30058    ///
30059    ///
30060    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LOOP%3ALOOPcc.html).
30061    ///
30062    /// Supported operand variants:
30063    ///
30064    /// ```text
30065    /// +---+----------+
30066    /// | # | Operands |
30067    /// +---+----------+
30068    /// | 1 | Imm      |
30069    /// | 2 | Label    |
30070    /// | 3 | Sym      |
30071    /// +---+----------+
30072    /// ```
30073    #[inline]
30074    pub fn r#loop<A>(&mut self, op0: A)
30075    where Assembler<'a>: LoopEmitter<A> {
30076        <Self as LoopEmitter<A>>::r#loop(self, op0);
30077    }
30078    /// `LOOPNZ`.
30079    ///
30080    /// Supported operand variants:
30081    ///
30082    /// ```text
30083    /// +---+----------+
30084    /// | # | Operands |
30085    /// +---+----------+
30086    /// | 1 | Imm      |
30087    /// | 2 | Label    |
30088    /// | 3 | Sym      |
30089    /// +---+----------+
30090    /// ```
30091    #[inline]
30092    pub fn loopnz<A>(&mut self, op0: A)
30093    where Assembler<'a>: LoopnzEmitter<A> {
30094        <Self as LoopnzEmitter<A>>::loopnz(self, op0);
30095    }
30096    /// `LOOPZ`.
30097    ///
30098    /// Supported operand variants:
30099    ///
30100    /// ```text
30101    /// +---+----------+
30102    /// | # | Operands |
30103    /// +---+----------+
30104    /// | 1 | Imm      |
30105    /// | 2 | Label    |
30106    /// | 3 | Sym      |
30107    /// +---+----------+
30108    /// ```
30109    #[inline]
30110    pub fn loopz<A>(&mut self, op0: A)
30111    where Assembler<'a>: LoopzEmitter<A> {
30112        <Self as LoopzEmitter<A>>::loopz(self, op0);
30113    }
30114    /// `LSL` (LSL). 
30115    /// Loads the unscrambled segment limit from the segment descriptor specified with the second operand (source operand) into the first operand (destination operand) and sets the ZF flag in the EFLAGS register. The source operand (which can be a register or a memory location) contains the segment selector for the segment descriptor being accessed. The destination operand is a general-purpose register.
30116    ///
30117    ///
30118    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LSL.html).
30119    ///
30120    /// Supported operand variants:
30121    ///
30122    /// ```text
30123    /// +---+----------+
30124    /// | # | Operands |
30125    /// +---+----------+
30126    /// | 1 | Gpd, Gpw |
30127    /// | 2 | Gpd, Mem |
30128    /// | 3 | Gpq, Gpw |
30129    /// | 4 | Gpq, Mem |
30130    /// | 5 | Gpw, Gpw |
30131    /// | 6 | Gpw, Mem |
30132    /// +---+----------+
30133    /// ```
30134    #[inline]
30135    pub fn lsl<A, B>(&mut self, op0: A, op1: B)
30136    where Assembler<'a>: LslEmitter<A, B> {
30137        <Self as LslEmitter<A, B>>::lsl(self, op0, op1);
30138    }
30139    /// `LSS` (LSS). 
30140    /// Loads a far pointer (segment selector and offset) from the second operand (source operand) into a segment register and the first operand (destination operand). The source operand specifies a 48-bit or a 32-bit pointer in memory depending on the current setting of the operand-size attribute (32 bits or 16 bits, respectively). The instruction opcode and the destination operand specify a segment register/general-purpose register pair. The 16-bit segment selector from the source operand is loaded into the segment register specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit or 16-bit offset is loaded into the register specified with the destination operand.
30141    ///
30142    ///
30143    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LDS%3ALES%3ALFS%3ALGS%3ALSS.html).
30144    ///
30145    /// Supported operand variants:
30146    ///
30147    /// ```text
30148    /// +---+----------+
30149    /// | # | Operands |
30150    /// +---+----------+
30151    /// | 1 | Gpd, Mem |
30152    /// | 2 | Gpq, Mem |
30153    /// | 3 | Gpw, Mem |
30154    /// +---+----------+
30155    /// ```
30156    #[inline]
30157    pub fn lss<A, B>(&mut self, op0: A, op1: B)
30158    where Assembler<'a>: LssEmitter<A, B> {
30159        <Self as LssEmitter<A, B>>::lss(self, op0, op1);
30160    }
30161    /// `LTR` (LTR). 
30162    /// Loads the source operand into the segment selector field of the task register. The source operand (a general-purpose register or a memory location) contains a segment selector that points to a task state segment (TSS). After the segment selector is loaded in the task register, the processor uses the segment selector to locate the segment descriptor for the TSS in the global descriptor table (GDT). It then loads the segment limit and base address for the TSS from the segment descriptor into the task register. The task pointed to by the task register is marked busy, but a switch to the task does not occur.
30163    ///
30164    ///
30165    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/LTR.html).
30166    ///
30167    /// Supported operand variants:
30168    ///
30169    /// ```text
30170    /// +---+----------+
30171    /// | # | Operands |
30172    /// +---+----------+
30173    /// | 1 | Gpd      |
30174    /// | 2 | Mem      |
30175    /// +---+----------+
30176    /// ```
30177    #[inline]
30178    pub fn ltr<A>(&mut self, op0: A)
30179    where Assembler<'a>: LtrEmitter<A> {
30180        <Self as LtrEmitter<A>>::ltr(self, op0);
30181    }
30182    /// `MOV` (MOV). 
30183    /// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination register can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
30184    ///
30185    ///
30186    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
30187    ///
30188    /// Supported operand variants:
30189    ///
30190    /// ```text
30191    /// +----+------------------------+
30192    /// | #  | Operands               |
30193    /// +----+------------------------+
30194    /// | 1  | AbsoluteAddress, GpbLo |
30195    /// | 2  | AbsoluteAddress, Gpd   |
30196    /// | 3  | AbsoluteAddress, Gpq   |
30197    /// | 4  | AbsoluteAddress, Gpw   |
30198    /// | 5  | GpbLo, AbsoluteAddress |
30199    /// | 6  | GpbLo, GpbLo           |
30200    /// | 7  | GpbLo, Imm             |
30201    /// | 8  | GpbLo, Mem             |
30202    /// | 9  | Gpd, AbsoluteAddress   |
30203    /// | 10 | Gpd, Gpd               |
30204    /// | 11 | Gpd, Imm               |
30205    /// | 12 | Gpd, Mem               |
30206    /// | 13 | Gpq, AbsoluteAddress   |
30207    /// | 14 | Gpq, Gpq               |
30208    /// | 15 | Gpq, Imm               |
30209    /// | 16 | Gpq, Mem               |
30210    /// | 17 | Gpw, AbsoluteAddress   |
30211    /// | 18 | Gpw, Gpw               |
30212    /// | 19 | Gpw, Imm               |
30213    /// | 20 | Gpw, Mem               |
30214    /// | 21 | Mem, GpbLo             |
30215    /// | 22 | Mem, Gpd               |
30216    /// | 23 | Mem, Gpq               |
30217    /// | 24 | Mem, Gpw               |
30218    /// | 25 | Mem, Imm               |
30219    /// +----+------------------------+
30220    /// ```
30221    #[inline]
30222    pub fn mov<A, B>(&mut self, op0: A, op1: B)
30223    where Assembler<'a>: MovEmitter<A, B> {
30224        <Self as MovEmitter<A, B>>::mov(self, op0, op1);
30225    }
30226    /// `MOVS` (MOVS). 
30227    /// Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified with the first operand (destination operand). Both the source and destination operands are located in memory. The address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be overridden with a segment override prefix, but the ES segment cannot be overridden.
30228    ///
30229    ///
30230    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVS%3AMOVSB%3AMOVSW%3AMOVSD%3AMOVSQ.html).
30231    ///
30232    /// Supported operand variants:
30233    ///
30234    /// ```text
30235    /// +---+----------+
30236    /// | # | Operands |
30237    /// +---+----------+
30238    /// | 1 | (none)   |
30239    /// +---+----------+
30240    /// ```
30241    #[inline]
30242    pub fn movs(&mut self)
30243    where Assembler<'a>: MovsEmitter {
30244        <Self as MovsEmitter>::movs(self);
30245    }
30246    /// `MOVSX` (MOVSX). 
30247    /// Copies the contents of the source operand (register or memory location) to the destination operand (register) and sign extends the value to 16 or 32 bits (see Figure 7-6 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1). The size of the converted value depends on the operand-size attribute.
30248    ///
30249    ///
30250    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVSX%3AMOVSXD.html).
30251    ///
30252    /// Supported operand variants:
30253    ///
30254    /// ```text
30255    /// +----+------------+
30256    /// | #  | Operands   |
30257    /// +----+------------+
30258    /// | 1  | Gpd, GpbLo |
30259    /// | 2  | Gpd, Gpd   |
30260    /// | 3  | Gpd, Gpw   |
30261    /// | 4  | Gpd, Mem   |
30262    /// | 5  | Gpq, GpbLo |
30263    /// | 6  | Gpq, Gpd   |
30264    /// | 7  | Gpq, Gpw   |
30265    /// | 8  | Gpq, Mem   |
30266    /// | 9  | Gpw, GpbLo |
30267    /// | 10 | Gpw, Gpd   |
30268    /// | 11 | Gpw, Gpw   |
30269    /// | 12 | Gpw, Mem   |
30270    /// +----+------------+
30271    /// ```
30272    #[inline]
30273    pub fn movsx<A, B>(&mut self, op0: A, op1: B)
30274    where Assembler<'a>: MovsxEmitter<A, B> {
30275        <Self as MovsxEmitter<A, B>>::movsx(self, op0, op1);
30276    }
30277    /// `MOVZX` (MOVZX). 
30278    /// Copies the contents of the source operand (register or memory location) to the destination operand (register) and zero extends the value. The size of the converted value depends on the operand-size attribute.
30279    ///
30280    ///
30281    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVZX.html).
30282    ///
30283    /// Supported operand variants:
30284    ///
30285    /// ```text
30286    /// +---+------------+
30287    /// | # | Operands   |
30288    /// +---+------------+
30289    /// | 1 | Gpd, GpbLo |
30290    /// | 2 | Gpd, Gpw   |
30291    /// | 3 | Gpd, Mem   |
30292    /// | 4 | Gpq, GpbLo |
30293    /// | 5 | Gpq, Gpw   |
30294    /// | 6 | Gpq, Mem   |
30295    /// | 7 | Gpw, GpbLo |
30296    /// | 8 | Gpw, Gpw   |
30297    /// | 9 | Gpw, Mem   |
30298    /// +---+------------+
30299    /// ```
30300    #[inline]
30301    pub fn movzx<A, B>(&mut self, op0: A, op1: B)
30302    where Assembler<'a>: MovzxEmitter<A, B> {
30303        <Self as MovzxEmitter<A, B>>::movzx(self, op0, op1);
30304    }
30305    /// `MOV_CR2G`.
30306    ///
30307    /// Supported operand variants:
30308    ///
30309    /// ```text
30310    /// +---+-----------+
30311    /// | # | Operands  |
30312    /// +---+-----------+
30313    /// | 1 | Gpq, CReg |
30314    /// +---+-----------+
30315    /// ```
30316    #[inline]
30317    pub fn mov_cr2g<A, B>(&mut self, op0: A, op1: B)
30318    where Assembler<'a>: MovCr2gEmitter<A, B> {
30319        <Self as MovCr2gEmitter<A, B>>::mov_cr2g(self, op0, op1);
30320    }
30321    /// `MOV_DR2G`.
30322    ///
30323    /// Supported operand variants:
30324    ///
30325    /// ```text
30326    /// +---+-----------+
30327    /// | # | Operands  |
30328    /// +---+-----------+
30329    /// | 1 | Gpq, DReg |
30330    /// +---+-----------+
30331    /// ```
30332    #[inline]
30333    pub fn mov_dr2g<A, B>(&mut self, op0: A, op1: B)
30334    where Assembler<'a>: MovDr2gEmitter<A, B> {
30335        <Self as MovDr2gEmitter<A, B>>::mov_dr2g(self, op0, op1);
30336    }
30337    /// `MOV_G2CR`.
30338    ///
30339    /// Supported operand variants:
30340    ///
30341    /// ```text
30342    /// +---+-----------+
30343    /// | # | Operands  |
30344    /// +---+-----------+
30345    /// | 1 | CReg, Gpq |
30346    /// +---+-----------+
30347    /// ```
30348    #[inline]
30349    pub fn mov_g2cr<A, B>(&mut self, op0: A, op1: B)
30350    where Assembler<'a>: MovG2crEmitter<A, B> {
30351        <Self as MovG2crEmitter<A, B>>::mov_g2cr(self, op0, op1);
30352    }
30353    /// `MOV_G2DR`.
30354    ///
30355    /// Supported operand variants:
30356    ///
30357    /// ```text
30358    /// +---+-----------+
30359    /// | # | Operands  |
30360    /// +---+-----------+
30361    /// | 1 | DReg, Gpq |
30362    /// +---+-----------+
30363    /// ```
30364    #[inline]
30365    pub fn mov_g2dr<A, B>(&mut self, op0: A, op1: B)
30366    where Assembler<'a>: MovG2drEmitter<A, B> {
30367        <Self as MovG2drEmitter<A, B>>::mov_g2dr(self, op0, op1);
30368    }
30369    /// `MOV_G2S` (MOV). 
30370    /// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination register can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
30371    ///
30372    ///
30373    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
30374    ///
30375    /// Supported operand variants:
30376    ///
30377    /// ```text
30378    /// +---+-----------+
30379    /// | # | Operands  |
30380    /// +---+-----------+
30381    /// | 1 | SReg, Gpd |
30382    /// | 2 | SReg, Mem |
30383    /// +---+-----------+
30384    /// ```
30385    #[inline]
30386    pub fn mov_g2s<A, B>(&mut self, op0: A, op1: B)
30387    where Assembler<'a>: MovG2sEmitter<A, B> {
30388        <Self as MovG2sEmitter<A, B>>::mov_g2s(self, op0, op1);
30389    }
30390    /// `MOV_S2G` (MOV). 
30391    /// Copies the second operand (source operand) to the first operand (destination operand). The source operand can be an immediate value, general-purpose register, segment register, or memory location; the destination register can be a general-purpose register, segment register, or memory location. Both operands must be the same size, which can be a byte, a word, a doubleword, or a quadword.
30392    ///
30393    ///
30394    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOV.html).
30395    ///
30396    /// Supported operand variants:
30397    ///
30398    /// ```text
30399    /// +---+-----------+
30400    /// | # | Operands  |
30401    /// +---+-----------+
30402    /// | 1 | Gpd, SReg |
30403    /// | 2 | Mem, SReg |
30404    /// +---+-----------+
30405    /// ```
30406    #[inline]
30407    pub fn mov_s2g<A, B>(&mut self, op0: A, op1: B)
30408    where Assembler<'a>: MovS2gEmitter<A, B> {
30409        <Self as MovS2gEmitter<A, B>>::mov_s2g(self, op0, op1);
30410    }
30411    /// `MUL` (MUL). 
30412    /// Performs an unsigned multiplication of the first operand (destination operand) and the second operand (source operand) and stores the result in the destination operand. The destination operand is an implied operand located in register AL, AX or EAX (depending on the size of the operand); the source operand is located in a general-purpose register or a memory location. The action of this instruction and the location of the result depends on the opcode and the operand size as shown in Table 4-9.
30413    ///
30414    ///
30415    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MUL.html).
30416    ///
30417    /// Supported operand variants:
30418    ///
30419    /// ```text
30420    /// +---+----------+
30421    /// | # | Operands |
30422    /// +---+----------+
30423    /// | 1 | GpbLo    |
30424    /// | 2 | Gpd      |
30425    /// | 3 | Gpq      |
30426    /// | 4 | Gpw      |
30427    /// | 5 | Mem      |
30428    /// +---+----------+
30429    /// ```
30430    #[inline]
30431    pub fn mul<A>(&mut self, op0: A)
30432    where Assembler<'a>: MulEmitter<A> {
30433        <Self as MulEmitter<A>>::mul(self, op0);
30434    }
30435    /// `NEG` (NEG). 
30436    /// Replaces the value of operand (the destination operand) with its two's complement. (This operation is equivalent to subtracting the operand from 0.) The destination operand is located in a general-purpose register or a memory location.
30437    ///
30438    ///
30439    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NEG.html).
30440    ///
30441    /// Supported operand variants:
30442    ///
30443    /// ```text
30444    /// +---+----------+
30445    /// | # | Operands |
30446    /// +---+----------+
30447    /// | 1 | GpbLo    |
30448    /// | 2 | Gpd      |
30449    /// | 3 | Gpq      |
30450    /// | 4 | Gpw      |
30451    /// | 5 | Mem      |
30452    /// +---+----------+
30453    /// ```
30454    #[inline]
30455    pub fn neg<A>(&mut self, op0: A)
30456    where Assembler<'a>: NegEmitter<A> {
30457        <Self as NegEmitter<A>>::neg(self, op0);
30458    }
30459    /// `NOP` (NOP). 
30460    /// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
30461    ///
30462    ///
30463    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
30464    ///
30465    /// Supported operand variants:
30466    ///
30467    /// ```text
30468    /// +---+----------+
30469    /// | # | Operands |
30470    /// +---+----------+
30471    /// | 1 | (none)   |
30472    /// +---+----------+
30473    /// ```
30474    #[inline]
30475    pub fn nop(&mut self)
30476    where Assembler<'a>: NopEmitter {
30477        <Self as NopEmitter>::nop(self);
30478    }
30479    /// `NOP` (NOP). 
30480    /// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
30481    ///
30482    ///
30483    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
30484    ///
30485    /// Supported operand variants:
30486    ///
30487    /// ```text
30488    /// +---+----------+
30489    /// | # | Operands |
30490    /// +---+----------+
30491    /// | 1 | Gpd      |
30492    /// | 2 | Gpq      |
30493    /// | 3 | Gpw      |
30494    /// | 4 | Mem      |
30495    /// +---+----------+
30496    /// ```
30497    #[inline]
30498    pub fn nop_1<A>(&mut self, op0: A)
30499    where Assembler<'a>: NopEmitter_1<A> {
30500        <Self as NopEmitter_1<A>>::nop_1(self, op0);
30501    }
30502    /// `NOT` (NOT). 
30503    /// Performs a bitwise NOT operation (each 1 is set to 0, and each 0 is set to 1) on the destination operand and stores the result in the destination operand location. The destination operand can be a register or a memory location.
30504    ///
30505    ///
30506    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOT.html).
30507    ///
30508    /// Supported operand variants:
30509    ///
30510    /// ```text
30511    /// +---+----------+
30512    /// | # | Operands |
30513    /// +---+----------+
30514    /// | 1 | GpbLo    |
30515    /// | 2 | Gpd      |
30516    /// | 3 | Gpq      |
30517    /// | 4 | Gpw      |
30518    /// | 5 | Mem      |
30519    /// +---+----------+
30520    /// ```
30521    #[inline]
30522    pub fn not<A>(&mut self, op0: A)
30523    where Assembler<'a>: NotEmitter<A> {
30524        <Self as NotEmitter<A>>::not(self, op0);
30525    }
30526    /// `OR` (OR). 
30527    /// Performs a bitwise inclusive OR operation between the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result of the OR instruction is set to 0 if both corresponding bits of the first and second operands are 0; otherwise, each bit is set to 1.
30528    ///
30529    ///
30530    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OR.html).
30531    ///
30532    /// Supported operand variants:
30533    ///
30534    /// ```text
30535    /// +----+--------------+
30536    /// | #  | Operands     |
30537    /// +----+--------------+
30538    /// | 1  | GpbLo, GpbLo |
30539    /// | 2  | GpbLo, Imm   |
30540    /// | 3  | GpbLo, Mem   |
30541    /// | 4  | Gpd, Gpd     |
30542    /// | 5  | Gpd, Imm     |
30543    /// | 6  | Gpd, Mem     |
30544    /// | 7  | Gpq, Gpq     |
30545    /// | 8  | Gpq, Imm     |
30546    /// | 9  | Gpq, Mem     |
30547    /// | 10 | Gpw, Gpw     |
30548    /// | 11 | Gpw, Imm     |
30549    /// | 12 | Gpw, Mem     |
30550    /// | 13 | Mem, GpbLo   |
30551    /// | 14 | Mem, Gpd     |
30552    /// | 15 | Mem, Gpq     |
30553    /// | 16 | Mem, Gpw     |
30554    /// | 17 | Mem, Imm     |
30555    /// +----+--------------+
30556    /// ```
30557    #[inline]
30558    pub fn or<A, B>(&mut self, op0: A, op1: B)
30559    where Assembler<'a>: OrEmitter<A, B> {
30560        <Self as OrEmitter<A, B>>::or(self, op0, op1);
30561    }
30562    /// `OUT` (OUT). 
30563    /// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as a source operand allows I/O ports from 0 to 65,535 to be accessed.
30564    ///
30565    ///
30566    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
30567    ///
30568    /// Supported operand variants:
30569    ///
30570    /// ```text
30571    /// +---+----------+
30572    /// | # | Operands |
30573    /// +---+----------+
30574    /// | 1 | (none)   |
30575    /// +---+----------+
30576    /// ```
30577    #[inline]
30578    pub fn r#out(&mut self)
30579    where Assembler<'a>: OutEmitter {
30580        <Self as OutEmitter>::r#out(self);
30581    }
30582    /// `OUT` (OUT). 
30583    /// Copies the value from the second operand (source operand) to the I/O port specified with the destination operand (first operand). The source operand can be register AL, AX, or EAX, depending on the size of the port being accessed (8, 16, or 32 bits, respectively); the destination operand can be a byte-immediate or the DX register. Using a byte immediate allows I/O port addresses 0 to 255 to be accessed; using the DX register as a source operand allows I/O ports from 0 to 65,535 to be accessed.
30584    ///
30585    ///
30586    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUT.html).
30587    ///
30588    /// Supported operand variants:
30589    ///
30590    /// ```text
30591    /// +---+------------+
30592    /// | # | Operands   |
30593    /// +---+------------+
30594    /// | 1 | GpbLo, Imm |
30595    /// | 2 | Gpd, Imm   |
30596    /// | 3 | Gpq, Imm   |
30597    /// | 4 | Gpw, Imm   |
30598    /// +---+------------+
30599    /// ```
30600    #[inline]
30601    pub fn r#out_2<A, B>(&mut self, op0: A, op1: B)
30602    where Assembler<'a>: OutEmitter_2<A, B> {
30603        <Self as OutEmitter_2<A, B>>::r#out_2(self, op0, op1);
30604    }
30605    /// `OUTS` (OUTS). 
30606    /// Copies data from the source operand (second operand) to the I/O port specified with the destination operand (first operand). The source operand is a memory location, the address of which is read from either the DS:SI, DS:ESI or the RSI registers (depending on the address-size attribute of the instruction, 16, 32 or 64, respectively). (The DS segment may be overridden with a segment override prefix.) The destination operand is an I/O port address (from 0 to 65,535) that is read from the DX register. The size of the I/O port being accessed (that is, the size of the source and destination operands) is determined by the opcode for an 8-bit I/O port or by the operand-size attribute of the instruction for a 16- or 32-bit I/O port.
30607    ///
30608    ///
30609    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/OUTS%3AOUTSB%3AOUTSW%3AOUTSD.html).
30610    ///
30611    /// Supported operand variants:
30612    ///
30613    /// ```text
30614    /// +---+----------+
30615    /// | # | Operands |
30616    /// +---+----------+
30617    /// | 1 | (none)   |
30618    /// +---+----------+
30619    /// ```
30620    #[inline]
30621    pub fn outs(&mut self)
30622    where Assembler<'a>: OutsEmitter {
30623        <Self as OutsEmitter>::outs(self);
30624    }
30625    /// `PAUSE` (NOP). 
30626    /// This instruction performs no operation. It is a one-byte or multi-byte NOP that takes up space in the instruction stream but does not impact machine context, except for the EIP register.
30627    ///
30628    ///
30629    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/NOP.html).
30630    ///
30631    /// Supported operand variants:
30632    ///
30633    /// ```text
30634    /// +---+----------+
30635    /// | # | Operands |
30636    /// +---+----------+
30637    /// | 1 | (none)   |
30638    /// +---+----------+
30639    /// ```
30640    #[inline]
30641    pub fn pause(&mut self)
30642    where Assembler<'a>: PauseEmitter {
30643        <Self as PauseEmitter>::pause(self);
30644    }
30645    /// `POP` (POP). 
30646    /// Loads the value from the top of the stack to the location specified with the destination operand (or explicit opcode) and then increments the stack pointer. The destination operand can be a general-purpose register, memory location, or segment register.
30647    ///
30648    ///
30649    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POP.html).
30650    ///
30651    /// Supported operand variants:
30652    ///
30653    /// ```text
30654    /// +---+----------+
30655    /// | # | Operands |
30656    /// +---+----------+
30657    /// | 1 | Gpq      |
30658    /// | 2 | Gpw      |
30659    /// | 3 | Mem      |
30660    /// +---+----------+
30661    /// ```
30662    #[inline]
30663    pub fn pop<A>(&mut self, op0: A)
30664    where Assembler<'a>: PopEmitter<A> {
30665        <Self as PopEmitter<A>>::pop(self, op0);
30666    }
30667    /// `POPF` (POPF). 
30668    /// Pops a doubleword (POPFD) from the top of the stack (if the current operand-size attribute is 32) and stores the value in the EFLAGS register, or pops a word from the top of the stack (if the operand-size attribute is 16) and stores it in the lower 16 bits of the EFLAGS register (that is, the FLAGS register). These instructions reverse the operation of the PUSHF/PUSHFD/PUSHFQ instructions.
30669    ///
30670    ///
30671    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/POPF%3APOPFD%3APOPFQ.html).
30672    ///
30673    /// Supported operand variants:
30674    ///
30675    /// ```text
30676    /// +---+----------+
30677    /// | # | Operands |
30678    /// +---+----------+
30679    /// | 1 | (none)   |
30680    /// +---+----------+
30681    /// ```
30682    #[inline]
30683    pub fn popf(&mut self)
30684    where Assembler<'a>: PopfEmitter {
30685        <Self as PopfEmitter>::popf(self);
30686    }
30687    /// `POP_SEG`.
30688    ///
30689    /// Supported operand variants:
30690    ///
30691    /// ```text
30692    /// +---+----------+
30693    /// | # | Operands |
30694    /// +---+----------+
30695    /// | 1 | SReg     |
30696    /// +---+----------+
30697    /// ```
30698    #[inline]
30699    pub fn pop_seg<A>(&mut self, op0: A)
30700    where Assembler<'a>: PopSegEmitter<A> {
30701        <Self as PopSegEmitter<A>>::pop_seg(self, op0);
30702    }
30703    /// `PUSH` (PUSH). 
30704    /// Decrements the stack pointer and then stores the source operand on the top of the stack. Address and operand sizes are determined and used as follows
30705    ///
30706    ///
30707    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSH.html).
30708    ///
30709    /// Supported operand variants:
30710    ///
30711    /// ```text
30712    /// +---+----------+
30713    /// | # | Operands |
30714    /// +---+----------+
30715    /// | 1 | Gpq      |
30716    /// | 2 | Gpw      |
30717    /// | 3 | Imm      |
30718    /// | 4 | Mem      |
30719    /// +---+----------+
30720    /// ```
30721    #[inline]
30722    pub fn push<A>(&mut self, op0: A)
30723    where Assembler<'a>: PushEmitter<A> {
30724        <Self as PushEmitter<A>>::push(self, op0);
30725    }
30726    /// `PUSHF` (PUSHF). 
30727    /// Decrements the stack pointer by 4 (if the current operand-size attribute is 32) and pushes the entire contents of the EFLAGS register onto the stack, or decrements the stack pointer by 2 (if the operand-size attribute is 16) and pushes the lower 16 bits of the EFLAGS register (that is, the FLAGS register) onto the stack. These instructions reverse the operation of the POPF/POPFD instructions.
30728    ///
30729    ///
30730    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUSHF%3APUSHFD%3APUSHFQ.html).
30731    ///
30732    /// Supported operand variants:
30733    ///
30734    /// ```text
30735    /// +---+----------+
30736    /// | # | Operands |
30737    /// +---+----------+
30738    /// | 1 | (none)   |
30739    /// +---+----------+
30740    /// ```
30741    #[inline]
30742    pub fn pushf(&mut self)
30743    where Assembler<'a>: PushfEmitter {
30744        <Self as PushfEmitter>::pushf(self);
30745    }
30746    /// `PUSH_SEG`.
30747    ///
30748    /// Supported operand variants:
30749    ///
30750    /// ```text
30751    /// +---+----------+
30752    /// | # | Operands |
30753    /// +---+----------+
30754    /// | 1 | SReg     |
30755    /// +---+----------+
30756    /// ```
30757    #[inline]
30758    pub fn push_seg<A>(&mut self, op0: A)
30759    where Assembler<'a>: PushSegEmitter<A> {
30760        <Self as PushSegEmitter<A>>::push_seg(self, op0);
30761    }
30762    /// `RCL` (RCL). 
30763    /// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
30764    ///
30765    ///
30766    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
30767    ///
30768    /// Supported operand variants:
30769    ///
30770    /// ```text
30771    /// +----+--------------+
30772    /// | #  | Operands     |
30773    /// +----+--------------+
30774    /// | 1  | GpbLo, GpbLo |
30775    /// | 2  | GpbLo, Imm   |
30776    /// | 3  | Gpd, GpbLo   |
30777    /// | 4  | Gpd, Imm     |
30778    /// | 5  | Gpq, GpbLo   |
30779    /// | 6  | Gpq, Imm     |
30780    /// | 7  | Gpw, GpbLo   |
30781    /// | 8  | Gpw, Imm     |
30782    /// | 9  | Mem, GpbLo   |
30783    /// | 10 | Mem, Imm     |
30784    /// +----+--------------+
30785    /// ```
30786    #[inline]
30787    pub fn rcl<A, B>(&mut self, op0: A, op1: B)
30788    where Assembler<'a>: RclEmitter<A, B> {
30789        <Self as RclEmitter<A, B>>::rcl(self, op0, op1);
30790    }
30791    /// `RCR` (RCR). 
30792    /// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
30793    ///
30794    ///
30795    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
30796    ///
30797    /// Supported operand variants:
30798    ///
30799    /// ```text
30800    /// +----+--------------+
30801    /// | #  | Operands     |
30802    /// +----+--------------+
30803    /// | 1  | GpbLo, GpbLo |
30804    /// | 2  | GpbLo, Imm   |
30805    /// | 3  | Gpd, GpbLo   |
30806    /// | 4  | Gpd, Imm     |
30807    /// | 5  | Gpq, GpbLo   |
30808    /// | 6  | Gpq, Imm     |
30809    /// | 7  | Gpw, GpbLo   |
30810    /// | 8  | Gpw, Imm     |
30811    /// | 9  | Mem, GpbLo   |
30812    /// | 10 | Mem, Imm     |
30813    /// +----+--------------+
30814    /// ```
30815    #[inline]
30816    pub fn rcr<A, B>(&mut self, op0: A, op1: B)
30817    where Assembler<'a>: RcrEmitter<A, B> {
30818        <Self as RcrEmitter<A, B>>::rcr(self, op0, op1);
30819    }
30820    /// `RET` (RET). 
30821    /// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
30822    ///
30823    ///
30824    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
30825    ///
30826    /// Supported operand variants:
30827    ///
30828    /// ```text
30829    /// +---+----------+
30830    /// | # | Operands |
30831    /// +---+----------+
30832    /// | 1 | (none)   |
30833    /// +---+----------+
30834    /// ```
30835    #[inline]
30836    pub fn ret(&mut self)
30837    where Assembler<'a>: RetEmitter {
30838        <Self as RetEmitter>::ret(self);
30839    }
30840    /// `RET` (RET). 
30841    /// Transfers program control to a return address located on the top of the stack. The address is usually placed on the stack by a CALL instruction, and the return is made to the instruction that follows the CALL instruction.
30842    ///
30843    ///
30844    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RET.html).
30845    ///
30846    /// Supported operand variants:
30847    ///
30848    /// ```text
30849    /// +---+----------+
30850    /// | # | Operands |
30851    /// +---+----------+
30852    /// | 1 | Imm      |
30853    /// +---+----------+
30854    /// ```
30855    #[inline]
30856    pub fn ret_1<A>(&mut self, op0: A)
30857    where Assembler<'a>: RetEmitter_1<A> {
30858        <Self as RetEmitter_1<A>>::ret_1(self, op0);
30859    }
30860    /// `RETF`.
30861    ///
30862    /// Supported operand variants:
30863    ///
30864    /// ```text
30865    /// +---+----------+
30866    /// | # | Operands |
30867    /// +---+----------+
30868    /// | 1 | (none)   |
30869    /// +---+----------+
30870    /// ```
30871    #[inline]
30872    pub fn retf(&mut self)
30873    where Assembler<'a>: RetfEmitter {
30874        <Self as RetfEmitter>::retf(self);
30875    }
30876    /// `RETF`.
30877    ///
30878    /// Supported operand variants:
30879    ///
30880    /// ```text
30881    /// +---+----------+
30882    /// | # | Operands |
30883    /// +---+----------+
30884    /// | 1 | Imm      |
30885    /// +---+----------+
30886    /// ```
30887    #[inline]
30888    pub fn retf_1<A>(&mut self, op0: A)
30889    where Assembler<'a>: RetfEmitter_1<A> {
30890        <Self as RetfEmitter_1<A>>::retf_1(self, op0);
30891    }
30892    /// `ROL` (ROL). 
30893    /// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
30894    ///
30895    ///
30896    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
30897    ///
30898    /// Supported operand variants:
30899    ///
30900    /// ```text
30901    /// +----+--------------+
30902    /// | #  | Operands     |
30903    /// +----+--------------+
30904    /// | 1  | GpbLo, GpbLo |
30905    /// | 2  | GpbLo, Imm   |
30906    /// | 3  | Gpd, GpbLo   |
30907    /// | 4  | Gpd, Imm     |
30908    /// | 5  | Gpq, GpbLo   |
30909    /// | 6  | Gpq, Imm     |
30910    /// | 7  | Gpw, GpbLo   |
30911    /// | 8  | Gpw, Imm     |
30912    /// | 9  | Mem, GpbLo   |
30913    /// | 10 | Mem, Imm     |
30914    /// +----+--------------+
30915    /// ```
30916    #[inline]
30917    pub fn rol<A, B>(&mut self, op0: A, op1: B)
30918    where Assembler<'a>: RolEmitter<A, B> {
30919        <Self as RolEmitter<A, B>>::rol(self, op0, op1);
30920    }
30921    /// `ROR` (ROR). 
30922    /// Shifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the second operand (count operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in the CL register. The count is masked to 5 bits (or 6 bits if in 64-bit mode and REX.W = 1).
30923    ///
30924    ///
30925    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/RCL%3ARCR%3AROL%3AROR.html).
30926    ///
30927    /// Supported operand variants:
30928    ///
30929    /// ```text
30930    /// +----+--------------+
30931    /// | #  | Operands     |
30932    /// +----+--------------+
30933    /// | 1  | GpbLo, GpbLo |
30934    /// | 2  | GpbLo, Imm   |
30935    /// | 3  | Gpd, GpbLo   |
30936    /// | 4  | Gpd, Imm     |
30937    /// | 5  | Gpq, GpbLo   |
30938    /// | 6  | Gpq, Imm     |
30939    /// | 7  | Gpw, GpbLo   |
30940    /// | 8  | Gpw, Imm     |
30941    /// | 9  | Mem, GpbLo   |
30942    /// | 10 | Mem, Imm     |
30943    /// +----+--------------+
30944    /// ```
30945    #[inline]
30946    pub fn ror<A, B>(&mut self, op0: A, op1: B)
30947    where Assembler<'a>: RorEmitter<A, B> {
30948        <Self as RorEmitter<A, B>>::ror(self, op0, op1);
30949    }
30950    /// `SAHF` (SAHF). 
30951    /// Loads the SF, ZF, AF, PF, and CF flags of the EFLAGS register with values from the corresponding bits in the AH register (bits 7, 6, 4, 2, and 0, respectively). Bits 1, 3, and 5 of register AH are ignored; the corresponding reserved bits (1, 3, and 5) in the EFLAGS register remain as shown in the “Operation” section below.
30952    ///
30953    ///
30954    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAHF.html).
30955    ///
30956    /// Supported operand variants:
30957    ///
30958    /// ```text
30959    /// +---+----------+
30960    /// | # | Operands |
30961    /// +---+----------+
30962    /// | 1 | (none)   |
30963    /// +---+----------+
30964    /// ```
30965    #[inline]
30966    pub fn sahf(&mut self)
30967    where Assembler<'a>: SahfEmitter {
30968        <Self as SahfEmitter>::sahf(self);
30969    }
30970    /// `SAR` (SAR). 
30971    /// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
30972    ///
30973    ///
30974    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
30975    ///
30976    /// Supported operand variants:
30977    ///
30978    /// ```text
30979    /// +----+--------------+
30980    /// | #  | Operands     |
30981    /// +----+--------------+
30982    /// | 1  | GpbLo, GpbLo |
30983    /// | 2  | GpbLo, Imm   |
30984    /// | 3  | Gpd, GpbLo   |
30985    /// | 4  | Gpd, Imm     |
30986    /// | 5  | Gpq, GpbLo   |
30987    /// | 6  | Gpq, Imm     |
30988    /// | 7  | Gpw, GpbLo   |
30989    /// | 8  | Gpw, Imm     |
30990    /// | 9  | Mem, GpbLo   |
30991    /// | 10 | Mem, Imm     |
30992    /// +----+--------------+
30993    /// ```
30994    #[inline]
30995    pub fn sar<A, B>(&mut self, op0: A, op1: B)
30996    where Assembler<'a>: SarEmitter<A, B> {
30997        <Self as SarEmitter<A, B>>::sar(self, op0, op1);
30998    }
30999    /// `SBB` (SBB). 
31000    /// Adds the source operand (second operand) and the carry (CF) flag, and subtracts the result from the destination operand (first operand). The result of the subtraction is stored in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, a register, or a memory location.
31001    ///
31002    ///
31003    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SBB.html).
31004    ///
31005    /// Supported operand variants:
31006    ///
31007    /// ```text
31008    /// +----+--------------+
31009    /// | #  | Operands     |
31010    /// +----+--------------+
31011    /// | 1  | GpbLo, GpbLo |
31012    /// | 2  | GpbLo, Imm   |
31013    /// | 3  | GpbLo, Mem   |
31014    /// | 4  | Gpd, Gpd     |
31015    /// | 5  | Gpd, Imm     |
31016    /// | 6  | Gpd, Mem     |
31017    /// | 7  | Gpq, Gpq     |
31018    /// | 8  | Gpq, Imm     |
31019    /// | 9  | Gpq, Mem     |
31020    /// | 10 | Gpw, Gpw     |
31021    /// | 11 | Gpw, Imm     |
31022    /// | 12 | Gpw, Mem     |
31023    /// | 13 | Mem, GpbLo   |
31024    /// | 14 | Mem, Gpd     |
31025    /// | 15 | Mem, Gpq     |
31026    /// | 16 | Mem, Gpw     |
31027    /// | 17 | Mem, Imm     |
31028    /// +----+--------------+
31029    /// ```
31030    #[inline]
31031    pub fn sbb<A, B>(&mut self, op0: A, op1: B)
31032    where Assembler<'a>: SbbEmitter<A, B> {
31033        <Self as SbbEmitter<A, B>>::sbb(self, op0, op1);
31034    }
31035    /// `SCAS` (SCAS). 
31036    /// In non-64-bit modes and in default 64-bit mode: this instruction compares a byte, word, doubleword or quadword specified using a memory operand with the value in AL, AX, or EAX. It then sets status flags in EFLAGS recording the results. The memory operand address is read from ES:(E)DI register (depending on the address-size attribute of the instruction and the current operational mode). Note that ES cannot be overridden with a segment override prefix.
31037    ///
31038    ///
31039    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SCAS%3ASCASB%3ASCASW%3ASCASD.html).
31040    ///
31041    /// Supported operand variants:
31042    ///
31043    /// ```text
31044    /// +---+----------+
31045    /// | # | Operands |
31046    /// +---+----------+
31047    /// | 1 | (none)   |
31048    /// +---+----------+
31049    /// ```
31050    #[inline]
31051    pub fn scas(&mut self)
31052    where Assembler<'a>: ScasEmitter {
31053        <Self as ScasEmitter>::scas(self);
31054    }
31055    /// `SETA` (SETA). 
31056    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31057    ///
31058    ///
31059    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31060    ///
31061    /// Supported operand variants:
31062    ///
31063    /// ```text
31064    /// +---+----------+
31065    /// | # | Operands |
31066    /// +---+----------+
31067    /// | 1 | GpbLo    |
31068    /// | 2 | Mem      |
31069    /// +---+----------+
31070    /// ```
31071    #[inline]
31072    pub fn seta<A>(&mut self, op0: A)
31073    where Assembler<'a>: SetaEmitter<A> {
31074        <Self as SetaEmitter<A>>::seta(self, op0);
31075    }
31076    /// `SETBE` (SETBE). 
31077    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31078    ///
31079    ///
31080    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31081    ///
31082    /// Supported operand variants:
31083    ///
31084    /// ```text
31085    /// +---+----------+
31086    /// | # | Operands |
31087    /// +---+----------+
31088    /// | 1 | GpbLo    |
31089    /// | 2 | Mem      |
31090    /// +---+----------+
31091    /// ```
31092    #[inline]
31093    pub fn setbe<A>(&mut self, op0: A)
31094    where Assembler<'a>: SetbeEmitter<A> {
31095        <Self as SetbeEmitter<A>>::setbe(self, op0);
31096    }
31097    /// `SETC` (SETC). 
31098    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31099    ///
31100    ///
31101    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31102    ///
31103    /// Supported operand variants:
31104    ///
31105    /// ```text
31106    /// +---+----------+
31107    /// | # | Operands |
31108    /// +---+----------+
31109    /// | 1 | GpbLo    |
31110    /// | 2 | Mem      |
31111    /// +---+----------+
31112    /// ```
31113    #[inline]
31114    pub fn setc<A>(&mut self, op0: A)
31115    where Assembler<'a>: SetcEmitter<A> {
31116        <Self as SetcEmitter<A>>::setc(self, op0);
31117    }
31118    /// `SETG` (SETG). 
31119    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31120    ///
31121    ///
31122    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31123    ///
31124    /// Supported operand variants:
31125    ///
31126    /// ```text
31127    /// +---+----------+
31128    /// | # | Operands |
31129    /// +---+----------+
31130    /// | 1 | GpbLo    |
31131    /// | 2 | Mem      |
31132    /// +---+----------+
31133    /// ```
31134    #[inline]
31135    pub fn setg<A>(&mut self, op0: A)
31136    where Assembler<'a>: SetgEmitter<A> {
31137        <Self as SetgEmitter<A>>::setg(self, op0);
31138    }
31139    /// `SETGE` (SETGE). 
31140    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31141    ///
31142    ///
31143    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31144    ///
31145    /// Supported operand variants:
31146    ///
31147    /// ```text
31148    /// +---+----------+
31149    /// | # | Operands |
31150    /// +---+----------+
31151    /// | 1 | GpbLo    |
31152    /// | 2 | Mem      |
31153    /// +---+----------+
31154    /// ```
31155    #[inline]
31156    pub fn setge<A>(&mut self, op0: A)
31157    where Assembler<'a>: SetgeEmitter<A> {
31158        <Self as SetgeEmitter<A>>::setge(self, op0);
31159    }
31160    /// `SETL` (SETL). 
31161    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31162    ///
31163    ///
31164    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31165    ///
31166    /// Supported operand variants:
31167    ///
31168    /// ```text
31169    /// +---+----------+
31170    /// | # | Operands |
31171    /// +---+----------+
31172    /// | 1 | GpbLo    |
31173    /// | 2 | Mem      |
31174    /// +---+----------+
31175    /// ```
31176    #[inline]
31177    pub fn setl<A>(&mut self, op0: A)
31178    where Assembler<'a>: SetlEmitter<A> {
31179        <Self as SetlEmitter<A>>::setl(self, op0);
31180    }
31181    /// `SETLE` (SETLE). 
31182    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31183    ///
31184    ///
31185    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31186    ///
31187    /// Supported operand variants:
31188    ///
31189    /// ```text
31190    /// +---+----------+
31191    /// | # | Operands |
31192    /// +---+----------+
31193    /// | 1 | GpbLo    |
31194    /// | 2 | Mem      |
31195    /// +---+----------+
31196    /// ```
31197    #[inline]
31198    pub fn setle<A>(&mut self, op0: A)
31199    where Assembler<'a>: SetleEmitter<A> {
31200        <Self as SetleEmitter<A>>::setle(self, op0);
31201    }
31202    /// `SETNC` (SETNC). 
31203    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31204    ///
31205    ///
31206    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31207    ///
31208    /// Supported operand variants:
31209    ///
31210    /// ```text
31211    /// +---+----------+
31212    /// | # | Operands |
31213    /// +---+----------+
31214    /// | 1 | GpbLo    |
31215    /// | 2 | Mem      |
31216    /// +---+----------+
31217    /// ```
31218    #[inline]
31219    pub fn setnc<A>(&mut self, op0: A)
31220    where Assembler<'a>: SetncEmitter<A> {
31221        <Self as SetncEmitter<A>>::setnc(self, op0);
31222    }
31223    /// `SETNO` (SETNO). 
31224    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31225    ///
31226    ///
31227    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31228    ///
31229    /// Supported operand variants:
31230    ///
31231    /// ```text
31232    /// +---+----------+
31233    /// | # | Operands |
31234    /// +---+----------+
31235    /// | 1 | GpbLo    |
31236    /// | 2 | Mem      |
31237    /// +---+----------+
31238    /// ```
31239    #[inline]
31240    pub fn setno<A>(&mut self, op0: A)
31241    where Assembler<'a>: SetnoEmitter<A> {
31242        <Self as SetnoEmitter<A>>::setno(self, op0);
31243    }
31244    /// `SETNP` (SETNP). 
31245    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31246    ///
31247    ///
31248    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31249    ///
31250    /// Supported operand variants:
31251    ///
31252    /// ```text
31253    /// +---+----------+
31254    /// | # | Operands |
31255    /// +---+----------+
31256    /// | 1 | GpbLo    |
31257    /// | 2 | Mem      |
31258    /// +---+----------+
31259    /// ```
31260    #[inline]
31261    pub fn setnp<A>(&mut self, op0: A)
31262    where Assembler<'a>: SetnpEmitter<A> {
31263        <Self as SetnpEmitter<A>>::setnp(self, op0);
31264    }
31265    /// `SETNS` (SETNS). 
31266    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31267    ///
31268    ///
31269    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31270    ///
31271    /// Supported operand variants:
31272    ///
31273    /// ```text
31274    /// +---+----------+
31275    /// | # | Operands |
31276    /// +---+----------+
31277    /// | 1 | GpbLo    |
31278    /// | 2 | Mem      |
31279    /// +---+----------+
31280    /// ```
31281    #[inline]
31282    pub fn setns<A>(&mut self, op0: A)
31283    where Assembler<'a>: SetnsEmitter<A> {
31284        <Self as SetnsEmitter<A>>::setns(self, op0);
31285    }
31286    /// `SETNZ` (SETNZ). 
31287    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31288    ///
31289    ///
31290    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31291    ///
31292    /// Supported operand variants:
31293    ///
31294    /// ```text
31295    /// +---+----------+
31296    /// | # | Operands |
31297    /// +---+----------+
31298    /// | 1 | GpbLo    |
31299    /// | 2 | Mem      |
31300    /// +---+----------+
31301    /// ```
31302    #[inline]
31303    pub fn setnz<A>(&mut self, op0: A)
31304    where Assembler<'a>: SetnzEmitter<A> {
31305        <Self as SetnzEmitter<A>>::setnz(self, op0);
31306    }
31307    /// `SETO` (SETO). 
31308    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31309    ///
31310    ///
31311    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31312    ///
31313    /// Supported operand variants:
31314    ///
31315    /// ```text
31316    /// +---+----------+
31317    /// | # | Operands |
31318    /// +---+----------+
31319    /// | 1 | GpbLo    |
31320    /// | 2 | Mem      |
31321    /// +---+----------+
31322    /// ```
31323    #[inline]
31324    pub fn seto<A>(&mut self, op0: A)
31325    where Assembler<'a>: SetoEmitter<A> {
31326        <Self as SetoEmitter<A>>::seto(self, op0);
31327    }
31328    /// `SETP` (SETP). 
31329    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31330    ///
31331    ///
31332    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31333    ///
31334    /// Supported operand variants:
31335    ///
31336    /// ```text
31337    /// +---+----------+
31338    /// | # | Operands |
31339    /// +---+----------+
31340    /// | 1 | GpbLo    |
31341    /// | 2 | Mem      |
31342    /// +---+----------+
31343    /// ```
31344    #[inline]
31345    pub fn setp<A>(&mut self, op0: A)
31346    where Assembler<'a>: SetpEmitter<A> {
31347        <Self as SetpEmitter<A>>::setp(self, op0);
31348    }
31349    /// `SETS` (SETS). 
31350    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31351    ///
31352    ///
31353    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31354    ///
31355    /// Supported operand variants:
31356    ///
31357    /// ```text
31358    /// +---+----------+
31359    /// | # | Operands |
31360    /// +---+----------+
31361    /// | 1 | GpbLo    |
31362    /// | 2 | Mem      |
31363    /// +---+----------+
31364    /// ```
31365    #[inline]
31366    pub fn sets<A>(&mut self, op0: A)
31367    where Assembler<'a>: SetsEmitter<A> {
31368        <Self as SetsEmitter<A>>::sets(self, op0);
31369    }
31370    /// `SETZ` (SETZ). 
31371    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
31372    ///
31373    ///
31374    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
31375    ///
31376    /// Supported operand variants:
31377    ///
31378    /// ```text
31379    /// +---+----------+
31380    /// | # | Operands |
31381    /// +---+----------+
31382    /// | 1 | GpbLo    |
31383    /// | 2 | Mem      |
31384    /// +---+----------+
31385    /// ```
31386    #[inline]
31387    pub fn setz<A>(&mut self, op0: A)
31388    where Assembler<'a>: SetzEmitter<A> {
31389        <Self as SetzEmitter<A>>::setz(self, op0);
31390    }
    /// `SETCC` (SETO). 
    /// Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix (cc) indicates the condition being tested for.
    ///
    /// NOTE(review): the "(SETO)" label above appears inconsistent — SETCC
    /// is the condition-generic mnemonic, while SETO specifically tests
    /// OF=1 (the cc=0 encoding). Confirm which opcode `SetccEmitter`
    /// actually emits before relying on this wrapper.
    ///
    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SETcc.html).
    ///
    /// Supported operand variants:
    ///
    /// ```text
    /// +---+----------+
    /// | # | Operands |
    /// +---+----------+
    /// | 1 | GpbLo    |
    /// | 2 | Mem      |
    /// +---+----------+
    /// ```
    #[inline]
    pub fn setcc<A>(&mut self, op0: A)
    where Assembler<'a>: SetccEmitter<A> {
        <Self as SetccEmitter<A>>::setcc(self, op0);
    }
31412    /// `SGDT`.
31413    ///
31414    /// Supported operand variants:
31415    ///
31416    /// ```text
31417    /// +---+----------+
31418    /// | # | Operands |
31419    /// +---+----------+
31420    /// | 1 | Mem      |
31421    /// +---+----------+
31422    /// ```
31423    #[inline]
31424    pub fn sgdt<A>(&mut self, op0: A)
31425    where Assembler<'a>: SgdtEmitter<A> {
31426        <Self as SgdtEmitter<A>>::sgdt(self, op0);
31427    }
31428    /// `SHL` (SHL). 
31429    /// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
31430    ///
31431    ///
31432    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
31433    ///
31434    /// Supported operand variants:
31435    ///
31436    /// ```text
31437    /// +----+--------------+
31438    /// | #  | Operands     |
31439    /// +----+--------------+
31440    /// | 1  | GpbLo, GpbLo |
31441    /// | 2  | GpbLo, Imm   |
31442    /// | 3  | Gpd, GpbLo   |
31443    /// | 4  | Gpd, Imm     |
31444    /// | 5  | Gpq, GpbLo   |
31445    /// | 6  | Gpq, Imm     |
31446    /// | 7  | Gpw, GpbLo   |
31447    /// | 8  | Gpw, Imm     |
31448    /// | 9  | Mem, GpbLo   |
31449    /// | 10 | Mem, Imm     |
31450    /// +----+--------------+
31451    /// ```
31452    #[inline]
31453    pub fn shl<A, B>(&mut self, op0: A, op1: B)
31454    where Assembler<'a>: ShlEmitter<A, B> {
31455        <Self as ShlEmitter<A, B>>::shl(self, op0, op1);
31456    }
31457    /// `SHLD` (SHLD). 
31458    /// The SHLD instruction is used for multi-precision shifts of 64 bits or more.
31459    ///
31460    ///
31461    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHLD.html).
31462    ///
31463    /// Supported operand variants:
31464    ///
31465    /// ```text
31466    /// +----+-----------------+
31467    /// | #  | Operands        |
31468    /// +----+-----------------+
31469    /// | 1  | Gpd, Gpd, GpbLo |
31470    /// | 2  | Gpd, Gpd, Imm   |
31471    /// | 3  | Gpq, Gpq, GpbLo |
31472    /// | 4  | Gpq, Gpq, Imm   |
31473    /// | 5  | Gpw, Gpw, GpbLo |
31474    /// | 6  | Gpw, Gpw, Imm   |
31475    /// | 7  | Mem, Gpd, GpbLo |
31476    /// | 8  | Mem, Gpd, Imm   |
31477    /// | 9  | Mem, Gpq, GpbLo |
31478    /// | 10 | Mem, Gpq, Imm   |
31479    /// | 11 | Mem, Gpw, GpbLo |
31480    /// | 12 | Mem, Gpw, Imm   |
31481    /// +----+-----------------+
31482    /// ```
31483    #[inline]
31484    pub fn shld<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31485    where Assembler<'a>: ShldEmitter<A, B, C> {
31486        <Self as ShldEmitter<A, B, C>>::shld(self, op0, op1, op2);
31487    }
31488    /// `SHR` (SHR). 
31489    /// Shifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the second operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF flag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination operand.
31490    ///
31491    ///
31492    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SAL%3ASAR%3ASHL%3ASHR.html).
31493    ///
31494    /// Supported operand variants:
31495    ///
31496    /// ```text
31497    /// +----+--------------+
31498    /// | #  | Operands     |
31499    /// +----+--------------+
31500    /// | 1  | GpbLo, GpbLo |
31501    /// | 2  | GpbLo, Imm   |
31502    /// | 3  | Gpd, GpbLo   |
31503    /// | 4  | Gpd, Imm     |
31504    /// | 5  | Gpq, GpbLo   |
31505    /// | 6  | Gpq, Imm     |
31506    /// | 7  | Gpw, GpbLo   |
31507    /// | 8  | Gpw, Imm     |
31508    /// | 9  | Mem, GpbLo   |
31509    /// | 10 | Mem, Imm     |
31510    /// +----+--------------+
31511    /// ```
31512    #[inline]
31513    pub fn shr<A, B>(&mut self, op0: A, op1: B)
31514    where Assembler<'a>: ShrEmitter<A, B> {
31515        <Self as ShrEmitter<A, B>>::shr(self, op0, op1);
31516    }
31517    /// `SHRD` (SHRD). 
31518    /// The SHRD instruction is useful for multi-precision shifts of 64 bits or more.
31519    ///
31520    ///
31521    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SHRD.html).
31522    ///
31523    /// Supported operand variants:
31524    ///
31525    /// ```text
31526    /// +----+-----------------+
31527    /// | #  | Operands        |
31528    /// +----+-----------------+
31529    /// | 1  | Gpd, Gpd, GpbLo |
31530    /// | 2  | Gpd, Gpd, Imm   |
31531    /// | 3  | Gpq, Gpq, GpbLo |
31532    /// | 4  | Gpq, Gpq, Imm   |
31533    /// | 5  | Gpw, Gpw, GpbLo |
31534    /// | 6  | Gpw, Gpw, Imm   |
31535    /// | 7  | Mem, Gpd, GpbLo |
31536    /// | 8  | Mem, Gpd, Imm   |
31537    /// | 9  | Mem, Gpq, GpbLo |
31538    /// | 10 | Mem, Gpq, Imm   |
31539    /// | 11 | Mem, Gpw, GpbLo |
31540    /// | 12 | Mem, Gpw, Imm   |
31541    /// +----+-----------------+
31542    /// ```
31543    #[inline]
31544    pub fn shrd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31545    where Assembler<'a>: ShrdEmitter<A, B, C> {
31546        <Self as ShrdEmitter<A, B, C>>::shrd(self, op0, op1, op2);
31547    }
31548    /// `SIDT`.
31549    ///
31550    /// Supported operand variants:
31551    ///
31552    /// ```text
31553    /// +---+----------+
31554    /// | # | Operands |
31555    /// +---+----------+
31556    /// | 1 | Mem      |
31557    /// +---+----------+
31558    /// ```
31559    #[inline]
31560    pub fn sidt<A>(&mut self, op0: A)
31561    where Assembler<'a>: SidtEmitter<A> {
31562        <Self as SidtEmitter<A>>::sidt(self, op0);
31563    }
31564    /// `SLDT` (SLDT). 
31565    /// Stores the segment selector from the local descriptor table register (LDTR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the segment descriptor (located in the GDT) for the current LDT. This instruction can only be executed in protected mode.
31566    ///
31567    ///
31568    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SLDT.html).
31569    ///
31570    /// Supported operand variants:
31571    ///
31572    /// ```text
31573    /// +---+----------+
31574    /// | # | Operands |
31575    /// +---+----------+
31576    /// | 1 | Gpd      |
31577    /// | 2 | Mem      |
31578    /// +---+----------+
31579    /// ```
31580    #[inline]
31581    pub fn sldt<A>(&mut self, op0: A)
31582    where Assembler<'a>: SldtEmitter<A> {
31583        <Self as SldtEmitter<A>>::sldt(self, op0);
31584    }
31585    /// `SMSW` (SMSW). 
31586    /// Stores the machine status word (bits 0 through 15 of control register CR0) into the destination operand. The destination operand can be a general-purpose register or a memory location.
31587    ///
31588    ///
31589    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SMSW.html).
31590    ///
31591    /// Supported operand variants:
31592    ///
31593    /// ```text
31594    /// +---+----------+
31595    /// | # | Operands |
31596    /// +---+----------+
31597    /// | 1 | Gpd      |
31598    /// | 2 | Gpq      |
31599    /// | 3 | Gpw      |
31600    /// | 4 | Mem      |
31601    /// +---+----------+
31602    /// ```
31603    #[inline]
31604    pub fn smsw<A>(&mut self, op0: A)
31605    where Assembler<'a>: SmswEmitter<A> {
31606        <Self as SmswEmitter<A>>::smsw(self, op0);
31607    }
31608    /// `STC` (STC). 
31609    /// Sets the CF flag in the EFLAGS register. Operation is the same in all modes.
31610    ///
31611    ///
31612    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STC.html).
31613    ///
31614    /// Supported operand variants:
31615    ///
31616    /// ```text
31617    /// +---+----------+
31618    /// | # | Operands |
31619    /// +---+----------+
31620    /// | 1 | (none)   |
31621    /// +---+----------+
31622    /// ```
31623    #[inline]
31624    pub fn stc(&mut self)
31625    where Assembler<'a>: StcEmitter {
31626        <Self as StcEmitter>::stc(self);
31627    }
31628    /// `STD` (STD). 
31629    /// Sets the DF flag in the EFLAGS register. When the DF flag is set to 1, string operations decrement the index registers (ESI and/or EDI). Operation is the same in all modes.
31630    ///
31631    ///
31632    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STD.html).
31633    ///
31634    /// Supported operand variants:
31635    ///
31636    /// ```text
31637    /// +---+----------+
31638    /// | # | Operands |
31639    /// +---+----------+
31640    /// | 1 | (none)   |
31641    /// +---+----------+
31642    /// ```
31643    #[inline]
31644    pub fn std(&mut self)
31645    where Assembler<'a>: StdEmitter {
31646        <Self as StdEmitter>::std(self);
31647    }
31648    /// `STI` (STI). 
31649    /// In most cases, STI sets the interrupt flag (IF) in the EFLAGS register. This allows the processor to respond to maskable hardware interrupts.
31650    ///
31651    ///
31652    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STI.html).
31653    ///
31654    /// Supported operand variants:
31655    ///
31656    /// ```text
31657    /// +---+----------+
31658    /// | # | Operands |
31659    /// +---+----------+
31660    /// | 1 | (none)   |
31661    /// +---+----------+
31662    /// ```
31663    #[inline]
31664    pub fn sti(&mut self)
31665    where Assembler<'a>: StiEmitter {
31666        <Self as StiEmitter>::sti(self);
31667    }
31668    /// `STOS` (STOS). 
31669    /// In non-64-bit and default 64-bit mode; stores a byte, word, or doubleword from the AL, AX, or EAX register (respectively) into the destination operand. The destination operand is a memory location, the address of which is read from either the ES:EDI or ES:DI register (depending on the address-size attribute of the instruction and the mode of operation). The ES segment cannot be overridden with a segment override prefix.
31670    ///
31671    ///
31672    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STOS%3ASTOSB%3ASTOSW%3ASTOSD%3ASTOSQ.html).
31673    ///
31674    /// Supported operand variants:
31675    ///
31676    /// ```text
31677    /// +---+----------+
31678    /// | # | Operands |
31679    /// +---+----------+
31680    /// | 1 | (none)   |
31681    /// +---+----------+
31682    /// ```
31683    #[inline]
31684    pub fn stos(&mut self)
31685    where Assembler<'a>: StosEmitter {
31686        <Self as StosEmitter>::stos(self);
31687    }
31688    /// `STR` (STR). 
31689    /// Stores the segment selector from the task register (TR) in the destination operand. The destination operand can be a general-purpose register or a memory location. The segment selector stored with this instruction points to the task state segment (TSS) for the currently running task.
31690    ///
31691    ///
31692    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/STR.html).
31693    ///
31694    /// Supported operand variants:
31695    ///
31696    /// ```text
31697    /// +---+----------+
31698    /// | # | Operands |
31699    /// +---+----------+
31700    /// | 1 | Gpd      |
31701    /// | 2 | Mem      |
31702    /// +---+----------+
31703    /// ```
31704    #[inline]
31705    pub fn str<A>(&mut self, op0: A)
31706    where Assembler<'a>: StrEmitter<A> {
31707        <Self as StrEmitter<A>>::str(self, op0);
31708    }
31709    /// `STTILECFG`.
31710    ///
31711    /// Supported operand variants:
31712    ///
31713    /// ```text
31714    /// +---+----------+
31715    /// | # | Operands |
31716    /// +---+----------+
31717    /// | 1 | Mem      |
31718    /// +---+----------+
31719    /// ```
31720    #[inline]
31721    pub fn sttilecfg<A>(&mut self, op0: A)
31722    where Assembler<'a>: SttilecfgEmitter<A> {
31723        <Self as SttilecfgEmitter<A>>::sttilecfg(self, op0);
31724    }
31725    /// `SUB` (SUB). 
31726    /// Subtracts the second operand (source operand) from the first operand (destination operand) and stores the result in the destination operand. The destination operand can be a register or a memory location; the source operand can be an immediate, register, or memory location. (However, two memory operands cannot be used in one instruction.) When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format.
31727    ///
31728    ///
31729    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SUB.html).
31730    ///
31731    /// Supported operand variants:
31732    ///
31733    /// ```text
31734    /// +----+--------------+
31735    /// | #  | Operands     |
31736    /// +----+--------------+
31737    /// | 1  | GpbLo, GpbLo |
31738    /// | 2  | GpbLo, Imm   |
31739    /// | 3  | GpbLo, Mem   |
31740    /// | 4  | Gpd, Gpd     |
31741    /// | 5  | Gpd, Imm     |
31742    /// | 6  | Gpd, Mem     |
31743    /// | 7  | Gpq, Gpq     |
31744    /// | 8  | Gpq, Imm     |
31745    /// | 9  | Gpq, Mem     |
31746    /// | 10 | Gpw, Gpw     |
31747    /// | 11 | Gpw, Imm     |
31748    /// | 12 | Gpw, Mem     |
31749    /// | 13 | Mem, GpbLo   |
31750    /// | 14 | Mem, Gpd     |
31751    /// | 15 | Mem, Gpq     |
31752    /// | 16 | Mem, Gpw     |
31753    /// | 17 | Mem, Imm     |
31754    /// +----+--------------+
31755    /// ```
31756    #[inline]
31757    pub fn sub<A, B>(&mut self, op0: A, op1: B)
31758    where Assembler<'a>: SubEmitter<A, B> {
31759        <Self as SubEmitter<A, B>>::sub(self, op0, op1);
31760    }
31761    /// `SWAPGS` (SWAPGS). 
31762    /// SWAPGS exchanges the current GS base register value with the value contained in MSR address C0000102H (IA32_KERNEL_GS_BASE). The SWAPGS instruction is a privileged instruction intended for use by system software.
31763    ///
31764    ///
31765    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SWAPGS.html).
31766    ///
31767    /// Supported operand variants:
31768    ///
31769    /// ```text
31770    /// +---+----------+
31771    /// | # | Operands |
31772    /// +---+----------+
31773    /// | 1 | (none)   |
31774    /// +---+----------+
31775    /// ```
31776    #[inline]
31777    pub fn swapgs(&mut self)
31778    where Assembler<'a>: SwapgsEmitter {
31779        <Self as SwapgsEmitter>::swapgs(self);
31780    }
31781    /// `SYSCALL` (SYSCALL). 
31782    /// SYSCALL invokes an OS system-call handler at privilege level 0. It does so by loading RIP from the IA32_LSTAR MSR (after saving the address of the instruction following SYSCALL into RCX). (The WRMSR instruction ensures that the IA32_LSTAR MSR always contain a canonical address.)
31783    ///
31784    ///
31785    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSCALL.html).
31786    ///
31787    /// Supported operand variants:
31788    ///
31789    /// ```text
31790    /// +---+----------+
31791    /// | # | Operands |
31792    /// +---+----------+
31793    /// | 1 | (none)   |
31794    /// +---+----------+
31795    /// ```
31796    #[inline]
31797    pub fn syscall(&mut self)
31798    where Assembler<'a>: SyscallEmitter {
31799        <Self as SyscallEmitter>::syscall(self);
31800    }
31801    /// `SYSRET` (SYSRET). 
31802    /// SYSRET is a companion instruction to the SYSCALL instruction. It returns from an OS system-call handler to user code at privilege level 3. It does so by loading RIP from RCX and loading RFLAGS from R11.1 With a 64-bit operand size, SYSRET remains in 64-bit mode; otherwise, it enters compatibility mode and only the low 32 bits of the registers are loaded.
31803    ///
31804    ///
31805    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/SYSRET.html).
31806    ///
31807    /// Supported operand variants:
31808    ///
31809    /// ```text
31810    /// +---+----------+
31811    /// | # | Operands |
31812    /// +---+----------+
31813    /// | 1 | (none)   |
31814    /// +---+----------+
31815    /// ```
31816    #[inline]
31817    pub fn sysret(&mut self)
31818    where Assembler<'a>: SysretEmitter {
31819        <Self as SysretEmitter>::sysret(self);
31820    }
31821    /// `TCMMIMFP16PS`.
31822    ///
31823    /// Supported operand variants:
31824    ///
31825    /// ```text
31826    /// +---+---------------+
31827    /// | # | Operands      |
31828    /// +---+---------------+
31829    /// | 1 | Tmm, Tmm, Tmm |
31830    /// +---+---------------+
31831    /// ```
31832    #[inline]
31833    pub fn tcmmimfp16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31834    where Assembler<'a>: Tcmmimfp16psEmitter<A, B, C> {
31835        <Self as Tcmmimfp16psEmitter<A, B, C>>::tcmmimfp16ps(self, op0, op1, op2);
31836    }
31837    /// `TCMMRLFP16PS`.
31838    ///
31839    /// Supported operand variants:
31840    ///
31841    /// ```text
31842    /// +---+---------------+
31843    /// | # | Operands      |
31844    /// +---+---------------+
31845    /// | 1 | Tmm, Tmm, Tmm |
31846    /// +---+---------------+
31847    /// ```
31848    #[inline]
31849    pub fn tcmmrlfp16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31850    where Assembler<'a>: Tcmmrlfp16psEmitter<A, B, C> {
31851        <Self as Tcmmrlfp16psEmitter<A, B, C>>::tcmmrlfp16ps(self, op0, op1, op2);
31852    }
31853    /// `TDPBF16PS`.
31854    ///
31855    /// Supported operand variants:
31856    ///
31857    /// ```text
31858    /// +---+---------------+
31859    /// | # | Operands      |
31860    /// +---+---------------+
31861    /// | 1 | Tmm, Tmm, Tmm |
31862    /// +---+---------------+
31863    /// ```
31864    #[inline]
31865    pub fn tdpbf16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31866    where Assembler<'a>: Tdpbf16psEmitter<A, B, C> {
31867        <Self as Tdpbf16psEmitter<A, B, C>>::tdpbf16ps(self, op0, op1, op2);
31868    }
31869    /// `TDPBSSD`.
31870    ///
31871    /// Supported operand variants:
31872    ///
31873    /// ```text
31874    /// +---+---------------+
31875    /// | # | Operands      |
31876    /// +---+---------------+
31877    /// | 1 | Tmm, Tmm, Tmm |
31878    /// +---+---------------+
31879    /// ```
31880    #[inline]
31881    pub fn tdpbssd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31882    where Assembler<'a>: TdpbssdEmitter<A, B, C> {
31883        <Self as TdpbssdEmitter<A, B, C>>::tdpbssd(self, op0, op1, op2);
31884    }
31885    /// `TDPBSUD`.
31886    ///
31887    /// Supported operand variants:
31888    ///
31889    /// ```text
31890    /// +---+---------------+
31891    /// | # | Operands      |
31892    /// +---+---------------+
31893    /// | 1 | Tmm, Tmm, Tmm |
31894    /// +---+---------------+
31895    /// ```
31896    #[inline]
31897    pub fn tdpbsud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31898    where Assembler<'a>: TdpbsudEmitter<A, B, C> {
31899        <Self as TdpbsudEmitter<A, B, C>>::tdpbsud(self, op0, op1, op2);
31900    }
31901    /// `TDPBUSD`.
31902    ///
31903    /// Supported operand variants:
31904    ///
31905    /// ```text
31906    /// +---+---------------+
31907    /// | # | Operands      |
31908    /// +---+---------------+
31909    /// | 1 | Tmm, Tmm, Tmm |
31910    /// +---+---------------+
31911    /// ```
31912    #[inline]
31913    pub fn tdpbusd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31914    where Assembler<'a>: TdpbusdEmitter<A, B, C> {
31915        <Self as TdpbusdEmitter<A, B, C>>::tdpbusd(self, op0, op1, op2);
31916    }
31917    /// `TDPBUUD`.
31918    ///
31919    /// Supported operand variants:
31920    ///
31921    /// ```text
31922    /// +---+---------------+
31923    /// | # | Operands      |
31924    /// +---+---------------+
31925    /// | 1 | Tmm, Tmm, Tmm |
31926    /// +---+---------------+
31927    /// ```
31928    #[inline]
31929    pub fn tdpbuud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31930    where Assembler<'a>: TdpbuudEmitter<A, B, C> {
31931        <Self as TdpbuudEmitter<A, B, C>>::tdpbuud(self, op0, op1, op2);
31932    }
31933    /// `TDPFP16PS`.
31934    ///
31935    /// Supported operand variants:
31936    ///
31937    /// ```text
31938    /// +---+---------------+
31939    /// | # | Operands      |
31940    /// +---+---------------+
31941    /// | 1 | Tmm, Tmm, Tmm |
31942    /// +---+---------------+
31943    /// ```
31944    #[inline]
31945    pub fn tdpfp16ps<A, B, C>(&mut self, op0: A, op1: B, op2: C)
31946    where Assembler<'a>: Tdpfp16psEmitter<A, B, C> {
31947        <Self as Tdpfp16psEmitter<A, B, C>>::tdpfp16ps(self, op0, op1, op2);
31948    }
31949    /// `TEST` (TEST). 
31950    /// Computes the bit-wise logical AND of first operand (source 1 operand) and the second operand (source 2 operand) and sets the SF, ZF, and PF status flags according to the result. The result is then discarded.
31951    ///
31952    ///
31953    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TEST.html).
31954    ///
31955    /// Supported operand variants:
31956    ///
31957    /// ```text
31958    /// +----+--------------+
31959    /// | #  | Operands     |
31960    /// +----+--------------+
31961    /// | 1  | GpbLo, GpbLo |
31962    /// | 2  | GpbLo, Imm   |
31963    /// | 3  | Gpd, Gpd     |
31964    /// | 4  | Gpd, Imm     |
31965    /// | 5  | Gpq, Gpq     |
31966    /// | 6  | Gpq, Imm     |
31967    /// | 7  | Gpw, Gpw     |
31968    /// | 8  | Gpw, Imm     |
31969    /// | 9  | Mem, GpbLo   |
31970    /// | 10 | Mem, Gpd     |
31971    /// | 11 | Mem, Gpq     |
31972    /// | 12 | Mem, Gpw     |
31973    /// | 13 | Mem, Imm     |
31974    /// +----+--------------+
31975    /// ```
31976    #[inline]
31977    pub fn test<A, B>(&mut self, op0: A, op1: B)
31978    where Assembler<'a>: TestEmitter<A, B> {
31979        <Self as TestEmitter<A, B>>::test(self, op0, op1);
31980    }
31981    /// `TILELOADD`.
31982    ///
31983    /// Supported operand variants:
31984    ///
31985    /// ```text
31986    /// +---+----------+
31987    /// | # | Operands |
31988    /// +---+----------+
31989    /// | 1 | Tmm, Mem |
31990    /// +---+----------+
31991    /// ```
31992    #[inline]
31993    pub fn tileloadd<A, B>(&mut self, op0: A, op1: B)
31994    where Assembler<'a>: TileloaddEmitter<A, B> {
31995        <Self as TileloaddEmitter<A, B>>::tileloadd(self, op0, op1);
31996    }
31997    /// `TILELOADDT1`.
31998    ///
31999    /// Supported operand variants:
32000    ///
32001    /// ```text
32002    /// +---+----------+
32003    /// | # | Operands |
32004    /// +---+----------+
32005    /// | 1 | Tmm, Mem |
32006    /// +---+----------+
32007    /// ```
32008    #[inline]
32009    pub fn tileloaddt1<A, B>(&mut self, op0: A, op1: B)
32010    where Assembler<'a>: Tileloaddt1Emitter<A, B> {
32011        <Self as Tileloaddt1Emitter<A, B>>::tileloaddt1(self, op0, op1);
32012    }
32013    /// `TILERELEASE` (TILERELEASE). 
32014    /// This instruction returns TILECFG and TILEDATA to the INIT state.
32015    ///
32016    ///
32017    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/TILERELEASE.html).
32018    ///
32019    /// Supported operand variants:
32020    ///
32021    /// ```text
32022    /// +---+----------+
32023    /// | # | Operands |
32024    /// +---+----------+
32025    /// | 1 | (none)   |
32026    /// +---+----------+
32027    /// ```
32028    #[inline]
32029    pub fn tilerelease(&mut self)
32030    where Assembler<'a>: TilereleaseEmitter {
32031        <Self as TilereleaseEmitter>::tilerelease(self);
32032    }
32033    /// `TILESTORED`.
32034    ///
32035    /// Supported operand variants:
32036    ///
32037    /// ```text
32038    /// +---+----------+
32039    /// | # | Operands |
32040    /// +---+----------+
32041    /// | 1 | Mem, Tmm |
32042    /// +---+----------+
32043    /// ```
32044    #[inline]
32045    pub fn tilestored<A, B>(&mut self, op0: A, op1: B)
32046    where Assembler<'a>: TilestoredEmitter<A, B> {
32047        <Self as TilestoredEmitter<A, B>>::tilestored(self, op0, op1);
32048    }
32049    /// `TILEZERO`.
32050    ///
32051    /// Supported operand variants:
32052    ///
32053    /// ```text
32054    /// +---+----------+
32055    /// | # | Operands |
32056    /// +---+----------+
32057    /// | 1 | Tmm      |
32058    /// +---+----------+
32059    /// ```
32060    #[inline]
32061    pub fn tilezero<A>(&mut self, op0: A)
32062    where Assembler<'a>: TilezeroEmitter<A> {
32063        <Self as TilezeroEmitter<A>>::tilezero(self, op0);
32064    }
32065    /// `UD0`.
32066    ///
32067    /// Supported operand variants:
32068    ///
32069    /// ```text
32070    /// +---+----------+
32071    /// | # | Operands |
32072    /// +---+----------+
32073    /// | 1 | Gpd, Gpd |
32074    /// | 2 | Gpd, Mem |
32075    /// | 3 | Gpq, Gpq |
32076    /// | 4 | Gpq, Mem |
32077    /// | 5 | Gpw, Gpw |
32078    /// | 6 | Gpw, Mem |
32079    /// +---+----------+
32080    /// ```
32081    #[inline]
32082    pub fn ud0<A, B>(&mut self, op0: A, op1: B)
32083    where Assembler<'a>: Ud0Emitter<A, B> {
32084        <Self as Ud0Emitter<A, B>>::ud0(self, op0, op1);
32085    }
32086    /// `UD1` (UD1). 
32087    /// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
32088    ///
32089    ///
32090    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
32091    ///
32092    /// Supported operand variants:
32093    ///
32094    /// ```text
32095    /// +---+----------+
32096    /// | # | Operands |
32097    /// +---+----------+
32098    /// | 1 | Gpd, Gpd |
32099    /// | 2 | Gpd, Mem |
32100    /// | 3 | Gpq, Gpq |
32101    /// | 4 | Gpq, Mem |
32102    /// | 5 | Gpw, Gpw |
32103    /// | 6 | Gpw, Mem |
32104    /// +---+----------+
32105    /// ```
32106    #[inline]
32107    pub fn ud1<A, B>(&mut self, op0: A, op1: B)
32108    where Assembler<'a>: Ud1Emitter<A, B> {
32109        <Self as Ud1Emitter<A, B>>::ud1(self, op0, op1);
32110    }
32111    /// `UD2` (UD2). 
32112    /// Generates an invalid opcode exception. This instruction is provided for software testing to explicitly generate an invalid opcode exception. The opcodes for this instruction are reserved for this purpose.
32113    ///
32114    ///
32115    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/UD.html).
32116    ///
32117    /// Supported operand variants:
32118    ///
32119    /// ```text
32120    /// +---+----------+
32121    /// | # | Operands |
32122    /// +---+----------+
32123    /// | 1 | (none)   |
32124    /// +---+----------+
32125    /// ```
32126    #[inline]
32127    pub fn ud2(&mut self)
32128    where Assembler<'a>: Ud2Emitter {
32129        <Self as Ud2Emitter>::ud2(self);
32130    }
32131    /// `VADDPH`.
32132    ///
32133    /// Supported operand variants:
32134    ///
32135    /// ```text
32136    /// +---+---------------+
32137    /// | # | Operands      |
32138    /// +---+---------------+
32139    /// | 1 | Xmm, Xmm, Mem |
32140    /// | 2 | Xmm, Xmm, Xmm |
32141    /// | 3 | Ymm, Ymm, Mem |
32142    /// | 4 | Ymm, Ymm, Ymm |
32143    /// | 5 | Zmm, Zmm, Mem |
32144    /// | 6 | Zmm, Zmm, Zmm |
32145    /// +---+---------------+
32146    /// ```
32147    #[inline]
32148    pub fn vaddph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32149    where Assembler<'a>: VaddphEmitter<A, B, C> {
32150        <Self as VaddphEmitter<A, B, C>>::vaddph(self, op0, op1, op2);
32151    }
32152    /// `VADDPH_ER`.
32153    ///
32154    /// Supported operand variants:
32155    ///
32156    /// ```text
32157    /// +---+---------------+
32158    /// | # | Operands      |
32159    /// +---+---------------+
32160    /// | 1 | Zmm, Zmm, Zmm |
32161    /// +---+---------------+
32162    /// ```
32163    #[inline]
32164    pub fn vaddph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32165    where Assembler<'a>: VaddphErEmitter<A, B, C> {
32166        <Self as VaddphErEmitter<A, B, C>>::vaddph_er(self, op0, op1, op2);
32167    }
32168    /// `VADDPH_MASK`.
32169    ///
32170    /// Supported operand variants:
32171    ///
32172    /// ```text
32173    /// +---+---------------+
32174    /// | # | Operands      |
32175    /// +---+---------------+
32176    /// | 1 | Xmm, Xmm, Mem |
32177    /// | 2 | Xmm, Xmm, Xmm |
32178    /// | 3 | Ymm, Ymm, Mem |
32179    /// | 4 | Ymm, Ymm, Ymm |
32180    /// | 5 | Zmm, Zmm, Mem |
32181    /// | 6 | Zmm, Zmm, Zmm |
32182    /// +---+---------------+
32183    /// ```
32184    #[inline]
32185    pub fn vaddph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32186    where Assembler<'a>: VaddphMaskEmitter<A, B, C> {
32187        <Self as VaddphMaskEmitter<A, B, C>>::vaddph_mask(self, op0, op1, op2);
32188    }
32189    /// `VADDPH_MASK_ER`.
32190    ///
32191    /// Supported operand variants:
32192    ///
32193    /// ```text
32194    /// +---+---------------+
32195    /// | # | Operands      |
32196    /// +---+---------------+
32197    /// | 1 | Zmm, Zmm, Zmm |
32198    /// +---+---------------+
32199    /// ```
32200    #[inline]
32201    pub fn vaddph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32202    where Assembler<'a>: VaddphMaskErEmitter<A, B, C> {
32203        <Self as VaddphMaskErEmitter<A, B, C>>::vaddph_mask_er(self, op0, op1, op2);
32204    }
32205    /// `VADDPH_MASKZ`.
32206    ///
32207    /// Supported operand variants:
32208    ///
32209    /// ```text
32210    /// +---+---------------+
32211    /// | # | Operands      |
32212    /// +---+---------------+
32213    /// | 1 | Xmm, Xmm, Mem |
32214    /// | 2 | Xmm, Xmm, Xmm |
32215    /// | 3 | Ymm, Ymm, Mem |
32216    /// | 4 | Ymm, Ymm, Ymm |
32217    /// | 5 | Zmm, Zmm, Mem |
32218    /// | 6 | Zmm, Zmm, Zmm |
32219    /// +---+---------------+
32220    /// ```
32221    #[inline]
32222    pub fn vaddph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32223    where Assembler<'a>: VaddphMaskzEmitter<A, B, C> {
32224        <Self as VaddphMaskzEmitter<A, B, C>>::vaddph_maskz(self, op0, op1, op2);
32225    }
32226    /// `VADDPH_MASKZ_ER`.
32227    ///
32228    /// Supported operand variants:
32229    ///
32230    /// ```text
32231    /// +---+---------------+
32232    /// | # | Operands      |
32233    /// +---+---------------+
32234    /// | 1 | Zmm, Zmm, Zmm |
32235    /// +---+---------------+
32236    /// ```
32237    #[inline]
32238    pub fn vaddph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32239    where Assembler<'a>: VaddphMaskzErEmitter<A, B, C> {
32240        <Self as VaddphMaskzErEmitter<A, B, C>>::vaddph_maskz_er(self, op0, op1, op2);
32241    }
32242    /// `VADDSH`.
32243    ///
32244    /// Supported operand variants:
32245    ///
32246    /// ```text
32247    /// +---+---------------+
32248    /// | # | Operands      |
32249    /// +---+---------------+
32250    /// | 1 | Xmm, Xmm, Mem |
32251    /// | 2 | Xmm, Xmm, Xmm |
32252    /// +---+---------------+
32253    /// ```
32254    #[inline]
32255    pub fn vaddsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32256    where Assembler<'a>: VaddshEmitter<A, B, C> {
32257        <Self as VaddshEmitter<A, B, C>>::vaddsh(self, op0, op1, op2);
32258    }
32259    /// `VADDSH_ER`.
32260    ///
32261    /// Supported operand variants:
32262    ///
32263    /// ```text
32264    /// +---+---------------+
32265    /// | # | Operands      |
32266    /// +---+---------------+
32267    /// | 1 | Xmm, Xmm, Xmm |
32268    /// +---+---------------+
32269    /// ```
32270    #[inline]
32271    pub fn vaddsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32272    where Assembler<'a>: VaddshErEmitter<A, B, C> {
32273        <Self as VaddshErEmitter<A, B, C>>::vaddsh_er(self, op0, op1, op2);
32274    }
32275    /// `VADDSH_MASK`.
32276    ///
32277    /// Supported operand variants:
32278    ///
32279    /// ```text
32280    /// +---+---------------+
32281    /// | # | Operands      |
32282    /// +---+---------------+
32283    /// | 1 | Xmm, Xmm, Mem |
32284    /// | 2 | Xmm, Xmm, Xmm |
32285    /// +---+---------------+
32286    /// ```
32287    #[inline]
32288    pub fn vaddsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32289    where Assembler<'a>: VaddshMaskEmitter<A, B, C> {
32290        <Self as VaddshMaskEmitter<A, B, C>>::vaddsh_mask(self, op0, op1, op2);
32291    }
32292    /// `VADDSH_MASK_ER`.
32293    ///
32294    /// Supported operand variants:
32295    ///
32296    /// ```text
32297    /// +---+---------------+
32298    /// | # | Operands      |
32299    /// +---+---------------+
32300    /// | 1 | Xmm, Xmm, Xmm |
32301    /// +---+---------------+
32302    /// ```
32303    #[inline]
32304    pub fn vaddsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32305    where Assembler<'a>: VaddshMaskErEmitter<A, B, C> {
32306        <Self as VaddshMaskErEmitter<A, B, C>>::vaddsh_mask_er(self, op0, op1, op2);
32307    }
32308    /// `VADDSH_MASKZ`.
32309    ///
32310    /// Supported operand variants:
32311    ///
32312    /// ```text
32313    /// +---+---------------+
32314    /// | # | Operands      |
32315    /// +---+---------------+
32316    /// | 1 | Xmm, Xmm, Mem |
32317    /// | 2 | Xmm, Xmm, Xmm |
32318    /// +---+---------------+
32319    /// ```
32320    #[inline]
32321    pub fn vaddsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32322    where Assembler<'a>: VaddshMaskzEmitter<A, B, C> {
32323        <Self as VaddshMaskzEmitter<A, B, C>>::vaddsh_maskz(self, op0, op1, op2);
32324    }
32325    /// `VADDSH_MASKZ_ER`.
32326    ///
32327    /// Supported operand variants:
32328    ///
32329    /// ```text
32330    /// +---+---------------+
32331    /// | # | Operands      |
32332    /// +---+---------------+
32333    /// | 1 | Xmm, Xmm, Xmm |
32334    /// +---+---------------+
32335    /// ```
32336    #[inline]
32337    pub fn vaddsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32338    where Assembler<'a>: VaddshMaskzErEmitter<A, B, C> {
32339        <Self as VaddshMaskzErEmitter<A, B, C>>::vaddsh_maskz_er(self, op0, op1, op2);
32340    }
32341    /// `VAESDEC` (VAESDEC). 
32342    /// This instruction performs a single round of the AES decryption flow using the Equivalent Inverse Cipher, using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
32343    ///
32344    ///
32345    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDEC.html).
32346    ///
32347    /// Supported operand variants:
32348    ///
32349    /// ```text
32350    /// +---+---------------+
32351    /// | # | Operands      |
32352    /// +---+---------------+
32353    /// | 1 | Xmm, Xmm, Mem |
32354    /// | 2 | Xmm, Xmm, Xmm |
32355    /// | 3 | Ymm, Ymm, Mem |
32356    /// | 4 | Ymm, Ymm, Ymm |
32357    /// | 5 | Zmm, Zmm, Mem |
32358    /// | 6 | Zmm, Zmm, Zmm |
32359    /// +---+---------------+
32360    /// ```
32361    #[inline]
32362    pub fn vaesdec<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32363    where Assembler<'a>: VaesdecEmitter<A, B, C> {
32364        <Self as VaesdecEmitter<A, B, C>>::vaesdec(self, op0, op1, op2);
32365    }
32366    /// `VAESDECLAST` (VAESDECLAST). 
32367    /// This instruction performs the last round of the AES decryption flow using the Equivalent Inverse Cipher, using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
32368    ///
32369    ///
32370    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESDECLAST.html).
32371    ///
32372    /// Supported operand variants:
32373    ///
32374    /// ```text
32375    /// +---+---------------+
32376    /// | # | Operands      |
32377    /// +---+---------------+
32378    /// | 1 | Xmm, Xmm, Mem |
32379    /// | 2 | Xmm, Xmm, Xmm |
32380    /// | 3 | Ymm, Ymm, Mem |
32381    /// | 4 | Ymm, Ymm, Ymm |
32382    /// | 5 | Zmm, Zmm, Mem |
32383    /// | 6 | Zmm, Zmm, Zmm |
32384    /// +---+---------------+
32385    /// ```
32386    #[inline]
32387    pub fn vaesdeclast<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32388    where Assembler<'a>: VaesdeclastEmitter<A, B, C> {
32389        <Self as VaesdeclastEmitter<A, B, C>>::vaesdeclast(self, op0, op1, op2);
32390    }
32391    /// `VAESENC` (VAESENC). 
32392    /// This instruction performs a single round of an AES encryption flow using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
32393    ///
32394    ///
32395    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENC.html).
32396    ///
32397    /// Supported operand variants:
32398    ///
32399    /// ```text
32400    /// +---+---------------+
32401    /// | # | Operands      |
32402    /// +---+---------------+
32403    /// | 1 | Xmm, Xmm, Mem |
32404    /// | 2 | Xmm, Xmm, Xmm |
32405    /// | 3 | Ymm, Ymm, Mem |
32406    /// | 4 | Ymm, Ymm, Ymm |
32407    /// | 5 | Zmm, Zmm, Mem |
32408    /// | 6 | Zmm, Zmm, Zmm |
32409    /// +---+---------------+
32410    /// ```
32411    #[inline]
32412    pub fn vaesenc<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32413    where Assembler<'a>: VaesencEmitter<A, B, C> {
32414        <Self as VaesencEmitter<A, B, C>>::vaesenc(self, op0, op1, op2);
32415    }
32416    /// `VAESENCLAST` (VAESENCLAST). 
32417    /// This instruction performs the last round of an AES encryption flow using one/two/four (depending on vector length) 128-bit data (state) from the first source operand with one/two/four (depending on vector length) round key(s) from the second source operand, and stores the result in the destination operand.
32418    ///
32419    ///
32420    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESENCLAST.html).
32421    ///
32422    /// Supported operand variants:
32423    ///
32424    /// ```text
32425    /// +---+---------------+
32426    /// | # | Operands      |
32427    /// +---+---------------+
32428    /// | 1 | Xmm, Xmm, Mem |
32429    /// | 2 | Xmm, Xmm, Xmm |
32430    /// | 3 | Ymm, Ymm, Mem |
32431    /// | 4 | Ymm, Ymm, Ymm |
32432    /// | 5 | Zmm, Zmm, Mem |
32433    /// | 6 | Zmm, Zmm, Zmm |
32434    /// +---+---------------+
32435    /// ```
32436    #[inline]
32437    pub fn vaesenclast<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32438    where Assembler<'a>: VaesenclastEmitter<A, B, C> {
32439        <Self as VaesenclastEmitter<A, B, C>>::vaesenclast(self, op0, op1, op2);
32440    }
32441    /// `VAESIMC` (VAESIMC). 
32442    /// Perform the InvMixColumns transformation on the source operand and store the result in the destination operand. The destination operand is an XMM register. The source operand can be an XMM register or a 128-bit memory location.
32443    ///
32444    ///
32445    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESIMC.html).
32446    ///
32447    /// Supported operand variants:
32448    ///
32449    /// ```text
32450    /// +---+----------+
32451    /// | # | Operands |
32452    /// +---+----------+
32453    /// | 1 | Xmm, Mem |
32454    /// | 2 | Xmm, Xmm |
32455    /// +---+----------+
32456    /// ```
32457    #[inline]
32458    pub fn vaesimc<A, B>(&mut self, op0: A, op1: B)
32459    where Assembler<'a>: VaesimcEmitter<A, B> {
32460        <Self as VaesimcEmitter<A, B>>::vaesimc(self, op0, op1);
32461    }
32462    /// `VAESKEYGENASSIST` (VAESKEYGENASSIST). 
32463    /// Assist in expanding the AES cipher key, by computing steps towards generating a round key for encryption, using 128-bit data specified in the source operand and an 8-bit round constant specified as an immediate, store the result in the destination operand.
32464    ///
32465    ///
32466    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/AESKEYGENASSIST.html).
32467    ///
32468    /// Supported operand variants:
32469    ///
32470    /// ```text
32471    /// +---+---------------+
32472    /// | # | Operands      |
32473    /// +---+---------------+
32474    /// | 1 | Xmm, Mem, Imm |
32475    /// | 2 | Xmm, Xmm, Imm |
32476    /// +---+---------------+
32477    /// ```
32478    #[inline]
32479    pub fn vaeskeygenassist<A, B, C>(&mut self, op0: A, op1: B, op2: C)
32480    where Assembler<'a>: VaeskeygenassistEmitter<A, B, C> {
32481        <Self as VaeskeygenassistEmitter<A, B, C>>::vaeskeygenassist(self, op0, op1, op2);
32482    }
32483    /// `VBCSTNEBF162PS`.
32484    ///
32485    /// Supported operand variants:
32486    ///
32487    /// ```text
32488    /// +---+----------+
32489    /// | # | Operands |
32490    /// +---+----------+
32491    /// | 1 | Xmm, Mem |
32492    /// | 2 | Ymm, Mem |
32493    /// +---+----------+
32494    /// ```
32495    #[inline]
32496    pub fn vbcstnebf162ps<A, B>(&mut self, op0: A, op1: B)
32497    where Assembler<'a>: Vbcstnebf162psEmitter<A, B> {
32498        <Self as Vbcstnebf162psEmitter<A, B>>::vbcstnebf162ps(self, op0, op1);
32499    }
32500    /// `VBCSTNESH2PS`.
32501    ///
32502    /// Supported operand variants:
32503    ///
32504    /// ```text
32505    /// +---+----------+
32506    /// | # | Operands |
32507    /// +---+----------+
32508    /// | 1 | Xmm, Mem |
32509    /// | 2 | Ymm, Mem |
32510    /// +---+----------+
32511    /// ```
32512    #[inline]
32513    pub fn vbcstnesh2ps<A, B>(&mut self, op0: A, op1: B)
32514    where Assembler<'a>: Vbcstnesh2psEmitter<A, B> {
32515        <Self as Vbcstnesh2psEmitter<A, B>>::vbcstnesh2ps(self, op0, op1);
32516    }
32517    /// `VCMPPH`.
32518    ///
32519    /// Supported operand variants:
32520    ///
32521    /// ```text
32522    /// +---+---------------------+
32523    /// | # | Operands            |
32524    /// +---+---------------------+
32525    /// | 1 | KReg, Xmm, Mem, Imm |
32526    /// | 2 | KReg, Xmm, Xmm, Imm |
32527    /// | 3 | KReg, Ymm, Mem, Imm |
32528    /// | 4 | KReg, Ymm, Ymm, Imm |
32529    /// | 5 | KReg, Zmm, Mem, Imm |
32530    /// | 6 | KReg, Zmm, Zmm, Imm |
32531    /// +---+---------------------+
32532    /// ```
32533    #[inline]
32534    pub fn vcmpph<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32535    where Assembler<'a>: VcmpphEmitter<A, B, C, D> {
32536        <Self as VcmpphEmitter<A, B, C, D>>::vcmpph(self, op0, op1, op2, op3);
32537    }
32538    /// `VCMPPH_MASK`.
32539    ///
32540    /// Supported operand variants:
32541    ///
32542    /// ```text
32543    /// +---+---------------------+
32544    /// | # | Operands            |
32545    /// +---+---------------------+
32546    /// | 1 | KReg, Xmm, Mem, Imm |
32547    /// | 2 | KReg, Xmm, Xmm, Imm |
32548    /// | 3 | KReg, Ymm, Mem, Imm |
32549    /// | 4 | KReg, Ymm, Ymm, Imm |
32550    /// | 5 | KReg, Zmm, Mem, Imm |
32551    /// | 6 | KReg, Zmm, Zmm, Imm |
32552    /// +---+---------------------+
32553    /// ```
32554    #[inline]
32555    pub fn vcmpph_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32556    where Assembler<'a>: VcmpphMaskEmitter<A, B, C, D> {
32557        <Self as VcmpphMaskEmitter<A, B, C, D>>::vcmpph_mask(self, op0, op1, op2, op3);
32558    }
32559    /// `VCMPPH_MASK_SAE`.
32560    ///
32561    /// Supported operand variants:
32562    ///
32563    /// ```text
32564    /// +---+---------------------+
32565    /// | # | Operands            |
32566    /// +---+---------------------+
32567    /// | 1 | KReg, Zmm, Zmm, Imm |
32568    /// +---+---------------------+
32569    /// ```
32570    #[inline]
32571    pub fn vcmpph_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32572    where Assembler<'a>: VcmpphMaskSaeEmitter<A, B, C, D> {
32573        <Self as VcmpphMaskSaeEmitter<A, B, C, D>>::vcmpph_mask_sae(self, op0, op1, op2, op3);
32574    }
32575    /// `VCMPPH_SAE`.
32576    ///
32577    /// Supported operand variants:
32578    ///
32579    /// ```text
32580    /// +---+---------------------+
32581    /// | # | Operands            |
32582    /// +---+---------------------+
32583    /// | 1 | KReg, Zmm, Zmm, Imm |
32584    /// +---+---------------------+
32585    /// ```
32586    #[inline]
32587    pub fn vcmpph_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32588    where Assembler<'a>: VcmpphSaeEmitter<A, B, C, D> {
32589        <Self as VcmpphSaeEmitter<A, B, C, D>>::vcmpph_sae(self, op0, op1, op2, op3);
32590    }
32591    /// `VCMPSH`.
32592    ///
32593    /// Supported operand variants:
32594    ///
32595    /// ```text
32596    /// +---+---------------------+
32597    /// | # | Operands            |
32598    /// +---+---------------------+
32599    /// | 1 | KReg, Xmm, Mem, Imm |
32600    /// | 2 | KReg, Xmm, Xmm, Imm |
32601    /// +---+---------------------+
32602    /// ```
32603    #[inline]
32604    pub fn vcmpsh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32605    where Assembler<'a>: VcmpshEmitter<A, B, C, D> {
32606        <Self as VcmpshEmitter<A, B, C, D>>::vcmpsh(self, op0, op1, op2, op3);
32607    }
32608    /// `VCMPSH_MASK`.
32609    ///
32610    /// Supported operand variants:
32611    ///
32612    /// ```text
32613    /// +---+---------------------+
32614    /// | # | Operands            |
32615    /// +---+---------------------+
32616    /// | 1 | KReg, Xmm, Mem, Imm |
32617    /// | 2 | KReg, Xmm, Xmm, Imm |
32618    /// +---+---------------------+
32619    /// ```
32620    #[inline]
32621    pub fn vcmpsh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32622    where Assembler<'a>: VcmpshMaskEmitter<A, B, C, D> {
32623        <Self as VcmpshMaskEmitter<A, B, C, D>>::vcmpsh_mask(self, op0, op1, op2, op3);
32624    }
32625    /// `VCMPSH_MASK_SAE`.
32626    ///
32627    /// Supported operand variants:
32628    ///
32629    /// ```text
32630    /// +---+---------------------+
32631    /// | # | Operands            |
32632    /// +---+---------------------+
32633    /// | 1 | KReg, Xmm, Xmm, Imm |
32634    /// +---+---------------------+
32635    /// ```
32636    #[inline]
32637    pub fn vcmpsh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32638    where Assembler<'a>: VcmpshMaskSaeEmitter<A, B, C, D> {
32639        <Self as VcmpshMaskSaeEmitter<A, B, C, D>>::vcmpsh_mask_sae(self, op0, op1, op2, op3);
32640    }
32641    /// `VCMPSH_SAE`.
32642    ///
32643    /// Supported operand variants:
32644    ///
32645    /// ```text
32646    /// +---+---------------------+
32647    /// | # | Operands            |
32648    /// +---+---------------------+
32649    /// | 1 | KReg, Xmm, Xmm, Imm |
32650    /// +---+---------------------+
32651    /// ```
32652    #[inline]
32653    pub fn vcmpsh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
32654    where Assembler<'a>: VcmpshSaeEmitter<A, B, C, D> {
32655        <Self as VcmpshSaeEmitter<A, B, C, D>>::vcmpsh_sae(self, op0, op1, op2, op3);
32656    }
32657    /// `VCOMISH`.
32658    ///
32659    /// Supported operand variants:
32660    ///
32661    /// ```text
32662    /// +---+----------+
32663    /// | # | Operands |
32664    /// +---+----------+
32665    /// | 1 | Xmm, Mem |
32666    /// | 2 | Xmm, Xmm |
32667    /// +---+----------+
32668    /// ```
32669    #[inline]
32670    pub fn vcomish<A, B>(&mut self, op0: A, op1: B)
32671    where Assembler<'a>: VcomishEmitter<A, B> {
32672        <Self as VcomishEmitter<A, B>>::vcomish(self, op0, op1);
32673    }
32674    /// `VCOMISH_SAE`.
32675    ///
32676    /// Supported operand variants:
32677    ///
32678    /// ```text
32679    /// +---+----------+
32680    /// | # | Operands |
32681    /// +---+----------+
32682    /// | 1 | Xmm, Xmm |
32683    /// +---+----------+
32684    /// ```
32685    #[inline]
32686    pub fn vcomish_sae<A, B>(&mut self, op0: A, op1: B)
32687    where Assembler<'a>: VcomishSaeEmitter<A, B> {
32688        <Self as VcomishSaeEmitter<A, B>>::vcomish_sae(self, op0, op1);
32689    }
32690    /// `VCVTDQ2PH`.
32691    ///
32692    /// Supported operand variants:
32693    ///
32694    /// ```text
32695    /// +---+----------+
32696    /// | # | Operands |
32697    /// +---+----------+
32698    /// | 1 | Xmm, Mem |
32699    /// | 2 | Xmm, Xmm |
32700    /// | 3 | Xmm, Ymm |
32701    /// | 4 | Ymm, Mem |
32702    /// | 5 | Ymm, Zmm |
32703    /// +---+----------+
32704    /// ```
32705    #[inline]
32706    pub fn vcvtdq2ph<A, B>(&mut self, op0: A, op1: B)
32707    where Assembler<'a>: Vcvtdq2phEmitter<A, B> {
32708        <Self as Vcvtdq2phEmitter<A, B>>::vcvtdq2ph(self, op0, op1);
32709    }
32710    /// `VCVTDQ2PH_ER`.
32711    ///
32712    /// Supported operand variants:
32713    ///
32714    /// ```text
32715    /// +---+----------+
32716    /// | # | Operands |
32717    /// +---+----------+
32718    /// | 1 | Ymm, Zmm |
32719    /// +---+----------+
32720    /// ```
32721    #[inline]
32722    pub fn vcvtdq2ph_er<A, B>(&mut self, op0: A, op1: B)
32723    where Assembler<'a>: Vcvtdq2phErEmitter<A, B> {
32724        <Self as Vcvtdq2phErEmitter<A, B>>::vcvtdq2ph_er(self, op0, op1);
32725    }
32726    /// `VCVTDQ2PH_MASK`.
32727    ///
32728    /// Supported operand variants:
32729    ///
32730    /// ```text
32731    /// +---+----------+
32732    /// | # | Operands |
32733    /// +---+----------+
32734    /// | 1 | Xmm, Mem |
32735    /// | 2 | Xmm, Xmm |
32736    /// | 3 | Xmm, Ymm |
32737    /// | 4 | Ymm, Mem |
32738    /// | 5 | Ymm, Zmm |
32739    /// +---+----------+
32740    /// ```
32741    #[inline]
32742    pub fn vcvtdq2ph_mask<A, B>(&mut self, op0: A, op1: B)
32743    where Assembler<'a>: Vcvtdq2phMaskEmitter<A, B> {
32744        <Self as Vcvtdq2phMaskEmitter<A, B>>::vcvtdq2ph_mask(self, op0, op1);
32745    }
32746    /// `VCVTDQ2PH_MASK_ER`.
32747    ///
32748    /// Supported operand variants:
32749    ///
32750    /// ```text
32751    /// +---+----------+
32752    /// | # | Operands |
32753    /// +---+----------+
32754    /// | 1 | Ymm, Zmm |
32755    /// +---+----------+
32756    /// ```
32757    #[inline]
32758    pub fn vcvtdq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
32759    where Assembler<'a>: Vcvtdq2phMaskErEmitter<A, B> {
32760        <Self as Vcvtdq2phMaskErEmitter<A, B>>::vcvtdq2ph_mask_er(self, op0, op1);
32761    }
32762    /// `VCVTDQ2PH_MASKZ`.
32763    ///
32764    /// Supported operand variants:
32765    ///
32766    /// ```text
32767    /// +---+----------+
32768    /// | # | Operands |
32769    /// +---+----------+
32770    /// | 1 | Xmm, Mem |
32771    /// | 2 | Xmm, Xmm |
32772    /// | 3 | Xmm, Ymm |
32773    /// | 4 | Ymm, Mem |
32774    /// | 5 | Ymm, Zmm |
32775    /// +---+----------+
32776    /// ```
32777    #[inline]
32778    pub fn vcvtdq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
32779    where Assembler<'a>: Vcvtdq2phMaskzEmitter<A, B> {
32780        <Self as Vcvtdq2phMaskzEmitter<A, B>>::vcvtdq2ph_maskz(self, op0, op1);
32781    }
32782    /// `VCVTDQ2PH_MASKZ_ER`.
32783    ///
32784    /// Supported operand variants:
32785    ///
32786    /// ```text
32787    /// +---+----------+
32788    /// | # | Operands |
32789    /// +---+----------+
32790    /// | 1 | Ymm, Zmm |
32791    /// +---+----------+
32792    /// ```
32793    #[inline]
32794    pub fn vcvtdq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
32795    where Assembler<'a>: Vcvtdq2phMaskzErEmitter<A, B> {
32796        <Self as Vcvtdq2phMaskzErEmitter<A, B>>::vcvtdq2ph_maskz_er(self, op0, op1);
32797    }
32798    /// `VCVTNEEBF162PS`.
32799    ///
32800    /// Supported operand variants:
32801    ///
32802    /// ```text
32803    /// +---+----------+
32804    /// | # | Operands |
32805    /// +---+----------+
32806    /// | 1 | Xmm, Mem |
32807    /// | 2 | Ymm, Mem |
32808    /// +---+----------+
32809    /// ```
32810    #[inline]
32811    pub fn vcvtneebf162ps<A, B>(&mut self, op0: A, op1: B)
32812    where Assembler<'a>: Vcvtneebf162psEmitter<A, B> {
32813        <Self as Vcvtneebf162psEmitter<A, B>>::vcvtneebf162ps(self, op0, op1);
32814    }
32815    /// `VCVTNEEPH2PS`.
32816    ///
32817    /// Supported operand variants:
32818    ///
32819    /// ```text
32820    /// +---+----------+
32821    /// | # | Operands |
32822    /// +---+----------+
32823    /// | 1 | Xmm, Mem |
32824    /// | 2 | Ymm, Mem |
32825    /// +---+----------+
32826    /// ```
32827    #[inline]
32828    pub fn vcvtneeph2ps<A, B>(&mut self, op0: A, op1: B)
32829    where Assembler<'a>: Vcvtneeph2psEmitter<A, B> {
32830        <Self as Vcvtneeph2psEmitter<A, B>>::vcvtneeph2ps(self, op0, op1);
32831    }
32832    /// `VCVTNEOBF162PS`.
32833    ///
32834    /// Supported operand variants:
32835    ///
32836    /// ```text
32837    /// +---+----------+
32838    /// | # | Operands |
32839    /// +---+----------+
32840    /// | 1 | Xmm, Mem |
32841    /// | 2 | Ymm, Mem |
32842    /// +---+----------+
32843    /// ```
32844    #[inline]
32845    pub fn vcvtneobf162ps<A, B>(&mut self, op0: A, op1: B)
32846    where Assembler<'a>: Vcvtneobf162psEmitter<A, B> {
32847        <Self as Vcvtneobf162psEmitter<A, B>>::vcvtneobf162ps(self, op0, op1);
32848    }
32849    /// `VCVTNEOPH2PS`.
32850    ///
32851    /// Supported operand variants:
32852    ///
32853    /// ```text
32854    /// +---+----------+
32855    /// | # | Operands |
32856    /// +---+----------+
32857    /// | 1 | Xmm, Mem |
32858    /// | 2 | Ymm, Mem |
32859    /// +---+----------+
32860    /// ```
32861    #[inline]
32862    pub fn vcvtneoph2ps<A, B>(&mut self, op0: A, op1: B)
32863    where Assembler<'a>: Vcvtneoph2psEmitter<A, B> {
32864        <Self as Vcvtneoph2psEmitter<A, B>>::vcvtneoph2ps(self, op0, op1);
32865    }
32866    /// `VCVTPD2PH`.
32867    ///
32868    /// Supported operand variants:
32869    ///
32870    /// ```text
32871    /// +---+----------+
32872    /// | # | Operands |
32873    /// +---+----------+
32874    /// | 1 | Xmm, Mem |
32875    /// | 2 | Xmm, Xmm |
32876    /// | 3 | Xmm, Ymm |
32877    /// | 4 | Xmm, Zmm |
32878    /// +---+----------+
32879    /// ```
32880    #[inline]
32881    pub fn vcvtpd2ph<A, B>(&mut self, op0: A, op1: B)
32882    where Assembler<'a>: Vcvtpd2phEmitter<A, B> {
32883        <Self as Vcvtpd2phEmitter<A, B>>::vcvtpd2ph(self, op0, op1);
32884    }
32885    /// `VCVTPD2PH_ER`.
32886    ///
32887    /// Supported operand variants:
32888    ///
32889    /// ```text
32890    /// +---+----------+
32891    /// | # | Operands |
32892    /// +---+----------+
32893    /// | 1 | Xmm, Zmm |
32894    /// +---+----------+
32895    /// ```
32896    #[inline]
32897    pub fn vcvtpd2ph_er<A, B>(&mut self, op0: A, op1: B)
32898    where Assembler<'a>: Vcvtpd2phErEmitter<A, B> {
32899        <Self as Vcvtpd2phErEmitter<A, B>>::vcvtpd2ph_er(self, op0, op1);
32900    }
32901    /// `VCVTPD2PH_MASK`.
32902    ///
32903    /// Supported operand variants:
32904    ///
32905    /// ```text
32906    /// +---+----------+
32907    /// | # | Operands |
32908    /// +---+----------+
32909    /// | 1 | Xmm, Mem |
32910    /// | 2 | Xmm, Xmm |
32911    /// | 3 | Xmm, Ymm |
32912    /// | 4 | Xmm, Zmm |
32913    /// +---+----------+
32914    /// ```
32915    #[inline]
32916    pub fn vcvtpd2ph_mask<A, B>(&mut self, op0: A, op1: B)
32917    where Assembler<'a>: Vcvtpd2phMaskEmitter<A, B> {
32918        <Self as Vcvtpd2phMaskEmitter<A, B>>::vcvtpd2ph_mask(self, op0, op1);
32919    }
32920    /// `VCVTPD2PH_MASK_ER`.
32921    ///
32922    /// Supported operand variants:
32923    ///
32924    /// ```text
32925    /// +---+----------+
32926    /// | # | Operands |
32927    /// +---+----------+
32928    /// | 1 | Xmm, Zmm |
32929    /// +---+----------+
32930    /// ```
32931    #[inline]
32932    pub fn vcvtpd2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
32933    where Assembler<'a>: Vcvtpd2phMaskErEmitter<A, B> {
32934        <Self as Vcvtpd2phMaskErEmitter<A, B>>::vcvtpd2ph_mask_er(self, op0, op1);
32935    }
32936    /// `VCVTPD2PH_MASKZ`.
32937    ///
32938    /// Supported operand variants:
32939    ///
32940    /// ```text
32941    /// +---+----------+
32942    /// | # | Operands |
32943    /// +---+----------+
32944    /// | 1 | Xmm, Mem |
32945    /// | 2 | Xmm, Xmm |
32946    /// | 3 | Xmm, Ymm |
32947    /// | 4 | Xmm, Zmm |
32948    /// +---+----------+
32949    /// ```
32950    #[inline]
32951    pub fn vcvtpd2ph_maskz<A, B>(&mut self, op0: A, op1: B)
32952    where Assembler<'a>: Vcvtpd2phMaskzEmitter<A, B> {
32953        <Self as Vcvtpd2phMaskzEmitter<A, B>>::vcvtpd2ph_maskz(self, op0, op1);
32954    }
32955    /// `VCVTPD2PH_MASKZ_ER`.
32956    ///
32957    /// Supported operand variants:
32958    ///
32959    /// ```text
32960    /// +---+----------+
32961    /// | # | Operands |
32962    /// +---+----------+
32963    /// | 1 | Xmm, Zmm |
32964    /// +---+----------+
32965    /// ```
32966    #[inline]
32967    pub fn vcvtpd2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
32968    where Assembler<'a>: Vcvtpd2phMaskzErEmitter<A, B> {
32969        <Self as Vcvtpd2phMaskzErEmitter<A, B>>::vcvtpd2ph_maskz_er(self, op0, op1);
32970    }
32971    /// `VCVTPH2DQ`.
32972    ///
32973    /// Supported operand variants:
32974    ///
32975    /// ```text
32976    /// +---+----------+
32977    /// | # | Operands |
32978    /// +---+----------+
32979    /// | 1 | Xmm, Mem |
32980    /// | 2 | Xmm, Xmm |
32981    /// | 3 | Ymm, Mem |
32982    /// | 4 | Ymm, Xmm |
32983    /// | 5 | Zmm, Mem |
32984    /// | 6 | Zmm, Ymm |
32985    /// +---+----------+
32986    /// ```
32987    #[inline]
32988    pub fn vcvtph2dq<A, B>(&mut self, op0: A, op1: B)
32989    where Assembler<'a>: Vcvtph2dqEmitter<A, B> {
32990        <Self as Vcvtph2dqEmitter<A, B>>::vcvtph2dq(self, op0, op1);
32991    }
32992    /// `VCVTPH2DQ_ER`.
32993    ///
32994    /// Supported operand variants:
32995    ///
32996    /// ```text
32997    /// +---+----------+
32998    /// | # | Operands |
32999    /// +---+----------+
33000    /// | 1 | Zmm, Ymm |
33001    /// +---+----------+
33002    /// ```
33003    #[inline]
33004    pub fn vcvtph2dq_er<A, B>(&mut self, op0: A, op1: B)
33005    where Assembler<'a>: Vcvtph2dqErEmitter<A, B> {
33006        <Self as Vcvtph2dqErEmitter<A, B>>::vcvtph2dq_er(self, op0, op1);
33007    }
33008    /// `VCVTPH2DQ_MASK`.
33009    ///
33010    /// Supported operand variants:
33011    ///
33012    /// ```text
33013    /// +---+----------+
33014    /// | # | Operands |
33015    /// +---+----------+
33016    /// | 1 | Xmm, Mem |
33017    /// | 2 | Xmm, Xmm |
33018    /// | 3 | Ymm, Mem |
33019    /// | 4 | Ymm, Xmm |
33020    /// | 5 | Zmm, Mem |
33021    /// | 6 | Zmm, Ymm |
33022    /// +---+----------+
33023    /// ```
33024    #[inline]
33025    pub fn vcvtph2dq_mask<A, B>(&mut self, op0: A, op1: B)
33026    where Assembler<'a>: Vcvtph2dqMaskEmitter<A, B> {
33027        <Self as Vcvtph2dqMaskEmitter<A, B>>::vcvtph2dq_mask(self, op0, op1);
33028    }
33029    /// `VCVTPH2DQ_MASK_ER`.
33030    ///
33031    /// Supported operand variants:
33032    ///
33033    /// ```text
33034    /// +---+----------+
33035    /// | # | Operands |
33036    /// +---+----------+
33037    /// | 1 | Zmm, Ymm |
33038    /// +---+----------+
33039    /// ```
33040    #[inline]
33041    pub fn vcvtph2dq_mask_er<A, B>(&mut self, op0: A, op1: B)
33042    where Assembler<'a>: Vcvtph2dqMaskErEmitter<A, B> {
33043        <Self as Vcvtph2dqMaskErEmitter<A, B>>::vcvtph2dq_mask_er(self, op0, op1);
33044    }
33045    /// `VCVTPH2DQ_MASKZ`.
33046    ///
33047    /// Supported operand variants:
33048    ///
33049    /// ```text
33050    /// +---+----------+
33051    /// | # | Operands |
33052    /// +---+----------+
33053    /// | 1 | Xmm, Mem |
33054    /// | 2 | Xmm, Xmm |
33055    /// | 3 | Ymm, Mem |
33056    /// | 4 | Ymm, Xmm |
33057    /// | 5 | Zmm, Mem |
33058    /// | 6 | Zmm, Ymm |
33059    /// +---+----------+
33060    /// ```
33061    #[inline]
33062    pub fn vcvtph2dq_maskz<A, B>(&mut self, op0: A, op1: B)
33063    where Assembler<'a>: Vcvtph2dqMaskzEmitter<A, B> {
33064        <Self as Vcvtph2dqMaskzEmitter<A, B>>::vcvtph2dq_maskz(self, op0, op1);
33065    }
33066    /// `VCVTPH2DQ_MASKZ_ER`.
33067    ///
33068    /// Supported operand variants:
33069    ///
33070    /// ```text
33071    /// +---+----------+
33072    /// | # | Operands |
33073    /// +---+----------+
33074    /// | 1 | Zmm, Ymm |
33075    /// +---+----------+
33076    /// ```
33077    #[inline]
33078    pub fn vcvtph2dq_maskz_er<A, B>(&mut self, op0: A, op1: B)
33079    where Assembler<'a>: Vcvtph2dqMaskzErEmitter<A, B> {
33080        <Self as Vcvtph2dqMaskzErEmitter<A, B>>::vcvtph2dq_maskz_er(self, op0, op1);
33081    }
33082    /// `VCVTPH2PD`.
33083    ///
33084    /// Supported operand variants:
33085    ///
33086    /// ```text
33087    /// +---+----------+
33088    /// | # | Operands |
33089    /// +---+----------+
33090    /// | 1 | Xmm, Mem |
33091    /// | 2 | Xmm, Xmm |
33092    /// | 3 | Ymm, Mem |
33093    /// | 4 | Ymm, Xmm |
33094    /// | 5 | Zmm, Mem |
33095    /// | 6 | Zmm, Xmm |
33096    /// +---+----------+
33097    /// ```
33098    #[inline]
33099    pub fn vcvtph2pd<A, B>(&mut self, op0: A, op1: B)
33100    where Assembler<'a>: Vcvtph2pdEmitter<A, B> {
33101        <Self as Vcvtph2pdEmitter<A, B>>::vcvtph2pd(self, op0, op1);
33102    }
33103    /// `VCVTPH2PD_MASK`.
33104    ///
33105    /// Supported operand variants:
33106    ///
33107    /// ```text
33108    /// +---+----------+
33109    /// | # | Operands |
33110    /// +---+----------+
33111    /// | 1 | Xmm, Mem |
33112    /// | 2 | Xmm, Xmm |
33113    /// | 3 | Ymm, Mem |
33114    /// | 4 | Ymm, Xmm |
33115    /// | 5 | Zmm, Mem |
33116    /// | 6 | Zmm, Xmm |
33117    /// +---+----------+
33118    /// ```
33119    #[inline]
33120    pub fn vcvtph2pd_mask<A, B>(&mut self, op0: A, op1: B)
33121    where Assembler<'a>: Vcvtph2pdMaskEmitter<A, B> {
33122        <Self as Vcvtph2pdMaskEmitter<A, B>>::vcvtph2pd_mask(self, op0, op1);
33123    }
33124    /// `VCVTPH2PD_MASK_SAE`.
33125    ///
33126    /// Supported operand variants:
33127    ///
33128    /// ```text
33129    /// +---+----------+
33130    /// | # | Operands |
33131    /// +---+----------+
33132    /// | 1 | Zmm, Xmm |
33133    /// +---+----------+
33134    /// ```
33135    #[inline]
33136    pub fn vcvtph2pd_mask_sae<A, B>(&mut self, op0: A, op1: B)
33137    where Assembler<'a>: Vcvtph2pdMaskSaeEmitter<A, B> {
33138        <Self as Vcvtph2pdMaskSaeEmitter<A, B>>::vcvtph2pd_mask_sae(self, op0, op1);
33139    }
33140    /// `VCVTPH2PD_MASKZ`.
33141    ///
33142    /// Supported operand variants:
33143    ///
33144    /// ```text
33145    /// +---+----------+
33146    /// | # | Operands |
33147    /// +---+----------+
33148    /// | 1 | Xmm, Mem |
33149    /// | 2 | Xmm, Xmm |
33150    /// | 3 | Ymm, Mem |
33151    /// | 4 | Ymm, Xmm |
33152    /// | 5 | Zmm, Mem |
33153    /// | 6 | Zmm, Xmm |
33154    /// +---+----------+
33155    /// ```
33156    #[inline]
33157    pub fn vcvtph2pd_maskz<A, B>(&mut self, op0: A, op1: B)
33158    where Assembler<'a>: Vcvtph2pdMaskzEmitter<A, B> {
33159        <Self as Vcvtph2pdMaskzEmitter<A, B>>::vcvtph2pd_maskz(self, op0, op1);
33160    }
33161    /// `VCVTPH2PD_MASKZ_SAE`.
33162    ///
33163    /// Supported operand variants:
33164    ///
33165    /// ```text
33166    /// +---+----------+
33167    /// | # | Operands |
33168    /// +---+----------+
33169    /// | 1 | Zmm, Xmm |
33170    /// +---+----------+
33171    /// ```
33172    #[inline]
33173    pub fn vcvtph2pd_maskz_sae<A, B>(&mut self, op0: A, op1: B)
33174    where Assembler<'a>: Vcvtph2pdMaskzSaeEmitter<A, B> {
33175        <Self as Vcvtph2pdMaskzSaeEmitter<A, B>>::vcvtph2pd_maskz_sae(self, op0, op1);
33176    }
33177    /// `VCVTPH2PD_SAE`.
33178    ///
33179    /// Supported operand variants:
33180    ///
33181    /// ```text
33182    /// +---+----------+
33183    /// | # | Operands |
33184    /// +---+----------+
33185    /// | 1 | Zmm, Xmm |
33186    /// +---+----------+
33187    /// ```
33188    #[inline]
33189    pub fn vcvtph2pd_sae<A, B>(&mut self, op0: A, op1: B)
33190    where Assembler<'a>: Vcvtph2pdSaeEmitter<A, B> {
33191        <Self as Vcvtph2pdSaeEmitter<A, B>>::vcvtph2pd_sae(self, op0, op1);
33192    }
33193    /// `VCVTPH2PSX` (VCVTPH2PSX). 
33194    /// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
33195    ///
33196    ///
33197    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
33198    ///
33199    /// Supported operand variants:
33200    ///
33201    /// ```text
33202    /// +---+----------+
33203    /// | # | Operands |
33204    /// +---+----------+
33205    /// | 1 | Xmm, Mem |
33206    /// | 2 | Xmm, Xmm |
33207    /// | 3 | Ymm, Mem |
33208    /// | 4 | Ymm, Xmm |
33209    /// | 5 | Zmm, Mem |
33210    /// | 6 | Zmm, Ymm |
33211    /// +---+----------+
33212    /// ```
33213    #[inline]
33214    pub fn vcvtph2psx<A, B>(&mut self, op0: A, op1: B)
33215    where Assembler<'a>: Vcvtph2psxEmitter<A, B> {
33216        <Self as Vcvtph2psxEmitter<A, B>>::vcvtph2psx(self, op0, op1);
33217    }
33218    /// `VCVTPH2PSX_MASK` (VCVTPH2PSX). 
33219    /// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
33220    ///
33221    ///
33222    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
33223    ///
33224    /// Supported operand variants:
33225    ///
33226    /// ```text
33227    /// +---+----------+
33228    /// | # | Operands |
33229    /// +---+----------+
33230    /// | 1 | Xmm, Mem |
33231    /// | 2 | Xmm, Xmm |
33232    /// | 3 | Ymm, Mem |
33233    /// | 4 | Ymm, Xmm |
33234    /// | 5 | Zmm, Mem |
33235    /// | 6 | Zmm, Ymm |
33236    /// +---+----------+
33237    /// ```
33238    #[inline]
33239    pub fn vcvtph2psx_mask<A, B>(&mut self, op0: A, op1: B)
33240    where Assembler<'a>: Vcvtph2psxMaskEmitter<A, B> {
33241        <Self as Vcvtph2psxMaskEmitter<A, B>>::vcvtph2psx_mask(self, op0, op1);
33242    }
33243    /// `VCVTPH2PSX_MASK_SAE` (VCVTPH2PSX). 
33244    /// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
33245    ///
33246    ///
33247    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
33248    ///
33249    /// Supported operand variants:
33250    ///
33251    /// ```text
33252    /// +---+----------+
33253    /// | # | Operands |
33254    /// +---+----------+
33255    /// | 1 | Zmm, Ymm |
33256    /// +---+----------+
33257    /// ```
33258    #[inline]
33259    pub fn vcvtph2psx_mask_sae<A, B>(&mut self, op0: A, op1: B)
33260    where Assembler<'a>: Vcvtph2psxMaskSaeEmitter<A, B> {
33261        <Self as Vcvtph2psxMaskSaeEmitter<A, B>>::vcvtph2psx_mask_sae(self, op0, op1);
33262    }
33263    /// `VCVTPH2PSX_MASKZ` (VCVTPH2PSX). 
33264    /// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
33265    ///
33266    ///
33267    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
33268    ///
33269    /// Supported operand variants:
33270    ///
33271    /// ```text
33272    /// +---+----------+
33273    /// | # | Operands |
33274    /// +---+----------+
33275    /// | 1 | Xmm, Mem |
33276    /// | 2 | Xmm, Xmm |
33277    /// | 3 | Ymm, Mem |
33278    /// | 4 | Ymm, Xmm |
33279    /// | 5 | Zmm, Mem |
33280    /// | 6 | Zmm, Ymm |
33281    /// +---+----------+
33282    /// ```
33283    #[inline]
33284    pub fn vcvtph2psx_maskz<A, B>(&mut self, op0: A, op1: B)
33285    where Assembler<'a>: Vcvtph2psxMaskzEmitter<A, B> {
33286        <Self as Vcvtph2psxMaskzEmitter<A, B>>::vcvtph2psx_maskz(self, op0, op1);
33287    }
33288    /// `VCVTPH2PSX_MASKZ_SAE` (VCVTPH2PSX). 
33289    /// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
33290    ///
33291    ///
33292    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
33293    ///
33294    /// Supported operand variants:
33295    ///
33296    /// ```text
33297    /// +---+----------+
33298    /// | # | Operands |
33299    /// +---+----------+
33300    /// | 1 | Zmm, Ymm |
33301    /// +---+----------+
33302    /// ```
33303    #[inline]
33304    pub fn vcvtph2psx_maskz_sae<A, B>(&mut self, op0: A, op1: B)
33305    where Assembler<'a>: Vcvtph2psxMaskzSaeEmitter<A, B> {
33306        <Self as Vcvtph2psxMaskzSaeEmitter<A, B>>::vcvtph2psx_maskz_sae(self, op0, op1);
33307    }
33308    /// `VCVTPH2PSX_SAE` (VCVTPH2PSX). 
33309    /// This instruction converts packed half precision (16-bits) floating-point values in the low-order bits of the source operand (the second operand) to packed single precision floating-point values and writes the converted values into the destination operand (the first operand).
33310    ///
33311    ///
33312    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPH2PS%3AVCVTPH2PSX.html).
33313    ///
33314    /// Supported operand variants:
33315    ///
33316    /// ```text
33317    /// +---+----------+
33318    /// | # | Operands |
33319    /// +---+----------+
33320    /// | 1 | Zmm, Ymm |
33321    /// +---+----------+
33322    /// ```
33323    #[inline]
33324    pub fn vcvtph2psx_sae<A, B>(&mut self, op0: A, op1: B)
33325    where Assembler<'a>: Vcvtph2psxSaeEmitter<A, B> {
33326        <Self as Vcvtph2psxSaeEmitter<A, B>>::vcvtph2psx_sae(self, op0, op1);
33327    }
33328    /// `VCVTPH2QQ`.
33329    ///
33330    /// Supported operand variants:
33331    ///
33332    /// ```text
33333    /// +---+----------+
33334    /// | # | Operands |
33335    /// +---+----------+
33336    /// | 1 | Xmm, Mem |
33337    /// | 2 | Xmm, Xmm |
33338    /// | 3 | Ymm, Mem |
33339    /// | 4 | Ymm, Xmm |
33340    /// | 5 | Zmm, Mem |
33341    /// | 6 | Zmm, Xmm |
33342    /// +---+----------+
33343    /// ```
33344    #[inline]
33345    pub fn vcvtph2qq<A, B>(&mut self, op0: A, op1: B)
33346    where Assembler<'a>: Vcvtph2qqEmitter<A, B> {
33347        <Self as Vcvtph2qqEmitter<A, B>>::vcvtph2qq(self, op0, op1);
33348    }
33349    /// `VCVTPH2QQ_ER`.
33350    ///
33351    /// Supported operand variants:
33352    ///
33353    /// ```text
33354    /// +---+----------+
33355    /// | # | Operands |
33356    /// +---+----------+
33357    /// | 1 | Zmm, Xmm |
33358    /// +---+----------+
33359    /// ```
33360    #[inline]
33361    pub fn vcvtph2qq_er<A, B>(&mut self, op0: A, op1: B)
33362    where Assembler<'a>: Vcvtph2qqErEmitter<A, B> {
33363        <Self as Vcvtph2qqErEmitter<A, B>>::vcvtph2qq_er(self, op0, op1);
33364    }
33365    /// `VCVTPH2QQ_MASK`.
33366    ///
33367    /// Supported operand variants:
33368    ///
33369    /// ```text
33370    /// +---+----------+
33371    /// | # | Operands |
33372    /// +---+----------+
33373    /// | 1 | Xmm, Mem |
33374    /// | 2 | Xmm, Xmm |
33375    /// | 3 | Ymm, Mem |
33376    /// | 4 | Ymm, Xmm |
33377    /// | 5 | Zmm, Mem |
33378    /// | 6 | Zmm, Xmm |
33379    /// +---+----------+
33380    /// ```
33381    #[inline]
33382    pub fn vcvtph2qq_mask<A, B>(&mut self, op0: A, op1: B)
33383    where Assembler<'a>: Vcvtph2qqMaskEmitter<A, B> {
33384        <Self as Vcvtph2qqMaskEmitter<A, B>>::vcvtph2qq_mask(self, op0, op1);
33385    }
33386    /// `VCVTPH2QQ_MASK_ER`.
33387    ///
33388    /// Supported operand variants:
33389    ///
33390    /// ```text
33391    /// +---+----------+
33392    /// | # | Operands |
33393    /// +---+----------+
33394    /// | 1 | Zmm, Xmm |
33395    /// +---+----------+
33396    /// ```
33397    #[inline]
33398    pub fn vcvtph2qq_mask_er<A, B>(&mut self, op0: A, op1: B)
33399    where Assembler<'a>: Vcvtph2qqMaskErEmitter<A, B> {
33400        <Self as Vcvtph2qqMaskErEmitter<A, B>>::vcvtph2qq_mask_er(self, op0, op1);
33401    }
33402    /// `VCVTPH2QQ_MASKZ`.
33403    ///
33404    /// Supported operand variants:
33405    ///
33406    /// ```text
33407    /// +---+----------+
33408    /// | # | Operands |
33409    /// +---+----------+
33410    /// | 1 | Xmm, Mem |
33411    /// | 2 | Xmm, Xmm |
33412    /// | 3 | Ymm, Mem |
33413    /// | 4 | Ymm, Xmm |
33414    /// | 5 | Zmm, Mem |
33415    /// | 6 | Zmm, Xmm |
33416    /// +---+----------+
33417    /// ```
33418    #[inline]
33419    pub fn vcvtph2qq_maskz<A, B>(&mut self, op0: A, op1: B)
33420    where Assembler<'a>: Vcvtph2qqMaskzEmitter<A, B> {
33421        <Self as Vcvtph2qqMaskzEmitter<A, B>>::vcvtph2qq_maskz(self, op0, op1);
33422    }
33423    /// `VCVTPH2QQ_MASKZ_ER`.
33424    ///
33425    /// Supported operand variants:
33426    ///
33427    /// ```text
33428    /// +---+----------+
33429    /// | # | Operands |
33430    /// +---+----------+
33431    /// | 1 | Zmm, Xmm |
33432    /// +---+----------+
33433    /// ```
33434    #[inline]
33435    pub fn vcvtph2qq_maskz_er<A, B>(&mut self, op0: A, op1: B)
33436    where Assembler<'a>: Vcvtph2qqMaskzErEmitter<A, B> {
33437        <Self as Vcvtph2qqMaskzErEmitter<A, B>>::vcvtph2qq_maskz_er(self, op0, op1);
33438    }
33439    /// `VCVTPH2UDQ`.
33440    ///
33441    /// Supported operand variants:
33442    ///
33443    /// ```text
33444    /// +---+----------+
33445    /// | # | Operands |
33446    /// +---+----------+
33447    /// | 1 | Xmm, Mem |
33448    /// | 2 | Xmm, Xmm |
33449    /// | 3 | Ymm, Mem |
33450    /// | 4 | Ymm, Xmm |
33451    /// | 5 | Zmm, Mem |
33452    /// | 6 | Zmm, Ymm |
33453    /// +---+----------+
33454    /// ```
33455    #[inline]
33456    pub fn vcvtph2udq<A, B>(&mut self, op0: A, op1: B)
33457    where Assembler<'a>: Vcvtph2udqEmitter<A, B> {
33458        <Self as Vcvtph2udqEmitter<A, B>>::vcvtph2udq(self, op0, op1);
33459    }
33460    /// `VCVTPH2UDQ_ER`.
33461    ///
33462    /// Supported operand variants:
33463    ///
33464    /// ```text
33465    /// +---+----------+
33466    /// | # | Operands |
33467    /// +---+----------+
33468    /// | 1 | Zmm, Ymm |
33469    /// +---+----------+
33470    /// ```
33471    #[inline]
33472    pub fn vcvtph2udq_er<A, B>(&mut self, op0: A, op1: B)
33473    where Assembler<'a>: Vcvtph2udqErEmitter<A, B> {
33474        <Self as Vcvtph2udqErEmitter<A, B>>::vcvtph2udq_er(self, op0, op1);
33475    }
33476    /// `VCVTPH2UDQ_MASK`.
33477    ///
33478    /// Supported operand variants:
33479    ///
33480    /// ```text
33481    /// +---+----------+
33482    /// | # | Operands |
33483    /// +---+----------+
33484    /// | 1 | Xmm, Mem |
33485    /// | 2 | Xmm, Xmm |
33486    /// | 3 | Ymm, Mem |
33487    /// | 4 | Ymm, Xmm |
33488    /// | 5 | Zmm, Mem |
33489    /// | 6 | Zmm, Ymm |
33490    /// +---+----------+
33491    /// ```
33492    #[inline]
33493    pub fn vcvtph2udq_mask<A, B>(&mut self, op0: A, op1: B)
33494    where Assembler<'a>: Vcvtph2udqMaskEmitter<A, B> {
33495        <Self as Vcvtph2udqMaskEmitter<A, B>>::vcvtph2udq_mask(self, op0, op1);
33496    }
33497    /// `VCVTPH2UDQ_MASK_ER`.
33498    ///
33499    /// Supported operand variants:
33500    ///
33501    /// ```text
33502    /// +---+----------+
33503    /// | # | Operands |
33504    /// +---+----------+
33505    /// | 1 | Zmm, Ymm |
33506    /// +---+----------+
33507    /// ```
33508    #[inline]
33509    pub fn vcvtph2udq_mask_er<A, B>(&mut self, op0: A, op1: B)
33510    where Assembler<'a>: Vcvtph2udqMaskErEmitter<A, B> {
33511        <Self as Vcvtph2udqMaskErEmitter<A, B>>::vcvtph2udq_mask_er(self, op0, op1);
33512    }
33513    /// `VCVTPH2UDQ_MASKZ`.
33514    ///
33515    /// Supported operand variants:
33516    ///
33517    /// ```text
33518    /// +---+----------+
33519    /// | # | Operands |
33520    /// +---+----------+
33521    /// | 1 | Xmm, Mem |
33522    /// | 2 | Xmm, Xmm |
33523    /// | 3 | Ymm, Mem |
33524    /// | 4 | Ymm, Xmm |
33525    /// | 5 | Zmm, Mem |
33526    /// | 6 | Zmm, Ymm |
33527    /// +---+----------+
33528    /// ```
33529    #[inline]
33530    pub fn vcvtph2udq_maskz<A, B>(&mut self, op0: A, op1: B)
33531    where Assembler<'a>: Vcvtph2udqMaskzEmitter<A, B> {
33532        <Self as Vcvtph2udqMaskzEmitter<A, B>>::vcvtph2udq_maskz(self, op0, op1);
33533    }
33534    /// `VCVTPH2UDQ_MASKZ_ER`.
33535    ///
33536    /// Supported operand variants:
33537    ///
33538    /// ```text
33539    /// +---+----------+
33540    /// | # | Operands |
33541    /// +---+----------+
33542    /// | 1 | Zmm, Ymm |
33543    /// +---+----------+
33544    /// ```
33545    #[inline]
33546    pub fn vcvtph2udq_maskz_er<A, B>(&mut self, op0: A, op1: B)
33547    where Assembler<'a>: Vcvtph2udqMaskzErEmitter<A, B> {
33548        <Self as Vcvtph2udqMaskzErEmitter<A, B>>::vcvtph2udq_maskz_er(self, op0, op1);
33549    }
33550    /// `VCVTPH2UQQ`.
33551    ///
33552    /// Supported operand variants:
33553    ///
33554    /// ```text
33555    /// +---+----------+
33556    /// | # | Operands |
33557    /// +---+----------+
33558    /// | 1 | Xmm, Mem |
33559    /// | 2 | Xmm, Xmm |
33560    /// | 3 | Ymm, Mem |
33561    /// | 4 | Ymm, Xmm |
33562    /// | 5 | Zmm, Mem |
33563    /// | 6 | Zmm, Xmm |
33564    /// +---+----------+
33565    /// ```
33566    #[inline]
33567    pub fn vcvtph2uqq<A, B>(&mut self, op0: A, op1: B)
33568    where Assembler<'a>: Vcvtph2uqqEmitter<A, B> {
33569        <Self as Vcvtph2uqqEmitter<A, B>>::vcvtph2uqq(self, op0, op1);
33570    }
33571    /// `VCVTPH2UQQ_ER`.
33572    ///
33573    /// Supported operand variants:
33574    ///
33575    /// ```text
33576    /// +---+----------+
33577    /// | # | Operands |
33578    /// +---+----------+
33579    /// | 1 | Zmm, Xmm |
33580    /// +---+----------+
33581    /// ```
33582    #[inline]
33583    pub fn vcvtph2uqq_er<A, B>(&mut self, op0: A, op1: B)
33584    where Assembler<'a>: Vcvtph2uqqErEmitter<A, B> {
33585        <Self as Vcvtph2uqqErEmitter<A, B>>::vcvtph2uqq_er(self, op0, op1);
33586    }
33587    /// `VCVTPH2UQQ_MASK`.
33588    ///
33589    /// Supported operand variants:
33590    ///
33591    /// ```text
33592    /// +---+----------+
33593    /// | # | Operands |
33594    /// +---+----------+
33595    /// | 1 | Xmm, Mem |
33596    /// | 2 | Xmm, Xmm |
33597    /// | 3 | Ymm, Mem |
33598    /// | 4 | Ymm, Xmm |
33599    /// | 5 | Zmm, Mem |
33600    /// | 6 | Zmm, Xmm |
33601    /// +---+----------+
33602    /// ```
33603    #[inline]
33604    pub fn vcvtph2uqq_mask<A, B>(&mut self, op0: A, op1: B)
33605    where Assembler<'a>: Vcvtph2uqqMaskEmitter<A, B> {
33606        <Self as Vcvtph2uqqMaskEmitter<A, B>>::vcvtph2uqq_mask(self, op0, op1);
33607    }
33608    /// `VCVTPH2UQQ_MASK_ER`.
33609    ///
33610    /// Supported operand variants:
33611    ///
33612    /// ```text
33613    /// +---+----------+
33614    /// | # | Operands |
33615    /// +---+----------+
33616    /// | 1 | Zmm, Xmm |
33617    /// +---+----------+
33618    /// ```
33619    #[inline]
33620    pub fn vcvtph2uqq_mask_er<A, B>(&mut self, op0: A, op1: B)
33621    where Assembler<'a>: Vcvtph2uqqMaskErEmitter<A, B> {
33622        <Self as Vcvtph2uqqMaskErEmitter<A, B>>::vcvtph2uqq_mask_er(self, op0, op1);
33623    }
33624    /// `VCVTPH2UQQ_MASKZ`.
33625    ///
33626    /// Supported operand variants:
33627    ///
33628    /// ```text
33629    /// +---+----------+
33630    /// | # | Operands |
33631    /// +---+----------+
33632    /// | 1 | Xmm, Mem |
33633    /// | 2 | Xmm, Xmm |
33634    /// | 3 | Ymm, Mem |
33635    /// | 4 | Ymm, Xmm |
33636    /// | 5 | Zmm, Mem |
33637    /// | 6 | Zmm, Xmm |
33638    /// +---+----------+
33639    /// ```
33640    #[inline]
33641    pub fn vcvtph2uqq_maskz<A, B>(&mut self, op0: A, op1: B)
33642    where Assembler<'a>: Vcvtph2uqqMaskzEmitter<A, B> {
33643        <Self as Vcvtph2uqqMaskzEmitter<A, B>>::vcvtph2uqq_maskz(self, op0, op1);
33644    }
33645    /// `VCVTPH2UQQ_MASKZ_ER`.
33646    ///
33647    /// Supported operand variants:
33648    ///
33649    /// ```text
33650    /// +---+----------+
33651    /// | # | Operands |
33652    /// +---+----------+
33653    /// | 1 | Zmm, Xmm |
33654    /// +---+----------+
33655    /// ```
33656    #[inline]
33657    pub fn vcvtph2uqq_maskz_er<A, B>(&mut self, op0: A, op1: B)
33658    where Assembler<'a>: Vcvtph2uqqMaskzErEmitter<A, B> {
33659        <Self as Vcvtph2uqqMaskzErEmitter<A, B>>::vcvtph2uqq_maskz_er(self, op0, op1);
33660    }
33661    /// `VCVTPH2UW`.
33662    ///
33663    /// Supported operand variants:
33664    ///
33665    /// ```text
33666    /// +---+----------+
33667    /// | # | Operands |
33668    /// +---+----------+
33669    /// | 1 | Xmm, Mem |
33670    /// | 2 | Xmm, Xmm |
33671    /// | 3 | Ymm, Mem |
33672    /// | 4 | Ymm, Ymm |
33673    /// | 5 | Zmm, Mem |
33674    /// | 6 | Zmm, Zmm |
33675    /// +---+----------+
33676    /// ```
33677    #[inline]
33678    pub fn vcvtph2uw<A, B>(&mut self, op0: A, op1: B)
33679    where Assembler<'a>: Vcvtph2uwEmitter<A, B> {
33680        <Self as Vcvtph2uwEmitter<A, B>>::vcvtph2uw(self, op0, op1);
33681    }
33682    /// `VCVTPH2UW_ER`.
33683    ///
33684    /// Supported operand variants:
33685    ///
33686    /// ```text
33687    /// +---+----------+
33688    /// | # | Operands |
33689    /// +---+----------+
33690    /// | 1 | Zmm, Zmm |
33691    /// +---+----------+
33692    /// ```
33693    #[inline]
33694    pub fn vcvtph2uw_er<A, B>(&mut self, op0: A, op1: B)
33695    where Assembler<'a>: Vcvtph2uwErEmitter<A, B> {
33696        <Self as Vcvtph2uwErEmitter<A, B>>::vcvtph2uw_er(self, op0, op1);
33697    }
33698    /// `VCVTPH2UW_MASK`.
33699    ///
33700    /// Supported operand variants:
33701    ///
33702    /// ```text
33703    /// +---+----------+
33704    /// | # | Operands |
33705    /// +---+----------+
33706    /// | 1 | Xmm, Mem |
33707    /// | 2 | Xmm, Xmm |
33708    /// | 3 | Ymm, Mem |
33709    /// | 4 | Ymm, Ymm |
33710    /// | 5 | Zmm, Mem |
33711    /// | 6 | Zmm, Zmm |
33712    /// +---+----------+
33713    /// ```
33714    #[inline]
33715    pub fn vcvtph2uw_mask<A, B>(&mut self, op0: A, op1: B)
33716    where Assembler<'a>: Vcvtph2uwMaskEmitter<A, B> {
33717        <Self as Vcvtph2uwMaskEmitter<A, B>>::vcvtph2uw_mask(self, op0, op1);
33718    }
33719    /// `VCVTPH2UW_MASK_ER`.
33720    ///
33721    /// Supported operand variants:
33722    ///
33723    /// ```text
33724    /// +---+----------+
33725    /// | # | Operands |
33726    /// +---+----------+
33727    /// | 1 | Zmm, Zmm |
33728    /// +---+----------+
33729    /// ```
33730    #[inline]
33731    pub fn vcvtph2uw_mask_er<A, B>(&mut self, op0: A, op1: B)
33732    where Assembler<'a>: Vcvtph2uwMaskErEmitter<A, B> {
33733        <Self as Vcvtph2uwMaskErEmitter<A, B>>::vcvtph2uw_mask_er(self, op0, op1);
33734    }
33735    /// `VCVTPH2UW_MASKZ`.
33736    ///
33737    /// Supported operand variants:
33738    ///
33739    /// ```text
33740    /// +---+----------+
33741    /// | # | Operands |
33742    /// +---+----------+
33743    /// | 1 | Xmm, Mem |
33744    /// | 2 | Xmm, Xmm |
33745    /// | 3 | Ymm, Mem |
33746    /// | 4 | Ymm, Ymm |
33747    /// | 5 | Zmm, Mem |
33748    /// | 6 | Zmm, Zmm |
33749    /// +---+----------+
33750    /// ```
33751    #[inline]
33752    pub fn vcvtph2uw_maskz<A, B>(&mut self, op0: A, op1: B)
33753    where Assembler<'a>: Vcvtph2uwMaskzEmitter<A, B> {
33754        <Self as Vcvtph2uwMaskzEmitter<A, B>>::vcvtph2uw_maskz(self, op0, op1);
33755    }
33756    /// `VCVTPH2UW_MASKZ_ER`.
33757    ///
33758    /// Supported operand variants:
33759    ///
33760    /// ```text
33761    /// +---+----------+
33762    /// | # | Operands |
33763    /// +---+----------+
33764    /// | 1 | Zmm, Zmm |
33765    /// +---+----------+
33766    /// ```
33767    #[inline]
33768    pub fn vcvtph2uw_maskz_er<A, B>(&mut self, op0: A, op1: B)
33769    where Assembler<'a>: Vcvtph2uwMaskzErEmitter<A, B> {
33770        <Self as Vcvtph2uwMaskzErEmitter<A, B>>::vcvtph2uw_maskz_er(self, op0, op1);
33771    }
33772    /// `VCVTPH2W`.
33773    ///
33774    /// Supported operand variants:
33775    ///
33776    /// ```text
33777    /// +---+----------+
33778    /// | # | Operands |
33779    /// +---+----------+
33780    /// | 1 | Xmm, Mem |
33781    /// | 2 | Xmm, Xmm |
33782    /// | 3 | Ymm, Mem |
33783    /// | 4 | Ymm, Ymm |
33784    /// | 5 | Zmm, Mem |
33785    /// | 6 | Zmm, Zmm |
33786    /// +---+----------+
33787    /// ```
33788    #[inline]
33789    pub fn vcvtph2w<A, B>(&mut self, op0: A, op1: B)
33790    where Assembler<'a>: Vcvtph2wEmitter<A, B> {
33791        <Self as Vcvtph2wEmitter<A, B>>::vcvtph2w(self, op0, op1);
33792    }
33793    /// `VCVTPH2W_ER`.
33794    ///
33795    /// Supported operand variants:
33796    ///
33797    /// ```text
33798    /// +---+----------+
33799    /// | # | Operands |
33800    /// +---+----------+
33801    /// | 1 | Zmm, Zmm |
33802    /// +---+----------+
33803    /// ```
33804    #[inline]
33805    pub fn vcvtph2w_er<A, B>(&mut self, op0: A, op1: B)
33806    where Assembler<'a>: Vcvtph2wErEmitter<A, B> {
33807        <Self as Vcvtph2wErEmitter<A, B>>::vcvtph2w_er(self, op0, op1);
33808    }
33809    /// `VCVTPH2W_MASK`.
33810    ///
33811    /// Supported operand variants:
33812    ///
33813    /// ```text
33814    /// +---+----------+
33815    /// | # | Operands |
33816    /// +---+----------+
33817    /// | 1 | Xmm, Mem |
33818    /// | 2 | Xmm, Xmm |
33819    /// | 3 | Ymm, Mem |
33820    /// | 4 | Ymm, Ymm |
33821    /// | 5 | Zmm, Mem |
33822    /// | 6 | Zmm, Zmm |
33823    /// +---+----------+
33824    /// ```
33825    #[inline]
33826    pub fn vcvtph2w_mask<A, B>(&mut self, op0: A, op1: B)
33827    where Assembler<'a>: Vcvtph2wMaskEmitter<A, B> {
33828        <Self as Vcvtph2wMaskEmitter<A, B>>::vcvtph2w_mask(self, op0, op1);
33829    }
33830    /// `VCVTPH2W_MASK_ER`.
33831    ///
33832    /// Supported operand variants:
33833    ///
33834    /// ```text
33835    /// +---+----------+
33836    /// | # | Operands |
33837    /// +---+----------+
33838    /// | 1 | Zmm, Zmm |
33839    /// +---+----------+
33840    /// ```
33841    #[inline]
33842    pub fn vcvtph2w_mask_er<A, B>(&mut self, op0: A, op1: B)
33843    where Assembler<'a>: Vcvtph2wMaskErEmitter<A, B> {
33844        <Self as Vcvtph2wMaskErEmitter<A, B>>::vcvtph2w_mask_er(self, op0, op1);
33845    }
33846    /// `VCVTPH2W_MASKZ`.
33847    ///
33848    /// Supported operand variants:
33849    ///
33850    /// ```text
33851    /// +---+----------+
33852    /// | # | Operands |
33853    /// +---+----------+
33854    /// | 1 | Xmm, Mem |
33855    /// | 2 | Xmm, Xmm |
33856    /// | 3 | Ymm, Mem |
33857    /// | 4 | Ymm, Ymm |
33858    /// | 5 | Zmm, Mem |
33859    /// | 6 | Zmm, Zmm |
33860    /// +---+----------+
33861    /// ```
33862    #[inline]
33863    pub fn vcvtph2w_maskz<A, B>(&mut self, op0: A, op1: B)
33864    where Assembler<'a>: Vcvtph2wMaskzEmitter<A, B> {
33865        <Self as Vcvtph2wMaskzEmitter<A, B>>::vcvtph2w_maskz(self, op0, op1);
33866    }
33867    /// `VCVTPH2W_MASKZ_ER`.
33868    ///
33869    /// Supported operand variants:
33870    ///
33871    /// ```text
33872    /// +---+----------+
33873    /// | # | Operands |
33874    /// +---+----------+
33875    /// | 1 | Zmm, Zmm |
33876    /// +---+----------+
33877    /// ```
33878    #[inline]
33879    pub fn vcvtph2w_maskz_er<A, B>(&mut self, op0: A, op1: B)
33880    where Assembler<'a>: Vcvtph2wMaskzErEmitter<A, B> {
33881        <Self as Vcvtph2wMaskzErEmitter<A, B>>::vcvtph2w_maskz_er(self, op0, op1);
33882    }
33883    /// `VCVTPS2PHX` (VCVTPS2PHX). 
33884    /// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
33885    ///
33886    ///
33887    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
33888    ///
33889    /// Supported operand variants:
33890    ///
33891    /// ```text
33892    /// +---+----------+
33893    /// | # | Operands |
33894    /// +---+----------+
33895    /// | 1 | Xmm, Mem |
33896    /// | 2 | Xmm, Xmm |
33897    /// | 3 | Xmm, Ymm |
33898    /// | 4 | Ymm, Mem |
33899    /// | 5 | Ymm, Zmm |
33900    /// +---+----------+
33901    /// ```
33902    #[inline]
33903    pub fn vcvtps2phx<A, B>(&mut self, op0: A, op1: B)
33904    where Assembler<'a>: Vcvtps2phxEmitter<A, B> {
33905        <Self as Vcvtps2phxEmitter<A, B>>::vcvtps2phx(self, op0, op1);
33906    }
33907    /// `VCVTPS2PHX_ER` (VCVTPS2PHX). 
33908    /// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
33909    ///
33910    ///
33911    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
33912    ///
33913    /// Supported operand variants:
33914    ///
33915    /// ```text
33916    /// +---+----------+
33917    /// | # | Operands |
33918    /// +---+----------+
33919    /// | 1 | Ymm, Zmm |
33920    /// +---+----------+
33921    /// ```
33922    #[inline]
33923    pub fn vcvtps2phx_er<A, B>(&mut self, op0: A, op1: B)
33924    where Assembler<'a>: Vcvtps2phxErEmitter<A, B> {
33925        <Self as Vcvtps2phxErEmitter<A, B>>::vcvtps2phx_er(self, op0, op1);
33926    }
33927    /// `VCVTPS2PHX_MASK` (VCVTPS2PHX). 
33928    /// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
33929    ///
33930    ///
33931    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
33932    ///
33933    /// Supported operand variants:
33934    ///
33935    /// ```text
33936    /// +---+----------+
33937    /// | # | Operands |
33938    /// +---+----------+
33939    /// | 1 | Xmm, Mem |
33940    /// | 2 | Xmm, Xmm |
33941    /// | 3 | Xmm, Ymm |
33942    /// | 4 | Ymm, Mem |
33943    /// | 5 | Ymm, Zmm |
33944    /// +---+----------+
33945    /// ```
33946    #[inline]
33947    pub fn vcvtps2phx_mask<A, B>(&mut self, op0: A, op1: B)
33948    where Assembler<'a>: Vcvtps2phxMaskEmitter<A, B> {
33949        <Self as Vcvtps2phxMaskEmitter<A, B>>::vcvtps2phx_mask(self, op0, op1);
33950    }
33951    /// `VCVTPS2PHX_MASK_ER` (VCVTPS2PHX). 
33952    /// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
33953    ///
33954    ///
33955    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
33956    ///
33957    /// Supported operand variants:
33958    ///
33959    /// ```text
33960    /// +---+----------+
33961    /// | # | Operands |
33962    /// +---+----------+
33963    /// | 1 | Ymm, Zmm |
33964    /// +---+----------+
33965    /// ```
33966    #[inline]
33967    pub fn vcvtps2phx_mask_er<A, B>(&mut self, op0: A, op1: B)
33968    where Assembler<'a>: Vcvtps2phxMaskErEmitter<A, B> {
33969        <Self as Vcvtps2phxMaskErEmitter<A, B>>::vcvtps2phx_mask_er(self, op0, op1);
33970    }
33971    /// `VCVTPS2PHX_MASKZ` (VCVTPS2PHX). 
33972    /// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
33973    ///
33974    ///
33975    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
33976    ///
33977    /// Supported operand variants:
33978    ///
33979    /// ```text
33980    /// +---+----------+
33981    /// | # | Operands |
33982    /// +---+----------+
33983    /// | 1 | Xmm, Mem |
33984    /// | 2 | Xmm, Xmm |
33985    /// | 3 | Xmm, Ymm |
33986    /// | 4 | Ymm, Mem |
33987    /// | 5 | Ymm, Zmm |
33988    /// +---+----------+
33989    /// ```
33990    #[inline]
33991    pub fn vcvtps2phx_maskz<A, B>(&mut self, op0: A, op1: B)
33992    where Assembler<'a>: Vcvtps2phxMaskzEmitter<A, B> {
33993        <Self as Vcvtps2phxMaskzEmitter<A, B>>::vcvtps2phx_maskz(self, op0, op1);
33994    }
33995    /// `VCVTPS2PHX_MASKZ_ER` (VCVTPS2PHX). 
33996    /// This instruction converts packed single precision floating values in the source operand to FP16 values and stores to the destination operand.
33997    ///
33998    ///
33999    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VCVTPS2PHX.html).
34000    ///
34001    /// Supported operand variants:
34002    ///
34003    /// ```text
34004    /// +---+----------+
34005    /// | # | Operands |
34006    /// +---+----------+
34007    /// | 1 | Ymm, Zmm |
34008    /// +---+----------+
34009    /// ```
34010    #[inline]
34011    pub fn vcvtps2phx_maskz_er<A, B>(&mut self, op0: A, op1: B)
34012    where Assembler<'a>: Vcvtps2phxMaskzErEmitter<A, B> {
34013        <Self as Vcvtps2phxMaskzErEmitter<A, B>>::vcvtps2phx_maskz_er(self, op0, op1);
34014    }
34015    /// `VCVTQQ2PH`.
34016    ///
34017    /// Supported operand variants:
34018    ///
34019    /// ```text
34020    /// +---+----------+
34021    /// | # | Operands |
34022    /// +---+----------+
34023    /// | 1 | Xmm, Mem |
34024    /// | 2 | Xmm, Xmm |
34025    /// | 3 | Xmm, Ymm |
34026    /// | 4 | Xmm, Zmm |
34027    /// +---+----------+
34028    /// ```
34029    #[inline]
34030    pub fn vcvtqq2ph<A, B>(&mut self, op0: A, op1: B)
34031    where Assembler<'a>: Vcvtqq2phEmitter<A, B> {
34032        <Self as Vcvtqq2phEmitter<A, B>>::vcvtqq2ph(self, op0, op1);
34033    }
34034    /// `VCVTQQ2PH_ER`.
34035    ///
34036    /// Supported operand variants:
34037    ///
34038    /// ```text
34039    /// +---+----------+
34040    /// | # | Operands |
34041    /// +---+----------+
34042    /// | 1 | Xmm, Zmm |
34043    /// +---+----------+
34044    /// ```
34045    #[inline]
34046    pub fn vcvtqq2ph_er<A, B>(&mut self, op0: A, op1: B)
34047    where Assembler<'a>: Vcvtqq2phErEmitter<A, B> {
34048        <Self as Vcvtqq2phErEmitter<A, B>>::vcvtqq2ph_er(self, op0, op1);
34049    }
34050    /// `VCVTQQ2PH_MASK`.
34051    ///
34052    /// Supported operand variants:
34053    ///
34054    /// ```text
34055    /// +---+----------+
34056    /// | # | Operands |
34057    /// +---+----------+
34058    /// | 1 | Xmm, Mem |
34059    /// | 2 | Xmm, Xmm |
34060    /// | 3 | Xmm, Ymm |
34061    /// | 4 | Xmm, Zmm |
34062    /// +---+----------+
34063    /// ```
34064    #[inline]
34065    pub fn vcvtqq2ph_mask<A, B>(&mut self, op0: A, op1: B)
34066    where Assembler<'a>: Vcvtqq2phMaskEmitter<A, B> {
34067        <Self as Vcvtqq2phMaskEmitter<A, B>>::vcvtqq2ph_mask(self, op0, op1);
34068    }
34069    /// `VCVTQQ2PH_MASK_ER`.
34070    ///
34071    /// Supported operand variants:
34072    ///
34073    /// ```text
34074    /// +---+----------+
34075    /// | # | Operands |
34076    /// +---+----------+
34077    /// | 1 | Xmm, Zmm |
34078    /// +---+----------+
34079    /// ```
34080    #[inline]
34081    pub fn vcvtqq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
34082    where Assembler<'a>: Vcvtqq2phMaskErEmitter<A, B> {
34083        <Self as Vcvtqq2phMaskErEmitter<A, B>>::vcvtqq2ph_mask_er(self, op0, op1);
34084    }
34085    /// `VCVTQQ2PH_MASKZ`.
34086    ///
34087    /// Supported operand variants:
34088    ///
34089    /// ```text
34090    /// +---+----------+
34091    /// | # | Operands |
34092    /// +---+----------+
34093    /// | 1 | Xmm, Mem |
34094    /// | 2 | Xmm, Xmm |
34095    /// | 3 | Xmm, Ymm |
34096    /// | 4 | Xmm, Zmm |
34097    /// +---+----------+
34098    /// ```
34099    #[inline]
34100    pub fn vcvtqq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
34101    where Assembler<'a>: Vcvtqq2phMaskzEmitter<A, B> {
34102        <Self as Vcvtqq2phMaskzEmitter<A, B>>::vcvtqq2ph_maskz(self, op0, op1);
34103    }
34104    /// `VCVTQQ2PH_MASKZ_ER`.
34105    ///
34106    /// Supported operand variants:
34107    ///
34108    /// ```text
34109    /// +---+----------+
34110    /// | # | Operands |
34111    /// +---+----------+
34112    /// | 1 | Xmm, Zmm |
34113    /// +---+----------+
34114    /// ```
34115    #[inline]
34116    pub fn vcvtqq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
34117    where Assembler<'a>: Vcvtqq2phMaskzErEmitter<A, B> {
34118        <Self as Vcvtqq2phMaskzErEmitter<A, B>>::vcvtqq2ph_maskz_er(self, op0, op1);
34119    }
34120    /// `VCVTSD2SH`.
34121    ///
34122    /// Supported operand variants:
34123    ///
34124    /// ```text
34125    /// +---+---------------+
34126    /// | # | Operands      |
34127    /// +---+---------------+
34128    /// | 1 | Xmm, Xmm, Mem |
34129    /// | 2 | Xmm, Xmm, Xmm |
34130    /// +---+---------------+
34131    /// ```
34132    #[inline]
34133    pub fn vcvtsd2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34134    where Assembler<'a>: Vcvtsd2shEmitter<A, B, C> {
34135        <Self as Vcvtsd2shEmitter<A, B, C>>::vcvtsd2sh(self, op0, op1, op2);
34136    }
34137    /// `VCVTSD2SH_ER`.
34138    ///
34139    /// Supported operand variants:
34140    ///
34141    /// ```text
34142    /// +---+---------------+
34143    /// | # | Operands      |
34144    /// +---+---------------+
34145    /// | 1 | Xmm, Xmm, Xmm |
34146    /// +---+---------------+
34147    /// ```
34148    #[inline]
34149    pub fn vcvtsd2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34150    where Assembler<'a>: Vcvtsd2shErEmitter<A, B, C> {
34151        <Self as Vcvtsd2shErEmitter<A, B, C>>::vcvtsd2sh_er(self, op0, op1, op2);
34152    }
34153    /// `VCVTSD2SH_MASK`.
34154    ///
34155    /// Supported operand variants:
34156    ///
34157    /// ```text
34158    /// +---+---------------+
34159    /// | # | Operands      |
34160    /// +---+---------------+
34161    /// | 1 | Xmm, Xmm, Mem |
34162    /// | 2 | Xmm, Xmm, Xmm |
34163    /// +---+---------------+
34164    /// ```
34165    #[inline]
34166    pub fn vcvtsd2sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34167    where Assembler<'a>: Vcvtsd2shMaskEmitter<A, B, C> {
34168        <Self as Vcvtsd2shMaskEmitter<A, B, C>>::vcvtsd2sh_mask(self, op0, op1, op2);
34169    }
34170    /// `VCVTSD2SH_MASK_ER`.
34171    ///
34172    /// Supported operand variants:
34173    ///
34174    /// ```text
34175    /// +---+---------------+
34176    /// | # | Operands      |
34177    /// +---+---------------+
34178    /// | 1 | Xmm, Xmm, Xmm |
34179    /// +---+---------------+
34180    /// ```
34181    #[inline]
34182    pub fn vcvtsd2sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34183    where Assembler<'a>: Vcvtsd2shMaskErEmitter<A, B, C> {
34184        <Self as Vcvtsd2shMaskErEmitter<A, B, C>>::vcvtsd2sh_mask_er(self, op0, op1, op2);
34185    }
34186    /// `VCVTSD2SH_MASKZ`.
34187    ///
34188    /// Supported operand variants:
34189    ///
34190    /// ```text
34191    /// +---+---------------+
34192    /// | # | Operands      |
34193    /// +---+---------------+
34194    /// | 1 | Xmm, Xmm, Mem |
34195    /// | 2 | Xmm, Xmm, Xmm |
34196    /// +---+---------------+
34197    /// ```
34198    #[inline]
34199    pub fn vcvtsd2sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34200    where Assembler<'a>: Vcvtsd2shMaskzEmitter<A, B, C> {
34201        <Self as Vcvtsd2shMaskzEmitter<A, B, C>>::vcvtsd2sh_maskz(self, op0, op1, op2);
34202    }
34203    /// `VCVTSD2SH_MASKZ_ER`.
34204    ///
34205    /// Supported operand variants:
34206    ///
34207    /// ```text
34208    /// +---+---------------+
34209    /// | # | Operands      |
34210    /// +---+---------------+
34211    /// | 1 | Xmm, Xmm, Xmm |
34212    /// +---+---------------+
34213    /// ```
34214    #[inline]
34215    pub fn vcvtsd2sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34216    where Assembler<'a>: Vcvtsd2shMaskzErEmitter<A, B, C> {
34217        <Self as Vcvtsd2shMaskzErEmitter<A, B, C>>::vcvtsd2sh_maskz_er(self, op0, op1, op2);
34218    }
34219    /// `VCVTSH2SD`.
34220    ///
34221    /// Supported operand variants:
34222    ///
34223    /// ```text
34224    /// +---+---------------+
34225    /// | # | Operands      |
34226    /// +---+---------------+
34227    /// | 1 | Xmm, Xmm, Mem |
34228    /// | 2 | Xmm, Xmm, Xmm |
34229    /// +---+---------------+
34230    /// ```
34231    #[inline]
34232    pub fn vcvtsh2sd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34233    where Assembler<'a>: Vcvtsh2sdEmitter<A, B, C> {
34234        <Self as Vcvtsh2sdEmitter<A, B, C>>::vcvtsh2sd(self, op0, op1, op2);
34235    }
34236    /// `VCVTSH2SD_MASK`.
34237    ///
34238    /// Supported operand variants:
34239    ///
34240    /// ```text
34241    /// +---+---------------+
34242    /// | # | Operands      |
34243    /// +---+---------------+
34244    /// | 1 | Xmm, Xmm, Mem |
34245    /// | 2 | Xmm, Xmm, Xmm |
34246    /// +---+---------------+
34247    /// ```
34248    #[inline]
34249    pub fn vcvtsh2sd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34250    where Assembler<'a>: Vcvtsh2sdMaskEmitter<A, B, C> {
34251        <Self as Vcvtsh2sdMaskEmitter<A, B, C>>::vcvtsh2sd_mask(self, op0, op1, op2);
34252    }
34253    /// `VCVTSH2SD_MASK_SAE`.
34254    ///
34255    /// Supported operand variants:
34256    ///
34257    /// ```text
34258    /// +---+---------------+
34259    /// | # | Operands      |
34260    /// +---+---------------+
34261    /// | 1 | Xmm, Xmm, Xmm |
34262    /// +---+---------------+
34263    /// ```
34264    #[inline]
34265    pub fn vcvtsh2sd_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34266    where Assembler<'a>: Vcvtsh2sdMaskSaeEmitter<A, B, C> {
34267        <Self as Vcvtsh2sdMaskSaeEmitter<A, B, C>>::vcvtsh2sd_mask_sae(self, op0, op1, op2);
34268    }
34269    /// `VCVTSH2SD_MASKZ`.
34270    ///
34271    /// Supported operand variants:
34272    ///
34273    /// ```text
34274    /// +---+---------------+
34275    /// | # | Operands      |
34276    /// +---+---------------+
34277    /// | 1 | Xmm, Xmm, Mem |
34278    /// | 2 | Xmm, Xmm, Xmm |
34279    /// +---+---------------+
34280    /// ```
34281    #[inline]
34282    pub fn vcvtsh2sd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34283    where Assembler<'a>: Vcvtsh2sdMaskzEmitter<A, B, C> {
34284        <Self as Vcvtsh2sdMaskzEmitter<A, B, C>>::vcvtsh2sd_maskz(self, op0, op1, op2);
34285    }
34286    /// `VCVTSH2SD_MASKZ_SAE`.
34287    ///
34288    /// Supported operand variants:
34289    ///
34290    /// ```text
34291    /// +---+---------------+
34292    /// | # | Operands      |
34293    /// +---+---------------+
34294    /// | 1 | Xmm, Xmm, Xmm |
34295    /// +---+---------------+
34296    /// ```
34297    #[inline]
34298    pub fn vcvtsh2sd_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34299    where Assembler<'a>: Vcvtsh2sdMaskzSaeEmitter<A, B, C> {
34300        <Self as Vcvtsh2sdMaskzSaeEmitter<A, B, C>>::vcvtsh2sd_maskz_sae(self, op0, op1, op2);
34301    }
34302    /// `VCVTSH2SD_SAE`.
34303    ///
34304    /// Supported operand variants:
34305    ///
34306    /// ```text
34307    /// +---+---------------+
34308    /// | # | Operands      |
34309    /// +---+---------------+
34310    /// | 1 | Xmm, Xmm, Xmm |
34311    /// +---+---------------+
34312    /// ```
34313    #[inline]
34314    pub fn vcvtsh2sd_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34315    where Assembler<'a>: Vcvtsh2sdSaeEmitter<A, B, C> {
34316        <Self as Vcvtsh2sdSaeEmitter<A, B, C>>::vcvtsh2sd_sae(self, op0, op1, op2);
34317    }
34318    /// `VCVTSH2SI`.
34319    ///
34320    /// Supported operand variants:
34321    ///
34322    /// ```text
34323    /// +---+----------+
34324    /// | # | Operands |
34325    /// +---+----------+
34326    /// | 1 | Gpd, Mem |
34327    /// | 2 | Gpd, Xmm |
34328    /// | 3 | Gpq, Mem |
34329    /// | 4 | Gpq, Xmm |
34330    /// +---+----------+
34331    /// ```
34332    #[inline]
34333    pub fn vcvtsh2si<A, B>(&mut self, op0: A, op1: B)
34334    where Assembler<'a>: Vcvtsh2siEmitter<A, B> {
34335        <Self as Vcvtsh2siEmitter<A, B>>::vcvtsh2si(self, op0, op1);
34336    }
34337    /// `VCVTSH2SI_ER`.
34338    ///
34339    /// Supported operand variants:
34340    ///
34341    /// ```text
34342    /// +---+----------+
34343    /// | # | Operands |
34344    /// +---+----------+
34345    /// | 1 | Gpd, Xmm |
34346    /// | 2 | Gpq, Xmm |
34347    /// +---+----------+
34348    /// ```
34349    #[inline]
34350    pub fn vcvtsh2si_er<A, B>(&mut self, op0: A, op1: B)
34351    where Assembler<'a>: Vcvtsh2siErEmitter<A, B> {
34352        <Self as Vcvtsh2siErEmitter<A, B>>::vcvtsh2si_er(self, op0, op1);
34353    }
34354    /// `VCVTSH2SS`.
34355    ///
34356    /// Supported operand variants:
34357    ///
34358    /// ```text
34359    /// +---+---------------+
34360    /// | # | Operands      |
34361    /// +---+---------------+
34362    /// | 1 | Xmm, Xmm, Mem |
34363    /// | 2 | Xmm, Xmm, Xmm |
34364    /// +---+---------------+
34365    /// ```
34366    #[inline]
34367    pub fn vcvtsh2ss<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34368    where Assembler<'a>: Vcvtsh2ssEmitter<A, B, C> {
34369        <Self as Vcvtsh2ssEmitter<A, B, C>>::vcvtsh2ss(self, op0, op1, op2);
34370    }
34371    /// `VCVTSH2SS_MASK`.
34372    ///
34373    /// Supported operand variants:
34374    ///
34375    /// ```text
34376    /// +---+---------------+
34377    /// | # | Operands      |
34378    /// +---+---------------+
34379    /// | 1 | Xmm, Xmm, Mem |
34380    /// | 2 | Xmm, Xmm, Xmm |
34381    /// +---+---------------+
34382    /// ```
34383    #[inline]
34384    pub fn vcvtsh2ss_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34385    where Assembler<'a>: Vcvtsh2ssMaskEmitter<A, B, C> {
34386        <Self as Vcvtsh2ssMaskEmitter<A, B, C>>::vcvtsh2ss_mask(self, op0, op1, op2);
34387    }
34388    /// `VCVTSH2SS_MASK_SAE`.
34389    ///
34390    /// Supported operand variants:
34391    ///
34392    /// ```text
34393    /// +---+---------------+
34394    /// | # | Operands      |
34395    /// +---+---------------+
34396    /// | 1 | Xmm, Xmm, Xmm |
34397    /// +---+---------------+
34398    /// ```
34399    #[inline]
34400    pub fn vcvtsh2ss_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34401    where Assembler<'a>: Vcvtsh2ssMaskSaeEmitter<A, B, C> {
34402        <Self as Vcvtsh2ssMaskSaeEmitter<A, B, C>>::vcvtsh2ss_mask_sae(self, op0, op1, op2);
34403    }
34404    /// `VCVTSH2SS_MASKZ`.
34405    ///
34406    /// Supported operand variants:
34407    ///
34408    /// ```text
34409    /// +---+---------------+
34410    /// | # | Operands      |
34411    /// +---+---------------+
34412    /// | 1 | Xmm, Xmm, Mem |
34413    /// | 2 | Xmm, Xmm, Xmm |
34414    /// +---+---------------+
34415    /// ```
34416    #[inline]
34417    pub fn vcvtsh2ss_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34418    where Assembler<'a>: Vcvtsh2ssMaskzEmitter<A, B, C> {
34419        <Self as Vcvtsh2ssMaskzEmitter<A, B, C>>::vcvtsh2ss_maskz(self, op0, op1, op2);
34420    }
34421    /// `VCVTSH2SS_MASKZ_SAE`.
34422    ///
34423    /// Supported operand variants:
34424    ///
34425    /// ```text
34426    /// +---+---------------+
34427    /// | # | Operands      |
34428    /// +---+---------------+
34429    /// | 1 | Xmm, Xmm, Xmm |
34430    /// +---+---------------+
34431    /// ```
34432    #[inline]
34433    pub fn vcvtsh2ss_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34434    where Assembler<'a>: Vcvtsh2ssMaskzSaeEmitter<A, B, C> {
34435        <Self as Vcvtsh2ssMaskzSaeEmitter<A, B, C>>::vcvtsh2ss_maskz_sae(self, op0, op1, op2);
34436    }
34437    /// `VCVTSH2SS_SAE`.
34438    ///
34439    /// Supported operand variants:
34440    ///
34441    /// ```text
34442    /// +---+---------------+
34443    /// | # | Operands      |
34444    /// +---+---------------+
34445    /// | 1 | Xmm, Xmm, Xmm |
34446    /// +---+---------------+
34447    /// ```
34448    #[inline]
34449    pub fn vcvtsh2ss_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34450    where Assembler<'a>: Vcvtsh2ssSaeEmitter<A, B, C> {
34451        <Self as Vcvtsh2ssSaeEmitter<A, B, C>>::vcvtsh2ss_sae(self, op0, op1, op2);
34452    }
34453    /// `VCVTSH2USI`.
34454    ///
34455    /// Supported operand variants:
34456    ///
34457    /// ```text
34458    /// +---+----------+
34459    /// | # | Operands |
34460    /// +---+----------+
34461    /// | 1 | Gpd, Mem |
34462    /// | 2 | Gpd, Xmm |
34463    /// | 3 | Gpq, Mem |
34464    /// | 4 | Gpq, Xmm |
34465    /// +---+----------+
34466    /// ```
34467    #[inline]
34468    pub fn vcvtsh2usi<A, B>(&mut self, op0: A, op1: B)
34469    where Assembler<'a>: Vcvtsh2usiEmitter<A, B> {
34470        <Self as Vcvtsh2usiEmitter<A, B>>::vcvtsh2usi(self, op0, op1);
34471    }
34472    /// `VCVTSH2USI_ER`.
34473    ///
34474    /// Supported operand variants:
34475    ///
34476    /// ```text
34477    /// +---+----------+
34478    /// | # | Operands |
34479    /// +---+----------+
34480    /// | 1 | Gpd, Xmm |
34481    /// | 2 | Gpq, Xmm |
34482    /// +---+----------+
34483    /// ```
34484    #[inline]
34485    pub fn vcvtsh2usi_er<A, B>(&mut self, op0: A, op1: B)
34486    where Assembler<'a>: Vcvtsh2usiErEmitter<A, B> {
34487        <Self as Vcvtsh2usiErEmitter<A, B>>::vcvtsh2usi_er(self, op0, op1);
34488    }
34489    /// `VCVTSI2SH`.
34490    ///
34491    /// Supported operand variants:
34492    ///
34493    /// ```text
34494    /// +---+---------------+
34495    /// | # | Operands      |
34496    /// +---+---------------+
34497    /// | 1 | Xmm, Xmm, Gpd |
34498    /// | 2 | Xmm, Xmm, Gpq |
34499    /// | 3 | Xmm, Xmm, Mem |
34500    /// +---+---------------+
34501    /// ```
34502    #[inline]
34503    pub fn vcvtsi2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34504    where Assembler<'a>: Vcvtsi2shEmitter<A, B, C> {
34505        <Self as Vcvtsi2shEmitter<A, B, C>>::vcvtsi2sh(self, op0, op1, op2);
34506    }
34507    /// `VCVTSI2SH_ER`.
34508    ///
34509    /// Supported operand variants:
34510    ///
34511    /// ```text
34512    /// +---+---------------+
34513    /// | # | Operands      |
34514    /// +---+---------------+
34515    /// | 1 | Xmm, Xmm, Gpd |
34516    /// | 2 | Xmm, Xmm, Gpq |
34517    /// +---+---------------+
34518    /// ```
34519    #[inline]
34520    pub fn vcvtsi2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34521    where Assembler<'a>: Vcvtsi2shErEmitter<A, B, C> {
34522        <Self as Vcvtsi2shErEmitter<A, B, C>>::vcvtsi2sh_er(self, op0, op1, op2);
34523    }
34524    /// `VCVTSS2SH`.
34525    ///
34526    /// Supported operand variants:
34527    ///
34528    /// ```text
34529    /// +---+---------------+
34530    /// | # | Operands      |
34531    /// +---+---------------+
34532    /// | 1 | Xmm, Xmm, Mem |
34533    /// | 2 | Xmm, Xmm, Xmm |
34534    /// +---+---------------+
34535    /// ```
34536    #[inline]
34537    pub fn vcvtss2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34538    where Assembler<'a>: Vcvtss2shEmitter<A, B, C> {
34539        <Self as Vcvtss2shEmitter<A, B, C>>::vcvtss2sh(self, op0, op1, op2);
34540    }
34541    /// `VCVTSS2SH_ER`.
34542    ///
34543    /// Supported operand variants:
34544    ///
34545    /// ```text
34546    /// +---+---------------+
34547    /// | # | Operands      |
34548    /// +---+---------------+
34549    /// | 1 | Xmm, Xmm, Xmm |
34550    /// +---+---------------+
34551    /// ```
34552    #[inline]
34553    pub fn vcvtss2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34554    where Assembler<'a>: Vcvtss2shErEmitter<A, B, C> {
34555        <Self as Vcvtss2shErEmitter<A, B, C>>::vcvtss2sh_er(self, op0, op1, op2);
34556    }
34557    /// `VCVTSS2SH_MASK`.
34558    ///
34559    /// Supported operand variants:
34560    ///
34561    /// ```text
34562    /// +---+---------------+
34563    /// | # | Operands      |
34564    /// +---+---------------+
34565    /// | 1 | Xmm, Xmm, Mem |
34566    /// | 2 | Xmm, Xmm, Xmm |
34567    /// +---+---------------+
34568    /// ```
34569    #[inline]
34570    pub fn vcvtss2sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34571    where Assembler<'a>: Vcvtss2shMaskEmitter<A, B, C> {
34572        <Self as Vcvtss2shMaskEmitter<A, B, C>>::vcvtss2sh_mask(self, op0, op1, op2);
34573    }
34574    /// `VCVTSS2SH_MASK_ER`.
34575    ///
34576    /// Supported operand variants:
34577    ///
34578    /// ```text
34579    /// +---+---------------+
34580    /// | # | Operands      |
34581    /// +---+---------------+
34582    /// | 1 | Xmm, Xmm, Xmm |
34583    /// +---+---------------+
34584    /// ```
34585    #[inline]
34586    pub fn vcvtss2sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34587    where Assembler<'a>: Vcvtss2shMaskErEmitter<A, B, C> {
34588        <Self as Vcvtss2shMaskErEmitter<A, B, C>>::vcvtss2sh_mask_er(self, op0, op1, op2);
34589    }
34590    /// `VCVTSS2SH_MASKZ`.
34591    ///
34592    /// Supported operand variants:
34593    ///
34594    /// ```text
34595    /// +---+---------------+
34596    /// | # | Operands      |
34597    /// +---+---------------+
34598    /// | 1 | Xmm, Xmm, Mem |
34599    /// | 2 | Xmm, Xmm, Xmm |
34600    /// +---+---------------+
34601    /// ```
34602    #[inline]
34603    pub fn vcvtss2sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34604    where Assembler<'a>: Vcvtss2shMaskzEmitter<A, B, C> {
34605        <Self as Vcvtss2shMaskzEmitter<A, B, C>>::vcvtss2sh_maskz(self, op0, op1, op2);
34606    }
34607    /// `VCVTSS2SH_MASKZ_ER`.
34608    ///
34609    /// Supported operand variants:
34610    ///
34611    /// ```text
34612    /// +---+---------------+
34613    /// | # | Operands      |
34614    /// +---+---------------+
34615    /// | 1 | Xmm, Xmm, Xmm |
34616    /// +---+---------------+
34617    /// ```
34618    #[inline]
34619    pub fn vcvtss2sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
34620    where Assembler<'a>: Vcvtss2shMaskzErEmitter<A, B, C> {
34621        <Self as Vcvtss2shMaskzErEmitter<A, B, C>>::vcvtss2sh_maskz_er(self, op0, op1, op2);
34622    }
34623    /// `VCVTTPH2DQ`.
34624    ///
34625    /// Supported operand variants:
34626    ///
34627    /// ```text
34628    /// +---+----------+
34629    /// | # | Operands |
34630    /// +---+----------+
34631    /// | 1 | Xmm, Mem |
34632    /// | 2 | Xmm, Xmm |
34633    /// | 3 | Ymm, Mem |
34634    /// | 4 | Ymm, Xmm |
34635    /// | 5 | Zmm, Mem |
34636    /// | 6 | Zmm, Ymm |
34637    /// +---+----------+
34638    /// ```
34639    #[inline]
34640    pub fn vcvttph2dq<A, B>(&mut self, op0: A, op1: B)
34641    where Assembler<'a>: Vcvttph2dqEmitter<A, B> {
34642        <Self as Vcvttph2dqEmitter<A, B>>::vcvttph2dq(self, op0, op1);
34643    }
34644    /// `VCVTTPH2DQ_MASK`.
34645    ///
34646    /// Supported operand variants:
34647    ///
34648    /// ```text
34649    /// +---+----------+
34650    /// | # | Operands |
34651    /// +---+----------+
34652    /// | 1 | Xmm, Mem |
34653    /// | 2 | Xmm, Xmm |
34654    /// | 3 | Ymm, Mem |
34655    /// | 4 | Ymm, Xmm |
34656    /// | 5 | Zmm, Mem |
34657    /// | 6 | Zmm, Ymm |
34658    /// +---+----------+
34659    /// ```
34660    #[inline]
34661    pub fn vcvttph2dq_mask<A, B>(&mut self, op0: A, op1: B)
34662    where Assembler<'a>: Vcvttph2dqMaskEmitter<A, B> {
34663        <Self as Vcvttph2dqMaskEmitter<A, B>>::vcvttph2dq_mask(self, op0, op1);
34664    }
34665    /// `VCVTTPH2DQ_MASK_SAE`.
34666    ///
34667    /// Supported operand variants:
34668    ///
34669    /// ```text
34670    /// +---+----------+
34671    /// | # | Operands |
34672    /// +---+----------+
34673    /// | 1 | Zmm, Ymm |
34674    /// +---+----------+
34675    /// ```
34676    #[inline]
34677    pub fn vcvttph2dq_mask_sae<A, B>(&mut self, op0: A, op1: B)
34678    where Assembler<'a>: Vcvttph2dqMaskSaeEmitter<A, B> {
34679        <Self as Vcvttph2dqMaskSaeEmitter<A, B>>::vcvttph2dq_mask_sae(self, op0, op1);
34680    }
34681    /// `VCVTTPH2DQ_MASKZ`.
34682    ///
34683    /// Supported operand variants:
34684    ///
34685    /// ```text
34686    /// +---+----------+
34687    /// | # | Operands |
34688    /// +---+----------+
34689    /// | 1 | Xmm, Mem |
34690    /// | 2 | Xmm, Xmm |
34691    /// | 3 | Ymm, Mem |
34692    /// | 4 | Ymm, Xmm |
34693    /// | 5 | Zmm, Mem |
34694    /// | 6 | Zmm, Ymm |
34695    /// +---+----------+
34696    /// ```
34697    #[inline]
34698    pub fn vcvttph2dq_maskz<A, B>(&mut self, op0: A, op1: B)
34699    where Assembler<'a>: Vcvttph2dqMaskzEmitter<A, B> {
34700        <Self as Vcvttph2dqMaskzEmitter<A, B>>::vcvttph2dq_maskz(self, op0, op1);
34701    }
34702    /// `VCVTTPH2DQ_MASKZ_SAE`.
34703    ///
34704    /// Supported operand variants:
34705    ///
34706    /// ```text
34707    /// +---+----------+
34708    /// | # | Operands |
34709    /// +---+----------+
34710    /// | 1 | Zmm, Ymm |
34711    /// +---+----------+
34712    /// ```
34713    #[inline]
34714    pub fn vcvttph2dq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
34715    where Assembler<'a>: Vcvttph2dqMaskzSaeEmitter<A, B> {
34716        <Self as Vcvttph2dqMaskzSaeEmitter<A, B>>::vcvttph2dq_maskz_sae(self, op0, op1);
34717    }
34718    /// `VCVTTPH2DQ_SAE`.
34719    ///
34720    /// Supported operand variants:
34721    ///
34722    /// ```text
34723    /// +---+----------+
34724    /// | # | Operands |
34725    /// +---+----------+
34726    /// | 1 | Zmm, Ymm |
34727    /// +---+----------+
34728    /// ```
34729    #[inline]
34730    pub fn vcvttph2dq_sae<A, B>(&mut self, op0: A, op1: B)
34731    where Assembler<'a>: Vcvttph2dqSaeEmitter<A, B> {
34732        <Self as Vcvttph2dqSaeEmitter<A, B>>::vcvttph2dq_sae(self, op0, op1);
34733    }
34734    /// `VCVTTPH2QQ`.
34735    ///
34736    /// Supported operand variants:
34737    ///
34738    /// ```text
34739    /// +---+----------+
34740    /// | # | Operands |
34741    /// +---+----------+
34742    /// | 1 | Xmm, Mem |
34743    /// | 2 | Xmm, Xmm |
34744    /// | 3 | Ymm, Mem |
34745    /// | 4 | Ymm, Xmm |
34746    /// | 5 | Zmm, Mem |
34747    /// | 6 | Zmm, Xmm |
34748    /// +---+----------+
34749    /// ```
34750    #[inline]
34751    pub fn vcvttph2qq<A, B>(&mut self, op0: A, op1: B)
34752    where Assembler<'a>: Vcvttph2qqEmitter<A, B> {
34753        <Self as Vcvttph2qqEmitter<A, B>>::vcvttph2qq(self, op0, op1);
34754    }
34755    /// `VCVTTPH2QQ_MASK`.
34756    ///
34757    /// Supported operand variants:
34758    ///
34759    /// ```text
34760    /// +---+----------+
34761    /// | # | Operands |
34762    /// +---+----------+
34763    /// | 1 | Xmm, Mem |
34764    /// | 2 | Xmm, Xmm |
34765    /// | 3 | Ymm, Mem |
34766    /// | 4 | Ymm, Xmm |
34767    /// | 5 | Zmm, Mem |
34768    /// | 6 | Zmm, Xmm |
34769    /// +---+----------+
34770    /// ```
34771    #[inline]
34772    pub fn vcvttph2qq_mask<A, B>(&mut self, op0: A, op1: B)
34773    where Assembler<'a>: Vcvttph2qqMaskEmitter<A, B> {
34774        <Self as Vcvttph2qqMaskEmitter<A, B>>::vcvttph2qq_mask(self, op0, op1);
34775    }
34776    /// `VCVTTPH2QQ_MASK_SAE`.
34777    ///
34778    /// Supported operand variants:
34779    ///
34780    /// ```text
34781    /// +---+----------+
34782    /// | # | Operands |
34783    /// +---+----------+
34784    /// | 1 | Zmm, Xmm |
34785    /// +---+----------+
34786    /// ```
34787    #[inline]
34788    pub fn vcvttph2qq_mask_sae<A, B>(&mut self, op0: A, op1: B)
34789    where Assembler<'a>: Vcvttph2qqMaskSaeEmitter<A, B> {
34790        <Self as Vcvttph2qqMaskSaeEmitter<A, B>>::vcvttph2qq_mask_sae(self, op0, op1);
34791    }
34792    /// `VCVTTPH2QQ_MASKZ`.
34793    ///
34794    /// Supported operand variants:
34795    ///
34796    /// ```text
34797    /// +---+----------+
34798    /// | # | Operands |
34799    /// +---+----------+
34800    /// | 1 | Xmm, Mem |
34801    /// | 2 | Xmm, Xmm |
34802    /// | 3 | Ymm, Mem |
34803    /// | 4 | Ymm, Xmm |
34804    /// | 5 | Zmm, Mem |
34805    /// | 6 | Zmm, Xmm |
34806    /// +---+----------+
34807    /// ```
34808    #[inline]
34809    pub fn vcvttph2qq_maskz<A, B>(&mut self, op0: A, op1: B)
34810    where Assembler<'a>: Vcvttph2qqMaskzEmitter<A, B> {
34811        <Self as Vcvttph2qqMaskzEmitter<A, B>>::vcvttph2qq_maskz(self, op0, op1);
34812    }
34813    /// `VCVTTPH2QQ_MASKZ_SAE`.
34814    ///
34815    /// Supported operand variants:
34816    ///
34817    /// ```text
34818    /// +---+----------+
34819    /// | # | Operands |
34820    /// +---+----------+
34821    /// | 1 | Zmm, Xmm |
34822    /// +---+----------+
34823    /// ```
34824    #[inline]
34825    pub fn vcvttph2qq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
34826    where Assembler<'a>: Vcvttph2qqMaskzSaeEmitter<A, B> {
34827        <Self as Vcvttph2qqMaskzSaeEmitter<A, B>>::vcvttph2qq_maskz_sae(self, op0, op1);
34828    }
34829    /// `VCVTTPH2QQ_SAE`.
34830    ///
34831    /// Supported operand variants:
34832    ///
34833    /// ```text
34834    /// +---+----------+
34835    /// | # | Operands |
34836    /// +---+----------+
34837    /// | 1 | Zmm, Xmm |
34838    /// +---+----------+
34839    /// ```
34840    #[inline]
34841    pub fn vcvttph2qq_sae<A, B>(&mut self, op0: A, op1: B)
34842    where Assembler<'a>: Vcvttph2qqSaeEmitter<A, B> {
34843        <Self as Vcvttph2qqSaeEmitter<A, B>>::vcvttph2qq_sae(self, op0, op1);
34844    }
34845    /// `VCVTTPH2UDQ`.
34846    ///
34847    /// Supported operand variants:
34848    ///
34849    /// ```text
34850    /// +---+----------+
34851    /// | # | Operands |
34852    /// +---+----------+
34853    /// | 1 | Xmm, Mem |
34854    /// | 2 | Xmm, Xmm |
34855    /// | 3 | Ymm, Mem |
34856    /// | 4 | Ymm, Xmm |
34857    /// | 5 | Zmm, Mem |
34858    /// | 6 | Zmm, Ymm |
34859    /// +---+----------+
34860    /// ```
34861    #[inline]
34862    pub fn vcvttph2udq<A, B>(&mut self, op0: A, op1: B)
34863    where Assembler<'a>: Vcvttph2udqEmitter<A, B> {
34864        <Self as Vcvttph2udqEmitter<A, B>>::vcvttph2udq(self, op0, op1);
34865    }
34866    /// `VCVTTPH2UDQ_MASK`.
34867    ///
34868    /// Supported operand variants:
34869    ///
34870    /// ```text
34871    /// +---+----------+
34872    /// | # | Operands |
34873    /// +---+----------+
34874    /// | 1 | Xmm, Mem |
34875    /// | 2 | Xmm, Xmm |
34876    /// | 3 | Ymm, Mem |
34877    /// | 4 | Ymm, Xmm |
34878    /// | 5 | Zmm, Mem |
34879    /// | 6 | Zmm, Ymm |
34880    /// +---+----------+
34881    /// ```
34882    #[inline]
34883    pub fn vcvttph2udq_mask<A, B>(&mut self, op0: A, op1: B)
34884    where Assembler<'a>: Vcvttph2udqMaskEmitter<A, B> {
34885        <Self as Vcvttph2udqMaskEmitter<A, B>>::vcvttph2udq_mask(self, op0, op1);
34886    }
34887    /// `VCVTTPH2UDQ_MASK_SAE`.
34888    ///
34889    /// Supported operand variants:
34890    ///
34891    /// ```text
34892    /// +---+----------+
34893    /// | # | Operands |
34894    /// +---+----------+
34895    /// | 1 | Zmm, Ymm |
34896    /// +---+----------+
34897    /// ```
34898    #[inline]
34899    pub fn vcvttph2udq_mask_sae<A, B>(&mut self, op0: A, op1: B)
34900    where Assembler<'a>: Vcvttph2udqMaskSaeEmitter<A, B> {
34901        <Self as Vcvttph2udqMaskSaeEmitter<A, B>>::vcvttph2udq_mask_sae(self, op0, op1);
34902    }
34903    /// `VCVTTPH2UDQ_MASKZ`.
34904    ///
34905    /// Supported operand variants:
34906    ///
34907    /// ```text
34908    /// +---+----------+
34909    /// | # | Operands |
34910    /// +---+----------+
34911    /// | 1 | Xmm, Mem |
34912    /// | 2 | Xmm, Xmm |
34913    /// | 3 | Ymm, Mem |
34914    /// | 4 | Ymm, Xmm |
34915    /// | 5 | Zmm, Mem |
34916    /// | 6 | Zmm, Ymm |
34917    /// +---+----------+
34918    /// ```
34919    #[inline]
34920    pub fn vcvttph2udq_maskz<A, B>(&mut self, op0: A, op1: B)
34921    where Assembler<'a>: Vcvttph2udqMaskzEmitter<A, B> {
34922        <Self as Vcvttph2udqMaskzEmitter<A, B>>::vcvttph2udq_maskz(self, op0, op1);
34923    }
34924    /// `VCVTTPH2UDQ_MASKZ_SAE`.
34925    ///
34926    /// Supported operand variants:
34927    ///
34928    /// ```text
34929    /// +---+----------+
34930    /// | # | Operands |
34931    /// +---+----------+
34932    /// | 1 | Zmm, Ymm |
34933    /// +---+----------+
34934    /// ```
34935    #[inline]
34936    pub fn vcvttph2udq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
34937    where Assembler<'a>: Vcvttph2udqMaskzSaeEmitter<A, B> {
34938        <Self as Vcvttph2udqMaskzSaeEmitter<A, B>>::vcvttph2udq_maskz_sae(self, op0, op1);
34939    }
34940    /// `VCVTTPH2UDQ_SAE`.
34941    ///
34942    /// Supported operand variants:
34943    ///
34944    /// ```text
34945    /// +---+----------+
34946    /// | # | Operands |
34947    /// +---+----------+
34948    /// | 1 | Zmm, Ymm |
34949    /// +---+----------+
34950    /// ```
34951    #[inline]
34952    pub fn vcvttph2udq_sae<A, B>(&mut self, op0: A, op1: B)
34953    where Assembler<'a>: Vcvttph2udqSaeEmitter<A, B> {
34954        <Self as Vcvttph2udqSaeEmitter<A, B>>::vcvttph2udq_sae(self, op0, op1);
34955    }
34956    /// `VCVTTPH2UQQ`.
34957    ///
34958    /// Supported operand variants:
34959    ///
34960    /// ```text
34961    /// +---+----------+
34962    /// | # | Operands |
34963    /// +---+----------+
34964    /// | 1 | Xmm, Mem |
34965    /// | 2 | Xmm, Xmm |
34966    /// | 3 | Ymm, Mem |
34967    /// | 4 | Ymm, Xmm |
34968    /// | 5 | Zmm, Mem |
34969    /// | 6 | Zmm, Xmm |
34970    /// +---+----------+
34971    /// ```
34972    #[inline]
34973    pub fn vcvttph2uqq<A, B>(&mut self, op0: A, op1: B)
34974    where Assembler<'a>: Vcvttph2uqqEmitter<A, B> {
34975        <Self as Vcvttph2uqqEmitter<A, B>>::vcvttph2uqq(self, op0, op1);
34976    }
34977    /// `VCVTTPH2UQQ_MASK`.
34978    ///
34979    /// Supported operand variants:
34980    ///
34981    /// ```text
34982    /// +---+----------+
34983    /// | # | Operands |
34984    /// +---+----------+
34985    /// | 1 | Xmm, Mem |
34986    /// | 2 | Xmm, Xmm |
34987    /// | 3 | Ymm, Mem |
34988    /// | 4 | Ymm, Xmm |
34989    /// | 5 | Zmm, Mem |
34990    /// | 6 | Zmm, Xmm |
34991    /// +---+----------+
34992    /// ```
34993    #[inline]
34994    pub fn vcvttph2uqq_mask<A, B>(&mut self, op0: A, op1: B)
34995    where Assembler<'a>: Vcvttph2uqqMaskEmitter<A, B> {
34996        <Self as Vcvttph2uqqMaskEmitter<A, B>>::vcvttph2uqq_mask(self, op0, op1);
34997    }
34998    /// `VCVTTPH2UQQ_MASK_SAE`.
34999    ///
35000    /// Supported operand variants:
35001    ///
35002    /// ```text
35003    /// +---+----------+
35004    /// | # | Operands |
35005    /// +---+----------+
35006    /// | 1 | Zmm, Xmm |
35007    /// +---+----------+
35008    /// ```
35009    #[inline]
35010    pub fn vcvttph2uqq_mask_sae<A, B>(&mut self, op0: A, op1: B)
35011    where Assembler<'a>: Vcvttph2uqqMaskSaeEmitter<A, B> {
35012        <Self as Vcvttph2uqqMaskSaeEmitter<A, B>>::vcvttph2uqq_mask_sae(self, op0, op1);
35013    }
35014    /// `VCVTTPH2UQQ_MASKZ`.
35015    ///
35016    /// Supported operand variants:
35017    ///
35018    /// ```text
35019    /// +---+----------+
35020    /// | # | Operands |
35021    /// +---+----------+
35022    /// | 1 | Xmm, Mem |
35023    /// | 2 | Xmm, Xmm |
35024    /// | 3 | Ymm, Mem |
35025    /// | 4 | Ymm, Xmm |
35026    /// | 5 | Zmm, Mem |
35027    /// | 6 | Zmm, Xmm |
35028    /// +---+----------+
35029    /// ```
35030    #[inline]
35031    pub fn vcvttph2uqq_maskz<A, B>(&mut self, op0: A, op1: B)
35032    where Assembler<'a>: Vcvttph2uqqMaskzEmitter<A, B> {
35033        <Self as Vcvttph2uqqMaskzEmitter<A, B>>::vcvttph2uqq_maskz(self, op0, op1);
35034    }
35035    /// `VCVTTPH2UQQ_MASKZ_SAE`.
35036    ///
35037    /// Supported operand variants:
35038    ///
35039    /// ```text
35040    /// +---+----------+
35041    /// | # | Operands |
35042    /// +---+----------+
35043    /// | 1 | Zmm, Xmm |
35044    /// +---+----------+
35045    /// ```
35046    #[inline]
35047    pub fn vcvttph2uqq_maskz_sae<A, B>(&mut self, op0: A, op1: B)
35048    where Assembler<'a>: Vcvttph2uqqMaskzSaeEmitter<A, B> {
35049        <Self as Vcvttph2uqqMaskzSaeEmitter<A, B>>::vcvttph2uqq_maskz_sae(self, op0, op1);
35050    }
35051    /// `VCVTTPH2UQQ_SAE`.
35052    ///
35053    /// Supported operand variants:
35054    ///
35055    /// ```text
35056    /// +---+----------+
35057    /// | # | Operands |
35058    /// +---+----------+
35059    /// | 1 | Zmm, Xmm |
35060    /// +---+----------+
35061    /// ```
35062    #[inline]
35063    pub fn vcvttph2uqq_sae<A, B>(&mut self, op0: A, op1: B)
35064    where Assembler<'a>: Vcvttph2uqqSaeEmitter<A, B> {
35065        <Self as Vcvttph2uqqSaeEmitter<A, B>>::vcvttph2uqq_sae(self, op0, op1);
35066    }
35067    /// `VCVTTPH2UW`.
35068    ///
35069    /// Supported operand variants:
35070    ///
35071    /// ```text
35072    /// +---+----------+
35073    /// | # | Operands |
35074    /// +---+----------+
35075    /// | 1 | Xmm, Mem |
35076    /// | 2 | Xmm, Xmm |
35077    /// | 3 | Ymm, Mem |
35078    /// | 4 | Ymm, Ymm |
35079    /// | 5 | Zmm, Mem |
35080    /// | 6 | Zmm, Zmm |
35081    /// +---+----------+
35082    /// ```
35083    #[inline]
35084    pub fn vcvttph2uw<A, B>(&mut self, op0: A, op1: B)
35085    where Assembler<'a>: Vcvttph2uwEmitter<A, B> {
35086        <Self as Vcvttph2uwEmitter<A, B>>::vcvttph2uw(self, op0, op1);
35087    }
35088    /// `VCVTTPH2UW_MASK`.
35089    ///
35090    /// Supported operand variants:
35091    ///
35092    /// ```text
35093    /// +---+----------+
35094    /// | # | Operands |
35095    /// +---+----------+
35096    /// | 1 | Xmm, Mem |
35097    /// | 2 | Xmm, Xmm |
35098    /// | 3 | Ymm, Mem |
35099    /// | 4 | Ymm, Ymm |
35100    /// | 5 | Zmm, Mem |
35101    /// | 6 | Zmm, Zmm |
35102    /// +---+----------+
35103    /// ```
35104    #[inline]
35105    pub fn vcvttph2uw_mask<A, B>(&mut self, op0: A, op1: B)
35106    where Assembler<'a>: Vcvttph2uwMaskEmitter<A, B> {
35107        <Self as Vcvttph2uwMaskEmitter<A, B>>::vcvttph2uw_mask(self, op0, op1);
35108    }
35109    /// `VCVTTPH2UW_MASK_SAE`.
35110    ///
35111    /// Supported operand variants:
35112    ///
35113    /// ```text
35114    /// +---+----------+
35115    /// | # | Operands |
35116    /// +---+----------+
35117    /// | 1 | Zmm, Zmm |
35118    /// +---+----------+
35119    /// ```
35120    #[inline]
35121    pub fn vcvttph2uw_mask_sae<A, B>(&mut self, op0: A, op1: B)
35122    where Assembler<'a>: Vcvttph2uwMaskSaeEmitter<A, B> {
35123        <Self as Vcvttph2uwMaskSaeEmitter<A, B>>::vcvttph2uw_mask_sae(self, op0, op1);
35124    }
35125    /// `VCVTTPH2UW_MASKZ`.
35126    ///
35127    /// Supported operand variants:
35128    ///
35129    /// ```text
35130    /// +---+----------+
35131    /// | # | Operands |
35132    /// +---+----------+
35133    /// | 1 | Xmm, Mem |
35134    /// | 2 | Xmm, Xmm |
35135    /// | 3 | Ymm, Mem |
35136    /// | 4 | Ymm, Ymm |
35137    /// | 5 | Zmm, Mem |
35138    /// | 6 | Zmm, Zmm |
35139    /// +---+----------+
35140    /// ```
35141    #[inline]
35142    pub fn vcvttph2uw_maskz<A, B>(&mut self, op0: A, op1: B)
35143    where Assembler<'a>: Vcvttph2uwMaskzEmitter<A, B> {
35144        <Self as Vcvttph2uwMaskzEmitter<A, B>>::vcvttph2uw_maskz(self, op0, op1);
35145    }
35146    /// `VCVTTPH2UW_MASKZ_SAE`.
35147    ///
35148    /// Supported operand variants:
35149    ///
35150    /// ```text
35151    /// +---+----------+
35152    /// | # | Operands |
35153    /// +---+----------+
35154    /// | 1 | Zmm, Zmm |
35155    /// +---+----------+
35156    /// ```
35157    #[inline]
35158    pub fn vcvttph2uw_maskz_sae<A, B>(&mut self, op0: A, op1: B)
35159    where Assembler<'a>: Vcvttph2uwMaskzSaeEmitter<A, B> {
35160        <Self as Vcvttph2uwMaskzSaeEmitter<A, B>>::vcvttph2uw_maskz_sae(self, op0, op1);
35161    }
35162    /// `VCVTTPH2UW_SAE`.
35163    ///
35164    /// Supported operand variants:
35165    ///
35166    /// ```text
35167    /// +---+----------+
35168    /// | # | Operands |
35169    /// +---+----------+
35170    /// | 1 | Zmm, Zmm |
35171    /// +---+----------+
35172    /// ```
35173    #[inline]
35174    pub fn vcvttph2uw_sae<A, B>(&mut self, op0: A, op1: B)
35175    where Assembler<'a>: Vcvttph2uwSaeEmitter<A, B> {
35176        <Self as Vcvttph2uwSaeEmitter<A, B>>::vcvttph2uw_sae(self, op0, op1);
35177    }
35178    /// `VCVTTPH2W`.
35179    ///
35180    /// Supported operand variants:
35181    ///
35182    /// ```text
35183    /// +---+----------+
35184    /// | # | Operands |
35185    /// +---+----------+
35186    /// | 1 | Xmm, Mem |
35187    /// | 2 | Xmm, Xmm |
35188    /// | 3 | Ymm, Mem |
35189    /// | 4 | Ymm, Ymm |
35190    /// | 5 | Zmm, Mem |
35191    /// | 6 | Zmm, Zmm |
35192    /// +---+----------+
35193    /// ```
35194    #[inline]
35195    pub fn vcvttph2w<A, B>(&mut self, op0: A, op1: B)
35196    where Assembler<'a>: Vcvttph2wEmitter<A, B> {
35197        <Self as Vcvttph2wEmitter<A, B>>::vcvttph2w(self, op0, op1);
35198    }
35199    /// `VCVTTPH2W_MASK`.
35200    ///
35201    /// Supported operand variants:
35202    ///
35203    /// ```text
35204    /// +---+----------+
35205    /// | # | Operands |
35206    /// +---+----------+
35207    /// | 1 | Xmm, Mem |
35208    /// | 2 | Xmm, Xmm |
35209    /// | 3 | Ymm, Mem |
35210    /// | 4 | Ymm, Ymm |
35211    /// | 5 | Zmm, Mem |
35212    /// | 6 | Zmm, Zmm |
35213    /// +---+----------+
35214    /// ```
35215    #[inline]
35216    pub fn vcvttph2w_mask<A, B>(&mut self, op0: A, op1: B)
35217    where Assembler<'a>: Vcvttph2wMaskEmitter<A, B> {
35218        <Self as Vcvttph2wMaskEmitter<A, B>>::vcvttph2w_mask(self, op0, op1);
35219    }
35220    /// `VCVTTPH2W_MASK_SAE`.
35221    ///
35222    /// Supported operand variants:
35223    ///
35224    /// ```text
35225    /// +---+----------+
35226    /// | # | Operands |
35227    /// +---+----------+
35228    /// | 1 | Zmm, Zmm |
35229    /// +---+----------+
35230    /// ```
35231    #[inline]
35232    pub fn vcvttph2w_mask_sae<A, B>(&mut self, op0: A, op1: B)
35233    where Assembler<'a>: Vcvttph2wMaskSaeEmitter<A, B> {
35234        <Self as Vcvttph2wMaskSaeEmitter<A, B>>::vcvttph2w_mask_sae(self, op0, op1);
35235    }
35236    /// `VCVTTPH2W_MASKZ`.
35237    ///
35238    /// Supported operand variants:
35239    ///
35240    /// ```text
35241    /// +---+----------+
35242    /// | # | Operands |
35243    /// +---+----------+
35244    /// | 1 | Xmm, Mem |
35245    /// | 2 | Xmm, Xmm |
35246    /// | 3 | Ymm, Mem |
35247    /// | 4 | Ymm, Ymm |
35248    /// | 5 | Zmm, Mem |
35249    /// | 6 | Zmm, Zmm |
35250    /// +---+----------+
35251    /// ```
35252    #[inline]
35253    pub fn vcvttph2w_maskz<A, B>(&mut self, op0: A, op1: B)
35254    where Assembler<'a>: Vcvttph2wMaskzEmitter<A, B> {
35255        <Self as Vcvttph2wMaskzEmitter<A, B>>::vcvttph2w_maskz(self, op0, op1);
35256    }
35257    /// `VCVTTPH2W_MASKZ_SAE`.
35258    ///
35259    /// Supported operand variants:
35260    ///
35261    /// ```text
35262    /// +---+----------+
35263    /// | # | Operands |
35264    /// +---+----------+
35265    /// | 1 | Zmm, Zmm |
35266    /// +---+----------+
35267    /// ```
35268    #[inline]
35269    pub fn vcvttph2w_maskz_sae<A, B>(&mut self, op0: A, op1: B)
35270    where Assembler<'a>: Vcvttph2wMaskzSaeEmitter<A, B> {
35271        <Self as Vcvttph2wMaskzSaeEmitter<A, B>>::vcvttph2w_maskz_sae(self, op0, op1);
35272    }
35273    /// `VCVTTPH2W_SAE`.
35274    ///
35275    /// Supported operand variants:
35276    ///
35277    /// ```text
35278    /// +---+----------+
35279    /// | # | Operands |
35280    /// +---+----------+
35281    /// | 1 | Zmm, Zmm |
35282    /// +---+----------+
35283    /// ```
35284    #[inline]
35285    pub fn vcvttph2w_sae<A, B>(&mut self, op0: A, op1: B)
35286    where Assembler<'a>: Vcvttph2wSaeEmitter<A, B> {
35287        <Self as Vcvttph2wSaeEmitter<A, B>>::vcvttph2w_sae(self, op0, op1);
35288    }
35289    /// `VCVTTSH2SI`.
35290    ///
35291    /// Supported operand variants:
35292    ///
35293    /// ```text
35294    /// +---+----------+
35295    /// | # | Operands |
35296    /// +---+----------+
35297    /// | 1 | Gpd, Mem |
35298    /// | 2 | Gpd, Xmm |
35299    /// | 3 | Gpq, Mem |
35300    /// | 4 | Gpq, Xmm |
35301    /// +---+----------+
35302    /// ```
35303    #[inline]
35304    pub fn vcvttsh2si<A, B>(&mut self, op0: A, op1: B)
35305    where Assembler<'a>: Vcvttsh2siEmitter<A, B> {
35306        <Self as Vcvttsh2siEmitter<A, B>>::vcvttsh2si(self, op0, op1);
35307    }
35308    /// `VCVTTSH2SI_SAE`.
35309    ///
35310    /// Supported operand variants:
35311    ///
35312    /// ```text
35313    /// +---+----------+
35314    /// | # | Operands |
35315    /// +---+----------+
35316    /// | 1 | Gpd, Xmm |
35317    /// | 2 | Gpq, Xmm |
35318    /// +---+----------+
35319    /// ```
35320    #[inline]
35321    pub fn vcvttsh2si_sae<A, B>(&mut self, op0: A, op1: B)
35322    where Assembler<'a>: Vcvttsh2siSaeEmitter<A, B> {
35323        <Self as Vcvttsh2siSaeEmitter<A, B>>::vcvttsh2si_sae(self, op0, op1);
35324    }
35325    /// `VCVTTSH2USI`.
35326    ///
35327    /// Supported operand variants:
35328    ///
35329    /// ```text
35330    /// +---+----------+
35331    /// | # | Operands |
35332    /// +---+----------+
35333    /// | 1 | Gpd, Mem |
35334    /// | 2 | Gpd, Xmm |
35335    /// | 3 | Gpq, Mem |
35336    /// | 4 | Gpq, Xmm |
35337    /// +---+----------+
35338    /// ```
35339    #[inline]
35340    pub fn vcvttsh2usi<A, B>(&mut self, op0: A, op1: B)
35341    where Assembler<'a>: Vcvttsh2usiEmitter<A, B> {
35342        <Self as Vcvttsh2usiEmitter<A, B>>::vcvttsh2usi(self, op0, op1);
35343    }
35344    /// `VCVTTSH2USI_SAE`.
35345    ///
35346    /// Supported operand variants:
35347    ///
35348    /// ```text
35349    /// +---+----------+
35350    /// | # | Operands |
35351    /// +---+----------+
35352    /// | 1 | Gpd, Xmm |
35353    /// | 2 | Gpq, Xmm |
35354    /// +---+----------+
35355    /// ```
35356    #[inline]
35357    pub fn vcvttsh2usi_sae<A, B>(&mut self, op0: A, op1: B)
35358    where Assembler<'a>: Vcvttsh2usiSaeEmitter<A, B> {
35359        <Self as Vcvttsh2usiSaeEmitter<A, B>>::vcvttsh2usi_sae(self, op0, op1);
35360    }
35361    /// `VCVTUDQ2PH`.
35362    ///
35363    /// Supported operand variants:
35364    ///
35365    /// ```text
35366    /// +---+----------+
35367    /// | # | Operands |
35368    /// +---+----------+
35369    /// | 1 | Xmm, Mem |
35370    /// | 2 | Xmm, Xmm |
35371    /// | 3 | Xmm, Ymm |
35372    /// | 4 | Ymm, Mem |
35373    /// | 5 | Ymm, Zmm |
35374    /// +---+----------+
35375    /// ```
35376    #[inline]
35377    pub fn vcvtudq2ph<A, B>(&mut self, op0: A, op1: B)
35378    where Assembler<'a>: Vcvtudq2phEmitter<A, B> {
35379        <Self as Vcvtudq2phEmitter<A, B>>::vcvtudq2ph(self, op0, op1);
35380    }
35381    /// `VCVTUDQ2PH_ER`.
35382    ///
35383    /// Supported operand variants:
35384    ///
35385    /// ```text
35386    /// +---+----------+
35387    /// | # | Operands |
35388    /// +---+----------+
35389    /// | 1 | Ymm, Zmm |
35390    /// +---+----------+
35391    /// ```
35392    #[inline]
35393    pub fn vcvtudq2ph_er<A, B>(&mut self, op0: A, op1: B)
35394    where Assembler<'a>: Vcvtudq2phErEmitter<A, B> {
35395        <Self as Vcvtudq2phErEmitter<A, B>>::vcvtudq2ph_er(self, op0, op1);
35396    }
35397    /// `VCVTUDQ2PH_MASK`.
35398    ///
35399    /// Supported operand variants:
35400    ///
35401    /// ```text
35402    /// +---+----------+
35403    /// | # | Operands |
35404    /// +---+----------+
35405    /// | 1 | Xmm, Mem |
35406    /// | 2 | Xmm, Xmm |
35407    /// | 3 | Xmm, Ymm |
35408    /// | 4 | Ymm, Mem |
35409    /// | 5 | Ymm, Zmm |
35410    /// +---+----------+
35411    /// ```
35412    #[inline]
35413    pub fn vcvtudq2ph_mask<A, B>(&mut self, op0: A, op1: B)
35414    where Assembler<'a>: Vcvtudq2phMaskEmitter<A, B> {
35415        <Self as Vcvtudq2phMaskEmitter<A, B>>::vcvtudq2ph_mask(self, op0, op1);
35416    }
35417    /// `VCVTUDQ2PH_MASK_ER`.
35418    ///
35419    /// Supported operand variants:
35420    ///
35421    /// ```text
35422    /// +---+----------+
35423    /// | # | Operands |
35424    /// +---+----------+
35425    /// | 1 | Ymm, Zmm |
35426    /// +---+----------+
35427    /// ```
35428    #[inline]
35429    pub fn vcvtudq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
35430    where Assembler<'a>: Vcvtudq2phMaskErEmitter<A, B> {
35431        <Self as Vcvtudq2phMaskErEmitter<A, B>>::vcvtudq2ph_mask_er(self, op0, op1);
35432    }
35433    /// `VCVTUDQ2PH_MASKZ`.
35434    ///
35435    /// Supported operand variants:
35436    ///
35437    /// ```text
35438    /// +---+----------+
35439    /// | # | Operands |
35440    /// +---+----------+
35441    /// | 1 | Xmm, Mem |
35442    /// | 2 | Xmm, Xmm |
35443    /// | 3 | Xmm, Ymm |
35444    /// | 4 | Ymm, Mem |
35445    /// | 5 | Ymm, Zmm |
35446    /// +---+----------+
35447    /// ```
35448    #[inline]
35449    pub fn vcvtudq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
35450    where Assembler<'a>: Vcvtudq2phMaskzEmitter<A, B> {
35451        <Self as Vcvtudq2phMaskzEmitter<A, B>>::vcvtudq2ph_maskz(self, op0, op1);
35452    }
35453    /// `VCVTUDQ2PH_MASKZ_ER`.
35454    ///
35455    /// Supported operand variants:
35456    ///
35457    /// ```text
35458    /// +---+----------+
35459    /// | # | Operands |
35460    /// +---+----------+
35461    /// | 1 | Ymm, Zmm |
35462    /// +---+----------+
35463    /// ```
35464    #[inline]
35465    pub fn vcvtudq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
35466    where Assembler<'a>: Vcvtudq2phMaskzErEmitter<A, B> {
35467        <Self as Vcvtudq2phMaskzErEmitter<A, B>>::vcvtudq2ph_maskz_er(self, op0, op1);
35468    }
35469    /// `VCVTUQQ2PH`.
35470    ///
35471    /// Supported operand variants:
35472    ///
35473    /// ```text
35474    /// +---+----------+
35475    /// | # | Operands |
35476    /// +---+----------+
35477    /// | 1 | Xmm, Mem |
35478    /// | 2 | Xmm, Xmm |
35479    /// | 3 | Xmm, Ymm |
35480    /// | 4 | Xmm, Zmm |
35481    /// +---+----------+
35482    /// ```
35483    #[inline]
35484    pub fn vcvtuqq2ph<A, B>(&mut self, op0: A, op1: B)
35485    where Assembler<'a>: Vcvtuqq2phEmitter<A, B> {
35486        <Self as Vcvtuqq2phEmitter<A, B>>::vcvtuqq2ph(self, op0, op1);
35487    }
35488    /// `VCVTUQQ2PH_ER`.
35489    ///
35490    /// Supported operand variants:
35491    ///
35492    /// ```text
35493    /// +---+----------+
35494    /// | # | Operands |
35495    /// +---+----------+
35496    /// | 1 | Xmm, Zmm |
35497    /// +---+----------+
35498    /// ```
35499    #[inline]
35500    pub fn vcvtuqq2ph_er<A, B>(&mut self, op0: A, op1: B)
35501    where Assembler<'a>: Vcvtuqq2phErEmitter<A, B> {
35502        <Self as Vcvtuqq2phErEmitter<A, B>>::vcvtuqq2ph_er(self, op0, op1);
35503    }
35504    /// `VCVTUQQ2PH_MASK`.
35505    ///
35506    /// Supported operand variants:
35507    ///
35508    /// ```text
35509    /// +---+----------+
35510    /// | # | Operands |
35511    /// +---+----------+
35512    /// | 1 | Xmm, Mem |
35513    /// | 2 | Xmm, Xmm |
35514    /// | 3 | Xmm, Ymm |
35515    /// | 4 | Xmm, Zmm |
35516    /// +---+----------+
35517    /// ```
35518    #[inline]
35519    pub fn vcvtuqq2ph_mask<A, B>(&mut self, op0: A, op1: B)
35520    where Assembler<'a>: Vcvtuqq2phMaskEmitter<A, B> {
35521        <Self as Vcvtuqq2phMaskEmitter<A, B>>::vcvtuqq2ph_mask(self, op0, op1);
35522    }
35523    /// `VCVTUQQ2PH_MASK_ER`.
35524    ///
35525    /// Supported operand variants:
35526    ///
35527    /// ```text
35528    /// +---+----------+
35529    /// | # | Operands |
35530    /// +---+----------+
35531    /// | 1 | Xmm, Zmm |
35532    /// +---+----------+
35533    /// ```
35534    #[inline]
35535    pub fn vcvtuqq2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
35536    where Assembler<'a>: Vcvtuqq2phMaskErEmitter<A, B> {
35537        <Self as Vcvtuqq2phMaskErEmitter<A, B>>::vcvtuqq2ph_mask_er(self, op0, op1);
35538    }
35539    /// `VCVTUQQ2PH_MASKZ`.
35540    ///
35541    /// Supported operand variants:
35542    ///
35543    /// ```text
35544    /// +---+----------+
35545    /// | # | Operands |
35546    /// +---+----------+
35547    /// | 1 | Xmm, Mem |
35548    /// | 2 | Xmm, Xmm |
35549    /// | 3 | Xmm, Ymm |
35550    /// | 4 | Xmm, Zmm |
35551    /// +---+----------+
35552    /// ```
35553    #[inline]
35554    pub fn vcvtuqq2ph_maskz<A, B>(&mut self, op0: A, op1: B)
35555    where Assembler<'a>: Vcvtuqq2phMaskzEmitter<A, B> {
35556        <Self as Vcvtuqq2phMaskzEmitter<A, B>>::vcvtuqq2ph_maskz(self, op0, op1);
35557    }
35558    /// `VCVTUQQ2PH_MASKZ_ER`.
35559    ///
35560    /// Supported operand variants:
35561    ///
35562    /// ```text
35563    /// +---+----------+
35564    /// | # | Operands |
35565    /// +---+----------+
35566    /// | 1 | Xmm, Zmm |
35567    /// +---+----------+
35568    /// ```
35569    #[inline]
35570    pub fn vcvtuqq2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
35571    where Assembler<'a>: Vcvtuqq2phMaskzErEmitter<A, B> {
35572        <Self as Vcvtuqq2phMaskzErEmitter<A, B>>::vcvtuqq2ph_maskz_er(self, op0, op1);
35573    }
35574    /// `VCVTUSI2SH`.
35575    ///
35576    /// Supported operand variants:
35577    ///
35578    /// ```text
35579    /// +---+---------------+
35580    /// | # | Operands      |
35581    /// +---+---------------+
35582    /// | 1 | Xmm, Xmm, Gpd |
35583    /// | 2 | Xmm, Xmm, Gpq |
35584    /// | 3 | Xmm, Xmm, Mem |
35585    /// +---+---------------+
35586    /// ```
35587    #[inline]
35588    pub fn vcvtusi2sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35589    where Assembler<'a>: Vcvtusi2shEmitter<A, B, C> {
35590        <Self as Vcvtusi2shEmitter<A, B, C>>::vcvtusi2sh(self, op0, op1, op2);
35591    }
35592    /// `VCVTUSI2SH_ER`.
35593    ///
35594    /// Supported operand variants:
35595    ///
35596    /// ```text
35597    /// +---+---------------+
35598    /// | # | Operands      |
35599    /// +---+---------------+
35600    /// | 1 | Xmm, Xmm, Gpd |
35601    /// | 2 | Xmm, Xmm, Gpq |
35602    /// +---+---------------+
35603    /// ```
35604    #[inline]
35605    pub fn vcvtusi2sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35606    where Assembler<'a>: Vcvtusi2shErEmitter<A, B, C> {
35607        <Self as Vcvtusi2shErEmitter<A, B, C>>::vcvtusi2sh_er(self, op0, op1, op2);
35608    }
35609    /// `VCVTUW2PH`.
35610    ///
35611    /// Supported operand variants:
35612    ///
35613    /// ```text
35614    /// +---+----------+
35615    /// | # | Operands |
35616    /// +---+----------+
35617    /// | 1 | Xmm, Mem |
35618    /// | 2 | Xmm, Xmm |
35619    /// | 3 | Ymm, Mem |
35620    /// | 4 | Ymm, Ymm |
35621    /// | 5 | Zmm, Mem |
35622    /// | 6 | Zmm, Zmm |
35623    /// +---+----------+
35624    /// ```
35625    #[inline]
35626    pub fn vcvtuw2ph<A, B>(&mut self, op0: A, op1: B)
35627    where Assembler<'a>: Vcvtuw2phEmitter<A, B> {
35628        <Self as Vcvtuw2phEmitter<A, B>>::vcvtuw2ph(self, op0, op1);
35629    }
35630    /// `VCVTUW2PH_ER`.
35631    ///
35632    /// Supported operand variants:
35633    ///
35634    /// ```text
35635    /// +---+----------+
35636    /// | # | Operands |
35637    /// +---+----------+
35638    /// | 1 | Zmm, Zmm |
35639    /// +---+----------+
35640    /// ```
35641    #[inline]
35642    pub fn vcvtuw2ph_er<A, B>(&mut self, op0: A, op1: B)
35643    where Assembler<'a>: Vcvtuw2phErEmitter<A, B> {
35644        <Self as Vcvtuw2phErEmitter<A, B>>::vcvtuw2ph_er(self, op0, op1);
35645    }
35646    /// `VCVTUW2PH_MASK`.
35647    ///
35648    /// Supported operand variants:
35649    ///
35650    /// ```text
35651    /// +---+----------+
35652    /// | # | Operands |
35653    /// +---+----------+
35654    /// | 1 | Xmm, Mem |
35655    /// | 2 | Xmm, Xmm |
35656    /// | 3 | Ymm, Mem |
35657    /// | 4 | Ymm, Ymm |
35658    /// | 5 | Zmm, Mem |
35659    /// | 6 | Zmm, Zmm |
35660    /// +---+----------+
35661    /// ```
35662    #[inline]
35663    pub fn vcvtuw2ph_mask<A, B>(&mut self, op0: A, op1: B)
35664    where Assembler<'a>: Vcvtuw2phMaskEmitter<A, B> {
35665        <Self as Vcvtuw2phMaskEmitter<A, B>>::vcvtuw2ph_mask(self, op0, op1);
35666    }
35667    /// `VCVTUW2PH_MASK_ER`.
35668    ///
35669    /// Supported operand variants:
35670    ///
35671    /// ```text
35672    /// +---+----------+
35673    /// | # | Operands |
35674    /// +---+----------+
35675    /// | 1 | Zmm, Zmm |
35676    /// +---+----------+
35677    /// ```
35678    #[inline]
35679    pub fn vcvtuw2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
35680    where Assembler<'a>: Vcvtuw2phMaskErEmitter<A, B> {
35681        <Self as Vcvtuw2phMaskErEmitter<A, B>>::vcvtuw2ph_mask_er(self, op0, op1);
35682    }
35683    /// `VCVTUW2PH_MASKZ`.
35684    ///
35685    /// Supported operand variants:
35686    ///
35687    /// ```text
35688    /// +---+----------+
35689    /// | # | Operands |
35690    /// +---+----------+
35691    /// | 1 | Xmm, Mem |
35692    /// | 2 | Xmm, Xmm |
35693    /// | 3 | Ymm, Mem |
35694    /// | 4 | Ymm, Ymm |
35695    /// | 5 | Zmm, Mem |
35696    /// | 6 | Zmm, Zmm |
35697    /// +---+----------+
35698    /// ```
35699    #[inline]
35700    pub fn vcvtuw2ph_maskz<A, B>(&mut self, op0: A, op1: B)
35701    where Assembler<'a>: Vcvtuw2phMaskzEmitter<A, B> {
35702        <Self as Vcvtuw2phMaskzEmitter<A, B>>::vcvtuw2ph_maskz(self, op0, op1);
35703    }
35704    /// `VCVTUW2PH_MASKZ_ER`.
35705    ///
35706    /// Supported operand variants:
35707    ///
35708    /// ```text
35709    /// +---+----------+
35710    /// | # | Operands |
35711    /// +---+----------+
35712    /// | 1 | Zmm, Zmm |
35713    /// +---+----------+
35714    /// ```
35715    #[inline]
35716    pub fn vcvtuw2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
35717    where Assembler<'a>: Vcvtuw2phMaskzErEmitter<A, B> {
35718        <Self as Vcvtuw2phMaskzErEmitter<A, B>>::vcvtuw2ph_maskz_er(self, op0, op1);
35719    }
35720    /// `VCVTW2PH`.
35721    ///
35722    /// Supported operand variants:
35723    ///
35724    /// ```text
35725    /// +---+----------+
35726    /// | # | Operands |
35727    /// +---+----------+
35728    /// | 1 | Xmm, Mem |
35729    /// | 2 | Xmm, Xmm |
35730    /// | 3 | Ymm, Mem |
35731    /// | 4 | Ymm, Ymm |
35732    /// | 5 | Zmm, Mem |
35733    /// | 6 | Zmm, Zmm |
35734    /// +---+----------+
35735    /// ```
35736    #[inline]
35737    pub fn vcvtw2ph<A, B>(&mut self, op0: A, op1: B)
35738    where Assembler<'a>: Vcvtw2phEmitter<A, B> {
35739        <Self as Vcvtw2phEmitter<A, B>>::vcvtw2ph(self, op0, op1);
35740    }
35741    /// `VCVTW2PH_ER`.
35742    ///
35743    /// Supported operand variants:
35744    ///
35745    /// ```text
35746    /// +---+----------+
35747    /// | # | Operands |
35748    /// +---+----------+
35749    /// | 1 | Zmm, Zmm |
35750    /// +---+----------+
35751    /// ```
35752    #[inline]
35753    pub fn vcvtw2ph_er<A, B>(&mut self, op0: A, op1: B)
35754    where Assembler<'a>: Vcvtw2phErEmitter<A, B> {
35755        <Self as Vcvtw2phErEmitter<A, B>>::vcvtw2ph_er(self, op0, op1);
35756    }
35757    /// `VCVTW2PH_MASK`.
35758    ///
35759    /// Supported operand variants:
35760    ///
35761    /// ```text
35762    /// +---+----------+
35763    /// | # | Operands |
35764    /// +---+----------+
35765    /// | 1 | Xmm, Mem |
35766    /// | 2 | Xmm, Xmm |
35767    /// | 3 | Ymm, Mem |
35768    /// | 4 | Ymm, Ymm |
35769    /// | 5 | Zmm, Mem |
35770    /// | 6 | Zmm, Zmm |
35771    /// +---+----------+
35772    /// ```
35773    #[inline]
35774    pub fn vcvtw2ph_mask<A, B>(&mut self, op0: A, op1: B)
35775    where Assembler<'a>: Vcvtw2phMaskEmitter<A, B> {
35776        <Self as Vcvtw2phMaskEmitter<A, B>>::vcvtw2ph_mask(self, op0, op1);
35777    }
35778    /// `VCVTW2PH_MASK_ER`.
35779    ///
35780    /// Supported operand variants:
35781    ///
35782    /// ```text
35783    /// +---+----------+
35784    /// | # | Operands |
35785    /// +---+----------+
35786    /// | 1 | Zmm, Zmm |
35787    /// +---+----------+
35788    /// ```
35789    #[inline]
35790    pub fn vcvtw2ph_mask_er<A, B>(&mut self, op0: A, op1: B)
35791    where Assembler<'a>: Vcvtw2phMaskErEmitter<A, B> {
35792        <Self as Vcvtw2phMaskErEmitter<A, B>>::vcvtw2ph_mask_er(self, op0, op1);
35793    }
35794    /// `VCVTW2PH_MASKZ`.
35795    ///
35796    /// Supported operand variants:
35797    ///
35798    /// ```text
35799    /// +---+----------+
35800    /// | # | Operands |
35801    /// +---+----------+
35802    /// | 1 | Xmm, Mem |
35803    /// | 2 | Xmm, Xmm |
35804    /// | 3 | Ymm, Mem |
35805    /// | 4 | Ymm, Ymm |
35806    /// | 5 | Zmm, Mem |
35807    /// | 6 | Zmm, Zmm |
35808    /// +---+----------+
35809    /// ```
35810    #[inline]
35811    pub fn vcvtw2ph_maskz<A, B>(&mut self, op0: A, op1: B)
35812    where Assembler<'a>: Vcvtw2phMaskzEmitter<A, B> {
35813        <Self as Vcvtw2phMaskzEmitter<A, B>>::vcvtw2ph_maskz(self, op0, op1);
35814    }
35815    /// `VCVTW2PH_MASKZ_ER`.
35816    ///
35817    /// Supported operand variants:
35818    ///
35819    /// ```text
35820    /// +---+----------+
35821    /// | # | Operands |
35822    /// +---+----------+
35823    /// | 1 | Zmm, Zmm |
35824    /// +---+----------+
35825    /// ```
35826    #[inline]
35827    pub fn vcvtw2ph_maskz_er<A, B>(&mut self, op0: A, op1: B)
35828    where Assembler<'a>: Vcvtw2phMaskzErEmitter<A, B> {
35829        <Self as Vcvtw2phMaskzErEmitter<A, B>>::vcvtw2ph_maskz_er(self, op0, op1);
35830    }
35831    /// `VDIVPH`.
35832    ///
35833    /// Supported operand variants:
35834    ///
35835    /// ```text
35836    /// +---+---------------+
35837    /// | # | Operands      |
35838    /// +---+---------------+
35839    /// | 1 | Xmm, Xmm, Mem |
35840    /// | 2 | Xmm, Xmm, Xmm |
35841    /// | 3 | Ymm, Ymm, Mem |
35842    /// | 4 | Ymm, Ymm, Ymm |
35843    /// | 5 | Zmm, Zmm, Mem |
35844    /// | 6 | Zmm, Zmm, Zmm |
35845    /// +---+---------------+
35846    /// ```
35847    #[inline]
35848    pub fn vdivph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35849    where Assembler<'a>: VdivphEmitter<A, B, C> {
35850        <Self as VdivphEmitter<A, B, C>>::vdivph(self, op0, op1, op2);
35851    }
35852    /// `VDIVPH_ER`.
35853    ///
35854    /// Supported operand variants:
35855    ///
35856    /// ```text
35857    /// +---+---------------+
35858    /// | # | Operands      |
35859    /// +---+---------------+
35860    /// | 1 | Zmm, Zmm, Zmm |
35861    /// +---+---------------+
35862    /// ```
35863    #[inline]
35864    pub fn vdivph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35865    where Assembler<'a>: VdivphErEmitter<A, B, C> {
35866        <Self as VdivphErEmitter<A, B, C>>::vdivph_er(self, op0, op1, op2);
35867    }
35868    /// `VDIVPH_MASK`.
35869    ///
35870    /// Supported operand variants:
35871    ///
35872    /// ```text
35873    /// +---+---------------+
35874    /// | # | Operands      |
35875    /// +---+---------------+
35876    /// | 1 | Xmm, Xmm, Mem |
35877    /// | 2 | Xmm, Xmm, Xmm |
35878    /// | 3 | Ymm, Ymm, Mem |
35879    /// | 4 | Ymm, Ymm, Ymm |
35880    /// | 5 | Zmm, Zmm, Mem |
35881    /// | 6 | Zmm, Zmm, Zmm |
35882    /// +---+---------------+
35883    /// ```
35884    #[inline]
35885    pub fn vdivph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35886    where Assembler<'a>: VdivphMaskEmitter<A, B, C> {
35887        <Self as VdivphMaskEmitter<A, B, C>>::vdivph_mask(self, op0, op1, op2);
35888    }
35889    /// `VDIVPH_MASK_ER`.
35890    ///
35891    /// Supported operand variants:
35892    ///
35893    /// ```text
35894    /// +---+---------------+
35895    /// | # | Operands      |
35896    /// +---+---------------+
35897    /// | 1 | Zmm, Zmm, Zmm |
35898    /// +---+---------------+
35899    /// ```
35900    #[inline]
35901    pub fn vdivph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35902    where Assembler<'a>: VdivphMaskErEmitter<A, B, C> {
35903        <Self as VdivphMaskErEmitter<A, B, C>>::vdivph_mask_er(self, op0, op1, op2);
35904    }
35905    /// `VDIVPH_MASKZ`.
35906    ///
35907    /// Supported operand variants:
35908    ///
35909    /// ```text
35910    /// +---+---------------+
35911    /// | # | Operands      |
35912    /// +---+---------------+
35913    /// | 1 | Xmm, Xmm, Mem |
35914    /// | 2 | Xmm, Xmm, Xmm |
35915    /// | 3 | Ymm, Ymm, Mem |
35916    /// | 4 | Ymm, Ymm, Ymm |
35917    /// | 5 | Zmm, Zmm, Mem |
35918    /// | 6 | Zmm, Zmm, Zmm |
35919    /// +---+---------------+
35920    /// ```
35921    #[inline]
35922    pub fn vdivph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35923    where Assembler<'a>: VdivphMaskzEmitter<A, B, C> {
35924        <Self as VdivphMaskzEmitter<A, B, C>>::vdivph_maskz(self, op0, op1, op2);
35925    }
35926    /// `VDIVPH_MASKZ_ER`.
35927    ///
35928    /// Supported operand variants:
35929    ///
35930    /// ```text
35931    /// +---+---------------+
35932    /// | # | Operands      |
35933    /// +---+---------------+
35934    /// | 1 | Zmm, Zmm, Zmm |
35935    /// +---+---------------+
35936    /// ```
35937    #[inline]
35938    pub fn vdivph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35939    where Assembler<'a>: VdivphMaskzErEmitter<A, B, C> {
35940        <Self as VdivphMaskzErEmitter<A, B, C>>::vdivph_maskz_er(self, op0, op1, op2);
35941    }
35942    /// `VDIVSH`.
35943    ///
35944    /// Supported operand variants:
35945    ///
35946    /// ```text
35947    /// +---+---------------+
35948    /// | # | Operands      |
35949    /// +---+---------------+
35950    /// | 1 | Xmm, Xmm, Mem |
35951    /// | 2 | Xmm, Xmm, Xmm |
35952    /// +---+---------------+
35953    /// ```
35954    #[inline]
35955    pub fn vdivsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35956    where Assembler<'a>: VdivshEmitter<A, B, C> {
35957        <Self as VdivshEmitter<A, B, C>>::vdivsh(self, op0, op1, op2);
35958    }
35959    /// `VDIVSH_ER`.
35960    ///
35961    /// Supported operand variants:
35962    ///
35963    /// ```text
35964    /// +---+---------------+
35965    /// | # | Operands      |
35966    /// +---+---------------+
35967    /// | 1 | Xmm, Xmm, Xmm |
35968    /// +---+---------------+
35969    /// ```
35970    #[inline]
35971    pub fn vdivsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35972    where Assembler<'a>: VdivshErEmitter<A, B, C> {
35973        <Self as VdivshErEmitter<A, B, C>>::vdivsh_er(self, op0, op1, op2);
35974    }
35975    /// `VDIVSH_MASK`.
35976    ///
35977    /// Supported operand variants:
35978    ///
35979    /// ```text
35980    /// +---+---------------+
35981    /// | # | Operands      |
35982    /// +---+---------------+
35983    /// | 1 | Xmm, Xmm, Mem |
35984    /// | 2 | Xmm, Xmm, Xmm |
35985    /// +---+---------------+
35986    /// ```
35987    #[inline]
35988    pub fn vdivsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
35989    where Assembler<'a>: VdivshMaskEmitter<A, B, C> {
35990        <Self as VdivshMaskEmitter<A, B, C>>::vdivsh_mask(self, op0, op1, op2);
35991    }
35992    /// `VDIVSH_MASK_ER`.
35993    ///
35994    /// Supported operand variants:
35995    ///
35996    /// ```text
35997    /// +---+---------------+
35998    /// | # | Operands      |
35999    /// +---+---------------+
36000    /// | 1 | Xmm, Xmm, Xmm |
36001    /// +---+---------------+
36002    /// ```
36003    #[inline]
36004    pub fn vdivsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36005    where Assembler<'a>: VdivshMaskErEmitter<A, B, C> {
36006        <Self as VdivshMaskErEmitter<A, B, C>>::vdivsh_mask_er(self, op0, op1, op2);
36007    }
36008    /// `VDIVSH_MASKZ`.
36009    ///
36010    /// Supported operand variants:
36011    ///
36012    /// ```text
36013    /// +---+---------------+
36014    /// | # | Operands      |
36015    /// +---+---------------+
36016    /// | 1 | Xmm, Xmm, Mem |
36017    /// | 2 | Xmm, Xmm, Xmm |
36018    /// +---+---------------+
36019    /// ```
36020    #[inline]
36021    pub fn vdivsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36022    where Assembler<'a>: VdivshMaskzEmitter<A, B, C> {
36023        <Self as VdivshMaskzEmitter<A, B, C>>::vdivsh_maskz(self, op0, op1, op2);
36024    }
36025    /// `VDIVSH_MASKZ_ER`.
36026    ///
36027    /// Supported operand variants:
36028    ///
36029    /// ```text
36030    /// +---+---------------+
36031    /// | # | Operands      |
36032    /// +---+---------------+
36033    /// | 1 | Xmm, Xmm, Xmm |
36034    /// +---+---------------+
36035    /// ```
36036    #[inline]
36037    pub fn vdivsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36038    where Assembler<'a>: VdivshMaskzErEmitter<A, B, C> {
36039        <Self as VdivshMaskzErEmitter<A, B, C>>::vdivsh_maskz_er(self, op0, op1, op2);
36040    }
36041    /// `VERR` (VERR). 
36042    /// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
36043    ///
36044    ///
36045    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
36046    ///
36047    /// Supported operand variants:
36048    ///
36049    /// ```text
36050    /// +---+----------+
36051    /// | # | Operands |
36052    /// +---+----------+
36053    /// | 1 | Gpd      |
36054    /// | 2 | Mem      |
36055    /// +---+----------+
36056    /// ```
36057    #[inline]
36058    pub fn verr<A>(&mut self, op0: A)
36059    where Assembler<'a>: VerrEmitter<A> {
36060        <Self as VerrEmitter<A>>::verr(self, op0);
36061    }
36062    /// `VERW` (VERW). 
36063    /// Verifies whether the code or data segment specified with the source operand is readable (VERR) or writable (VERW) from the current privilege level (CPL). The source operand is a 16-bit register or a memory location that contains the segment selector for the segment to be verified. If the segment is accessible and readable (VERR) or writable (VERW), the ZF flag is set; otherwise, the ZF flag is cleared. Code segments are never verified as writable. This check cannot be performed on system segments.
36064    ///
36065    ///
36066    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VERR%3AVERW.html).
36067    ///
36068    /// Supported operand variants:
36069    ///
36070    /// ```text
36071    /// +---+----------+
36072    /// | # | Operands |
36073    /// +---+----------+
36074    /// | 1 | Gpd      |
36075    /// | 2 | Mem      |
36076    /// +---+----------+
36077    /// ```
36078    #[inline]
36079    pub fn verw<A>(&mut self, op0: A)
36080    where Assembler<'a>: VerwEmitter<A> {
36081        <Self as VerwEmitter<A>>::verw(self, op0);
36082    }
36083    /// `VFCMADDCPH`.
36084    ///
36085    /// Supported operand variants:
36086    ///
36087    /// ```text
36088    /// +---+---------------+
36089    /// | # | Operands      |
36090    /// +---+---------------+
36091    /// | 1 | Xmm, Xmm, Mem |
36092    /// | 2 | Xmm, Xmm, Xmm |
36093    /// | 3 | Ymm, Ymm, Mem |
36094    /// | 4 | Ymm, Ymm, Ymm |
36095    /// | 5 | Zmm, Zmm, Mem |
36096    /// | 6 | Zmm, Zmm, Zmm |
36097    /// +---+---------------+
36098    /// ```
36099    #[inline]
36100    pub fn vfcmaddcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36101    where Assembler<'a>: VfcmaddcphEmitter<A, B, C> {
36102        <Self as VfcmaddcphEmitter<A, B, C>>::vfcmaddcph(self, op0, op1, op2);
36103    }
36104    /// `VFCMADDCPH_ER`.
36105    ///
36106    /// Supported operand variants:
36107    ///
36108    /// ```text
36109    /// +---+---------------+
36110    /// | # | Operands      |
36111    /// +---+---------------+
36112    /// | 1 | Zmm, Zmm, Zmm |
36113    /// +---+---------------+
36114    /// ```
36115    #[inline]
36116    pub fn vfcmaddcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36117    where Assembler<'a>: VfcmaddcphErEmitter<A, B, C> {
36118        <Self as VfcmaddcphErEmitter<A, B, C>>::vfcmaddcph_er(self, op0, op1, op2);
36119    }
36120    /// `VFCMADDCPH_MASK`.
36121    ///
36122    /// Supported operand variants:
36123    ///
36124    /// ```text
36125    /// +---+---------------+
36126    /// | # | Operands      |
36127    /// +---+---------------+
36128    /// | 1 | Xmm, Xmm, Mem |
36129    /// | 2 | Xmm, Xmm, Xmm |
36130    /// | 3 | Ymm, Ymm, Mem |
36131    /// | 4 | Ymm, Ymm, Ymm |
36132    /// | 5 | Zmm, Zmm, Mem |
36133    /// | 6 | Zmm, Zmm, Zmm |
36134    /// +---+---------------+
36135    /// ```
36136    #[inline]
36137    pub fn vfcmaddcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36138    where Assembler<'a>: VfcmaddcphMaskEmitter<A, B, C> {
36139        <Self as VfcmaddcphMaskEmitter<A, B, C>>::vfcmaddcph_mask(self, op0, op1, op2);
36140    }
36141    /// `VFCMADDCPH_MASK_ER`.
36142    ///
36143    /// Supported operand variants:
36144    ///
36145    /// ```text
36146    /// +---+---------------+
36147    /// | # | Operands      |
36148    /// +---+---------------+
36149    /// | 1 | Zmm, Zmm, Zmm |
36150    /// +---+---------------+
36151    /// ```
36152    #[inline]
36153    pub fn vfcmaddcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36154    where Assembler<'a>: VfcmaddcphMaskErEmitter<A, B, C> {
36155        <Self as VfcmaddcphMaskErEmitter<A, B, C>>::vfcmaddcph_mask_er(self, op0, op1, op2);
36156    }
36157    /// `VFCMADDCPH_MASKZ`.
36158    ///
36159    /// Supported operand variants:
36160    ///
36161    /// ```text
36162    /// +---+---------------+
36163    /// | # | Operands      |
36164    /// +---+---------------+
36165    /// | 1 | Xmm, Xmm, Mem |
36166    /// | 2 | Xmm, Xmm, Xmm |
36167    /// | 3 | Ymm, Ymm, Mem |
36168    /// | 4 | Ymm, Ymm, Ymm |
36169    /// | 5 | Zmm, Zmm, Mem |
36170    /// | 6 | Zmm, Zmm, Zmm |
36171    /// +---+---------------+
36172    /// ```
36173    #[inline]
36174    pub fn vfcmaddcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36175    where Assembler<'a>: VfcmaddcphMaskzEmitter<A, B, C> {
36176        <Self as VfcmaddcphMaskzEmitter<A, B, C>>::vfcmaddcph_maskz(self, op0, op1, op2);
36177    }
36178    /// `VFCMADDCPH_MASKZ_ER`.
36179    ///
36180    /// Supported operand variants:
36181    ///
36182    /// ```text
36183    /// +---+---------------+
36184    /// | # | Operands      |
36185    /// +---+---------------+
36186    /// | 1 | Zmm, Zmm, Zmm |
36187    /// +---+---------------+
36188    /// ```
36189    #[inline]
36190    pub fn vfcmaddcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36191    where Assembler<'a>: VfcmaddcphMaskzErEmitter<A, B, C> {
36192        <Self as VfcmaddcphMaskzErEmitter<A, B, C>>::vfcmaddcph_maskz_er(self, op0, op1, op2);
36193    }
36194    /// `VFCMADDCSH`.
36195    ///
36196    /// Supported operand variants:
36197    ///
36198    /// ```text
36199    /// +---+---------------+
36200    /// | # | Operands      |
36201    /// +---+---------------+
36202    /// | 1 | Xmm, Xmm, Mem |
36203    /// | 2 | Xmm, Xmm, Xmm |
36204    /// +---+---------------+
36205    /// ```
36206    #[inline]
36207    pub fn vfcmaddcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36208    where Assembler<'a>: VfcmaddcshEmitter<A, B, C> {
36209        <Self as VfcmaddcshEmitter<A, B, C>>::vfcmaddcsh(self, op0, op1, op2);
36210    }
36211    /// `VFCMADDCSH_ER`.
36212    ///
36213    /// Supported operand variants:
36214    ///
36215    /// ```text
36216    /// +---+---------------+
36217    /// | # | Operands      |
36218    /// +---+---------------+
36219    /// | 1 | Xmm, Xmm, Xmm |
36220    /// +---+---------------+
36221    /// ```
36222    #[inline]
36223    pub fn vfcmaddcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36224    where Assembler<'a>: VfcmaddcshErEmitter<A, B, C> {
36225        <Self as VfcmaddcshErEmitter<A, B, C>>::vfcmaddcsh_er(self, op0, op1, op2);
36226    }
36227    /// `VFCMADDCSH_MASK`.
36228    ///
36229    /// Supported operand variants:
36230    ///
36231    /// ```text
36232    /// +---+---------------+
36233    /// | # | Operands      |
36234    /// +---+---------------+
36235    /// | 1 | Xmm, Xmm, Mem |
36236    /// | 2 | Xmm, Xmm, Xmm |
36237    /// +---+---------------+
36238    /// ```
36239    #[inline]
36240    pub fn vfcmaddcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36241    where Assembler<'a>: VfcmaddcshMaskEmitter<A, B, C> {
36242        <Self as VfcmaddcshMaskEmitter<A, B, C>>::vfcmaddcsh_mask(self, op0, op1, op2);
36243    }
36244    /// `VFCMADDCSH_MASK_ER`.
36245    ///
36246    /// Supported operand variants:
36247    ///
36248    /// ```text
36249    /// +---+---------------+
36250    /// | # | Operands      |
36251    /// +---+---------------+
36252    /// | 1 | Xmm, Xmm, Xmm |
36253    /// +---+---------------+
36254    /// ```
36255    #[inline]
36256    pub fn vfcmaddcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36257    where Assembler<'a>: VfcmaddcshMaskErEmitter<A, B, C> {
36258        <Self as VfcmaddcshMaskErEmitter<A, B, C>>::vfcmaddcsh_mask_er(self, op0, op1, op2);
36259    }
36260    /// `VFCMADDCSH_MASKZ`.
36261    ///
36262    /// Supported operand variants:
36263    ///
36264    /// ```text
36265    /// +---+---------------+
36266    /// | # | Operands      |
36267    /// +---+---------------+
36268    /// | 1 | Xmm, Xmm, Mem |
36269    /// | 2 | Xmm, Xmm, Xmm |
36270    /// +---+---------------+
36271    /// ```
36272    #[inline]
36273    pub fn vfcmaddcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36274    where Assembler<'a>: VfcmaddcshMaskzEmitter<A, B, C> {
36275        <Self as VfcmaddcshMaskzEmitter<A, B, C>>::vfcmaddcsh_maskz(self, op0, op1, op2);
36276    }
36277    /// `VFCMADDCSH_MASKZ_ER`.
36278    ///
36279    /// Supported operand variants:
36280    ///
36281    /// ```text
36282    /// +---+---------------+
36283    /// | # | Operands      |
36284    /// +---+---------------+
36285    /// | 1 | Xmm, Xmm, Xmm |
36286    /// +---+---------------+
36287    /// ```
36288    #[inline]
36289    pub fn vfcmaddcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36290    where Assembler<'a>: VfcmaddcshMaskzErEmitter<A, B, C> {
36291        <Self as VfcmaddcshMaskzErEmitter<A, B, C>>::vfcmaddcsh_maskz_er(self, op0, op1, op2);
36292    }
36293    /// `VFCMULCPH`.
36294    ///
36295    /// Supported operand variants:
36296    ///
36297    /// ```text
36298    /// +---+---------------+
36299    /// | # | Operands      |
36300    /// +---+---------------+
36301    /// | 1 | Xmm, Xmm, Mem |
36302    /// | 2 | Xmm, Xmm, Xmm |
36303    /// | 3 | Ymm, Ymm, Mem |
36304    /// | 4 | Ymm, Ymm, Ymm |
36305    /// | 5 | Zmm, Zmm, Mem |
36306    /// | 6 | Zmm, Zmm, Zmm |
36307    /// +---+---------------+
36308    /// ```
36309    #[inline]
36310    pub fn vfcmulcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36311    where Assembler<'a>: VfcmulcphEmitter<A, B, C> {
36312        <Self as VfcmulcphEmitter<A, B, C>>::vfcmulcph(self, op0, op1, op2);
36313    }
36314    /// `VFCMULCPH_ER`.
36315    ///
36316    /// Supported operand variants:
36317    ///
36318    /// ```text
36319    /// +---+---------------+
36320    /// | # | Operands      |
36321    /// +---+---------------+
36322    /// | 1 | Zmm, Zmm, Zmm |
36323    /// +---+---------------+
36324    /// ```
36325    #[inline]
36326    pub fn vfcmulcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36327    where Assembler<'a>: VfcmulcphErEmitter<A, B, C> {
36328        <Self as VfcmulcphErEmitter<A, B, C>>::vfcmulcph_er(self, op0, op1, op2);
36329    }
36330    /// `VFCMULCPH_MASK`.
36331    ///
36332    /// Supported operand variants:
36333    ///
36334    /// ```text
36335    /// +---+---------------+
36336    /// | # | Operands      |
36337    /// +---+---------------+
36338    /// | 1 | Xmm, Xmm, Mem |
36339    /// | 2 | Xmm, Xmm, Xmm |
36340    /// | 3 | Ymm, Ymm, Mem |
36341    /// | 4 | Ymm, Ymm, Ymm |
36342    /// | 5 | Zmm, Zmm, Mem |
36343    /// | 6 | Zmm, Zmm, Zmm |
36344    /// +---+---------------+
36345    /// ```
36346    #[inline]
36347    pub fn vfcmulcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36348    where Assembler<'a>: VfcmulcphMaskEmitter<A, B, C> {
36349        <Self as VfcmulcphMaskEmitter<A, B, C>>::vfcmulcph_mask(self, op0, op1, op2);
36350    }
36351    /// `VFCMULCPH_MASK_ER`.
36352    ///
36353    /// Supported operand variants:
36354    ///
36355    /// ```text
36356    /// +---+---------------+
36357    /// | # | Operands      |
36358    /// +---+---------------+
36359    /// | 1 | Zmm, Zmm, Zmm |
36360    /// +---+---------------+
36361    /// ```
36362    #[inline]
36363    pub fn vfcmulcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36364    where Assembler<'a>: VfcmulcphMaskErEmitter<A, B, C> {
36365        <Self as VfcmulcphMaskErEmitter<A, B, C>>::vfcmulcph_mask_er(self, op0, op1, op2);
36366    }
36367    /// `VFCMULCPH_MASKZ`.
36368    ///
36369    /// Supported operand variants:
36370    ///
36371    /// ```text
36372    /// +---+---------------+
36373    /// | # | Operands      |
36374    /// +---+---------------+
36375    /// | 1 | Xmm, Xmm, Mem |
36376    /// | 2 | Xmm, Xmm, Xmm |
36377    /// | 3 | Ymm, Ymm, Mem |
36378    /// | 4 | Ymm, Ymm, Ymm |
36379    /// | 5 | Zmm, Zmm, Mem |
36380    /// | 6 | Zmm, Zmm, Zmm |
36381    /// +---+---------------+
36382    /// ```
36383    #[inline]
36384    pub fn vfcmulcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36385    where Assembler<'a>: VfcmulcphMaskzEmitter<A, B, C> {
36386        <Self as VfcmulcphMaskzEmitter<A, B, C>>::vfcmulcph_maskz(self, op0, op1, op2);
36387    }
36388    /// `VFCMULCPH_MASKZ_ER`.
36389    ///
36390    /// Supported operand variants:
36391    ///
36392    /// ```text
36393    /// +---+---------------+
36394    /// | # | Operands      |
36395    /// +---+---------------+
36396    /// | 1 | Zmm, Zmm, Zmm |
36397    /// +---+---------------+
36398    /// ```
36399    #[inline]
36400    pub fn vfcmulcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36401    where Assembler<'a>: VfcmulcphMaskzErEmitter<A, B, C> {
36402        <Self as VfcmulcphMaskzErEmitter<A, B, C>>::vfcmulcph_maskz_er(self, op0, op1, op2);
36403    }
36404    /// `VFCMULCSH`.
36405    ///
36406    /// Supported operand variants:
36407    ///
36408    /// ```text
36409    /// +---+---------------+
36410    /// | # | Operands      |
36411    /// +---+---------------+
36412    /// | 1 | Xmm, Xmm, Mem |
36413    /// | 2 | Xmm, Xmm, Xmm |
36414    /// +---+---------------+
36415    /// ```
36416    #[inline]
36417    pub fn vfcmulcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36418    where Assembler<'a>: VfcmulcshEmitter<A, B, C> {
36419        <Self as VfcmulcshEmitter<A, B, C>>::vfcmulcsh(self, op0, op1, op2);
36420    }
36421    /// `VFCMULCSH_ER`.
36422    ///
36423    /// Supported operand variants:
36424    ///
36425    /// ```text
36426    /// +---+---------------+
36427    /// | # | Operands      |
36428    /// +---+---------------+
36429    /// | 1 | Xmm, Xmm, Xmm |
36430    /// +---+---------------+
36431    /// ```
36432    #[inline]
36433    pub fn vfcmulcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36434    where Assembler<'a>: VfcmulcshErEmitter<A, B, C> {
36435        <Self as VfcmulcshErEmitter<A, B, C>>::vfcmulcsh_er(self, op0, op1, op2);
36436    }
36437    /// `VFCMULCSH_MASK`.
36438    ///
36439    /// Supported operand variants:
36440    ///
36441    /// ```text
36442    /// +---+---------------+
36443    /// | # | Operands      |
36444    /// +---+---------------+
36445    /// | 1 | Xmm, Xmm, Mem |
36446    /// | 2 | Xmm, Xmm, Xmm |
36447    /// +---+---------------+
36448    /// ```
36449    #[inline]
36450    pub fn vfcmulcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36451    where Assembler<'a>: VfcmulcshMaskEmitter<A, B, C> {
36452        <Self as VfcmulcshMaskEmitter<A, B, C>>::vfcmulcsh_mask(self, op0, op1, op2);
36453    }
36454    /// `VFCMULCSH_MASK_ER`.
36455    ///
36456    /// Supported operand variants:
36457    ///
36458    /// ```text
36459    /// +---+---------------+
36460    /// | # | Operands      |
36461    /// +---+---------------+
36462    /// | 1 | Xmm, Xmm, Xmm |
36463    /// +---+---------------+
36464    /// ```
36465    #[inline]
36466    pub fn vfcmulcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36467    where Assembler<'a>: VfcmulcshMaskErEmitter<A, B, C> {
36468        <Self as VfcmulcshMaskErEmitter<A, B, C>>::vfcmulcsh_mask_er(self, op0, op1, op2);
36469    }
36470    /// `VFCMULCSH_MASKZ`.
36471    ///
36472    /// Supported operand variants:
36473    ///
36474    /// ```text
36475    /// +---+---------------+
36476    /// | # | Operands      |
36477    /// +---+---------------+
36478    /// | 1 | Xmm, Xmm, Mem |
36479    /// | 2 | Xmm, Xmm, Xmm |
36480    /// +---+---------------+
36481    /// ```
36482    #[inline]
36483    pub fn vfcmulcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36484    where Assembler<'a>: VfcmulcshMaskzEmitter<A, B, C> {
36485        <Self as VfcmulcshMaskzEmitter<A, B, C>>::vfcmulcsh_maskz(self, op0, op1, op2);
36486    }
36487    /// `VFCMULCSH_MASKZ_ER`.
36488    ///
36489    /// Supported operand variants:
36490    ///
36491    /// ```text
36492    /// +---+---------------+
36493    /// | # | Operands      |
36494    /// +---+---------------+
36495    /// | 1 | Xmm, Xmm, Xmm |
36496    /// +---+---------------+
36497    /// ```
36498    #[inline]
36499    pub fn vfcmulcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36500    where Assembler<'a>: VfcmulcshMaskzErEmitter<A, B, C> {
36501        <Self as VfcmulcshMaskzErEmitter<A, B, C>>::vfcmulcsh_maskz_er(self, op0, op1, op2);
36502    }
36503    /// `VFMADD132PH`.
36504    ///
36505    /// Supported operand variants:
36506    ///
36507    /// ```text
36508    /// +---+---------------+
36509    /// | # | Operands      |
36510    /// +---+---------------+
36511    /// | 1 | Xmm, Xmm, Mem |
36512    /// | 2 | Xmm, Xmm, Xmm |
36513    /// | 3 | Ymm, Ymm, Mem |
36514    /// | 4 | Ymm, Ymm, Ymm |
36515    /// | 5 | Zmm, Zmm, Mem |
36516    /// | 6 | Zmm, Zmm, Zmm |
36517    /// +---+---------------+
36518    /// ```
36519    #[inline]
36520    pub fn vfmadd132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36521    where Assembler<'a>: Vfmadd132phEmitter<A, B, C> {
36522        <Self as Vfmadd132phEmitter<A, B, C>>::vfmadd132ph(self, op0, op1, op2);
36523    }
36524    /// `VFMADD132PH_ER`.
36525    ///
36526    /// Supported operand variants:
36527    ///
36528    /// ```text
36529    /// +---+---------------+
36530    /// | # | Operands      |
36531    /// +---+---------------+
36532    /// | 1 | Zmm, Zmm, Zmm |
36533    /// +---+---------------+
36534    /// ```
36535    #[inline]
36536    pub fn vfmadd132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36537    where Assembler<'a>: Vfmadd132phErEmitter<A, B, C> {
36538        <Self as Vfmadd132phErEmitter<A, B, C>>::vfmadd132ph_er(self, op0, op1, op2);
36539    }
36540    /// `VFMADD132PH_MASK`.
36541    ///
36542    /// Supported operand variants:
36543    ///
36544    /// ```text
36545    /// +---+---------------+
36546    /// | # | Operands      |
36547    /// +---+---------------+
36548    /// | 1 | Xmm, Xmm, Mem |
36549    /// | 2 | Xmm, Xmm, Xmm |
36550    /// | 3 | Ymm, Ymm, Mem |
36551    /// | 4 | Ymm, Ymm, Ymm |
36552    /// | 5 | Zmm, Zmm, Mem |
36553    /// | 6 | Zmm, Zmm, Zmm |
36554    /// +---+---------------+
36555    /// ```
36556    #[inline]
36557    pub fn vfmadd132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36558    where Assembler<'a>: Vfmadd132phMaskEmitter<A, B, C> {
36559        <Self as Vfmadd132phMaskEmitter<A, B, C>>::vfmadd132ph_mask(self, op0, op1, op2);
36560    }
36561    /// `VFMADD132PH_MASK_ER`.
36562    ///
36563    /// Supported operand variants:
36564    ///
36565    /// ```text
36566    /// +---+---------------+
36567    /// | # | Operands      |
36568    /// +---+---------------+
36569    /// | 1 | Zmm, Zmm, Zmm |
36570    /// +---+---------------+
36571    /// ```
36572    #[inline]
36573    pub fn vfmadd132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36574    where Assembler<'a>: Vfmadd132phMaskErEmitter<A, B, C> {
36575        <Self as Vfmadd132phMaskErEmitter<A, B, C>>::vfmadd132ph_mask_er(self, op0, op1, op2);
36576    }
36577    /// `VFMADD132PH_MASKZ`.
36578    ///
36579    /// Supported operand variants:
36580    ///
36581    /// ```text
36582    /// +---+---------------+
36583    /// | # | Operands      |
36584    /// +---+---------------+
36585    /// | 1 | Xmm, Xmm, Mem |
36586    /// | 2 | Xmm, Xmm, Xmm |
36587    /// | 3 | Ymm, Ymm, Mem |
36588    /// | 4 | Ymm, Ymm, Ymm |
36589    /// | 5 | Zmm, Zmm, Mem |
36590    /// | 6 | Zmm, Zmm, Zmm |
36591    /// +---+---------------+
36592    /// ```
36593    #[inline]
36594    pub fn vfmadd132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36595    where Assembler<'a>: Vfmadd132phMaskzEmitter<A, B, C> {
36596        <Self as Vfmadd132phMaskzEmitter<A, B, C>>::vfmadd132ph_maskz(self, op0, op1, op2);
36597    }
36598    /// `VFMADD132PH_MASKZ_ER`.
36599    ///
36600    /// Supported operand variants:
36601    ///
36602    /// ```text
36603    /// +---+---------------+
36604    /// | # | Operands      |
36605    /// +---+---------------+
36606    /// | 1 | Zmm, Zmm, Zmm |
36607    /// +---+---------------+
36608    /// ```
36609    #[inline]
36610    pub fn vfmadd132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36611    where Assembler<'a>: Vfmadd132phMaskzErEmitter<A, B, C> {
36612        <Self as Vfmadd132phMaskzErEmitter<A, B, C>>::vfmadd132ph_maskz_er(self, op0, op1, op2);
36613    }
36614    /// `VFMADD132SH`.
36615    ///
36616    /// Supported operand variants:
36617    ///
36618    /// ```text
36619    /// +---+---------------+
36620    /// | # | Operands      |
36621    /// +---+---------------+
36622    /// | 1 | Xmm, Xmm, Mem |
36623    /// | 2 | Xmm, Xmm, Xmm |
36624    /// +---+---------------+
36625    /// ```
36626    #[inline]
36627    pub fn vfmadd132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36628    where Assembler<'a>: Vfmadd132shEmitter<A, B, C> {
36629        <Self as Vfmadd132shEmitter<A, B, C>>::vfmadd132sh(self, op0, op1, op2);
36630    }
36631    /// `VFMADD132SH_ER`.
36632    ///
36633    /// Supported operand variants:
36634    ///
36635    /// ```text
36636    /// +---+---------------+
36637    /// | # | Operands      |
36638    /// +---+---------------+
36639    /// | 1 | Xmm, Xmm, Xmm |
36640    /// +---+---------------+
36641    /// ```
36642    #[inline]
36643    pub fn vfmadd132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36644    where Assembler<'a>: Vfmadd132shErEmitter<A, B, C> {
36645        <Self as Vfmadd132shErEmitter<A, B, C>>::vfmadd132sh_er(self, op0, op1, op2);
36646    }
36647    /// `VFMADD132SH_MASK`.
36648    ///
36649    /// Supported operand variants:
36650    ///
36651    /// ```text
36652    /// +---+---------------+
36653    /// | # | Operands      |
36654    /// +---+---------------+
36655    /// | 1 | Xmm, Xmm, Mem |
36656    /// | 2 | Xmm, Xmm, Xmm |
36657    /// +---+---------------+
36658    /// ```
36659    #[inline]
36660    pub fn vfmadd132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36661    where Assembler<'a>: Vfmadd132shMaskEmitter<A, B, C> {
36662        <Self as Vfmadd132shMaskEmitter<A, B, C>>::vfmadd132sh_mask(self, op0, op1, op2);
36663    }
36664    /// `VFMADD132SH_MASK_ER`.
36665    ///
36666    /// Supported operand variants:
36667    ///
36668    /// ```text
36669    /// +---+---------------+
36670    /// | # | Operands      |
36671    /// +---+---------------+
36672    /// | 1 | Xmm, Xmm, Xmm |
36673    /// +---+---------------+
36674    /// ```
36675    #[inline]
36676    pub fn vfmadd132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36677    where Assembler<'a>: Vfmadd132shMaskErEmitter<A, B, C> {
36678        <Self as Vfmadd132shMaskErEmitter<A, B, C>>::vfmadd132sh_mask_er(self, op0, op1, op2);
36679    }
36680    /// `VFMADD132SH_MASKZ`.
36681    ///
36682    /// Supported operand variants:
36683    ///
36684    /// ```text
36685    /// +---+---------------+
36686    /// | # | Operands      |
36687    /// +---+---------------+
36688    /// | 1 | Xmm, Xmm, Mem |
36689    /// | 2 | Xmm, Xmm, Xmm |
36690    /// +---+---------------+
36691    /// ```
36692    #[inline]
36693    pub fn vfmadd132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36694    where Assembler<'a>: Vfmadd132shMaskzEmitter<A, B, C> {
36695        <Self as Vfmadd132shMaskzEmitter<A, B, C>>::vfmadd132sh_maskz(self, op0, op1, op2);
36696    }
36697    /// `VFMADD132SH_MASKZ_ER`.
36698    ///
36699    /// Supported operand variants:
36700    ///
36701    /// ```text
36702    /// +---+---------------+
36703    /// | # | Operands      |
36704    /// +---+---------------+
36705    /// | 1 | Xmm, Xmm, Xmm |
36706    /// +---+---------------+
36707    /// ```
36708    #[inline]
36709    pub fn vfmadd132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36710    where Assembler<'a>: Vfmadd132shMaskzErEmitter<A, B, C> {
36711        <Self as Vfmadd132shMaskzErEmitter<A, B, C>>::vfmadd132sh_maskz_er(self, op0, op1, op2);
36712    }
36713    /// `VFMADD213PH`.
36714    ///
36715    /// Supported operand variants:
36716    ///
36717    /// ```text
36718    /// +---+---------------+
36719    /// | # | Operands      |
36720    /// +---+---------------+
36721    /// | 1 | Xmm, Xmm, Mem |
36722    /// | 2 | Xmm, Xmm, Xmm |
36723    /// | 3 | Ymm, Ymm, Mem |
36724    /// | 4 | Ymm, Ymm, Ymm |
36725    /// | 5 | Zmm, Zmm, Mem |
36726    /// | 6 | Zmm, Zmm, Zmm |
36727    /// +---+---------------+
36728    /// ```
36729    #[inline]
36730    pub fn vfmadd213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36731    where Assembler<'a>: Vfmadd213phEmitter<A, B, C> {
36732        <Self as Vfmadd213phEmitter<A, B, C>>::vfmadd213ph(self, op0, op1, op2);
36733    }
36734    /// `VFMADD213PH_ER`.
36735    ///
36736    /// Supported operand variants:
36737    ///
36738    /// ```text
36739    /// +---+---------------+
36740    /// | # | Operands      |
36741    /// +---+---------------+
36742    /// | 1 | Zmm, Zmm, Zmm |
36743    /// +---+---------------+
36744    /// ```
36745    #[inline]
36746    pub fn vfmadd213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36747    where Assembler<'a>: Vfmadd213phErEmitter<A, B, C> {
36748        <Self as Vfmadd213phErEmitter<A, B, C>>::vfmadd213ph_er(self, op0, op1, op2);
36749    }
36750    /// `VFMADD213PH_MASK`.
36751    ///
36752    /// Supported operand variants:
36753    ///
36754    /// ```text
36755    /// +---+---------------+
36756    /// | # | Operands      |
36757    /// +---+---------------+
36758    /// | 1 | Xmm, Xmm, Mem |
36759    /// | 2 | Xmm, Xmm, Xmm |
36760    /// | 3 | Ymm, Ymm, Mem |
36761    /// | 4 | Ymm, Ymm, Ymm |
36762    /// | 5 | Zmm, Zmm, Mem |
36763    /// | 6 | Zmm, Zmm, Zmm |
36764    /// +---+---------------+
36765    /// ```
36766    #[inline]
36767    pub fn vfmadd213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36768    where Assembler<'a>: Vfmadd213phMaskEmitter<A, B, C> {
36769        <Self as Vfmadd213phMaskEmitter<A, B, C>>::vfmadd213ph_mask(self, op0, op1, op2);
36770    }
36771    /// `VFMADD213PH_MASK_ER`.
36772    ///
36773    /// Supported operand variants:
36774    ///
36775    /// ```text
36776    /// +---+---------------+
36777    /// | # | Operands      |
36778    /// +---+---------------+
36779    /// | 1 | Zmm, Zmm, Zmm |
36780    /// +---+---------------+
36781    /// ```
36782    #[inline]
36783    pub fn vfmadd213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36784    where Assembler<'a>: Vfmadd213phMaskErEmitter<A, B, C> {
36785        <Self as Vfmadd213phMaskErEmitter<A, B, C>>::vfmadd213ph_mask_er(self, op0, op1, op2);
36786    }
36787    /// `VFMADD213PH_MASKZ`.
36788    ///
36789    /// Supported operand variants:
36790    ///
36791    /// ```text
36792    /// +---+---------------+
36793    /// | # | Operands      |
36794    /// +---+---------------+
36795    /// | 1 | Xmm, Xmm, Mem |
36796    /// | 2 | Xmm, Xmm, Xmm |
36797    /// | 3 | Ymm, Ymm, Mem |
36798    /// | 4 | Ymm, Ymm, Ymm |
36799    /// | 5 | Zmm, Zmm, Mem |
36800    /// | 6 | Zmm, Zmm, Zmm |
36801    /// +---+---------------+
36802    /// ```
36803    #[inline]
36804    pub fn vfmadd213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36805    where Assembler<'a>: Vfmadd213phMaskzEmitter<A, B, C> {
36806        <Self as Vfmadd213phMaskzEmitter<A, B, C>>::vfmadd213ph_maskz(self, op0, op1, op2);
36807    }
36808    /// `VFMADD213PH_MASKZ_ER`.
36809    ///
36810    /// Supported operand variants:
36811    ///
36812    /// ```text
36813    /// +---+---------------+
36814    /// | # | Operands      |
36815    /// +---+---------------+
36816    /// | 1 | Zmm, Zmm, Zmm |
36817    /// +---+---------------+
36818    /// ```
36819    #[inline]
36820    pub fn vfmadd213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36821    where Assembler<'a>: Vfmadd213phMaskzErEmitter<A, B, C> {
36822        <Self as Vfmadd213phMaskzErEmitter<A, B, C>>::vfmadd213ph_maskz_er(self, op0, op1, op2);
36823    }
36824    /// `VFMADD213SH`.
36825    ///
36826    /// Supported operand variants:
36827    ///
36828    /// ```text
36829    /// +---+---------------+
36830    /// | # | Operands      |
36831    /// +---+---------------+
36832    /// | 1 | Xmm, Xmm, Mem |
36833    /// | 2 | Xmm, Xmm, Xmm |
36834    /// +---+---------------+
36835    /// ```
36836    #[inline]
36837    pub fn vfmadd213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36838    where Assembler<'a>: Vfmadd213shEmitter<A, B, C> {
36839        <Self as Vfmadd213shEmitter<A, B, C>>::vfmadd213sh(self, op0, op1, op2);
36840    }
36841    /// `VFMADD213SH_ER`.
36842    ///
36843    /// Supported operand variants:
36844    ///
36845    /// ```text
36846    /// +---+---------------+
36847    /// | # | Operands      |
36848    /// +---+---------------+
36849    /// | 1 | Xmm, Xmm, Xmm |
36850    /// +---+---------------+
36851    /// ```
36852    #[inline]
36853    pub fn vfmadd213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36854    where Assembler<'a>: Vfmadd213shErEmitter<A, B, C> {
36855        <Self as Vfmadd213shErEmitter<A, B, C>>::vfmadd213sh_er(self, op0, op1, op2);
36856    }
36857    /// `VFMADD213SH_MASK`.
36858    ///
36859    /// Supported operand variants:
36860    ///
36861    /// ```text
36862    /// +---+---------------+
36863    /// | # | Operands      |
36864    /// +---+---------------+
36865    /// | 1 | Xmm, Xmm, Mem |
36866    /// | 2 | Xmm, Xmm, Xmm |
36867    /// +---+---------------+
36868    /// ```
36869    #[inline]
36870    pub fn vfmadd213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36871    where Assembler<'a>: Vfmadd213shMaskEmitter<A, B, C> {
36872        <Self as Vfmadd213shMaskEmitter<A, B, C>>::vfmadd213sh_mask(self, op0, op1, op2);
36873    }
36874    /// `VFMADD213SH_MASK_ER`.
36875    ///
36876    /// Supported operand variants:
36877    ///
36878    /// ```text
36879    /// +---+---------------+
36880    /// | # | Operands      |
36881    /// +---+---------------+
36882    /// | 1 | Xmm, Xmm, Xmm |
36883    /// +---+---------------+
36884    /// ```
36885    #[inline]
36886    pub fn vfmadd213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36887    where Assembler<'a>: Vfmadd213shMaskErEmitter<A, B, C> {
36888        <Self as Vfmadd213shMaskErEmitter<A, B, C>>::vfmadd213sh_mask_er(self, op0, op1, op2);
36889    }
36890    /// `VFMADD213SH_MASKZ`.
36891    ///
36892    /// Supported operand variants:
36893    ///
36894    /// ```text
36895    /// +---+---------------+
36896    /// | # | Operands      |
36897    /// +---+---------------+
36898    /// | 1 | Xmm, Xmm, Mem |
36899    /// | 2 | Xmm, Xmm, Xmm |
36900    /// +---+---------------+
36901    /// ```
36902    #[inline]
36903    pub fn vfmadd213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36904    where Assembler<'a>: Vfmadd213shMaskzEmitter<A, B, C> {
36905        <Self as Vfmadd213shMaskzEmitter<A, B, C>>::vfmadd213sh_maskz(self, op0, op1, op2);
36906    }
36907    /// `VFMADD213SH_MASKZ_ER`.
36908    ///
36909    /// Supported operand variants:
36910    ///
36911    /// ```text
36912    /// +---+---------------+
36913    /// | # | Operands      |
36914    /// +---+---------------+
36915    /// | 1 | Xmm, Xmm, Xmm |
36916    /// +---+---------------+
36917    /// ```
36918    #[inline]
36919    pub fn vfmadd213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36920    where Assembler<'a>: Vfmadd213shMaskzErEmitter<A, B, C> {
36921        <Self as Vfmadd213shMaskzErEmitter<A, B, C>>::vfmadd213sh_maskz_er(self, op0, op1, op2);
36922    }
36923    /// `VFMADD231PH`.
36924    ///
36925    /// Supported operand variants:
36926    ///
36927    /// ```text
36928    /// +---+---------------+
36929    /// | # | Operands      |
36930    /// +---+---------------+
36931    /// | 1 | Xmm, Xmm, Mem |
36932    /// | 2 | Xmm, Xmm, Xmm |
36933    /// | 3 | Ymm, Ymm, Mem |
36934    /// | 4 | Ymm, Ymm, Ymm |
36935    /// | 5 | Zmm, Zmm, Mem |
36936    /// | 6 | Zmm, Zmm, Zmm |
36937    /// +---+---------------+
36938    /// ```
36939    #[inline]
36940    pub fn vfmadd231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36941    where Assembler<'a>: Vfmadd231phEmitter<A, B, C> {
36942        <Self as Vfmadd231phEmitter<A, B, C>>::vfmadd231ph(self, op0, op1, op2);
36943    }
36944    /// `VFMADD231PH_ER`.
36945    ///
36946    /// Supported operand variants:
36947    ///
36948    /// ```text
36949    /// +---+---------------+
36950    /// | # | Operands      |
36951    /// +---+---------------+
36952    /// | 1 | Zmm, Zmm, Zmm |
36953    /// +---+---------------+
36954    /// ```
36955    #[inline]
36956    pub fn vfmadd231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36957    where Assembler<'a>: Vfmadd231phErEmitter<A, B, C> {
36958        <Self as Vfmadd231phErEmitter<A, B, C>>::vfmadd231ph_er(self, op0, op1, op2);
36959    }
36960    /// `VFMADD231PH_MASK`.
36961    ///
36962    /// Supported operand variants:
36963    ///
36964    /// ```text
36965    /// +---+---------------+
36966    /// | # | Operands      |
36967    /// +---+---------------+
36968    /// | 1 | Xmm, Xmm, Mem |
36969    /// | 2 | Xmm, Xmm, Xmm |
36970    /// | 3 | Ymm, Ymm, Mem |
36971    /// | 4 | Ymm, Ymm, Ymm |
36972    /// | 5 | Zmm, Zmm, Mem |
36973    /// | 6 | Zmm, Zmm, Zmm |
36974    /// +---+---------------+
36975    /// ```
36976    #[inline]
36977    pub fn vfmadd231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36978    where Assembler<'a>: Vfmadd231phMaskEmitter<A, B, C> {
36979        <Self as Vfmadd231phMaskEmitter<A, B, C>>::vfmadd231ph_mask(self, op0, op1, op2);
36980    }
36981    /// `VFMADD231PH_MASK_ER`.
36982    ///
36983    /// Supported operand variants:
36984    ///
36985    /// ```text
36986    /// +---+---------------+
36987    /// | # | Operands      |
36988    /// +---+---------------+
36989    /// | 1 | Zmm, Zmm, Zmm |
36990    /// +---+---------------+
36991    /// ```
36992    #[inline]
36993    pub fn vfmadd231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
36994    where Assembler<'a>: Vfmadd231phMaskErEmitter<A, B, C> {
36995        <Self as Vfmadd231phMaskErEmitter<A, B, C>>::vfmadd231ph_mask_er(self, op0, op1, op2);
36996    }
36997    /// `VFMADD231PH_MASKZ`.
36998    ///
36999    /// Supported operand variants:
37000    ///
37001    /// ```text
37002    /// +---+---------------+
37003    /// | # | Operands      |
37004    /// +---+---------------+
37005    /// | 1 | Xmm, Xmm, Mem |
37006    /// | 2 | Xmm, Xmm, Xmm |
37007    /// | 3 | Ymm, Ymm, Mem |
37008    /// | 4 | Ymm, Ymm, Ymm |
37009    /// | 5 | Zmm, Zmm, Mem |
37010    /// | 6 | Zmm, Zmm, Zmm |
37011    /// +---+---------------+
37012    /// ```
37013    #[inline]
37014    pub fn vfmadd231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37015    where Assembler<'a>: Vfmadd231phMaskzEmitter<A, B, C> {
37016        <Self as Vfmadd231phMaskzEmitter<A, B, C>>::vfmadd231ph_maskz(self, op0, op1, op2);
37017    }
37018    /// `VFMADD231PH_MASKZ_ER`.
37019    ///
37020    /// Supported operand variants:
37021    ///
37022    /// ```text
37023    /// +---+---------------+
37024    /// | # | Operands      |
37025    /// +---+---------------+
37026    /// | 1 | Zmm, Zmm, Zmm |
37027    /// +---+---------------+
37028    /// ```
37029    #[inline]
37030    pub fn vfmadd231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37031    where Assembler<'a>: Vfmadd231phMaskzErEmitter<A, B, C> {
37032        <Self as Vfmadd231phMaskzErEmitter<A, B, C>>::vfmadd231ph_maskz_er(self, op0, op1, op2);
37033    }
37034    /// `VFMADD231SH`.
37035    ///
37036    /// Supported operand variants:
37037    ///
37038    /// ```text
37039    /// +---+---------------+
37040    /// | # | Operands      |
37041    /// +---+---------------+
37042    /// | 1 | Xmm, Xmm, Mem |
37043    /// | 2 | Xmm, Xmm, Xmm |
37044    /// +---+---------------+
37045    /// ```
37046    #[inline]
37047    pub fn vfmadd231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37048    where Assembler<'a>: Vfmadd231shEmitter<A, B, C> {
37049        <Self as Vfmadd231shEmitter<A, B, C>>::vfmadd231sh(self, op0, op1, op2);
37050    }
37051    /// `VFMADD231SH_ER`.
37052    ///
37053    /// Supported operand variants:
37054    ///
37055    /// ```text
37056    /// +---+---------------+
37057    /// | # | Operands      |
37058    /// +---+---------------+
37059    /// | 1 | Xmm, Xmm, Xmm |
37060    /// +---+---------------+
37061    /// ```
37062    #[inline]
37063    pub fn vfmadd231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37064    where Assembler<'a>: Vfmadd231shErEmitter<A, B, C> {
37065        <Self as Vfmadd231shErEmitter<A, B, C>>::vfmadd231sh_er(self, op0, op1, op2);
37066    }
37067    /// `VFMADD231SH_MASK`.
37068    ///
37069    /// Supported operand variants:
37070    ///
37071    /// ```text
37072    /// +---+---------------+
37073    /// | # | Operands      |
37074    /// +---+---------------+
37075    /// | 1 | Xmm, Xmm, Mem |
37076    /// | 2 | Xmm, Xmm, Xmm |
37077    /// +---+---------------+
37078    /// ```
37079    #[inline]
37080    pub fn vfmadd231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37081    where Assembler<'a>: Vfmadd231shMaskEmitter<A, B, C> {
37082        <Self as Vfmadd231shMaskEmitter<A, B, C>>::vfmadd231sh_mask(self, op0, op1, op2);
37083    }
37084    /// `VFMADD231SH_MASK_ER`.
37085    ///
37086    /// Supported operand variants:
37087    ///
37088    /// ```text
37089    /// +---+---------------+
37090    /// | # | Operands      |
37091    /// +---+---------------+
37092    /// | 1 | Xmm, Xmm, Xmm |
37093    /// +---+---------------+
37094    /// ```
37095    #[inline]
37096    pub fn vfmadd231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37097    where Assembler<'a>: Vfmadd231shMaskErEmitter<A, B, C> {
37098        <Self as Vfmadd231shMaskErEmitter<A, B, C>>::vfmadd231sh_mask_er(self, op0, op1, op2);
37099    }
37100    /// `VFMADD231SH_MASKZ`.
37101    ///
37102    /// Supported operand variants:
37103    ///
37104    /// ```text
37105    /// +---+---------------+
37106    /// | # | Operands      |
37107    /// +---+---------------+
37108    /// | 1 | Xmm, Xmm, Mem |
37109    /// | 2 | Xmm, Xmm, Xmm |
37110    /// +---+---------------+
37111    /// ```
37112    #[inline]
37113    pub fn vfmadd231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37114    where Assembler<'a>: Vfmadd231shMaskzEmitter<A, B, C> {
37115        <Self as Vfmadd231shMaskzEmitter<A, B, C>>::vfmadd231sh_maskz(self, op0, op1, op2);
37116    }
37117    /// `VFMADD231SH_MASKZ_ER`.
37118    ///
37119    /// Supported operand variants:
37120    ///
37121    /// ```text
37122    /// +---+---------------+
37123    /// | # | Operands      |
37124    /// +---+---------------+
37125    /// | 1 | Xmm, Xmm, Xmm |
37126    /// +---+---------------+
37127    /// ```
37128    #[inline]
37129    pub fn vfmadd231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37130    where Assembler<'a>: Vfmadd231shMaskzErEmitter<A, B, C> {
37131        <Self as Vfmadd231shMaskzErEmitter<A, B, C>>::vfmadd231sh_maskz_er(self, op0, op1, op2);
37132    }
37133    /// `VFMADDCPH`.
37134    ///
37135    /// Supported operand variants:
37136    ///
37137    /// ```text
37138    /// +---+---------------+
37139    /// | # | Operands      |
37140    /// +---+---------------+
37141    /// | 1 | Xmm, Xmm, Mem |
37142    /// | 2 | Xmm, Xmm, Xmm |
37143    /// | 3 | Ymm, Ymm, Mem |
37144    /// | 4 | Ymm, Ymm, Ymm |
37145    /// | 5 | Zmm, Zmm, Mem |
37146    /// | 6 | Zmm, Zmm, Zmm |
37147    /// +---+---------------+
37148    /// ```
37149    #[inline]
37150    pub fn vfmaddcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37151    where Assembler<'a>: VfmaddcphEmitter<A, B, C> {
37152        <Self as VfmaddcphEmitter<A, B, C>>::vfmaddcph(self, op0, op1, op2);
37153    }
37154    /// `VFMADDCPH_ER`.
37155    ///
37156    /// Supported operand variants:
37157    ///
37158    /// ```text
37159    /// +---+---------------+
37160    /// | # | Operands      |
37161    /// +---+---------------+
37162    /// | 1 | Zmm, Zmm, Zmm |
37163    /// +---+---------------+
37164    /// ```
37165    #[inline]
37166    pub fn vfmaddcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37167    where Assembler<'a>: VfmaddcphErEmitter<A, B, C> {
37168        <Self as VfmaddcphErEmitter<A, B, C>>::vfmaddcph_er(self, op0, op1, op2);
37169    }
37170    /// `VFMADDCPH_MASK`.
37171    ///
37172    /// Supported operand variants:
37173    ///
37174    /// ```text
37175    /// +---+---------------+
37176    /// | # | Operands      |
37177    /// +---+---------------+
37178    /// | 1 | Xmm, Xmm, Mem |
37179    /// | 2 | Xmm, Xmm, Xmm |
37180    /// | 3 | Ymm, Ymm, Mem |
37181    /// | 4 | Ymm, Ymm, Ymm |
37182    /// | 5 | Zmm, Zmm, Mem |
37183    /// | 6 | Zmm, Zmm, Zmm |
37184    /// +---+---------------+
37185    /// ```
37186    #[inline]
37187    pub fn vfmaddcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37188    where Assembler<'a>: VfmaddcphMaskEmitter<A, B, C> {
37189        <Self as VfmaddcphMaskEmitter<A, B, C>>::vfmaddcph_mask(self, op0, op1, op2);
37190    }
37191    /// `VFMADDCPH_MASK_ER`.
37192    ///
37193    /// Supported operand variants:
37194    ///
37195    /// ```text
37196    /// +---+---------------+
37197    /// | # | Operands      |
37198    /// +---+---------------+
37199    /// | 1 | Zmm, Zmm, Zmm |
37200    /// +---+---------------+
37201    /// ```
37202    #[inline]
37203    pub fn vfmaddcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37204    where Assembler<'a>: VfmaddcphMaskErEmitter<A, B, C> {
37205        <Self as VfmaddcphMaskErEmitter<A, B, C>>::vfmaddcph_mask_er(self, op0, op1, op2);
37206    }
37207    /// `VFMADDCPH_MASKZ`.
37208    ///
37209    /// Supported operand variants:
37210    ///
37211    /// ```text
37212    /// +---+---------------+
37213    /// | # | Operands      |
37214    /// +---+---------------+
37215    /// | 1 | Xmm, Xmm, Mem |
37216    /// | 2 | Xmm, Xmm, Xmm |
37217    /// | 3 | Ymm, Ymm, Mem |
37218    /// | 4 | Ymm, Ymm, Ymm |
37219    /// | 5 | Zmm, Zmm, Mem |
37220    /// | 6 | Zmm, Zmm, Zmm |
37221    /// +---+---------------+
37222    /// ```
37223    #[inline]
37224    pub fn vfmaddcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37225    where Assembler<'a>: VfmaddcphMaskzEmitter<A, B, C> {
37226        <Self as VfmaddcphMaskzEmitter<A, B, C>>::vfmaddcph_maskz(self, op0, op1, op2);
37227    }
37228    /// `VFMADDCPH_MASKZ_ER`.
37229    ///
37230    /// Supported operand variants:
37231    ///
37232    /// ```text
37233    /// +---+---------------+
37234    /// | # | Operands      |
37235    /// +---+---------------+
37236    /// | 1 | Zmm, Zmm, Zmm |
37237    /// +---+---------------+
37238    /// ```
37239    #[inline]
37240    pub fn vfmaddcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37241    where Assembler<'a>: VfmaddcphMaskzErEmitter<A, B, C> {
37242        <Self as VfmaddcphMaskzErEmitter<A, B, C>>::vfmaddcph_maskz_er(self, op0, op1, op2);
37243    }
37244    /// `VFMADDCSH`.
37245    ///
37246    /// Supported operand variants:
37247    ///
37248    /// ```text
37249    /// +---+---------------+
37250    /// | # | Operands      |
37251    /// +---+---------------+
37252    /// | 1 | Xmm, Xmm, Mem |
37253    /// | 2 | Xmm, Xmm, Xmm |
37254    /// +---+---------------+
37255    /// ```
37256    #[inline]
37257    pub fn vfmaddcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37258    where Assembler<'a>: VfmaddcshEmitter<A, B, C> {
37259        <Self as VfmaddcshEmitter<A, B, C>>::vfmaddcsh(self, op0, op1, op2);
37260    }
37261    /// `VFMADDCSH_ER`.
37262    ///
37263    /// Supported operand variants:
37264    ///
37265    /// ```text
37266    /// +---+---------------+
37267    /// | # | Operands      |
37268    /// +---+---------------+
37269    /// | 1 | Xmm, Xmm, Xmm |
37270    /// +---+---------------+
37271    /// ```
37272    #[inline]
37273    pub fn vfmaddcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37274    where Assembler<'a>: VfmaddcshErEmitter<A, B, C> {
37275        <Self as VfmaddcshErEmitter<A, B, C>>::vfmaddcsh_er(self, op0, op1, op2);
37276    }
37277    /// `VFMADDCSH_MASK`.
37278    ///
37279    /// Supported operand variants:
37280    ///
37281    /// ```text
37282    /// +---+---------------+
37283    /// | # | Operands      |
37284    /// +---+---------------+
37285    /// | 1 | Xmm, Xmm, Mem |
37286    /// | 2 | Xmm, Xmm, Xmm |
37287    /// +---+---------------+
37288    /// ```
37289    #[inline]
37290    pub fn vfmaddcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37291    where Assembler<'a>: VfmaddcshMaskEmitter<A, B, C> {
37292        <Self as VfmaddcshMaskEmitter<A, B, C>>::vfmaddcsh_mask(self, op0, op1, op2);
37293    }
37294    /// `VFMADDCSH_MASK_ER`.
37295    ///
37296    /// Supported operand variants:
37297    ///
37298    /// ```text
37299    /// +---+---------------+
37300    /// | # | Operands      |
37301    /// +---+---------------+
37302    /// | 1 | Xmm, Xmm, Xmm |
37303    /// +---+---------------+
37304    /// ```
37305    #[inline]
37306    pub fn vfmaddcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37307    where Assembler<'a>: VfmaddcshMaskErEmitter<A, B, C> {
37308        <Self as VfmaddcshMaskErEmitter<A, B, C>>::vfmaddcsh_mask_er(self, op0, op1, op2);
37309    }
37310    /// `VFMADDCSH_MASKZ`.
37311    ///
37312    /// Supported operand variants:
37313    ///
37314    /// ```text
37315    /// +---+---------------+
37316    /// | # | Operands      |
37317    /// +---+---------------+
37318    /// | 1 | Xmm, Xmm, Mem |
37319    /// | 2 | Xmm, Xmm, Xmm |
37320    /// +---+---------------+
37321    /// ```
37322    #[inline]
37323    pub fn vfmaddcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37324    where Assembler<'a>: VfmaddcshMaskzEmitter<A, B, C> {
37325        <Self as VfmaddcshMaskzEmitter<A, B, C>>::vfmaddcsh_maskz(self, op0, op1, op2);
37326    }
37327    /// `VFMADDCSH_MASKZ_ER`.
37328    ///
37329    /// Supported operand variants:
37330    ///
37331    /// ```text
37332    /// +---+---------------+
37333    /// | # | Operands      |
37334    /// +---+---------------+
37335    /// | 1 | Xmm, Xmm, Xmm |
37336    /// +---+---------------+
37337    /// ```
37338    #[inline]
37339    pub fn vfmaddcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37340    where Assembler<'a>: VfmaddcshMaskzErEmitter<A, B, C> {
37341        <Self as VfmaddcshMaskzErEmitter<A, B, C>>::vfmaddcsh_maskz_er(self, op0, op1, op2);
37342    }
37343    /// `VFMADDSUB132PH`.
37344    ///
37345    /// Supported operand variants:
37346    ///
37347    /// ```text
37348    /// +---+---------------+
37349    /// | # | Operands      |
37350    /// +---+---------------+
37351    /// | 1 | Xmm, Xmm, Mem |
37352    /// | 2 | Xmm, Xmm, Xmm |
37353    /// | 3 | Ymm, Ymm, Mem |
37354    /// | 4 | Ymm, Ymm, Ymm |
37355    /// | 5 | Zmm, Zmm, Mem |
37356    /// | 6 | Zmm, Zmm, Zmm |
37357    /// +---+---------------+
37358    /// ```
37359    #[inline]
37360    pub fn vfmaddsub132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37361    where Assembler<'a>: Vfmaddsub132phEmitter<A, B, C> {
37362        <Self as Vfmaddsub132phEmitter<A, B, C>>::vfmaddsub132ph(self, op0, op1, op2);
37363    }
37364    /// `VFMADDSUB132PH_ER`.
37365    ///
37366    /// Supported operand variants:
37367    ///
37368    /// ```text
37369    /// +---+---------------+
37370    /// | # | Operands      |
37371    /// +---+---------------+
37372    /// | 1 | Zmm, Zmm, Zmm |
37373    /// +---+---------------+
37374    /// ```
37375    #[inline]
37376    pub fn vfmaddsub132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37377    where Assembler<'a>: Vfmaddsub132phErEmitter<A, B, C> {
37378        <Self as Vfmaddsub132phErEmitter<A, B, C>>::vfmaddsub132ph_er(self, op0, op1, op2);
37379    }
37380    /// `VFMADDSUB132PH_MASK`.
37381    ///
37382    /// Supported operand variants:
37383    ///
37384    /// ```text
37385    /// +---+---------------+
37386    /// | # | Operands      |
37387    /// +---+---------------+
37388    /// | 1 | Xmm, Xmm, Mem |
37389    /// | 2 | Xmm, Xmm, Xmm |
37390    /// | 3 | Ymm, Ymm, Mem |
37391    /// | 4 | Ymm, Ymm, Ymm |
37392    /// | 5 | Zmm, Zmm, Mem |
37393    /// | 6 | Zmm, Zmm, Zmm |
37394    /// +---+---------------+
37395    /// ```
37396    #[inline]
37397    pub fn vfmaddsub132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37398    where Assembler<'a>: Vfmaddsub132phMaskEmitter<A, B, C> {
37399        <Self as Vfmaddsub132phMaskEmitter<A, B, C>>::vfmaddsub132ph_mask(self, op0, op1, op2);
37400    }
37401    /// `VFMADDSUB132PH_MASK_ER`.
37402    ///
37403    /// Supported operand variants:
37404    ///
37405    /// ```text
37406    /// +---+---------------+
37407    /// | # | Operands      |
37408    /// +---+---------------+
37409    /// | 1 | Zmm, Zmm, Zmm |
37410    /// +---+---------------+
37411    /// ```
37412    #[inline]
37413    pub fn vfmaddsub132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37414    where Assembler<'a>: Vfmaddsub132phMaskErEmitter<A, B, C> {
37415        <Self as Vfmaddsub132phMaskErEmitter<A, B, C>>::vfmaddsub132ph_mask_er(self, op0, op1, op2);
37416    }
37417    /// `VFMADDSUB132PH_MASKZ`.
37418    ///
37419    /// Supported operand variants:
37420    ///
37421    /// ```text
37422    /// +---+---------------+
37423    /// | # | Operands      |
37424    /// +---+---------------+
37425    /// | 1 | Xmm, Xmm, Mem |
37426    /// | 2 | Xmm, Xmm, Xmm |
37427    /// | 3 | Ymm, Ymm, Mem |
37428    /// | 4 | Ymm, Ymm, Ymm |
37429    /// | 5 | Zmm, Zmm, Mem |
37430    /// | 6 | Zmm, Zmm, Zmm |
37431    /// +---+---------------+
37432    /// ```
37433    #[inline]
37434    pub fn vfmaddsub132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37435    where Assembler<'a>: Vfmaddsub132phMaskzEmitter<A, B, C> {
37436        <Self as Vfmaddsub132phMaskzEmitter<A, B, C>>::vfmaddsub132ph_maskz(self, op0, op1, op2);
37437    }
37438    /// `VFMADDSUB132PH_MASKZ_ER`.
37439    ///
37440    /// Supported operand variants:
37441    ///
37442    /// ```text
37443    /// +---+---------------+
37444    /// | # | Operands      |
37445    /// +---+---------------+
37446    /// | 1 | Zmm, Zmm, Zmm |
37447    /// +---+---------------+
37448    /// ```
37449    #[inline]
37450    pub fn vfmaddsub132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37451    where Assembler<'a>: Vfmaddsub132phMaskzErEmitter<A, B, C> {
37452        <Self as Vfmaddsub132phMaskzErEmitter<A, B, C>>::vfmaddsub132ph_maskz_er(self, op0, op1, op2);
37453    }
37454    /// `VFMADDSUB213PH`.
37455    ///
37456    /// Supported operand variants:
37457    ///
37458    /// ```text
37459    /// +---+---------------+
37460    /// | # | Operands      |
37461    /// +---+---------------+
37462    /// | 1 | Xmm, Xmm, Mem |
37463    /// | 2 | Xmm, Xmm, Xmm |
37464    /// | 3 | Ymm, Ymm, Mem |
37465    /// | 4 | Ymm, Ymm, Ymm |
37466    /// | 5 | Zmm, Zmm, Mem |
37467    /// | 6 | Zmm, Zmm, Zmm |
37468    /// +---+---------------+
37469    /// ```
37470    #[inline]
37471    pub fn vfmaddsub213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37472    where Assembler<'a>: Vfmaddsub213phEmitter<A, B, C> {
37473        <Self as Vfmaddsub213phEmitter<A, B, C>>::vfmaddsub213ph(self, op0, op1, op2);
37474    }
37475    /// `VFMADDSUB213PH_ER`.
37476    ///
37477    /// Supported operand variants:
37478    ///
37479    /// ```text
37480    /// +---+---------------+
37481    /// | # | Operands      |
37482    /// +---+---------------+
37483    /// | 1 | Zmm, Zmm, Zmm |
37484    /// +---+---------------+
37485    /// ```
37486    #[inline]
37487    pub fn vfmaddsub213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37488    where Assembler<'a>: Vfmaddsub213phErEmitter<A, B, C> {
37489        <Self as Vfmaddsub213phErEmitter<A, B, C>>::vfmaddsub213ph_er(self, op0, op1, op2);
37490    }
37491    /// `VFMADDSUB213PH_MASK`.
37492    ///
37493    /// Supported operand variants:
37494    ///
37495    /// ```text
37496    /// +---+---------------+
37497    /// | # | Operands      |
37498    /// +---+---------------+
37499    /// | 1 | Xmm, Xmm, Mem |
37500    /// | 2 | Xmm, Xmm, Xmm |
37501    /// | 3 | Ymm, Ymm, Mem |
37502    /// | 4 | Ymm, Ymm, Ymm |
37503    /// | 5 | Zmm, Zmm, Mem |
37504    /// | 6 | Zmm, Zmm, Zmm |
37505    /// +---+---------------+
37506    /// ```
37507    #[inline]
37508    pub fn vfmaddsub213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37509    where Assembler<'a>: Vfmaddsub213phMaskEmitter<A, B, C> {
37510        <Self as Vfmaddsub213phMaskEmitter<A, B, C>>::vfmaddsub213ph_mask(self, op0, op1, op2);
37511    }
37512    /// `VFMADDSUB213PH_MASK_ER`.
37513    ///
37514    /// Supported operand variants:
37515    ///
37516    /// ```text
37517    /// +---+---------------+
37518    /// | # | Operands      |
37519    /// +---+---------------+
37520    /// | 1 | Zmm, Zmm, Zmm |
37521    /// +---+---------------+
37522    /// ```
37523    #[inline]
37524    pub fn vfmaddsub213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37525    where Assembler<'a>: Vfmaddsub213phMaskErEmitter<A, B, C> {
37526        <Self as Vfmaddsub213phMaskErEmitter<A, B, C>>::vfmaddsub213ph_mask_er(self, op0, op1, op2);
37527    }
37528    /// `VFMADDSUB213PH_MASKZ`.
37529    ///
37530    /// Supported operand variants:
37531    ///
37532    /// ```text
37533    /// +---+---------------+
37534    /// | # | Operands      |
37535    /// +---+---------------+
37536    /// | 1 | Xmm, Xmm, Mem |
37537    /// | 2 | Xmm, Xmm, Xmm |
37538    /// | 3 | Ymm, Ymm, Mem |
37539    /// | 4 | Ymm, Ymm, Ymm |
37540    /// | 5 | Zmm, Zmm, Mem |
37541    /// | 6 | Zmm, Zmm, Zmm |
37542    /// +---+---------------+
37543    /// ```
37544    #[inline]
37545    pub fn vfmaddsub213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37546    where Assembler<'a>: Vfmaddsub213phMaskzEmitter<A, B, C> {
37547        <Self as Vfmaddsub213phMaskzEmitter<A, B, C>>::vfmaddsub213ph_maskz(self, op0, op1, op2);
37548    }
37549    /// `VFMADDSUB213PH_MASKZ_ER`.
37550    ///
37551    /// Supported operand variants:
37552    ///
37553    /// ```text
37554    /// +---+---------------+
37555    /// | # | Operands      |
37556    /// +---+---------------+
37557    /// | 1 | Zmm, Zmm, Zmm |
37558    /// +---+---------------+
37559    /// ```
37560    #[inline]
37561    pub fn vfmaddsub213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37562    where Assembler<'a>: Vfmaddsub213phMaskzErEmitter<A, B, C> {
37563        <Self as Vfmaddsub213phMaskzErEmitter<A, B, C>>::vfmaddsub213ph_maskz_er(self, op0, op1, op2);
37564    }
37565    /// `VFMADDSUB231PH`.
37566    ///
37567    /// Supported operand variants:
37568    ///
37569    /// ```text
37570    /// +---+---------------+
37571    /// | # | Operands      |
37572    /// +---+---------------+
37573    /// | 1 | Xmm, Xmm, Mem |
37574    /// | 2 | Xmm, Xmm, Xmm |
37575    /// | 3 | Ymm, Ymm, Mem |
37576    /// | 4 | Ymm, Ymm, Ymm |
37577    /// | 5 | Zmm, Zmm, Mem |
37578    /// | 6 | Zmm, Zmm, Zmm |
37579    /// +---+---------------+
37580    /// ```
37581    #[inline]
37582    pub fn vfmaddsub231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37583    where Assembler<'a>: Vfmaddsub231phEmitter<A, B, C> {
37584        <Self as Vfmaddsub231phEmitter<A, B, C>>::vfmaddsub231ph(self, op0, op1, op2);
37585    }
37586    /// `VFMADDSUB231PH_ER`.
37587    ///
37588    /// Supported operand variants:
37589    ///
37590    /// ```text
37591    /// +---+---------------+
37592    /// | # | Operands      |
37593    /// +---+---------------+
37594    /// | 1 | Zmm, Zmm, Zmm |
37595    /// +---+---------------+
37596    /// ```
37597    #[inline]
37598    pub fn vfmaddsub231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37599    where Assembler<'a>: Vfmaddsub231phErEmitter<A, B, C> {
37600        <Self as Vfmaddsub231phErEmitter<A, B, C>>::vfmaddsub231ph_er(self, op0, op1, op2);
37601    }
37602    /// `VFMADDSUB231PH_MASK`.
37603    ///
37604    /// Supported operand variants:
37605    ///
37606    /// ```text
37607    /// +---+---------------+
37608    /// | # | Operands      |
37609    /// +---+---------------+
37610    /// | 1 | Xmm, Xmm, Mem |
37611    /// | 2 | Xmm, Xmm, Xmm |
37612    /// | 3 | Ymm, Ymm, Mem |
37613    /// | 4 | Ymm, Ymm, Ymm |
37614    /// | 5 | Zmm, Zmm, Mem |
37615    /// | 6 | Zmm, Zmm, Zmm |
37616    /// +---+---------------+
37617    /// ```
37618    #[inline]
37619    pub fn vfmaddsub231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37620    where Assembler<'a>: Vfmaddsub231phMaskEmitter<A, B, C> {
37621        <Self as Vfmaddsub231phMaskEmitter<A, B, C>>::vfmaddsub231ph_mask(self, op0, op1, op2);
37622    }
37623    /// `VFMADDSUB231PH_MASK_ER`.
37624    ///
37625    /// Supported operand variants:
37626    ///
37627    /// ```text
37628    /// +---+---------------+
37629    /// | # | Operands      |
37630    /// +---+---------------+
37631    /// | 1 | Zmm, Zmm, Zmm |
37632    /// +---+---------------+
37633    /// ```
37634    #[inline]
37635    pub fn vfmaddsub231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37636    where Assembler<'a>: Vfmaddsub231phMaskErEmitter<A, B, C> {
37637        <Self as Vfmaddsub231phMaskErEmitter<A, B, C>>::vfmaddsub231ph_mask_er(self, op0, op1, op2);
37638    }
37639    /// `VFMADDSUB231PH_MASKZ`.
37640    ///
37641    /// Supported operand variants:
37642    ///
37643    /// ```text
37644    /// +---+---------------+
37645    /// | # | Operands      |
37646    /// +---+---------------+
37647    /// | 1 | Xmm, Xmm, Mem |
37648    /// | 2 | Xmm, Xmm, Xmm |
37649    /// | 3 | Ymm, Ymm, Mem |
37650    /// | 4 | Ymm, Ymm, Ymm |
37651    /// | 5 | Zmm, Zmm, Mem |
37652    /// | 6 | Zmm, Zmm, Zmm |
37653    /// +---+---------------+
37654    /// ```
37655    #[inline]
37656    pub fn vfmaddsub231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37657    where Assembler<'a>: Vfmaddsub231phMaskzEmitter<A, B, C> {
37658        <Self as Vfmaddsub231phMaskzEmitter<A, B, C>>::vfmaddsub231ph_maskz(self, op0, op1, op2);
37659    }
37660    /// `VFMADDSUB231PH_MASKZ_ER`.
37661    ///
37662    /// Supported operand variants:
37663    ///
37664    /// ```text
37665    /// +---+---------------+
37666    /// | # | Operands      |
37667    /// +---+---------------+
37668    /// | 1 | Zmm, Zmm, Zmm |
37669    /// +---+---------------+
37670    /// ```
37671    #[inline]
37672    pub fn vfmaddsub231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37673    where Assembler<'a>: Vfmaddsub231phMaskzErEmitter<A, B, C> {
37674        <Self as Vfmaddsub231phMaskzErEmitter<A, B, C>>::vfmaddsub231ph_maskz_er(self, op0, op1, op2);
37675    }
37676    /// `VFMSUB132PH`.
37677    ///
37678    /// Supported operand variants:
37679    ///
37680    /// ```text
37681    /// +---+---------------+
37682    /// | # | Operands      |
37683    /// +---+---------------+
37684    /// | 1 | Xmm, Xmm, Mem |
37685    /// | 2 | Xmm, Xmm, Xmm |
37686    /// | 3 | Ymm, Ymm, Mem |
37687    /// | 4 | Ymm, Ymm, Ymm |
37688    /// | 5 | Zmm, Zmm, Mem |
37689    /// | 6 | Zmm, Zmm, Zmm |
37690    /// +---+---------------+
37691    /// ```
37692    #[inline]
37693    pub fn vfmsub132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37694    where Assembler<'a>: Vfmsub132phEmitter<A, B, C> {
37695        <Self as Vfmsub132phEmitter<A, B, C>>::vfmsub132ph(self, op0, op1, op2);
37696    }
37697    /// `VFMSUB132PH_ER`.
37698    ///
37699    /// Supported operand variants:
37700    ///
37701    /// ```text
37702    /// +---+---------------+
37703    /// | # | Operands      |
37704    /// +---+---------------+
37705    /// | 1 | Zmm, Zmm, Zmm |
37706    /// +---+---------------+
37707    /// ```
37708    #[inline]
37709    pub fn vfmsub132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37710    where Assembler<'a>: Vfmsub132phErEmitter<A, B, C> {
37711        <Self as Vfmsub132phErEmitter<A, B, C>>::vfmsub132ph_er(self, op0, op1, op2);
37712    }
37713    /// `VFMSUB132PH_MASK`.
37714    ///
37715    /// Supported operand variants:
37716    ///
37717    /// ```text
37718    /// +---+---------------+
37719    /// | # | Operands      |
37720    /// +---+---------------+
37721    /// | 1 | Xmm, Xmm, Mem |
37722    /// | 2 | Xmm, Xmm, Xmm |
37723    /// | 3 | Ymm, Ymm, Mem |
37724    /// | 4 | Ymm, Ymm, Ymm |
37725    /// | 5 | Zmm, Zmm, Mem |
37726    /// | 6 | Zmm, Zmm, Zmm |
37727    /// +---+---------------+
37728    /// ```
37729    #[inline]
37730    pub fn vfmsub132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37731    where Assembler<'a>: Vfmsub132phMaskEmitter<A, B, C> {
37732        <Self as Vfmsub132phMaskEmitter<A, B, C>>::vfmsub132ph_mask(self, op0, op1, op2);
37733    }
37734    /// `VFMSUB132PH_MASK_ER`.
37735    ///
37736    /// Supported operand variants:
37737    ///
37738    /// ```text
37739    /// +---+---------------+
37740    /// | # | Operands      |
37741    /// +---+---------------+
37742    /// | 1 | Zmm, Zmm, Zmm |
37743    /// +---+---------------+
37744    /// ```
37745    #[inline]
37746    pub fn vfmsub132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37747    where Assembler<'a>: Vfmsub132phMaskErEmitter<A, B, C> {
37748        <Self as Vfmsub132phMaskErEmitter<A, B, C>>::vfmsub132ph_mask_er(self, op0, op1, op2);
37749    }
37750    /// `VFMSUB132PH_MASKZ`.
37751    ///
37752    /// Supported operand variants:
37753    ///
37754    /// ```text
37755    /// +---+---------------+
37756    /// | # | Operands      |
37757    /// +---+---------------+
37758    /// | 1 | Xmm, Xmm, Mem |
37759    /// | 2 | Xmm, Xmm, Xmm |
37760    /// | 3 | Ymm, Ymm, Mem |
37761    /// | 4 | Ymm, Ymm, Ymm |
37762    /// | 5 | Zmm, Zmm, Mem |
37763    /// | 6 | Zmm, Zmm, Zmm |
37764    /// +---+---------------+
37765    /// ```
37766    #[inline]
37767    pub fn vfmsub132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37768    where Assembler<'a>: Vfmsub132phMaskzEmitter<A, B, C> {
37769        <Self as Vfmsub132phMaskzEmitter<A, B, C>>::vfmsub132ph_maskz(self, op0, op1, op2);
37770    }
37771    /// `VFMSUB132PH_MASKZ_ER`.
37772    ///
37773    /// Supported operand variants:
37774    ///
37775    /// ```text
37776    /// +---+---------------+
37777    /// | # | Operands      |
37778    /// +---+---------------+
37779    /// | 1 | Zmm, Zmm, Zmm |
37780    /// +---+---------------+
37781    /// ```
37782    #[inline]
37783    pub fn vfmsub132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37784    where Assembler<'a>: Vfmsub132phMaskzErEmitter<A, B, C> {
37785        <Self as Vfmsub132phMaskzErEmitter<A, B, C>>::vfmsub132ph_maskz_er(self, op0, op1, op2);
37786    }
37787    /// `VFMSUB132SH`.
37788    ///
37789    /// Supported operand variants:
37790    ///
37791    /// ```text
37792    /// +---+---------------+
37793    /// | # | Operands      |
37794    /// +---+---------------+
37795    /// | 1 | Xmm, Xmm, Mem |
37796    /// | 2 | Xmm, Xmm, Xmm |
37797    /// +---+---------------+
37798    /// ```
37799    #[inline]
37800    pub fn vfmsub132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37801    where Assembler<'a>: Vfmsub132shEmitter<A, B, C> {
37802        <Self as Vfmsub132shEmitter<A, B, C>>::vfmsub132sh(self, op0, op1, op2);
37803    }
37804    /// `VFMSUB132SH_ER`.
37805    ///
37806    /// Supported operand variants:
37807    ///
37808    /// ```text
37809    /// +---+---------------+
37810    /// | # | Operands      |
37811    /// +---+---------------+
37812    /// | 1 | Xmm, Xmm, Xmm |
37813    /// +---+---------------+
37814    /// ```
37815    #[inline]
37816    pub fn vfmsub132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37817    where Assembler<'a>: Vfmsub132shErEmitter<A, B, C> {
37818        <Self as Vfmsub132shErEmitter<A, B, C>>::vfmsub132sh_er(self, op0, op1, op2);
37819    }
37820    /// `VFMSUB132SH_MASK`.
37821    ///
37822    /// Supported operand variants:
37823    ///
37824    /// ```text
37825    /// +---+---------------+
37826    /// | # | Operands      |
37827    /// +---+---------------+
37828    /// | 1 | Xmm, Xmm, Mem |
37829    /// | 2 | Xmm, Xmm, Xmm |
37830    /// +---+---------------+
37831    /// ```
37832    #[inline]
37833    pub fn vfmsub132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37834    where Assembler<'a>: Vfmsub132shMaskEmitter<A, B, C> {
37835        <Self as Vfmsub132shMaskEmitter<A, B, C>>::vfmsub132sh_mask(self, op0, op1, op2);
37836    }
37837    /// `VFMSUB132SH_MASK_ER`.
37838    ///
37839    /// Supported operand variants:
37840    ///
37841    /// ```text
37842    /// +---+---------------+
37843    /// | # | Operands      |
37844    /// +---+---------------+
37845    /// | 1 | Xmm, Xmm, Xmm |
37846    /// +---+---------------+
37847    /// ```
37848    #[inline]
37849    pub fn vfmsub132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37850    where Assembler<'a>: Vfmsub132shMaskErEmitter<A, B, C> {
37851        <Self as Vfmsub132shMaskErEmitter<A, B, C>>::vfmsub132sh_mask_er(self, op0, op1, op2);
37852    }
37853    /// `VFMSUB132SH_MASKZ`.
37854    ///
37855    /// Supported operand variants:
37856    ///
37857    /// ```text
37858    /// +---+---------------+
37859    /// | # | Operands      |
37860    /// +---+---------------+
37861    /// | 1 | Xmm, Xmm, Mem |
37862    /// | 2 | Xmm, Xmm, Xmm |
37863    /// +---+---------------+
37864    /// ```
37865    #[inline]
37866    pub fn vfmsub132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37867    where Assembler<'a>: Vfmsub132shMaskzEmitter<A, B, C> {
37868        <Self as Vfmsub132shMaskzEmitter<A, B, C>>::vfmsub132sh_maskz(self, op0, op1, op2);
37869    }
37870    /// `VFMSUB132SH_MASKZ_ER`.
37871    ///
37872    /// Supported operand variants:
37873    ///
37874    /// ```text
37875    /// +---+---------------+
37876    /// | # | Operands      |
37877    /// +---+---------------+
37878    /// | 1 | Xmm, Xmm, Xmm |
37879    /// +---+---------------+
37880    /// ```
37881    #[inline]
37882    pub fn vfmsub132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37883    where Assembler<'a>: Vfmsub132shMaskzErEmitter<A, B, C> {
37884        <Self as Vfmsub132shMaskzErEmitter<A, B, C>>::vfmsub132sh_maskz_er(self, op0, op1, op2);
37885    }
37886    /// `VFMSUB213PH`.
37887    ///
37888    /// Supported operand variants:
37889    ///
37890    /// ```text
37891    /// +---+---------------+
37892    /// | # | Operands      |
37893    /// +---+---------------+
37894    /// | 1 | Xmm, Xmm, Mem |
37895    /// | 2 | Xmm, Xmm, Xmm |
37896    /// | 3 | Ymm, Ymm, Mem |
37897    /// | 4 | Ymm, Ymm, Ymm |
37898    /// | 5 | Zmm, Zmm, Mem |
37899    /// | 6 | Zmm, Zmm, Zmm |
37900    /// +---+---------------+
37901    /// ```
37902    #[inline]
37903    pub fn vfmsub213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37904    where Assembler<'a>: Vfmsub213phEmitter<A, B, C> {
37905        <Self as Vfmsub213phEmitter<A, B, C>>::vfmsub213ph(self, op0, op1, op2);
37906    }
37907    /// `VFMSUB213PH_ER`.
37908    ///
37909    /// Supported operand variants:
37910    ///
37911    /// ```text
37912    /// +---+---------------+
37913    /// | # | Operands      |
37914    /// +---+---------------+
37915    /// | 1 | Zmm, Zmm, Zmm |
37916    /// +---+---------------+
37917    /// ```
37918    #[inline]
37919    pub fn vfmsub213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37920    where Assembler<'a>: Vfmsub213phErEmitter<A, B, C> {
37921        <Self as Vfmsub213phErEmitter<A, B, C>>::vfmsub213ph_er(self, op0, op1, op2);
37922    }
37923    /// `VFMSUB213PH_MASK`.
37924    ///
37925    /// Supported operand variants:
37926    ///
37927    /// ```text
37928    /// +---+---------------+
37929    /// | # | Operands      |
37930    /// +---+---------------+
37931    /// | 1 | Xmm, Xmm, Mem |
37932    /// | 2 | Xmm, Xmm, Xmm |
37933    /// | 3 | Ymm, Ymm, Mem |
37934    /// | 4 | Ymm, Ymm, Ymm |
37935    /// | 5 | Zmm, Zmm, Mem |
37936    /// | 6 | Zmm, Zmm, Zmm |
37937    /// +---+---------------+
37938    /// ```
37939    #[inline]
37940    pub fn vfmsub213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37941    where Assembler<'a>: Vfmsub213phMaskEmitter<A, B, C> {
37942        <Self as Vfmsub213phMaskEmitter<A, B, C>>::vfmsub213ph_mask(self, op0, op1, op2);
37943    }
37944    /// `VFMSUB213PH_MASK_ER`.
37945    ///
37946    /// Supported operand variants:
37947    ///
37948    /// ```text
37949    /// +---+---------------+
37950    /// | # | Operands      |
37951    /// +---+---------------+
37952    /// | 1 | Zmm, Zmm, Zmm |
37953    /// +---+---------------+
37954    /// ```
37955    #[inline]
37956    pub fn vfmsub213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37957    where Assembler<'a>: Vfmsub213phMaskErEmitter<A, B, C> {
37958        <Self as Vfmsub213phMaskErEmitter<A, B, C>>::vfmsub213ph_mask_er(self, op0, op1, op2);
37959    }
37960    /// `VFMSUB213PH_MASKZ`.
37961    ///
37962    /// Supported operand variants:
37963    ///
37964    /// ```text
37965    /// +---+---------------+
37966    /// | # | Operands      |
37967    /// +---+---------------+
37968    /// | 1 | Xmm, Xmm, Mem |
37969    /// | 2 | Xmm, Xmm, Xmm |
37970    /// | 3 | Ymm, Ymm, Mem |
37971    /// | 4 | Ymm, Ymm, Ymm |
37972    /// | 5 | Zmm, Zmm, Mem |
37973    /// | 6 | Zmm, Zmm, Zmm |
37974    /// +---+---------------+
37975    /// ```
37976    #[inline]
37977    pub fn vfmsub213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37978    where Assembler<'a>: Vfmsub213phMaskzEmitter<A, B, C> {
37979        <Self as Vfmsub213phMaskzEmitter<A, B, C>>::vfmsub213ph_maskz(self, op0, op1, op2);
37980    }
37981    /// `VFMSUB213PH_MASKZ_ER`.
37982    ///
37983    /// Supported operand variants:
37984    ///
37985    /// ```text
37986    /// +---+---------------+
37987    /// | # | Operands      |
37988    /// +---+---------------+
37989    /// | 1 | Zmm, Zmm, Zmm |
37990    /// +---+---------------+
37991    /// ```
37992    #[inline]
37993    pub fn vfmsub213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
37994    where Assembler<'a>: Vfmsub213phMaskzErEmitter<A, B, C> {
37995        <Self as Vfmsub213phMaskzErEmitter<A, B, C>>::vfmsub213ph_maskz_er(self, op0, op1, op2);
37996    }
37997    /// `VFMSUB213SH`.
37998    ///
37999    /// Supported operand variants:
38000    ///
38001    /// ```text
38002    /// +---+---------------+
38003    /// | # | Operands      |
38004    /// +---+---------------+
38005    /// | 1 | Xmm, Xmm, Mem |
38006    /// | 2 | Xmm, Xmm, Xmm |
38007    /// +---+---------------+
38008    /// ```
38009    #[inline]
38010    pub fn vfmsub213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38011    where Assembler<'a>: Vfmsub213shEmitter<A, B, C> {
38012        <Self as Vfmsub213shEmitter<A, B, C>>::vfmsub213sh(self, op0, op1, op2);
38013    }
38014    /// `VFMSUB213SH_ER`.
38015    ///
38016    /// Supported operand variants:
38017    ///
38018    /// ```text
38019    /// +---+---------------+
38020    /// | # | Operands      |
38021    /// +---+---------------+
38022    /// | 1 | Xmm, Xmm, Xmm |
38023    /// +---+---------------+
38024    /// ```
38025    #[inline]
38026    pub fn vfmsub213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38027    where Assembler<'a>: Vfmsub213shErEmitter<A, B, C> {
38028        <Self as Vfmsub213shErEmitter<A, B, C>>::vfmsub213sh_er(self, op0, op1, op2);
38029    }
38030    /// `VFMSUB213SH_MASK`.
38031    ///
38032    /// Supported operand variants:
38033    ///
38034    /// ```text
38035    /// +---+---------------+
38036    /// | # | Operands      |
38037    /// +---+---------------+
38038    /// | 1 | Xmm, Xmm, Mem |
38039    /// | 2 | Xmm, Xmm, Xmm |
38040    /// +---+---------------+
38041    /// ```
38042    #[inline]
38043    pub fn vfmsub213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38044    where Assembler<'a>: Vfmsub213shMaskEmitter<A, B, C> {
38045        <Self as Vfmsub213shMaskEmitter<A, B, C>>::vfmsub213sh_mask(self, op0, op1, op2);
38046    }
38047    /// `VFMSUB213SH_MASK_ER`.
38048    ///
38049    /// Supported operand variants:
38050    ///
38051    /// ```text
38052    /// +---+---------------+
38053    /// | # | Operands      |
38054    /// +---+---------------+
38055    /// | 1 | Xmm, Xmm, Xmm |
38056    /// +---+---------------+
38057    /// ```
38058    #[inline]
38059    pub fn vfmsub213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38060    where Assembler<'a>: Vfmsub213shMaskErEmitter<A, B, C> {
38061        <Self as Vfmsub213shMaskErEmitter<A, B, C>>::vfmsub213sh_mask_er(self, op0, op1, op2);
38062    }
38063    /// `VFMSUB213SH_MASKZ`.
38064    ///
38065    /// Supported operand variants:
38066    ///
38067    /// ```text
38068    /// +---+---------------+
38069    /// | # | Operands      |
38070    /// +---+---------------+
38071    /// | 1 | Xmm, Xmm, Mem |
38072    /// | 2 | Xmm, Xmm, Xmm |
38073    /// +---+---------------+
38074    /// ```
38075    #[inline]
38076    pub fn vfmsub213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38077    where Assembler<'a>: Vfmsub213shMaskzEmitter<A, B, C> {
38078        <Self as Vfmsub213shMaskzEmitter<A, B, C>>::vfmsub213sh_maskz(self, op0, op1, op2);
38079    }
38080    /// `VFMSUB213SH_MASKZ_ER`.
38081    ///
38082    /// Supported operand variants:
38083    ///
38084    /// ```text
38085    /// +---+---------------+
38086    /// | # | Operands      |
38087    /// +---+---------------+
38088    /// | 1 | Xmm, Xmm, Xmm |
38089    /// +---+---------------+
38090    /// ```
38091    #[inline]
38092    pub fn vfmsub213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38093    where Assembler<'a>: Vfmsub213shMaskzErEmitter<A, B, C> {
38094        <Self as Vfmsub213shMaskzErEmitter<A, B, C>>::vfmsub213sh_maskz_er(self, op0, op1, op2);
38095    }
38096    /// `VFMSUB231PH`.
38097    ///
38098    /// Supported operand variants:
38099    ///
38100    /// ```text
38101    /// +---+---------------+
38102    /// | # | Operands      |
38103    /// +---+---------------+
38104    /// | 1 | Xmm, Xmm, Mem |
38105    /// | 2 | Xmm, Xmm, Xmm |
38106    /// | 3 | Ymm, Ymm, Mem |
38107    /// | 4 | Ymm, Ymm, Ymm |
38108    /// | 5 | Zmm, Zmm, Mem |
38109    /// | 6 | Zmm, Zmm, Zmm |
38110    /// +---+---------------+
38111    /// ```
38112    #[inline]
38113    pub fn vfmsub231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38114    where Assembler<'a>: Vfmsub231phEmitter<A, B, C> {
38115        <Self as Vfmsub231phEmitter<A, B, C>>::vfmsub231ph(self, op0, op1, op2);
38116    }
38117    /// `VFMSUB231PH_ER`.
38118    ///
38119    /// Supported operand variants:
38120    ///
38121    /// ```text
38122    /// +---+---------------+
38123    /// | # | Operands      |
38124    /// +---+---------------+
38125    /// | 1 | Zmm, Zmm, Zmm |
38126    /// +---+---------------+
38127    /// ```
38128    #[inline]
38129    pub fn vfmsub231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38130    where Assembler<'a>: Vfmsub231phErEmitter<A, B, C> {
38131        <Self as Vfmsub231phErEmitter<A, B, C>>::vfmsub231ph_er(self, op0, op1, op2);
38132    }
38133    /// `VFMSUB231PH_MASK`.
38134    ///
38135    /// Supported operand variants:
38136    ///
38137    /// ```text
38138    /// +---+---------------+
38139    /// | # | Operands      |
38140    /// +---+---------------+
38141    /// | 1 | Xmm, Xmm, Mem |
38142    /// | 2 | Xmm, Xmm, Xmm |
38143    /// | 3 | Ymm, Ymm, Mem |
38144    /// | 4 | Ymm, Ymm, Ymm |
38145    /// | 5 | Zmm, Zmm, Mem |
38146    /// | 6 | Zmm, Zmm, Zmm |
38147    /// +---+---------------+
38148    /// ```
38149    #[inline]
38150    pub fn vfmsub231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38151    where Assembler<'a>: Vfmsub231phMaskEmitter<A, B, C> {
38152        <Self as Vfmsub231phMaskEmitter<A, B, C>>::vfmsub231ph_mask(self, op0, op1, op2);
38153    }
38154    /// `VFMSUB231PH_MASK_ER`.
38155    ///
38156    /// Supported operand variants:
38157    ///
38158    /// ```text
38159    /// +---+---------------+
38160    /// | # | Operands      |
38161    /// +---+---------------+
38162    /// | 1 | Zmm, Zmm, Zmm |
38163    /// +---+---------------+
38164    /// ```
38165    #[inline]
38166    pub fn vfmsub231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38167    where Assembler<'a>: Vfmsub231phMaskErEmitter<A, B, C> {
38168        <Self as Vfmsub231phMaskErEmitter<A, B, C>>::vfmsub231ph_mask_er(self, op0, op1, op2);
38169    }
38170    /// `VFMSUB231PH_MASKZ`.
38171    ///
38172    /// Supported operand variants:
38173    ///
38174    /// ```text
38175    /// +---+---------------+
38176    /// | # | Operands      |
38177    /// +---+---------------+
38178    /// | 1 | Xmm, Xmm, Mem |
38179    /// | 2 | Xmm, Xmm, Xmm |
38180    /// | 3 | Ymm, Ymm, Mem |
38181    /// | 4 | Ymm, Ymm, Ymm |
38182    /// | 5 | Zmm, Zmm, Mem |
38183    /// | 6 | Zmm, Zmm, Zmm |
38184    /// +---+---------------+
38185    /// ```
38186    #[inline]
38187    pub fn vfmsub231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38188    where Assembler<'a>: Vfmsub231phMaskzEmitter<A, B, C> {
38189        <Self as Vfmsub231phMaskzEmitter<A, B, C>>::vfmsub231ph_maskz(self, op0, op1, op2);
38190    }
38191    /// `VFMSUB231PH_MASKZ_ER`.
38192    ///
38193    /// Supported operand variants:
38194    ///
38195    /// ```text
38196    /// +---+---------------+
38197    /// | # | Operands      |
38198    /// +---+---------------+
38199    /// | 1 | Zmm, Zmm, Zmm |
38200    /// +---+---------------+
38201    /// ```
38202    #[inline]
38203    pub fn vfmsub231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38204    where Assembler<'a>: Vfmsub231phMaskzErEmitter<A, B, C> {
38205        <Self as Vfmsub231phMaskzErEmitter<A, B, C>>::vfmsub231ph_maskz_er(self, op0, op1, op2);
38206    }
38207    /// `VFMSUB231SH`.
38208    ///
38209    /// Supported operand variants:
38210    ///
38211    /// ```text
38212    /// +---+---------------+
38213    /// | # | Operands      |
38214    /// +---+---------------+
38215    /// | 1 | Xmm, Xmm, Mem |
38216    /// | 2 | Xmm, Xmm, Xmm |
38217    /// +---+---------------+
38218    /// ```
38219    #[inline]
38220    pub fn vfmsub231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38221    where Assembler<'a>: Vfmsub231shEmitter<A, B, C> {
38222        <Self as Vfmsub231shEmitter<A, B, C>>::vfmsub231sh(self, op0, op1, op2);
38223    }
38224    /// `VFMSUB231SH_ER`.
38225    ///
38226    /// Supported operand variants:
38227    ///
38228    /// ```text
38229    /// +---+---------------+
38230    /// | # | Operands      |
38231    /// +---+---------------+
38232    /// | 1 | Xmm, Xmm, Xmm |
38233    /// +---+---------------+
38234    /// ```
38235    #[inline]
38236    pub fn vfmsub231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38237    where Assembler<'a>: Vfmsub231shErEmitter<A, B, C> {
38238        <Self as Vfmsub231shErEmitter<A, B, C>>::vfmsub231sh_er(self, op0, op1, op2);
38239    }
38240    /// `VFMSUB231SH_MASK`.
38241    ///
38242    /// Supported operand variants:
38243    ///
38244    /// ```text
38245    /// +---+---------------+
38246    /// | # | Operands      |
38247    /// +---+---------------+
38248    /// | 1 | Xmm, Xmm, Mem |
38249    /// | 2 | Xmm, Xmm, Xmm |
38250    /// +---+---------------+
38251    /// ```
38252    #[inline]
38253    pub fn vfmsub231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38254    where Assembler<'a>: Vfmsub231shMaskEmitter<A, B, C> {
38255        <Self as Vfmsub231shMaskEmitter<A, B, C>>::vfmsub231sh_mask(self, op0, op1, op2);
38256    }
38257    /// `VFMSUB231SH_MASK_ER`.
38258    ///
38259    /// Supported operand variants:
38260    ///
38261    /// ```text
38262    /// +---+---------------+
38263    /// | # | Operands      |
38264    /// +---+---------------+
38265    /// | 1 | Xmm, Xmm, Xmm |
38266    /// +---+---------------+
38267    /// ```
38268    #[inline]
38269    pub fn vfmsub231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38270    where Assembler<'a>: Vfmsub231shMaskErEmitter<A, B, C> {
38271        <Self as Vfmsub231shMaskErEmitter<A, B, C>>::vfmsub231sh_mask_er(self, op0, op1, op2);
38272    }
38273    /// `VFMSUB231SH_MASKZ`.
38274    ///
38275    /// Supported operand variants:
38276    ///
38277    /// ```text
38278    /// +---+---------------+
38279    /// | # | Operands      |
38280    /// +---+---------------+
38281    /// | 1 | Xmm, Xmm, Mem |
38282    /// | 2 | Xmm, Xmm, Xmm |
38283    /// +---+---------------+
38284    /// ```
38285    #[inline]
38286    pub fn vfmsub231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38287    where Assembler<'a>: Vfmsub231shMaskzEmitter<A, B, C> {
38288        <Self as Vfmsub231shMaskzEmitter<A, B, C>>::vfmsub231sh_maskz(self, op0, op1, op2);
38289    }
38290    /// `VFMSUB231SH_MASKZ_ER`.
38291    ///
38292    /// Supported operand variants:
38293    ///
38294    /// ```text
38295    /// +---+---------------+
38296    /// | # | Operands      |
38297    /// +---+---------------+
38298    /// | 1 | Xmm, Xmm, Xmm |
38299    /// +---+---------------+
38300    /// ```
38301    #[inline]
38302    pub fn vfmsub231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38303    where Assembler<'a>: Vfmsub231shMaskzErEmitter<A, B, C> {
38304        <Self as Vfmsub231shMaskzErEmitter<A, B, C>>::vfmsub231sh_maskz_er(self, op0, op1, op2);
38305    }
38306    /// `VFMSUBADD132PH`.
38307    ///
38308    /// Supported operand variants:
38309    ///
38310    /// ```text
38311    /// +---+---------------+
38312    /// | # | Operands      |
38313    /// +---+---------------+
38314    /// | 1 | Xmm, Xmm, Mem |
38315    /// | 2 | Xmm, Xmm, Xmm |
38316    /// | 3 | Ymm, Ymm, Mem |
38317    /// | 4 | Ymm, Ymm, Ymm |
38318    /// | 5 | Zmm, Zmm, Mem |
38319    /// | 6 | Zmm, Zmm, Zmm |
38320    /// +---+---------------+
38321    /// ```
38322    #[inline]
38323    pub fn vfmsubadd132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38324    where Assembler<'a>: Vfmsubadd132phEmitter<A, B, C> {
38325        <Self as Vfmsubadd132phEmitter<A, B, C>>::vfmsubadd132ph(self, op0, op1, op2);
38326    }
38327    /// `VFMSUBADD132PH_ER`.
38328    ///
38329    /// Supported operand variants:
38330    ///
38331    /// ```text
38332    /// +---+---------------+
38333    /// | # | Operands      |
38334    /// +---+---------------+
38335    /// | 1 | Zmm, Zmm, Zmm |
38336    /// +---+---------------+
38337    /// ```
38338    #[inline]
38339    pub fn vfmsubadd132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38340    where Assembler<'a>: Vfmsubadd132phErEmitter<A, B, C> {
38341        <Self as Vfmsubadd132phErEmitter<A, B, C>>::vfmsubadd132ph_er(self, op0, op1, op2);
38342    }
38343    /// `VFMSUBADD132PH_MASK`.
38344    ///
38345    /// Supported operand variants:
38346    ///
38347    /// ```text
38348    /// +---+---------------+
38349    /// | # | Operands      |
38350    /// +---+---------------+
38351    /// | 1 | Xmm, Xmm, Mem |
38352    /// | 2 | Xmm, Xmm, Xmm |
38353    /// | 3 | Ymm, Ymm, Mem |
38354    /// | 4 | Ymm, Ymm, Ymm |
38355    /// | 5 | Zmm, Zmm, Mem |
38356    /// | 6 | Zmm, Zmm, Zmm |
38357    /// +---+---------------+
38358    /// ```
38359    #[inline]
38360    pub fn vfmsubadd132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38361    where Assembler<'a>: Vfmsubadd132phMaskEmitter<A, B, C> {
38362        <Self as Vfmsubadd132phMaskEmitter<A, B, C>>::vfmsubadd132ph_mask(self, op0, op1, op2);
38363    }
38364    /// `VFMSUBADD132PH_MASK_ER`.
38365    ///
38366    /// Supported operand variants:
38367    ///
38368    /// ```text
38369    /// +---+---------------+
38370    /// | # | Operands      |
38371    /// +---+---------------+
38372    /// | 1 | Zmm, Zmm, Zmm |
38373    /// +---+---------------+
38374    /// ```
38375    #[inline]
38376    pub fn vfmsubadd132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38377    where Assembler<'a>: Vfmsubadd132phMaskErEmitter<A, B, C> {
38378        <Self as Vfmsubadd132phMaskErEmitter<A, B, C>>::vfmsubadd132ph_mask_er(self, op0, op1, op2);
38379    }
38380    /// `VFMSUBADD132PH_MASKZ`.
38381    ///
38382    /// Supported operand variants:
38383    ///
38384    /// ```text
38385    /// +---+---------------+
38386    /// | # | Operands      |
38387    /// +---+---------------+
38388    /// | 1 | Xmm, Xmm, Mem |
38389    /// | 2 | Xmm, Xmm, Xmm |
38390    /// | 3 | Ymm, Ymm, Mem |
38391    /// | 4 | Ymm, Ymm, Ymm |
38392    /// | 5 | Zmm, Zmm, Mem |
38393    /// | 6 | Zmm, Zmm, Zmm |
38394    /// +---+---------------+
38395    /// ```
38396    #[inline]
38397    pub fn vfmsubadd132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38398    where Assembler<'a>: Vfmsubadd132phMaskzEmitter<A, B, C> {
38399        <Self as Vfmsubadd132phMaskzEmitter<A, B, C>>::vfmsubadd132ph_maskz(self, op0, op1, op2);
38400    }
38401    /// `VFMSUBADD132PH_MASKZ_ER`.
38402    ///
38403    /// Supported operand variants:
38404    ///
38405    /// ```text
38406    /// +---+---------------+
38407    /// | # | Operands      |
38408    /// +---+---------------+
38409    /// | 1 | Zmm, Zmm, Zmm |
38410    /// +---+---------------+
38411    /// ```
38412    #[inline]
38413    pub fn vfmsubadd132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38414    where Assembler<'a>: Vfmsubadd132phMaskzErEmitter<A, B, C> {
38415        <Self as Vfmsubadd132phMaskzErEmitter<A, B, C>>::vfmsubadd132ph_maskz_er(self, op0, op1, op2);
38416    }
38417    /// `VFMSUBADD213PH`.
38418    ///
38419    /// Supported operand variants:
38420    ///
38421    /// ```text
38422    /// +---+---------------+
38423    /// | # | Operands      |
38424    /// +---+---------------+
38425    /// | 1 | Xmm, Xmm, Mem |
38426    /// | 2 | Xmm, Xmm, Xmm |
38427    /// | 3 | Ymm, Ymm, Mem |
38428    /// | 4 | Ymm, Ymm, Ymm |
38429    /// | 5 | Zmm, Zmm, Mem |
38430    /// | 6 | Zmm, Zmm, Zmm |
38431    /// +---+---------------+
38432    /// ```
38433    #[inline]
38434    pub fn vfmsubadd213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38435    where Assembler<'a>: Vfmsubadd213phEmitter<A, B, C> {
38436        <Self as Vfmsubadd213phEmitter<A, B, C>>::vfmsubadd213ph(self, op0, op1, op2);
38437    }
38438    /// `VFMSUBADD213PH_ER`.
38439    ///
38440    /// Supported operand variants:
38441    ///
38442    /// ```text
38443    /// +---+---------------+
38444    /// | # | Operands      |
38445    /// +---+---------------+
38446    /// | 1 | Zmm, Zmm, Zmm |
38447    /// +---+---------------+
38448    /// ```
38449    #[inline]
38450    pub fn vfmsubadd213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38451    where Assembler<'a>: Vfmsubadd213phErEmitter<A, B, C> {
38452        <Self as Vfmsubadd213phErEmitter<A, B, C>>::vfmsubadd213ph_er(self, op0, op1, op2);
38453    }
38454    /// `VFMSUBADD213PH_MASK`.
38455    ///
38456    /// Supported operand variants:
38457    ///
38458    /// ```text
38459    /// +---+---------------+
38460    /// | # | Operands      |
38461    /// +---+---------------+
38462    /// | 1 | Xmm, Xmm, Mem |
38463    /// | 2 | Xmm, Xmm, Xmm |
38464    /// | 3 | Ymm, Ymm, Mem |
38465    /// | 4 | Ymm, Ymm, Ymm |
38466    /// | 5 | Zmm, Zmm, Mem |
38467    /// | 6 | Zmm, Zmm, Zmm |
38468    /// +---+---------------+
38469    /// ```
38470    #[inline]
38471    pub fn vfmsubadd213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38472    where Assembler<'a>: Vfmsubadd213phMaskEmitter<A, B, C> {
38473        <Self as Vfmsubadd213phMaskEmitter<A, B, C>>::vfmsubadd213ph_mask(self, op0, op1, op2);
38474    }
38475    /// `VFMSUBADD213PH_MASK_ER`.
38476    ///
38477    /// Supported operand variants:
38478    ///
38479    /// ```text
38480    /// +---+---------------+
38481    /// | # | Operands      |
38482    /// +---+---------------+
38483    /// | 1 | Zmm, Zmm, Zmm |
38484    /// +---+---------------+
38485    /// ```
38486    #[inline]
38487    pub fn vfmsubadd213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38488    where Assembler<'a>: Vfmsubadd213phMaskErEmitter<A, B, C> {
38489        <Self as Vfmsubadd213phMaskErEmitter<A, B, C>>::vfmsubadd213ph_mask_er(self, op0, op1, op2);
38490    }
38491    /// `VFMSUBADD213PH_MASKZ`.
38492    ///
38493    /// Supported operand variants:
38494    ///
38495    /// ```text
38496    /// +---+---------------+
38497    /// | # | Operands      |
38498    /// +---+---------------+
38499    /// | 1 | Xmm, Xmm, Mem |
38500    /// | 2 | Xmm, Xmm, Xmm |
38501    /// | 3 | Ymm, Ymm, Mem |
38502    /// | 4 | Ymm, Ymm, Ymm |
38503    /// | 5 | Zmm, Zmm, Mem |
38504    /// | 6 | Zmm, Zmm, Zmm |
38505    /// +---+---------------+
38506    /// ```
38507    #[inline]
38508    pub fn vfmsubadd213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38509    where Assembler<'a>: Vfmsubadd213phMaskzEmitter<A, B, C> {
38510        <Self as Vfmsubadd213phMaskzEmitter<A, B, C>>::vfmsubadd213ph_maskz(self, op0, op1, op2);
38511    }
38512    /// `VFMSUBADD213PH_MASKZ_ER`.
38513    ///
38514    /// Supported operand variants:
38515    ///
38516    /// ```text
38517    /// +---+---------------+
38518    /// | # | Operands      |
38519    /// +---+---------------+
38520    /// | 1 | Zmm, Zmm, Zmm |
38521    /// +---+---------------+
38522    /// ```
38523    #[inline]
38524    pub fn vfmsubadd213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38525    where Assembler<'a>: Vfmsubadd213phMaskzErEmitter<A, B, C> {
38526        <Self as Vfmsubadd213phMaskzErEmitter<A, B, C>>::vfmsubadd213ph_maskz_er(self, op0, op1, op2);
38527    }
38528    /// `VFMSUBADD231PH`.
38529    ///
38530    /// Supported operand variants:
38531    ///
38532    /// ```text
38533    /// +---+---------------+
38534    /// | # | Operands      |
38535    /// +---+---------------+
38536    /// | 1 | Xmm, Xmm, Mem |
38537    /// | 2 | Xmm, Xmm, Xmm |
38538    /// | 3 | Ymm, Ymm, Mem |
38539    /// | 4 | Ymm, Ymm, Ymm |
38540    /// | 5 | Zmm, Zmm, Mem |
38541    /// | 6 | Zmm, Zmm, Zmm |
38542    /// +---+---------------+
38543    /// ```
38544    #[inline]
38545    pub fn vfmsubadd231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38546    where Assembler<'a>: Vfmsubadd231phEmitter<A, B, C> {
38547        <Self as Vfmsubadd231phEmitter<A, B, C>>::vfmsubadd231ph(self, op0, op1, op2);
38548    }
38549    /// `VFMSUBADD231PH_ER`.
38550    ///
38551    /// Supported operand variants:
38552    ///
38553    /// ```text
38554    /// +---+---------------+
38555    /// | # | Operands      |
38556    /// +---+---------------+
38557    /// | 1 | Zmm, Zmm, Zmm |
38558    /// +---+---------------+
38559    /// ```
38560    #[inline]
38561    pub fn vfmsubadd231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38562    where Assembler<'a>: Vfmsubadd231phErEmitter<A, B, C> {
38563        <Self as Vfmsubadd231phErEmitter<A, B, C>>::vfmsubadd231ph_er(self, op0, op1, op2);
38564    }
38565    /// `VFMSUBADD231PH_MASK`.
38566    ///
38567    /// Supported operand variants:
38568    ///
38569    /// ```text
38570    /// +---+---------------+
38571    /// | # | Operands      |
38572    /// +---+---------------+
38573    /// | 1 | Xmm, Xmm, Mem |
38574    /// | 2 | Xmm, Xmm, Xmm |
38575    /// | 3 | Ymm, Ymm, Mem |
38576    /// | 4 | Ymm, Ymm, Ymm |
38577    /// | 5 | Zmm, Zmm, Mem |
38578    /// | 6 | Zmm, Zmm, Zmm |
38579    /// +---+---------------+
38580    /// ```
38581    #[inline]
38582    pub fn vfmsubadd231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38583    where Assembler<'a>: Vfmsubadd231phMaskEmitter<A, B, C> {
38584        <Self as Vfmsubadd231phMaskEmitter<A, B, C>>::vfmsubadd231ph_mask(self, op0, op1, op2);
38585    }
38586    /// `VFMSUBADD231PH_MASK_ER`.
38587    ///
38588    /// Supported operand variants:
38589    ///
38590    /// ```text
38591    /// +---+---------------+
38592    /// | # | Operands      |
38593    /// +---+---------------+
38594    /// | 1 | Zmm, Zmm, Zmm |
38595    /// +---+---------------+
38596    /// ```
38597    #[inline]
38598    pub fn vfmsubadd231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38599    where Assembler<'a>: Vfmsubadd231phMaskErEmitter<A, B, C> {
38600        <Self as Vfmsubadd231phMaskErEmitter<A, B, C>>::vfmsubadd231ph_mask_er(self, op0, op1, op2);
38601    }
38602    /// `VFMSUBADD231PH_MASKZ`.
38603    ///
38604    /// Supported operand variants:
38605    ///
38606    /// ```text
38607    /// +---+---------------+
38608    /// | # | Operands      |
38609    /// +---+---------------+
38610    /// | 1 | Xmm, Xmm, Mem |
38611    /// | 2 | Xmm, Xmm, Xmm |
38612    /// | 3 | Ymm, Ymm, Mem |
38613    /// | 4 | Ymm, Ymm, Ymm |
38614    /// | 5 | Zmm, Zmm, Mem |
38615    /// | 6 | Zmm, Zmm, Zmm |
38616    /// +---+---------------+
38617    /// ```
38618    #[inline]
38619    pub fn vfmsubadd231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38620    where Assembler<'a>: Vfmsubadd231phMaskzEmitter<A, B, C> {
38621        <Self as Vfmsubadd231phMaskzEmitter<A, B, C>>::vfmsubadd231ph_maskz(self, op0, op1, op2);
38622    }
38623    /// `VFMSUBADD231PH_MASKZ_ER`.
38624    ///
38625    /// Supported operand variants:
38626    ///
38627    /// ```text
38628    /// +---+---------------+
38629    /// | # | Operands      |
38630    /// +---+---------------+
38631    /// | 1 | Zmm, Zmm, Zmm |
38632    /// +---+---------------+
38633    /// ```
38634    #[inline]
38635    pub fn vfmsubadd231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38636    where Assembler<'a>: Vfmsubadd231phMaskzErEmitter<A, B, C> {
38637        <Self as Vfmsubadd231phMaskzErEmitter<A, B, C>>::vfmsubadd231ph_maskz_er(self, op0, op1, op2);
38638    }
38639    /// `VFMULCPH`.
38640    ///
38641    /// Supported operand variants:
38642    ///
38643    /// ```text
38644    /// +---+---------------+
38645    /// | # | Operands      |
38646    /// +---+---------------+
38647    /// | 1 | Xmm, Xmm, Mem |
38648    /// | 2 | Xmm, Xmm, Xmm |
38649    /// | 3 | Ymm, Ymm, Mem |
38650    /// | 4 | Ymm, Ymm, Ymm |
38651    /// | 5 | Zmm, Zmm, Mem |
38652    /// | 6 | Zmm, Zmm, Zmm |
38653    /// +---+---------------+
38654    /// ```
38655    #[inline]
38656    pub fn vfmulcph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38657    where Assembler<'a>: VfmulcphEmitter<A, B, C> {
38658        <Self as VfmulcphEmitter<A, B, C>>::vfmulcph(self, op0, op1, op2);
38659    }
38660    /// `VFMULCPH_ER`.
38661    ///
38662    /// Supported operand variants:
38663    ///
38664    /// ```text
38665    /// +---+---------------+
38666    /// | # | Operands      |
38667    /// +---+---------------+
38668    /// | 1 | Zmm, Zmm, Zmm |
38669    /// +---+---------------+
38670    /// ```
38671    #[inline]
38672    pub fn vfmulcph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38673    where Assembler<'a>: VfmulcphErEmitter<A, B, C> {
38674        <Self as VfmulcphErEmitter<A, B, C>>::vfmulcph_er(self, op0, op1, op2);
38675    }
38676    /// `VFMULCPH_MASK`.
38677    ///
38678    /// Supported operand variants:
38679    ///
38680    /// ```text
38681    /// +---+---------------+
38682    /// | # | Operands      |
38683    /// +---+---------------+
38684    /// | 1 | Xmm, Xmm, Mem |
38685    /// | 2 | Xmm, Xmm, Xmm |
38686    /// | 3 | Ymm, Ymm, Mem |
38687    /// | 4 | Ymm, Ymm, Ymm |
38688    /// | 5 | Zmm, Zmm, Mem |
38689    /// | 6 | Zmm, Zmm, Zmm |
38690    /// +---+---------------+
38691    /// ```
38692    #[inline]
38693    pub fn vfmulcph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38694    where Assembler<'a>: VfmulcphMaskEmitter<A, B, C> {
38695        <Self as VfmulcphMaskEmitter<A, B, C>>::vfmulcph_mask(self, op0, op1, op2);
38696    }
38697    /// `VFMULCPH_MASK_ER`.
38698    ///
38699    /// Supported operand variants:
38700    ///
38701    /// ```text
38702    /// +---+---------------+
38703    /// | # | Operands      |
38704    /// +---+---------------+
38705    /// | 1 | Zmm, Zmm, Zmm |
38706    /// +---+---------------+
38707    /// ```
38708    #[inline]
38709    pub fn vfmulcph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38710    where Assembler<'a>: VfmulcphMaskErEmitter<A, B, C> {
38711        <Self as VfmulcphMaskErEmitter<A, B, C>>::vfmulcph_mask_er(self, op0, op1, op2);
38712    }
38713    /// `VFMULCPH_MASKZ`.
38714    ///
38715    /// Supported operand variants:
38716    ///
38717    /// ```text
38718    /// +---+---------------+
38719    /// | # | Operands      |
38720    /// +---+---------------+
38721    /// | 1 | Xmm, Xmm, Mem |
38722    /// | 2 | Xmm, Xmm, Xmm |
38723    /// | 3 | Ymm, Ymm, Mem |
38724    /// | 4 | Ymm, Ymm, Ymm |
38725    /// | 5 | Zmm, Zmm, Mem |
38726    /// | 6 | Zmm, Zmm, Zmm |
38727    /// +---+---------------+
38728    /// ```
38729    #[inline]
38730    pub fn vfmulcph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38731    where Assembler<'a>: VfmulcphMaskzEmitter<A, B, C> {
38732        <Self as VfmulcphMaskzEmitter<A, B, C>>::vfmulcph_maskz(self, op0, op1, op2);
38733    }
38734    /// `VFMULCPH_MASKZ_ER`.
38735    ///
38736    /// Supported operand variants:
38737    ///
38738    /// ```text
38739    /// +---+---------------+
38740    /// | # | Operands      |
38741    /// +---+---------------+
38742    /// | 1 | Zmm, Zmm, Zmm |
38743    /// +---+---------------+
38744    /// ```
38745    #[inline]
38746    pub fn vfmulcph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38747    where Assembler<'a>: VfmulcphMaskzErEmitter<A, B, C> {
38748        <Self as VfmulcphMaskzErEmitter<A, B, C>>::vfmulcph_maskz_er(self, op0, op1, op2);
38749    }
38750    /// `VFMULCSH`.
38751    ///
38752    /// Supported operand variants:
38753    ///
38754    /// ```text
38755    /// +---+---------------+
38756    /// | # | Operands      |
38757    /// +---+---------------+
38758    /// | 1 | Xmm, Xmm, Mem |
38759    /// | 2 | Xmm, Xmm, Xmm |
38760    /// +---+---------------+
38761    /// ```
38762    #[inline]
38763    pub fn vfmulcsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38764    where Assembler<'a>: VfmulcshEmitter<A, B, C> {
38765        <Self as VfmulcshEmitter<A, B, C>>::vfmulcsh(self, op0, op1, op2);
38766    }
38767    /// `VFMULCSH_ER`.
38768    ///
38769    /// Supported operand variants:
38770    ///
38771    /// ```text
38772    /// +---+---------------+
38773    /// | # | Operands      |
38774    /// +---+---------------+
38775    /// | 1 | Xmm, Xmm, Xmm |
38776    /// +---+---------------+
38777    /// ```
38778    #[inline]
38779    pub fn vfmulcsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38780    where Assembler<'a>: VfmulcshErEmitter<A, B, C> {
38781        <Self as VfmulcshErEmitter<A, B, C>>::vfmulcsh_er(self, op0, op1, op2);
38782    }
38783    /// `VFMULCSH_MASK`.
38784    ///
38785    /// Supported operand variants:
38786    ///
38787    /// ```text
38788    /// +---+---------------+
38789    /// | # | Operands      |
38790    /// +---+---------------+
38791    /// | 1 | Xmm, Xmm, Mem |
38792    /// | 2 | Xmm, Xmm, Xmm |
38793    /// +---+---------------+
38794    /// ```
38795    #[inline]
38796    pub fn vfmulcsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38797    where Assembler<'a>: VfmulcshMaskEmitter<A, B, C> {
38798        <Self as VfmulcshMaskEmitter<A, B, C>>::vfmulcsh_mask(self, op0, op1, op2);
38799    }
38800    /// `VFMULCSH_MASK_ER`.
38801    ///
38802    /// Supported operand variants:
38803    ///
38804    /// ```text
38805    /// +---+---------------+
38806    /// | # | Operands      |
38807    /// +---+---------------+
38808    /// | 1 | Xmm, Xmm, Xmm |
38809    /// +---+---------------+
38810    /// ```
38811    #[inline]
38812    pub fn vfmulcsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38813    where Assembler<'a>: VfmulcshMaskErEmitter<A, B, C> {
38814        <Self as VfmulcshMaskErEmitter<A, B, C>>::vfmulcsh_mask_er(self, op0, op1, op2);
38815    }
38816    /// `VFMULCSH_MASKZ`.
38817    ///
38818    /// Supported operand variants:
38819    ///
38820    /// ```text
38821    /// +---+---------------+
38822    /// | # | Operands      |
38823    /// +---+---------------+
38824    /// | 1 | Xmm, Xmm, Mem |
38825    /// | 2 | Xmm, Xmm, Xmm |
38826    /// +---+---------------+
38827    /// ```
38828    #[inline]
38829    pub fn vfmulcsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38830    where Assembler<'a>: VfmulcshMaskzEmitter<A, B, C> {
38831        <Self as VfmulcshMaskzEmitter<A, B, C>>::vfmulcsh_maskz(self, op0, op1, op2);
38832    }
38833    /// `VFMULCSH_MASKZ_ER`.
38834    ///
38835    /// Supported operand variants:
38836    ///
38837    /// ```text
38838    /// +---+---------------+
38839    /// | # | Operands      |
38840    /// +---+---------------+
38841    /// | 1 | Xmm, Xmm, Xmm |
38842    /// +---+---------------+
38843    /// ```
38844    #[inline]
38845    pub fn vfmulcsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38846    where Assembler<'a>: VfmulcshMaskzErEmitter<A, B, C> {
38847        <Self as VfmulcshMaskzErEmitter<A, B, C>>::vfmulcsh_maskz_er(self, op0, op1, op2);
38848    }
38849    /// `VFNMADD132PH`.
38850    ///
38851    /// Supported operand variants:
38852    ///
38853    /// ```text
38854    /// +---+---------------+
38855    /// | # | Operands      |
38856    /// +---+---------------+
38857    /// | 1 | Xmm, Xmm, Mem |
38858    /// | 2 | Xmm, Xmm, Xmm |
38859    /// | 3 | Ymm, Ymm, Mem |
38860    /// | 4 | Ymm, Ymm, Ymm |
38861    /// | 5 | Zmm, Zmm, Mem |
38862    /// | 6 | Zmm, Zmm, Zmm |
38863    /// +---+---------------+
38864    /// ```
38865    #[inline]
38866    pub fn vfnmadd132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38867    where Assembler<'a>: Vfnmadd132phEmitter<A, B, C> {
38868        <Self as Vfnmadd132phEmitter<A, B, C>>::vfnmadd132ph(self, op0, op1, op2);
38869    }
38870    /// `VFNMADD132PH_ER`.
38871    ///
38872    /// Supported operand variants:
38873    ///
38874    /// ```text
38875    /// +---+---------------+
38876    /// | # | Operands      |
38877    /// +---+---------------+
38878    /// | 1 | Zmm, Zmm, Zmm |
38879    /// +---+---------------+
38880    /// ```
38881    #[inline]
38882    pub fn vfnmadd132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38883    where Assembler<'a>: Vfnmadd132phErEmitter<A, B, C> {
38884        <Self as Vfnmadd132phErEmitter<A, B, C>>::vfnmadd132ph_er(self, op0, op1, op2);
38885    }
38886    /// `VFNMADD132PH_MASK`.
38887    ///
38888    /// Supported operand variants:
38889    ///
38890    /// ```text
38891    /// +---+---------------+
38892    /// | # | Operands      |
38893    /// +---+---------------+
38894    /// | 1 | Xmm, Xmm, Mem |
38895    /// | 2 | Xmm, Xmm, Xmm |
38896    /// | 3 | Ymm, Ymm, Mem |
38897    /// | 4 | Ymm, Ymm, Ymm |
38898    /// | 5 | Zmm, Zmm, Mem |
38899    /// | 6 | Zmm, Zmm, Zmm |
38900    /// +---+---------------+
38901    /// ```
38902    #[inline]
38903    pub fn vfnmadd132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38904    where Assembler<'a>: Vfnmadd132phMaskEmitter<A, B, C> {
38905        <Self as Vfnmadd132phMaskEmitter<A, B, C>>::vfnmadd132ph_mask(self, op0, op1, op2);
38906    }
38907    /// `VFNMADD132PH_MASK_ER`.
38908    ///
38909    /// Supported operand variants:
38910    ///
38911    /// ```text
38912    /// +---+---------------+
38913    /// | # | Operands      |
38914    /// +---+---------------+
38915    /// | 1 | Zmm, Zmm, Zmm |
38916    /// +---+---------------+
38917    /// ```
38918    #[inline]
38919    pub fn vfnmadd132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38920    where Assembler<'a>: Vfnmadd132phMaskErEmitter<A, B, C> {
38921        <Self as Vfnmadd132phMaskErEmitter<A, B, C>>::vfnmadd132ph_mask_er(self, op0, op1, op2);
38922    }
38923    /// `VFNMADD132PH_MASKZ`.
38924    ///
38925    /// Supported operand variants:
38926    ///
38927    /// ```text
38928    /// +---+---------------+
38929    /// | # | Operands      |
38930    /// +---+---------------+
38931    /// | 1 | Xmm, Xmm, Mem |
38932    /// | 2 | Xmm, Xmm, Xmm |
38933    /// | 3 | Ymm, Ymm, Mem |
38934    /// | 4 | Ymm, Ymm, Ymm |
38935    /// | 5 | Zmm, Zmm, Mem |
38936    /// | 6 | Zmm, Zmm, Zmm |
38937    /// +---+---------------+
38938    /// ```
38939    #[inline]
38940    pub fn vfnmadd132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38941    where Assembler<'a>: Vfnmadd132phMaskzEmitter<A, B, C> {
38942        <Self as Vfnmadd132phMaskzEmitter<A, B, C>>::vfnmadd132ph_maskz(self, op0, op1, op2);
38943    }
38944    /// `VFNMADD132PH_MASKZ_ER`.
38945    ///
38946    /// Supported operand variants:
38947    ///
38948    /// ```text
38949    /// +---+---------------+
38950    /// | # | Operands      |
38951    /// +---+---------------+
38952    /// | 1 | Zmm, Zmm, Zmm |
38953    /// +---+---------------+
38954    /// ```
38955    #[inline]
38956    pub fn vfnmadd132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38957    where Assembler<'a>: Vfnmadd132phMaskzErEmitter<A, B, C> {
38958        <Self as Vfnmadd132phMaskzErEmitter<A, B, C>>::vfnmadd132ph_maskz_er(self, op0, op1, op2);
38959    }
38960    /// `VFNMADD132SH`.
38961    ///
38962    /// Supported operand variants:
38963    ///
38964    /// ```text
38965    /// +---+---------------+
38966    /// | # | Operands      |
38967    /// +---+---------------+
38968    /// | 1 | Xmm, Xmm, Mem |
38969    /// | 2 | Xmm, Xmm, Xmm |
38970    /// +---+---------------+
38971    /// ```
38972    #[inline]
38973    pub fn vfnmadd132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38974    where Assembler<'a>: Vfnmadd132shEmitter<A, B, C> {
38975        <Self as Vfnmadd132shEmitter<A, B, C>>::vfnmadd132sh(self, op0, op1, op2);
38976    }
38977    /// `VFNMADD132SH_ER`.
38978    ///
38979    /// Supported operand variants:
38980    ///
38981    /// ```text
38982    /// +---+---------------+
38983    /// | # | Operands      |
38984    /// +---+---------------+
38985    /// | 1 | Xmm, Xmm, Xmm |
38986    /// +---+---------------+
38987    /// ```
38988    #[inline]
38989    pub fn vfnmadd132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
38990    where Assembler<'a>: Vfnmadd132shErEmitter<A, B, C> {
38991        <Self as Vfnmadd132shErEmitter<A, B, C>>::vfnmadd132sh_er(self, op0, op1, op2);
38992    }
38993    /// `VFNMADD132SH_MASK`.
38994    ///
38995    /// Supported operand variants:
38996    ///
38997    /// ```text
38998    /// +---+---------------+
38999    /// | # | Operands      |
39000    /// +---+---------------+
39001    /// | 1 | Xmm, Xmm, Mem |
39002    /// | 2 | Xmm, Xmm, Xmm |
39003    /// +---+---------------+
39004    /// ```
39005    #[inline]
39006    pub fn vfnmadd132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39007    where Assembler<'a>: Vfnmadd132shMaskEmitter<A, B, C> {
39008        <Self as Vfnmadd132shMaskEmitter<A, B, C>>::vfnmadd132sh_mask(self, op0, op1, op2);
39009    }
39010    /// `VFNMADD132SH_MASK_ER`.
39011    ///
39012    /// Supported operand variants:
39013    ///
39014    /// ```text
39015    /// +---+---------------+
39016    /// | # | Operands      |
39017    /// +---+---------------+
39018    /// | 1 | Xmm, Xmm, Xmm |
39019    /// +---+---------------+
39020    /// ```
39021    #[inline]
39022    pub fn vfnmadd132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39023    where Assembler<'a>: Vfnmadd132shMaskErEmitter<A, B, C> {
39024        <Self as Vfnmadd132shMaskErEmitter<A, B, C>>::vfnmadd132sh_mask_er(self, op0, op1, op2);
39025    }
39026    /// `VFNMADD132SH_MASKZ`.
39027    ///
39028    /// Supported operand variants:
39029    ///
39030    /// ```text
39031    /// +---+---------------+
39032    /// | # | Operands      |
39033    /// +---+---------------+
39034    /// | 1 | Xmm, Xmm, Mem |
39035    /// | 2 | Xmm, Xmm, Xmm |
39036    /// +---+---------------+
39037    /// ```
39038    #[inline]
39039    pub fn vfnmadd132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39040    where Assembler<'a>: Vfnmadd132shMaskzEmitter<A, B, C> {
39041        <Self as Vfnmadd132shMaskzEmitter<A, B, C>>::vfnmadd132sh_maskz(self, op0, op1, op2);
39042    }
39043    /// `VFNMADD132SH_MASKZ_ER`.
39044    ///
39045    /// Supported operand variants:
39046    ///
39047    /// ```text
39048    /// +---+---------------+
39049    /// | # | Operands      |
39050    /// +---+---------------+
39051    /// | 1 | Xmm, Xmm, Xmm |
39052    /// +---+---------------+
39053    /// ```
39054    #[inline]
39055    pub fn vfnmadd132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39056    where Assembler<'a>: Vfnmadd132shMaskzErEmitter<A, B, C> {
39057        <Self as Vfnmadd132shMaskzErEmitter<A, B, C>>::vfnmadd132sh_maskz_er(self, op0, op1, op2);
39058    }
39059    /// `VFNMADD213PH`.
39060    ///
39061    /// Supported operand variants:
39062    ///
39063    /// ```text
39064    /// +---+---------------+
39065    /// | # | Operands      |
39066    /// +---+---------------+
39067    /// | 1 | Xmm, Xmm, Mem |
39068    /// | 2 | Xmm, Xmm, Xmm |
39069    /// | 3 | Ymm, Ymm, Mem |
39070    /// | 4 | Ymm, Ymm, Ymm |
39071    /// | 5 | Zmm, Zmm, Mem |
39072    /// | 6 | Zmm, Zmm, Zmm |
39073    /// +---+---------------+
39074    /// ```
39075    #[inline]
39076    pub fn vfnmadd213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39077    where Assembler<'a>: Vfnmadd213phEmitter<A, B, C> {
39078        <Self as Vfnmadd213phEmitter<A, B, C>>::vfnmadd213ph(self, op0, op1, op2);
39079    }
39080    /// `VFNMADD213PH_ER`.
39081    ///
39082    /// Supported operand variants:
39083    ///
39084    /// ```text
39085    /// +---+---------------+
39086    /// | # | Operands      |
39087    /// +---+---------------+
39088    /// | 1 | Zmm, Zmm, Zmm |
39089    /// +---+---------------+
39090    /// ```
39091    #[inline]
39092    pub fn vfnmadd213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39093    where Assembler<'a>: Vfnmadd213phErEmitter<A, B, C> {
39094        <Self as Vfnmadd213phErEmitter<A, B, C>>::vfnmadd213ph_er(self, op0, op1, op2);
39095    }
39096    /// `VFNMADD213PH_MASK`.
39097    ///
39098    /// Supported operand variants:
39099    ///
39100    /// ```text
39101    /// +---+---------------+
39102    /// | # | Operands      |
39103    /// +---+---------------+
39104    /// | 1 | Xmm, Xmm, Mem |
39105    /// | 2 | Xmm, Xmm, Xmm |
39106    /// | 3 | Ymm, Ymm, Mem |
39107    /// | 4 | Ymm, Ymm, Ymm |
39108    /// | 5 | Zmm, Zmm, Mem |
39109    /// | 6 | Zmm, Zmm, Zmm |
39110    /// +---+---------------+
39111    /// ```
39112    #[inline]
39113    pub fn vfnmadd213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39114    where Assembler<'a>: Vfnmadd213phMaskEmitter<A, B, C> {
39115        <Self as Vfnmadd213phMaskEmitter<A, B, C>>::vfnmadd213ph_mask(self, op0, op1, op2);
39116    }
39117    /// `VFNMADD213PH_MASK_ER`.
39118    ///
39119    /// Supported operand variants:
39120    ///
39121    /// ```text
39122    /// +---+---------------+
39123    /// | # | Operands      |
39124    /// +---+---------------+
39125    /// | 1 | Zmm, Zmm, Zmm |
39126    /// +---+---------------+
39127    /// ```
39128    #[inline]
39129    pub fn vfnmadd213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39130    where Assembler<'a>: Vfnmadd213phMaskErEmitter<A, B, C> {
39131        <Self as Vfnmadd213phMaskErEmitter<A, B, C>>::vfnmadd213ph_mask_er(self, op0, op1, op2);
39132    }
39133    /// `VFNMADD213PH_MASKZ`.
39134    ///
39135    /// Supported operand variants:
39136    ///
39137    /// ```text
39138    /// +---+---------------+
39139    /// | # | Operands      |
39140    /// +---+---------------+
39141    /// | 1 | Xmm, Xmm, Mem |
39142    /// | 2 | Xmm, Xmm, Xmm |
39143    /// | 3 | Ymm, Ymm, Mem |
39144    /// | 4 | Ymm, Ymm, Ymm |
39145    /// | 5 | Zmm, Zmm, Mem |
39146    /// | 6 | Zmm, Zmm, Zmm |
39147    /// +---+---------------+
39148    /// ```
39149    #[inline]
39150    pub fn vfnmadd213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39151    where Assembler<'a>: Vfnmadd213phMaskzEmitter<A, B, C> {
39152        <Self as Vfnmadd213phMaskzEmitter<A, B, C>>::vfnmadd213ph_maskz(self, op0, op1, op2);
39153    }
39154    /// `VFNMADD213PH_MASKZ_ER`.
39155    ///
39156    /// Supported operand variants:
39157    ///
39158    /// ```text
39159    /// +---+---------------+
39160    /// | # | Operands      |
39161    /// +---+---------------+
39162    /// | 1 | Zmm, Zmm, Zmm |
39163    /// +---+---------------+
39164    /// ```
39165    #[inline]
39166    pub fn vfnmadd213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39167    where Assembler<'a>: Vfnmadd213phMaskzErEmitter<A, B, C> {
39168        <Self as Vfnmadd213phMaskzErEmitter<A, B, C>>::vfnmadd213ph_maskz_er(self, op0, op1, op2);
39169    }
39170    /// `VFNMADD213SH`.
39171    ///
39172    /// Supported operand variants:
39173    ///
39174    /// ```text
39175    /// +---+---------------+
39176    /// | # | Operands      |
39177    /// +---+---------------+
39178    /// | 1 | Xmm, Xmm, Mem |
39179    /// | 2 | Xmm, Xmm, Xmm |
39180    /// +---+---------------+
39181    /// ```
39182    #[inline]
39183    pub fn vfnmadd213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39184    where Assembler<'a>: Vfnmadd213shEmitter<A, B, C> {
39185        <Self as Vfnmadd213shEmitter<A, B, C>>::vfnmadd213sh(self, op0, op1, op2);
39186    }
39187    /// `VFNMADD213SH_ER`.
39188    ///
39189    /// Supported operand variants:
39190    ///
39191    /// ```text
39192    /// +---+---------------+
39193    /// | # | Operands      |
39194    /// +---+---------------+
39195    /// | 1 | Xmm, Xmm, Xmm |
39196    /// +---+---------------+
39197    /// ```
39198    #[inline]
39199    pub fn vfnmadd213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39200    where Assembler<'a>: Vfnmadd213shErEmitter<A, B, C> {
39201        <Self as Vfnmadd213shErEmitter<A, B, C>>::vfnmadd213sh_er(self, op0, op1, op2);
39202    }
39203    /// `VFNMADD213SH_MASK`.
39204    ///
39205    /// Supported operand variants:
39206    ///
39207    /// ```text
39208    /// +---+---------------+
39209    /// | # | Operands      |
39210    /// +---+---------------+
39211    /// | 1 | Xmm, Xmm, Mem |
39212    /// | 2 | Xmm, Xmm, Xmm |
39213    /// +---+---------------+
39214    /// ```
39215    #[inline]
39216    pub fn vfnmadd213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39217    where Assembler<'a>: Vfnmadd213shMaskEmitter<A, B, C> {
39218        <Self as Vfnmadd213shMaskEmitter<A, B, C>>::vfnmadd213sh_mask(self, op0, op1, op2);
39219    }
39220    /// `VFNMADD213SH_MASK_ER`.
39221    ///
39222    /// Supported operand variants:
39223    ///
39224    /// ```text
39225    /// +---+---------------+
39226    /// | # | Operands      |
39227    /// +---+---------------+
39228    /// | 1 | Xmm, Xmm, Xmm |
39229    /// +---+---------------+
39230    /// ```
39231    #[inline]
39232    pub fn vfnmadd213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39233    where Assembler<'a>: Vfnmadd213shMaskErEmitter<A, B, C> {
39234        <Self as Vfnmadd213shMaskErEmitter<A, B, C>>::vfnmadd213sh_mask_er(self, op0, op1, op2);
39235    }
39236    /// `VFNMADD213SH_MASKZ`.
39237    ///
39238    /// Supported operand variants:
39239    ///
39240    /// ```text
39241    /// +---+---------------+
39242    /// | # | Operands      |
39243    /// +---+---------------+
39244    /// | 1 | Xmm, Xmm, Mem |
39245    /// | 2 | Xmm, Xmm, Xmm |
39246    /// +---+---------------+
39247    /// ```
39248    #[inline]
39249    pub fn vfnmadd213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39250    where Assembler<'a>: Vfnmadd213shMaskzEmitter<A, B, C> {
39251        <Self as Vfnmadd213shMaskzEmitter<A, B, C>>::vfnmadd213sh_maskz(self, op0, op1, op2);
39252    }
39253    /// `VFNMADD213SH_MASKZ_ER`.
39254    ///
39255    /// Supported operand variants:
39256    ///
39257    /// ```text
39258    /// +---+---------------+
39259    /// | # | Operands      |
39260    /// +---+---------------+
39261    /// | 1 | Xmm, Xmm, Xmm |
39262    /// +---+---------------+
39263    /// ```
39264    #[inline]
39265    pub fn vfnmadd213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39266    where Assembler<'a>: Vfnmadd213shMaskzErEmitter<A, B, C> {
39267        <Self as Vfnmadd213shMaskzErEmitter<A, B, C>>::vfnmadd213sh_maskz_er(self, op0, op1, op2);
39268    }
39269    /// `VFNMADD231PH`.
39270    ///
39271    /// Supported operand variants:
39272    ///
39273    /// ```text
39274    /// +---+---------------+
39275    /// | # | Operands      |
39276    /// +---+---------------+
39277    /// | 1 | Xmm, Xmm, Mem |
39278    /// | 2 | Xmm, Xmm, Xmm |
39279    /// | 3 | Ymm, Ymm, Mem |
39280    /// | 4 | Ymm, Ymm, Ymm |
39281    /// | 5 | Zmm, Zmm, Mem |
39282    /// | 6 | Zmm, Zmm, Zmm |
39283    /// +---+---------------+
39284    /// ```
39285    #[inline]
39286    pub fn vfnmadd231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39287    where Assembler<'a>: Vfnmadd231phEmitter<A, B, C> {
39288        <Self as Vfnmadd231phEmitter<A, B, C>>::vfnmadd231ph(self, op0, op1, op2);
39289    }
39290    /// `VFNMADD231PH_ER`.
39291    ///
39292    /// Supported operand variants:
39293    ///
39294    /// ```text
39295    /// +---+---------------+
39296    /// | # | Operands      |
39297    /// +---+---------------+
39298    /// | 1 | Zmm, Zmm, Zmm |
39299    /// +---+---------------+
39300    /// ```
39301    #[inline]
39302    pub fn vfnmadd231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39303    where Assembler<'a>: Vfnmadd231phErEmitter<A, B, C> {
39304        <Self as Vfnmadd231phErEmitter<A, B, C>>::vfnmadd231ph_er(self, op0, op1, op2);
39305    }
39306    /// `VFNMADD231PH_MASK`.
39307    ///
39308    /// Supported operand variants:
39309    ///
39310    /// ```text
39311    /// +---+---------------+
39312    /// | # | Operands      |
39313    /// +---+---------------+
39314    /// | 1 | Xmm, Xmm, Mem |
39315    /// | 2 | Xmm, Xmm, Xmm |
39316    /// | 3 | Ymm, Ymm, Mem |
39317    /// | 4 | Ymm, Ymm, Ymm |
39318    /// | 5 | Zmm, Zmm, Mem |
39319    /// | 6 | Zmm, Zmm, Zmm |
39320    /// +---+---------------+
39321    /// ```
39322    #[inline]
39323    pub fn vfnmadd231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39324    where Assembler<'a>: Vfnmadd231phMaskEmitter<A, B, C> {
39325        <Self as Vfnmadd231phMaskEmitter<A, B, C>>::vfnmadd231ph_mask(self, op0, op1, op2);
39326    }
39327    /// `VFNMADD231PH_MASK_ER`.
39328    ///
39329    /// Supported operand variants:
39330    ///
39331    /// ```text
39332    /// +---+---------------+
39333    /// | # | Operands      |
39334    /// +---+---------------+
39335    /// | 1 | Zmm, Zmm, Zmm |
39336    /// +---+---------------+
39337    /// ```
39338    #[inline]
39339    pub fn vfnmadd231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39340    where Assembler<'a>: Vfnmadd231phMaskErEmitter<A, B, C> {
39341        <Self as Vfnmadd231phMaskErEmitter<A, B, C>>::vfnmadd231ph_mask_er(self, op0, op1, op2);
39342    }
39343    /// `VFNMADD231PH_MASKZ`.
39344    ///
39345    /// Supported operand variants:
39346    ///
39347    /// ```text
39348    /// +---+---------------+
39349    /// | # | Operands      |
39350    /// +---+---------------+
39351    /// | 1 | Xmm, Xmm, Mem |
39352    /// | 2 | Xmm, Xmm, Xmm |
39353    /// | 3 | Ymm, Ymm, Mem |
39354    /// | 4 | Ymm, Ymm, Ymm |
39355    /// | 5 | Zmm, Zmm, Mem |
39356    /// | 6 | Zmm, Zmm, Zmm |
39357    /// +---+---------------+
39358    /// ```
39359    #[inline]
39360    pub fn vfnmadd231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39361    where Assembler<'a>: Vfnmadd231phMaskzEmitter<A, B, C> {
39362        <Self as Vfnmadd231phMaskzEmitter<A, B, C>>::vfnmadd231ph_maskz(self, op0, op1, op2);
39363    }
39364    /// `VFNMADD231PH_MASKZ_ER`.
39365    ///
39366    /// Supported operand variants:
39367    ///
39368    /// ```text
39369    /// +---+---------------+
39370    /// | # | Operands      |
39371    /// +---+---------------+
39372    /// | 1 | Zmm, Zmm, Zmm |
39373    /// +---+---------------+
39374    /// ```
39375    #[inline]
39376    pub fn vfnmadd231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39377    where Assembler<'a>: Vfnmadd231phMaskzErEmitter<A, B, C> {
39378        <Self as Vfnmadd231phMaskzErEmitter<A, B, C>>::vfnmadd231ph_maskz_er(self, op0, op1, op2);
39379    }
39380    /// `VFNMADD231SH`.
39381    ///
39382    /// Supported operand variants:
39383    ///
39384    /// ```text
39385    /// +---+---------------+
39386    /// | # | Operands      |
39387    /// +---+---------------+
39388    /// | 1 | Xmm, Xmm, Mem |
39389    /// | 2 | Xmm, Xmm, Xmm |
39390    /// +---+---------------+
39391    /// ```
39392    #[inline]
39393    pub fn vfnmadd231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39394    where Assembler<'a>: Vfnmadd231shEmitter<A, B, C> {
39395        <Self as Vfnmadd231shEmitter<A, B, C>>::vfnmadd231sh(self, op0, op1, op2);
39396    }
39397    /// `VFNMADD231SH_ER`.
39398    ///
39399    /// Supported operand variants:
39400    ///
39401    /// ```text
39402    /// +---+---------------+
39403    /// | # | Operands      |
39404    /// +---+---------------+
39405    /// | 1 | Xmm, Xmm, Xmm |
39406    /// +---+---------------+
39407    /// ```
39408    #[inline]
39409    pub fn vfnmadd231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39410    where Assembler<'a>: Vfnmadd231shErEmitter<A, B, C> {
39411        <Self as Vfnmadd231shErEmitter<A, B, C>>::vfnmadd231sh_er(self, op0, op1, op2);
39412    }
39413    /// `VFNMADD231SH_MASK`.
39414    ///
39415    /// Supported operand variants:
39416    ///
39417    /// ```text
39418    /// +---+---------------+
39419    /// | # | Operands      |
39420    /// +---+---------------+
39421    /// | 1 | Xmm, Xmm, Mem |
39422    /// | 2 | Xmm, Xmm, Xmm |
39423    /// +---+---------------+
39424    /// ```
39425    #[inline]
39426    pub fn vfnmadd231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39427    where Assembler<'a>: Vfnmadd231shMaskEmitter<A, B, C> {
39428        <Self as Vfnmadd231shMaskEmitter<A, B, C>>::vfnmadd231sh_mask(self, op0, op1, op2);
39429    }
39430    /// `VFNMADD231SH_MASK_ER`.
39431    ///
39432    /// Supported operand variants:
39433    ///
39434    /// ```text
39435    /// +---+---------------+
39436    /// | # | Operands      |
39437    /// +---+---------------+
39438    /// | 1 | Xmm, Xmm, Xmm |
39439    /// +---+---------------+
39440    /// ```
39441    #[inline]
39442    pub fn vfnmadd231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39443    where Assembler<'a>: Vfnmadd231shMaskErEmitter<A, B, C> {
39444        <Self as Vfnmadd231shMaskErEmitter<A, B, C>>::vfnmadd231sh_mask_er(self, op0, op1, op2);
39445    }
39446    /// `VFNMADD231SH_MASKZ`.
39447    ///
39448    /// Supported operand variants:
39449    ///
39450    /// ```text
39451    /// +---+---------------+
39452    /// | # | Operands      |
39453    /// +---+---------------+
39454    /// | 1 | Xmm, Xmm, Mem |
39455    /// | 2 | Xmm, Xmm, Xmm |
39456    /// +---+---------------+
39457    /// ```
39458    #[inline]
39459    pub fn vfnmadd231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39460    where Assembler<'a>: Vfnmadd231shMaskzEmitter<A, B, C> {
39461        <Self as Vfnmadd231shMaskzEmitter<A, B, C>>::vfnmadd231sh_maskz(self, op0, op1, op2);
39462    }
39463    /// `VFNMADD231SH_MASKZ_ER`.
39464    ///
39465    /// Supported operand variants:
39466    ///
39467    /// ```text
39468    /// +---+---------------+
39469    /// | # | Operands      |
39470    /// +---+---------------+
39471    /// | 1 | Xmm, Xmm, Xmm |
39472    /// +---+---------------+
39473    /// ```
39474    #[inline]
39475    pub fn vfnmadd231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39476    where Assembler<'a>: Vfnmadd231shMaskzErEmitter<A, B, C> {
39477        <Self as Vfnmadd231shMaskzErEmitter<A, B, C>>::vfnmadd231sh_maskz_er(self, op0, op1, op2);
39478    }
39479    /// `VFNMSUB132PH`.
39480    ///
39481    /// Supported operand variants:
39482    ///
39483    /// ```text
39484    /// +---+---------------+
39485    /// | # | Operands      |
39486    /// +---+---------------+
39487    /// | 1 | Xmm, Xmm, Mem |
39488    /// | 2 | Xmm, Xmm, Xmm |
39489    /// | 3 | Ymm, Ymm, Mem |
39490    /// | 4 | Ymm, Ymm, Ymm |
39491    /// | 5 | Zmm, Zmm, Mem |
39492    /// | 6 | Zmm, Zmm, Zmm |
39493    /// +---+---------------+
39494    /// ```
39495    #[inline]
39496    pub fn vfnmsub132ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39497    where Assembler<'a>: Vfnmsub132phEmitter<A, B, C> {
39498        <Self as Vfnmsub132phEmitter<A, B, C>>::vfnmsub132ph(self, op0, op1, op2);
39499    }
39500    /// `VFNMSUB132PH_ER`.
39501    ///
39502    /// Supported operand variants:
39503    ///
39504    /// ```text
39505    /// +---+---------------+
39506    /// | # | Operands      |
39507    /// +---+---------------+
39508    /// | 1 | Zmm, Zmm, Zmm |
39509    /// +---+---------------+
39510    /// ```
39511    #[inline]
39512    pub fn vfnmsub132ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39513    where Assembler<'a>: Vfnmsub132phErEmitter<A, B, C> {
39514        <Self as Vfnmsub132phErEmitter<A, B, C>>::vfnmsub132ph_er(self, op0, op1, op2);
39515    }
39516    /// `VFNMSUB132PH_MASK`.
39517    ///
39518    /// Supported operand variants:
39519    ///
39520    /// ```text
39521    /// +---+---------------+
39522    /// | # | Operands      |
39523    /// +---+---------------+
39524    /// | 1 | Xmm, Xmm, Mem |
39525    /// | 2 | Xmm, Xmm, Xmm |
39526    /// | 3 | Ymm, Ymm, Mem |
39527    /// | 4 | Ymm, Ymm, Ymm |
39528    /// | 5 | Zmm, Zmm, Mem |
39529    /// | 6 | Zmm, Zmm, Zmm |
39530    /// +---+---------------+
39531    /// ```
39532    #[inline]
39533    pub fn vfnmsub132ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39534    where Assembler<'a>: Vfnmsub132phMaskEmitter<A, B, C> {
39535        <Self as Vfnmsub132phMaskEmitter<A, B, C>>::vfnmsub132ph_mask(self, op0, op1, op2);
39536    }
39537    /// `VFNMSUB132PH_MASK_ER`.
39538    ///
39539    /// Supported operand variants:
39540    ///
39541    /// ```text
39542    /// +---+---------------+
39543    /// | # | Operands      |
39544    /// +---+---------------+
39545    /// | 1 | Zmm, Zmm, Zmm |
39546    /// +---+---------------+
39547    /// ```
39548    #[inline]
39549    pub fn vfnmsub132ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39550    where Assembler<'a>: Vfnmsub132phMaskErEmitter<A, B, C> {
39551        <Self as Vfnmsub132phMaskErEmitter<A, B, C>>::vfnmsub132ph_mask_er(self, op0, op1, op2);
39552    }
39553    /// `VFNMSUB132PH_MASKZ`.
39554    ///
39555    /// Supported operand variants:
39556    ///
39557    /// ```text
39558    /// +---+---------------+
39559    /// | # | Operands      |
39560    /// +---+---------------+
39561    /// | 1 | Xmm, Xmm, Mem |
39562    /// | 2 | Xmm, Xmm, Xmm |
39563    /// | 3 | Ymm, Ymm, Mem |
39564    /// | 4 | Ymm, Ymm, Ymm |
39565    /// | 5 | Zmm, Zmm, Mem |
39566    /// | 6 | Zmm, Zmm, Zmm |
39567    /// +---+---------------+
39568    /// ```
39569    #[inline]
39570    pub fn vfnmsub132ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39571    where Assembler<'a>: Vfnmsub132phMaskzEmitter<A, B, C> {
39572        <Self as Vfnmsub132phMaskzEmitter<A, B, C>>::vfnmsub132ph_maskz(self, op0, op1, op2);
39573    }
39574    /// `VFNMSUB132PH_MASKZ_ER`.
39575    ///
39576    /// Supported operand variants:
39577    ///
39578    /// ```text
39579    /// +---+---------------+
39580    /// | # | Operands      |
39581    /// +---+---------------+
39582    /// | 1 | Zmm, Zmm, Zmm |
39583    /// +---+---------------+
39584    /// ```
39585    #[inline]
39586    pub fn vfnmsub132ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39587    where Assembler<'a>: Vfnmsub132phMaskzErEmitter<A, B, C> {
39588        <Self as Vfnmsub132phMaskzErEmitter<A, B, C>>::vfnmsub132ph_maskz_er(self, op0, op1, op2);
39589    }
39590    /// `VFNMSUB132SH`.
39591    ///
39592    /// Supported operand variants:
39593    ///
39594    /// ```text
39595    /// +---+---------------+
39596    /// | # | Operands      |
39597    /// +---+---------------+
39598    /// | 1 | Xmm, Xmm, Mem |
39599    /// | 2 | Xmm, Xmm, Xmm |
39600    /// +---+---------------+
39601    /// ```
39602    #[inline]
39603    pub fn vfnmsub132sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39604    where Assembler<'a>: Vfnmsub132shEmitter<A, B, C> {
39605        <Self as Vfnmsub132shEmitter<A, B, C>>::vfnmsub132sh(self, op0, op1, op2);
39606    }
39607    /// `VFNMSUB132SH_ER`.
39608    ///
39609    /// Supported operand variants:
39610    ///
39611    /// ```text
39612    /// +---+---------------+
39613    /// | # | Operands      |
39614    /// +---+---------------+
39615    /// | 1 | Xmm, Xmm, Xmm |
39616    /// +---+---------------+
39617    /// ```
39618    #[inline]
39619    pub fn vfnmsub132sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39620    where Assembler<'a>: Vfnmsub132shErEmitter<A, B, C> {
39621        <Self as Vfnmsub132shErEmitter<A, B, C>>::vfnmsub132sh_er(self, op0, op1, op2);
39622    }
39623    /// `VFNMSUB132SH_MASK`.
39624    ///
39625    /// Supported operand variants:
39626    ///
39627    /// ```text
39628    /// +---+---------------+
39629    /// | # | Operands      |
39630    /// +---+---------------+
39631    /// | 1 | Xmm, Xmm, Mem |
39632    /// | 2 | Xmm, Xmm, Xmm |
39633    /// +---+---------------+
39634    /// ```
39635    #[inline]
39636    pub fn vfnmsub132sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39637    where Assembler<'a>: Vfnmsub132shMaskEmitter<A, B, C> {
39638        <Self as Vfnmsub132shMaskEmitter<A, B, C>>::vfnmsub132sh_mask(self, op0, op1, op2);
39639    }
39640    /// `VFNMSUB132SH_MASK_ER`.
39641    ///
39642    /// Supported operand variants:
39643    ///
39644    /// ```text
39645    /// +---+---------------+
39646    /// | # | Operands      |
39647    /// +---+---------------+
39648    /// | 1 | Xmm, Xmm, Xmm |
39649    /// +---+---------------+
39650    /// ```
39651    #[inline]
39652    pub fn vfnmsub132sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39653    where Assembler<'a>: Vfnmsub132shMaskErEmitter<A, B, C> {
39654        <Self as Vfnmsub132shMaskErEmitter<A, B, C>>::vfnmsub132sh_mask_er(self, op0, op1, op2);
39655    }
39656    /// `VFNMSUB132SH_MASKZ`.
39657    ///
39658    /// Supported operand variants:
39659    ///
39660    /// ```text
39661    /// +---+---------------+
39662    /// | # | Operands      |
39663    /// +---+---------------+
39664    /// | 1 | Xmm, Xmm, Mem |
39665    /// | 2 | Xmm, Xmm, Xmm |
39666    /// +---+---------------+
39667    /// ```
39668    #[inline]
39669    pub fn vfnmsub132sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39670    where Assembler<'a>: Vfnmsub132shMaskzEmitter<A, B, C> {
39671        <Self as Vfnmsub132shMaskzEmitter<A, B, C>>::vfnmsub132sh_maskz(self, op0, op1, op2);
39672    }
39673    /// `VFNMSUB132SH_MASKZ_ER`.
39674    ///
39675    /// Supported operand variants:
39676    ///
39677    /// ```text
39678    /// +---+---------------+
39679    /// | # | Operands      |
39680    /// +---+---------------+
39681    /// | 1 | Xmm, Xmm, Xmm |
39682    /// +---+---------------+
39683    /// ```
39684    #[inline]
39685    pub fn vfnmsub132sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39686    where Assembler<'a>: Vfnmsub132shMaskzErEmitter<A, B, C> {
39687        <Self as Vfnmsub132shMaskzErEmitter<A, B, C>>::vfnmsub132sh_maskz_er(self, op0, op1, op2);
39688    }
39689    /// `VFNMSUB213PH`.
39690    ///
39691    /// Supported operand variants:
39692    ///
39693    /// ```text
39694    /// +---+---------------+
39695    /// | # | Operands      |
39696    /// +---+---------------+
39697    /// | 1 | Xmm, Xmm, Mem |
39698    /// | 2 | Xmm, Xmm, Xmm |
39699    /// | 3 | Ymm, Ymm, Mem |
39700    /// | 4 | Ymm, Ymm, Ymm |
39701    /// | 5 | Zmm, Zmm, Mem |
39702    /// | 6 | Zmm, Zmm, Zmm |
39703    /// +---+---------------+
39704    /// ```
39705    #[inline]
39706    pub fn vfnmsub213ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39707    where Assembler<'a>: Vfnmsub213phEmitter<A, B, C> {
39708        <Self as Vfnmsub213phEmitter<A, B, C>>::vfnmsub213ph(self, op0, op1, op2);
39709    }
39710    /// `VFNMSUB213PH_ER`.
39711    ///
39712    /// Supported operand variants:
39713    ///
39714    /// ```text
39715    /// +---+---------------+
39716    /// | # | Operands      |
39717    /// +---+---------------+
39718    /// | 1 | Zmm, Zmm, Zmm |
39719    /// +---+---------------+
39720    /// ```
39721    #[inline]
39722    pub fn vfnmsub213ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39723    where Assembler<'a>: Vfnmsub213phErEmitter<A, B, C> {
39724        <Self as Vfnmsub213phErEmitter<A, B, C>>::vfnmsub213ph_er(self, op0, op1, op2);
39725    }
39726    /// `VFNMSUB213PH_MASK`.
39727    ///
39728    /// Supported operand variants:
39729    ///
39730    /// ```text
39731    /// +---+---------------+
39732    /// | # | Operands      |
39733    /// +---+---------------+
39734    /// | 1 | Xmm, Xmm, Mem |
39735    /// | 2 | Xmm, Xmm, Xmm |
39736    /// | 3 | Ymm, Ymm, Mem |
39737    /// | 4 | Ymm, Ymm, Ymm |
39738    /// | 5 | Zmm, Zmm, Mem |
39739    /// | 6 | Zmm, Zmm, Zmm |
39740    /// +---+---------------+
39741    /// ```
39742    #[inline]
39743    pub fn vfnmsub213ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39744    where Assembler<'a>: Vfnmsub213phMaskEmitter<A, B, C> {
39745        <Self as Vfnmsub213phMaskEmitter<A, B, C>>::vfnmsub213ph_mask(self, op0, op1, op2);
39746    }
39747    /// `VFNMSUB213PH_MASK_ER`.
39748    ///
39749    /// Supported operand variants:
39750    ///
39751    /// ```text
39752    /// +---+---------------+
39753    /// | # | Operands      |
39754    /// +---+---------------+
39755    /// | 1 | Zmm, Zmm, Zmm |
39756    /// +---+---------------+
39757    /// ```
39758    #[inline]
39759    pub fn vfnmsub213ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39760    where Assembler<'a>: Vfnmsub213phMaskErEmitter<A, B, C> {
39761        <Self as Vfnmsub213phMaskErEmitter<A, B, C>>::vfnmsub213ph_mask_er(self, op0, op1, op2);
39762    }
39763    /// `VFNMSUB213PH_MASKZ`.
39764    ///
39765    /// Supported operand variants:
39766    ///
39767    /// ```text
39768    /// +---+---------------+
39769    /// | # | Operands      |
39770    /// +---+---------------+
39771    /// | 1 | Xmm, Xmm, Mem |
39772    /// | 2 | Xmm, Xmm, Xmm |
39773    /// | 3 | Ymm, Ymm, Mem |
39774    /// | 4 | Ymm, Ymm, Ymm |
39775    /// | 5 | Zmm, Zmm, Mem |
39776    /// | 6 | Zmm, Zmm, Zmm |
39777    /// +---+---------------+
39778    /// ```
39779    #[inline]
39780    pub fn vfnmsub213ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39781    where Assembler<'a>: Vfnmsub213phMaskzEmitter<A, B, C> {
39782        <Self as Vfnmsub213phMaskzEmitter<A, B, C>>::vfnmsub213ph_maskz(self, op0, op1, op2);
39783    }
39784    /// `VFNMSUB213PH_MASKZ_ER`.
39785    ///
39786    /// Supported operand variants:
39787    ///
39788    /// ```text
39789    /// +---+---------------+
39790    /// | # | Operands      |
39791    /// +---+---------------+
39792    /// | 1 | Zmm, Zmm, Zmm |
39793    /// +---+---------------+
39794    /// ```
39795    #[inline]
39796    pub fn vfnmsub213ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39797    where Assembler<'a>: Vfnmsub213phMaskzErEmitter<A, B, C> {
39798        <Self as Vfnmsub213phMaskzErEmitter<A, B, C>>::vfnmsub213ph_maskz_er(self, op0, op1, op2);
39799    }
39800    /// `VFNMSUB213SH`.
39801    ///
39802    /// Supported operand variants:
39803    ///
39804    /// ```text
39805    /// +---+---------------+
39806    /// | # | Operands      |
39807    /// +---+---------------+
39808    /// | 1 | Xmm, Xmm, Mem |
39809    /// | 2 | Xmm, Xmm, Xmm |
39810    /// +---+---------------+
39811    /// ```
39812    #[inline]
39813    pub fn vfnmsub213sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39814    where Assembler<'a>: Vfnmsub213shEmitter<A, B, C> {
39815        <Self as Vfnmsub213shEmitter<A, B, C>>::vfnmsub213sh(self, op0, op1, op2);
39816    }
39817    /// `VFNMSUB213SH_ER`.
39818    ///
39819    /// Supported operand variants:
39820    ///
39821    /// ```text
39822    /// +---+---------------+
39823    /// | # | Operands      |
39824    /// +---+---------------+
39825    /// | 1 | Xmm, Xmm, Xmm |
39826    /// +---+---------------+
39827    /// ```
39828    #[inline]
39829    pub fn vfnmsub213sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39830    where Assembler<'a>: Vfnmsub213shErEmitter<A, B, C> {
39831        <Self as Vfnmsub213shErEmitter<A, B, C>>::vfnmsub213sh_er(self, op0, op1, op2);
39832    }
39833    /// `VFNMSUB213SH_MASK`.
39834    ///
39835    /// Supported operand variants:
39836    ///
39837    /// ```text
39838    /// +---+---------------+
39839    /// | # | Operands      |
39840    /// +---+---------------+
39841    /// | 1 | Xmm, Xmm, Mem |
39842    /// | 2 | Xmm, Xmm, Xmm |
39843    /// +---+---------------+
39844    /// ```
39845    #[inline]
39846    pub fn vfnmsub213sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39847    where Assembler<'a>: Vfnmsub213shMaskEmitter<A, B, C> {
39848        <Self as Vfnmsub213shMaskEmitter<A, B, C>>::vfnmsub213sh_mask(self, op0, op1, op2);
39849    }
39850    /// `VFNMSUB213SH_MASK_ER`.
39851    ///
39852    /// Supported operand variants:
39853    ///
39854    /// ```text
39855    /// +---+---------------+
39856    /// | # | Operands      |
39857    /// +---+---------------+
39858    /// | 1 | Xmm, Xmm, Xmm |
39859    /// +---+---------------+
39860    /// ```
39861    #[inline]
39862    pub fn vfnmsub213sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39863    where Assembler<'a>: Vfnmsub213shMaskErEmitter<A, B, C> {
39864        <Self as Vfnmsub213shMaskErEmitter<A, B, C>>::vfnmsub213sh_mask_er(self, op0, op1, op2);
39865    }
39866    /// `VFNMSUB213SH_MASKZ`.
39867    ///
39868    /// Supported operand variants:
39869    ///
39870    /// ```text
39871    /// +---+---------------+
39872    /// | # | Operands      |
39873    /// +---+---------------+
39874    /// | 1 | Xmm, Xmm, Mem |
39875    /// | 2 | Xmm, Xmm, Xmm |
39876    /// +---+---------------+
39877    /// ```
39878    #[inline]
39879    pub fn vfnmsub213sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39880    where Assembler<'a>: Vfnmsub213shMaskzEmitter<A, B, C> {
39881        <Self as Vfnmsub213shMaskzEmitter<A, B, C>>::vfnmsub213sh_maskz(self, op0, op1, op2);
39882    }
39883    /// `VFNMSUB213SH_MASKZ_ER`.
39884    ///
39885    /// Supported operand variants:
39886    ///
39887    /// ```text
39888    /// +---+---------------+
39889    /// | # | Operands      |
39890    /// +---+---------------+
39891    /// | 1 | Xmm, Xmm, Xmm |
39892    /// +---+---------------+
39893    /// ```
39894    #[inline]
39895    pub fn vfnmsub213sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39896    where Assembler<'a>: Vfnmsub213shMaskzErEmitter<A, B, C> {
39897        <Self as Vfnmsub213shMaskzErEmitter<A, B, C>>::vfnmsub213sh_maskz_er(self, op0, op1, op2);
39898    }
39899    /// `VFNMSUB231PH`.
39900    ///
39901    /// Supported operand variants:
39902    ///
39903    /// ```text
39904    /// +---+---------------+
39905    /// | # | Operands      |
39906    /// +---+---------------+
39907    /// | 1 | Xmm, Xmm, Mem |
39908    /// | 2 | Xmm, Xmm, Xmm |
39909    /// | 3 | Ymm, Ymm, Mem |
39910    /// | 4 | Ymm, Ymm, Ymm |
39911    /// | 5 | Zmm, Zmm, Mem |
39912    /// | 6 | Zmm, Zmm, Zmm |
39913    /// +---+---------------+
39914    /// ```
39915    #[inline]
39916    pub fn vfnmsub231ph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39917    where Assembler<'a>: Vfnmsub231phEmitter<A, B, C> {
39918        <Self as Vfnmsub231phEmitter<A, B, C>>::vfnmsub231ph(self, op0, op1, op2);
39919    }
39920    /// `VFNMSUB231PH_ER`.
39921    ///
39922    /// Supported operand variants:
39923    ///
39924    /// ```text
39925    /// +---+---------------+
39926    /// | # | Operands      |
39927    /// +---+---------------+
39928    /// | 1 | Zmm, Zmm, Zmm |
39929    /// +---+---------------+
39930    /// ```
39931    #[inline]
39932    pub fn vfnmsub231ph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39933    where Assembler<'a>: Vfnmsub231phErEmitter<A, B, C> {
39934        <Self as Vfnmsub231phErEmitter<A, B, C>>::vfnmsub231ph_er(self, op0, op1, op2);
39935    }
39936    /// `VFNMSUB231PH_MASK`.
39937    ///
39938    /// Supported operand variants:
39939    ///
39940    /// ```text
39941    /// +---+---------------+
39942    /// | # | Operands      |
39943    /// +---+---------------+
39944    /// | 1 | Xmm, Xmm, Mem |
39945    /// | 2 | Xmm, Xmm, Xmm |
39946    /// | 3 | Ymm, Ymm, Mem |
39947    /// | 4 | Ymm, Ymm, Ymm |
39948    /// | 5 | Zmm, Zmm, Mem |
39949    /// | 6 | Zmm, Zmm, Zmm |
39950    /// +---+---------------+
39951    /// ```
39952    #[inline]
39953    pub fn vfnmsub231ph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39954    where Assembler<'a>: Vfnmsub231phMaskEmitter<A, B, C> {
39955        <Self as Vfnmsub231phMaskEmitter<A, B, C>>::vfnmsub231ph_mask(self, op0, op1, op2);
39956    }
39957    /// `VFNMSUB231PH_MASK_ER`.
39958    ///
39959    /// Supported operand variants:
39960    ///
39961    /// ```text
39962    /// +---+---------------+
39963    /// | # | Operands      |
39964    /// +---+---------------+
39965    /// | 1 | Zmm, Zmm, Zmm |
39966    /// +---+---------------+
39967    /// ```
39968    #[inline]
39969    pub fn vfnmsub231ph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39970    where Assembler<'a>: Vfnmsub231phMaskErEmitter<A, B, C> {
39971        <Self as Vfnmsub231phMaskErEmitter<A, B, C>>::vfnmsub231ph_mask_er(self, op0, op1, op2);
39972    }
39973    /// `VFNMSUB231PH_MASKZ`.
39974    ///
39975    /// Supported operand variants:
39976    ///
39977    /// ```text
39978    /// +---+---------------+
39979    /// | # | Operands      |
39980    /// +---+---------------+
39981    /// | 1 | Xmm, Xmm, Mem |
39982    /// | 2 | Xmm, Xmm, Xmm |
39983    /// | 3 | Ymm, Ymm, Mem |
39984    /// | 4 | Ymm, Ymm, Ymm |
39985    /// | 5 | Zmm, Zmm, Mem |
39986    /// | 6 | Zmm, Zmm, Zmm |
39987    /// +---+---------------+
39988    /// ```
39989    #[inline]
39990    pub fn vfnmsub231ph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
39991    where Assembler<'a>: Vfnmsub231phMaskzEmitter<A, B, C> {
39992        <Self as Vfnmsub231phMaskzEmitter<A, B, C>>::vfnmsub231ph_maskz(self, op0, op1, op2);
39993    }
39994    /// `VFNMSUB231PH_MASKZ_ER`.
39995    ///
39996    /// Supported operand variants:
39997    ///
39998    /// ```text
39999    /// +---+---------------+
40000    /// | # | Operands      |
40001    /// +---+---------------+
40002    /// | 1 | Zmm, Zmm, Zmm |
40003    /// +---+---------------+
40004    /// ```
40005    #[inline]
40006    pub fn vfnmsub231ph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40007    where Assembler<'a>: Vfnmsub231phMaskzErEmitter<A, B, C> {
40008        <Self as Vfnmsub231phMaskzErEmitter<A, B, C>>::vfnmsub231ph_maskz_er(self, op0, op1, op2);
40009    }
40010    /// `VFNMSUB231SH`.
40011    ///
40012    /// Supported operand variants:
40013    ///
40014    /// ```text
40015    /// +---+---------------+
40016    /// | # | Operands      |
40017    /// +---+---------------+
40018    /// | 1 | Xmm, Xmm, Mem |
40019    /// | 2 | Xmm, Xmm, Xmm |
40020    /// +---+---------------+
40021    /// ```
40022    #[inline]
40023    pub fn vfnmsub231sh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40024    where Assembler<'a>: Vfnmsub231shEmitter<A, B, C> {
40025        <Self as Vfnmsub231shEmitter<A, B, C>>::vfnmsub231sh(self, op0, op1, op2);
40026    }
40027    /// `VFNMSUB231SH_ER`.
40028    ///
40029    /// Supported operand variants:
40030    ///
40031    /// ```text
40032    /// +---+---------------+
40033    /// | # | Operands      |
40034    /// +---+---------------+
40035    /// | 1 | Xmm, Xmm, Xmm |
40036    /// +---+---------------+
40037    /// ```
40038    #[inline]
40039    pub fn vfnmsub231sh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40040    where Assembler<'a>: Vfnmsub231shErEmitter<A, B, C> {
40041        <Self as Vfnmsub231shErEmitter<A, B, C>>::vfnmsub231sh_er(self, op0, op1, op2);
40042    }
40043    /// `VFNMSUB231SH_MASK`.
40044    ///
40045    /// Supported operand variants:
40046    ///
40047    /// ```text
40048    /// +---+---------------+
40049    /// | # | Operands      |
40050    /// +---+---------------+
40051    /// | 1 | Xmm, Xmm, Mem |
40052    /// | 2 | Xmm, Xmm, Xmm |
40053    /// +---+---------------+
40054    /// ```
40055    #[inline]
40056    pub fn vfnmsub231sh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40057    where Assembler<'a>: Vfnmsub231shMaskEmitter<A, B, C> {
40058        <Self as Vfnmsub231shMaskEmitter<A, B, C>>::vfnmsub231sh_mask(self, op0, op1, op2);
40059    }
40060    /// `VFNMSUB231SH_MASK_ER`.
40061    ///
40062    /// Supported operand variants:
40063    ///
40064    /// ```text
40065    /// +---+---------------+
40066    /// | # | Operands      |
40067    /// +---+---------------+
40068    /// | 1 | Xmm, Xmm, Xmm |
40069    /// +---+---------------+
40070    /// ```
40071    #[inline]
40072    pub fn vfnmsub231sh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40073    where Assembler<'a>: Vfnmsub231shMaskErEmitter<A, B, C> {
40074        <Self as Vfnmsub231shMaskErEmitter<A, B, C>>::vfnmsub231sh_mask_er(self, op0, op1, op2);
40075    }
40076    /// `VFNMSUB231SH_MASKZ`.
40077    ///
40078    /// Supported operand variants:
40079    ///
40080    /// ```text
40081    /// +---+---------------+
40082    /// | # | Operands      |
40083    /// +---+---------------+
40084    /// | 1 | Xmm, Xmm, Mem |
40085    /// | 2 | Xmm, Xmm, Xmm |
40086    /// +---+---------------+
40087    /// ```
40088    #[inline]
40089    pub fn vfnmsub231sh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40090    where Assembler<'a>: Vfnmsub231shMaskzEmitter<A, B, C> {
40091        <Self as Vfnmsub231shMaskzEmitter<A, B, C>>::vfnmsub231sh_maskz(self, op0, op1, op2);
40092    }
40093    /// `VFNMSUB231SH_MASKZ_ER`.
40094    ///
40095    /// Supported operand variants:
40096    ///
40097    /// ```text
40098    /// +---+---------------+
40099    /// | # | Operands      |
40100    /// +---+---------------+
40101    /// | 1 | Xmm, Xmm, Xmm |
40102    /// +---+---------------+
40103    /// ```
40104    #[inline]
40105    pub fn vfnmsub231sh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40106    where Assembler<'a>: Vfnmsub231shMaskzErEmitter<A, B, C> {
40107        <Self as Vfnmsub231shMaskzErEmitter<A, B, C>>::vfnmsub231sh_maskz_er(self, op0, op1, op2);
40108    }
40109    /// `VFPCLASSPH`.
40110    ///
40111    /// Supported operand variants:
40112    ///
40113    /// ```text
40114    /// +---+----------------+
40115    /// | # | Operands       |
40116    /// +---+----------------+
40117    /// | 1 | KReg, Mem, Imm |
40118    /// | 2 | KReg, Xmm, Imm |
40119    /// | 3 | KReg, Ymm, Imm |
40120    /// | 4 | KReg, Zmm, Imm |
40121    /// +---+----------------+
40122    /// ```
40123    #[inline]
40124    pub fn vfpclassph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40125    where Assembler<'a>: VfpclassphEmitter<A, B, C> {
40126        <Self as VfpclassphEmitter<A, B, C>>::vfpclassph(self, op0, op1, op2);
40127    }
40128    /// `VFPCLASSPH_MASK`.
40129    ///
40130    /// Supported operand variants:
40131    ///
40132    /// ```text
40133    /// +---+----------------+
40134    /// | # | Operands       |
40135    /// +---+----------------+
40136    /// | 1 | KReg, Mem, Imm |
40137    /// | 2 | KReg, Xmm, Imm |
40138    /// | 3 | KReg, Ymm, Imm |
40139    /// | 4 | KReg, Zmm, Imm |
40140    /// +---+----------------+
40141    /// ```
40142    #[inline]
40143    pub fn vfpclassph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40144    where Assembler<'a>: VfpclassphMaskEmitter<A, B, C> {
40145        <Self as VfpclassphMaskEmitter<A, B, C>>::vfpclassph_mask(self, op0, op1, op2);
40146    }
40147    /// `VFPCLASSSH`.
40148    ///
40149    /// Supported operand variants:
40150    ///
40151    /// ```text
40152    /// +---+----------------+
40153    /// | # | Operands       |
40154    /// +---+----------------+
40155    /// | 1 | KReg, Mem, Imm |
40156    /// | 2 | KReg, Xmm, Imm |
40157    /// +---+----------------+
40158    /// ```
40159    #[inline]
40160    pub fn vfpclasssh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40161    where Assembler<'a>: VfpclassshEmitter<A, B, C> {
40162        <Self as VfpclassshEmitter<A, B, C>>::vfpclasssh(self, op0, op1, op2);
40163    }
40164    /// `VFPCLASSSH_MASK`.
40165    ///
40166    /// Supported operand variants:
40167    ///
40168    /// ```text
40169    /// +---+----------------+
40170    /// | # | Operands       |
40171    /// +---+----------------+
40172    /// | 1 | KReg, Mem, Imm |
40173    /// | 2 | KReg, Xmm, Imm |
40174    /// +---+----------------+
40175    /// ```
40176    #[inline]
40177    pub fn vfpclasssh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40178    where Assembler<'a>: VfpclassshMaskEmitter<A, B, C> {
40179        <Self as VfpclassshMaskEmitter<A, B, C>>::vfpclasssh_mask(self, op0, op1, op2);
40180    }
40181    /// `VGETEXPPH`.
40182    ///
40183    /// Supported operand variants:
40184    ///
40185    /// ```text
40186    /// +---+----------+
40187    /// | # | Operands |
40188    /// +---+----------+
40189    /// | 1 | Xmm, Mem |
40190    /// | 2 | Xmm, Xmm |
40191    /// | 3 | Ymm, Mem |
40192    /// | 4 | Ymm, Ymm |
40193    /// | 5 | Zmm, Mem |
40194    /// | 6 | Zmm, Zmm |
40195    /// +---+----------+
40196    /// ```
40197    #[inline]
40198    pub fn vgetexpph<A, B>(&mut self, op0: A, op1: B)
40199    where Assembler<'a>: VgetexpphEmitter<A, B> {
40200        <Self as VgetexpphEmitter<A, B>>::vgetexpph(self, op0, op1);
40201    }
40202    /// `VGETEXPPH_MASK`.
40203    ///
40204    /// Supported operand variants:
40205    ///
40206    /// ```text
40207    /// +---+----------+
40208    /// | # | Operands |
40209    /// +---+----------+
40210    /// | 1 | Xmm, Mem |
40211    /// | 2 | Xmm, Xmm |
40212    /// | 3 | Ymm, Mem |
40213    /// | 4 | Ymm, Ymm |
40214    /// | 5 | Zmm, Mem |
40215    /// | 6 | Zmm, Zmm |
40216    /// +---+----------+
40217    /// ```
40218    #[inline]
40219    pub fn vgetexpph_mask<A, B>(&mut self, op0: A, op1: B)
40220    where Assembler<'a>: VgetexpphMaskEmitter<A, B> {
40221        <Self as VgetexpphMaskEmitter<A, B>>::vgetexpph_mask(self, op0, op1);
40222    }
40223    /// `VGETEXPPH_MASK_SAE`.
40224    ///
40225    /// Supported operand variants:
40226    ///
40227    /// ```text
40228    /// +---+----------+
40229    /// | # | Operands |
40230    /// +---+----------+
40231    /// | 1 | Zmm, Zmm |
40232    /// +---+----------+
40233    /// ```
40234    #[inline]
40235    pub fn vgetexpph_mask_sae<A, B>(&mut self, op0: A, op1: B)
40236    where Assembler<'a>: VgetexpphMaskSaeEmitter<A, B> {
40237        <Self as VgetexpphMaskSaeEmitter<A, B>>::vgetexpph_mask_sae(self, op0, op1);
40238    }
40239    /// `VGETEXPPH_MASKZ`.
40240    ///
40241    /// Supported operand variants:
40242    ///
40243    /// ```text
40244    /// +---+----------+
40245    /// | # | Operands |
40246    /// +---+----------+
40247    /// | 1 | Xmm, Mem |
40248    /// | 2 | Xmm, Xmm |
40249    /// | 3 | Ymm, Mem |
40250    /// | 4 | Ymm, Ymm |
40251    /// | 5 | Zmm, Mem |
40252    /// | 6 | Zmm, Zmm |
40253    /// +---+----------+
40254    /// ```
40255    #[inline]
40256    pub fn vgetexpph_maskz<A, B>(&mut self, op0: A, op1: B)
40257    where Assembler<'a>: VgetexpphMaskzEmitter<A, B> {
40258        <Self as VgetexpphMaskzEmitter<A, B>>::vgetexpph_maskz(self, op0, op1);
40259    }
40260    /// `VGETEXPPH_MASKZ_SAE`.
40261    ///
40262    /// Supported operand variants:
40263    ///
40264    /// ```text
40265    /// +---+----------+
40266    /// | # | Operands |
40267    /// +---+----------+
40268    /// | 1 | Zmm, Zmm |
40269    /// +---+----------+
40270    /// ```
40271    #[inline]
40272    pub fn vgetexpph_maskz_sae<A, B>(&mut self, op0: A, op1: B)
40273    where Assembler<'a>: VgetexpphMaskzSaeEmitter<A, B> {
40274        <Self as VgetexpphMaskzSaeEmitter<A, B>>::vgetexpph_maskz_sae(self, op0, op1);
40275    }
40276    /// `VGETEXPPH_SAE`.
40277    ///
40278    /// Supported operand variants:
40279    ///
40280    /// ```text
40281    /// +---+----------+
40282    /// | # | Operands |
40283    /// +---+----------+
40284    /// | 1 | Zmm, Zmm |
40285    /// +---+----------+
40286    /// ```
40287    #[inline]
40288    pub fn vgetexpph_sae<A, B>(&mut self, op0: A, op1: B)
40289    where Assembler<'a>: VgetexpphSaeEmitter<A, B> {
40290        <Self as VgetexpphSaeEmitter<A, B>>::vgetexpph_sae(self, op0, op1);
40291    }
40292    /// `VGETEXPSH`.
40293    ///
40294    /// Supported operand variants:
40295    ///
40296    /// ```text
40297    /// +---+---------------+
40298    /// | # | Operands      |
40299    /// +---+---------------+
40300    /// | 1 | Xmm, Xmm, Mem |
40301    /// | 2 | Xmm, Xmm, Xmm |
40302    /// +---+---------------+
40303    /// ```
40304    #[inline]
40305    pub fn vgetexpsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40306    where Assembler<'a>: VgetexpshEmitter<A, B, C> {
40307        <Self as VgetexpshEmitter<A, B, C>>::vgetexpsh(self, op0, op1, op2);
40308    }
40309    /// `VGETEXPSH_MASK`.
40310    ///
40311    /// Supported operand variants:
40312    ///
40313    /// ```text
40314    /// +---+---------------+
40315    /// | # | Operands      |
40316    /// +---+---------------+
40317    /// | 1 | Xmm, Xmm, Mem |
40318    /// | 2 | Xmm, Xmm, Xmm |
40319    /// +---+---------------+
40320    /// ```
40321    #[inline]
40322    pub fn vgetexpsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40323    where Assembler<'a>: VgetexpshMaskEmitter<A, B, C> {
40324        <Self as VgetexpshMaskEmitter<A, B, C>>::vgetexpsh_mask(self, op0, op1, op2);
40325    }
40326    /// `VGETEXPSH_MASK_SAE`.
40327    ///
40328    /// Supported operand variants:
40329    ///
40330    /// ```text
40331    /// +---+---------------+
40332    /// | # | Operands      |
40333    /// +---+---------------+
40334    /// | 1 | Xmm, Xmm, Xmm |
40335    /// +---+---------------+
40336    /// ```
40337    #[inline]
40338    pub fn vgetexpsh_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40339    where Assembler<'a>: VgetexpshMaskSaeEmitter<A, B, C> {
40340        <Self as VgetexpshMaskSaeEmitter<A, B, C>>::vgetexpsh_mask_sae(self, op0, op1, op2);
40341    }
40342    /// `VGETEXPSH_MASKZ`.
40343    ///
40344    /// Supported operand variants:
40345    ///
40346    /// ```text
40347    /// +---+---------------+
40348    /// | # | Operands      |
40349    /// +---+---------------+
40350    /// | 1 | Xmm, Xmm, Mem |
40351    /// | 2 | Xmm, Xmm, Xmm |
40352    /// +---+---------------+
40353    /// ```
40354    #[inline]
40355    pub fn vgetexpsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40356    where Assembler<'a>: VgetexpshMaskzEmitter<A, B, C> {
40357        <Self as VgetexpshMaskzEmitter<A, B, C>>::vgetexpsh_maskz(self, op0, op1, op2);
40358    }
40359    /// `VGETEXPSH_MASKZ_SAE`.
40360    ///
40361    /// Supported operand variants:
40362    ///
40363    /// ```text
40364    /// +---+---------------+
40365    /// | # | Operands      |
40366    /// +---+---------------+
40367    /// | 1 | Xmm, Xmm, Xmm |
40368    /// +---+---------------+
40369    /// ```
40370    #[inline]
40371    pub fn vgetexpsh_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40372    where Assembler<'a>: VgetexpshMaskzSaeEmitter<A, B, C> {
40373        <Self as VgetexpshMaskzSaeEmitter<A, B, C>>::vgetexpsh_maskz_sae(self, op0, op1, op2);
40374    }
40375    /// `VGETEXPSH_SAE`.
40376    ///
40377    /// Supported operand variants:
40378    ///
40379    /// ```text
40380    /// +---+---------------+
40381    /// | # | Operands      |
40382    /// +---+---------------+
40383    /// | 1 | Xmm, Xmm, Xmm |
40384    /// +---+---------------+
40385    /// ```
40386    #[inline]
40387    pub fn vgetexpsh_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40388    where Assembler<'a>: VgetexpshSaeEmitter<A, B, C> {
40389        <Self as VgetexpshSaeEmitter<A, B, C>>::vgetexpsh_sae(self, op0, op1, op2);
40390    }
40391    /// `VGETMANTPH`.
40392    ///
40393    /// Supported operand variants:
40394    ///
40395    /// ```text
40396    /// +---+---------------+
40397    /// | # | Operands      |
40398    /// +---+---------------+
40399    /// | 1 | Xmm, Mem, Imm |
40400    /// | 2 | Xmm, Xmm, Imm |
40401    /// | 3 | Ymm, Mem, Imm |
40402    /// | 4 | Ymm, Ymm, Imm |
40403    /// | 5 | Zmm, Mem, Imm |
40404    /// | 6 | Zmm, Zmm, Imm |
40405    /// +---+---------------+
40406    /// ```
40407    #[inline]
40408    pub fn vgetmantph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40409    where Assembler<'a>: VgetmantphEmitter<A, B, C> {
40410        <Self as VgetmantphEmitter<A, B, C>>::vgetmantph(self, op0, op1, op2);
40411    }
40412    /// `VGETMANTPH_MASK`.
40413    ///
40414    /// Supported operand variants:
40415    ///
40416    /// ```text
40417    /// +---+---------------+
40418    /// | # | Operands      |
40419    /// +---+---------------+
40420    /// | 1 | Xmm, Mem, Imm |
40421    /// | 2 | Xmm, Xmm, Imm |
40422    /// | 3 | Ymm, Mem, Imm |
40423    /// | 4 | Ymm, Ymm, Imm |
40424    /// | 5 | Zmm, Mem, Imm |
40425    /// | 6 | Zmm, Zmm, Imm |
40426    /// +---+---------------+
40427    /// ```
40428    #[inline]
40429    pub fn vgetmantph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40430    where Assembler<'a>: VgetmantphMaskEmitter<A, B, C> {
40431        <Self as VgetmantphMaskEmitter<A, B, C>>::vgetmantph_mask(self, op0, op1, op2);
40432    }
40433    /// `VGETMANTPH_MASK_SAE`.
40434    ///
40435    /// Supported operand variants:
40436    ///
40437    /// ```text
40438    /// +---+---------------+
40439    /// | # | Operands      |
40440    /// +---+---------------+
40441    /// | 1 | Zmm, Zmm, Imm |
40442    /// +---+---------------+
40443    /// ```
40444    #[inline]
40445    pub fn vgetmantph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40446    where Assembler<'a>: VgetmantphMaskSaeEmitter<A, B, C> {
40447        <Self as VgetmantphMaskSaeEmitter<A, B, C>>::vgetmantph_mask_sae(self, op0, op1, op2);
40448    }
40449    /// `VGETMANTPH_MASKZ`.
40450    ///
40451    /// Supported operand variants:
40452    ///
40453    /// ```text
40454    /// +---+---------------+
40455    /// | # | Operands      |
40456    /// +---+---------------+
40457    /// | 1 | Xmm, Mem, Imm |
40458    /// | 2 | Xmm, Xmm, Imm |
40459    /// | 3 | Ymm, Mem, Imm |
40460    /// | 4 | Ymm, Ymm, Imm |
40461    /// | 5 | Zmm, Mem, Imm |
40462    /// | 6 | Zmm, Zmm, Imm |
40463    /// +---+---------------+
40464    /// ```
40465    #[inline]
40466    pub fn vgetmantph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40467    where Assembler<'a>: VgetmantphMaskzEmitter<A, B, C> {
40468        <Self as VgetmantphMaskzEmitter<A, B, C>>::vgetmantph_maskz(self, op0, op1, op2);
40469    }
40470    /// `VGETMANTPH_MASKZ_SAE`.
40471    ///
40472    /// Supported operand variants:
40473    ///
40474    /// ```text
40475    /// +---+---------------+
40476    /// | # | Operands      |
40477    /// +---+---------------+
40478    /// | 1 | Zmm, Zmm, Imm |
40479    /// +---+---------------+
40480    /// ```
40481    #[inline]
40482    pub fn vgetmantph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40483    where Assembler<'a>: VgetmantphMaskzSaeEmitter<A, B, C> {
40484        <Self as VgetmantphMaskzSaeEmitter<A, B, C>>::vgetmantph_maskz_sae(self, op0, op1, op2);
40485    }
40486    /// `VGETMANTPH_SAE`.
40487    ///
40488    /// Supported operand variants:
40489    ///
40490    /// ```text
40491    /// +---+---------------+
40492    /// | # | Operands      |
40493    /// +---+---------------+
40494    /// | 1 | Zmm, Zmm, Imm |
40495    /// +---+---------------+
40496    /// ```
40497    #[inline]
40498    pub fn vgetmantph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40499    where Assembler<'a>: VgetmantphSaeEmitter<A, B, C> {
40500        <Self as VgetmantphSaeEmitter<A, B, C>>::vgetmantph_sae(self, op0, op1, op2);
40501    }
40502    /// `VGETMANTSH`.
40503    ///
40504    /// Supported operand variants:
40505    ///
40506    /// ```text
40507    /// +---+--------------------+
40508    /// | # | Operands           |
40509    /// +---+--------------------+
40510    /// | 1 | Xmm, Xmm, Mem, Imm |
40511    /// | 2 | Xmm, Xmm, Xmm, Imm |
40512    /// +---+--------------------+
40513    /// ```
40514    #[inline]
40515    pub fn vgetmantsh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40516    where Assembler<'a>: VgetmantshEmitter<A, B, C, D> {
40517        <Self as VgetmantshEmitter<A, B, C, D>>::vgetmantsh(self, op0, op1, op2, op3);
40518    }
40519    /// `VGETMANTSH_MASK`.
40520    ///
40521    /// Supported operand variants:
40522    ///
40523    /// ```text
40524    /// +---+--------------------+
40525    /// | # | Operands           |
40526    /// +---+--------------------+
40527    /// | 1 | Xmm, Xmm, Mem, Imm |
40528    /// | 2 | Xmm, Xmm, Xmm, Imm |
40529    /// +---+--------------------+
40530    /// ```
40531    #[inline]
40532    pub fn vgetmantsh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40533    where Assembler<'a>: VgetmantshMaskEmitter<A, B, C, D> {
40534        <Self as VgetmantshMaskEmitter<A, B, C, D>>::vgetmantsh_mask(self, op0, op1, op2, op3);
40535    }
40536    /// `VGETMANTSH_MASK_SAE`.
40537    ///
40538    /// Supported operand variants:
40539    ///
40540    /// ```text
40541    /// +---+--------------------+
40542    /// | # | Operands           |
40543    /// +---+--------------------+
40544    /// | 1 | Xmm, Xmm, Xmm, Imm |
40545    /// +---+--------------------+
40546    /// ```
40547    #[inline]
40548    pub fn vgetmantsh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40549    where Assembler<'a>: VgetmantshMaskSaeEmitter<A, B, C, D> {
40550        <Self as VgetmantshMaskSaeEmitter<A, B, C, D>>::vgetmantsh_mask_sae(self, op0, op1, op2, op3);
40551    }
40552    /// `VGETMANTSH_MASKZ`.
40553    ///
40554    /// Supported operand variants:
40555    ///
40556    /// ```text
40557    /// +---+--------------------+
40558    /// | # | Operands           |
40559    /// +---+--------------------+
40560    /// | 1 | Xmm, Xmm, Mem, Imm |
40561    /// | 2 | Xmm, Xmm, Xmm, Imm |
40562    /// +---+--------------------+
40563    /// ```
40564    #[inline]
40565    pub fn vgetmantsh_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40566    where Assembler<'a>: VgetmantshMaskzEmitter<A, B, C, D> {
40567        <Self as VgetmantshMaskzEmitter<A, B, C, D>>::vgetmantsh_maskz(self, op0, op1, op2, op3);
40568    }
40569    /// `VGETMANTSH_MASKZ_SAE`.
40570    ///
40571    /// Supported operand variants:
40572    ///
40573    /// ```text
40574    /// +---+--------------------+
40575    /// | # | Operands           |
40576    /// +---+--------------------+
40577    /// | 1 | Xmm, Xmm, Xmm, Imm |
40578    /// +---+--------------------+
40579    /// ```
40580    #[inline]
40581    pub fn vgetmantsh_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40582    where Assembler<'a>: VgetmantshMaskzSaeEmitter<A, B, C, D> {
40583        <Self as VgetmantshMaskzSaeEmitter<A, B, C, D>>::vgetmantsh_maskz_sae(self, op0, op1, op2, op3);
40584    }
40585    /// `VGETMANTSH_SAE`.
40586    ///
40587    /// Supported operand variants:
40588    ///
40589    /// ```text
40590    /// +---+--------------------+
40591    /// | # | Operands           |
40592    /// +---+--------------------+
40593    /// | 1 | Xmm, Xmm, Xmm, Imm |
40594    /// +---+--------------------+
40595    /// ```
40596    #[inline]
40597    pub fn vgetmantsh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40598    where Assembler<'a>: VgetmantshSaeEmitter<A, B, C, D> {
40599        <Self as VgetmantshSaeEmitter<A, B, C, D>>::vgetmantsh_sae(self, op0, op1, op2, op3);
40600    }
40601    /// `VGF2P8AFFINEINVQB` (VGF2P8AFFINEINVQB). 
40602    /// The AFFINEINVB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x8 + x4 + x3 + x + 1.
40603    ///
40604    ///
40605    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
40606    ///
40607    /// Supported operand variants:
40608    ///
40609    /// ```text
40610    /// +---+--------------------+
40611    /// | # | Operands           |
40612    /// +---+--------------------+
40613    /// | 1 | Xmm, Xmm, Mem, Imm |
40614    /// | 2 | Xmm, Xmm, Xmm, Imm |
40615    /// | 3 | Ymm, Ymm, Mem, Imm |
40616    /// | 4 | Ymm, Ymm, Ymm, Imm |
40617    /// | 5 | Zmm, Zmm, Mem, Imm |
40618    /// | 6 | Zmm, Zmm, Zmm, Imm |
40619    /// +---+--------------------+
40620    /// ```
40621    #[inline]
40622    pub fn vgf2p8affineinvqb<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40623    where Assembler<'a>: Vgf2p8affineinvqbEmitter<A, B, C, D> {
40624        <Self as Vgf2p8affineinvqbEmitter<A, B, C, D>>::vgf2p8affineinvqb(self, op0, op1, op2, op3);
40625    }
40626    /// `VGF2P8AFFINEINVQB_MASK` (VGF2P8AFFINEINVQB). 
40627    /// The AFFINEINVB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x8 + x4 + x3 + x + 1.
40628    ///
40629    ///
40630    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
40631    ///
40632    /// Supported operand variants:
40633    ///
40634    /// ```text
40635    /// +---+--------------------+
40636    /// | # | Operands           |
40637    /// +---+--------------------+
40638    /// | 1 | Xmm, Xmm, Mem, Imm |
40639    /// | 2 | Xmm, Xmm, Xmm, Imm |
40640    /// | 3 | Ymm, Ymm, Mem, Imm |
40641    /// | 4 | Ymm, Ymm, Ymm, Imm |
40642    /// | 5 | Zmm, Zmm, Mem, Imm |
40643    /// | 6 | Zmm, Zmm, Zmm, Imm |
40644    /// +---+--------------------+
40645    /// ```
40646    #[inline]
40647    pub fn vgf2p8affineinvqb_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40648    where Assembler<'a>: Vgf2p8affineinvqbMaskEmitter<A, B, C, D> {
40649        <Self as Vgf2p8affineinvqbMaskEmitter<A, B, C, D>>::vgf2p8affineinvqb_mask(self, op0, op1, op2, op3);
40650    }
40651    /// `VGF2P8AFFINEINVQB_MASKZ` (VGF2P8AFFINEINVQB). 
40652    /// The AFFINEINVB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * inv(x) + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. The inverse of the bytes in x is defined with respect to the reduction polynomial x8 + x4 + x3 + x + 1.
40653    ///
40654    ///
40655    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEINVQB.html).
40656    ///
40657    /// Supported operand variants:
40658    ///
40659    /// ```text
40660    /// +---+--------------------+
40661    /// | # | Operands           |
40662    /// +---+--------------------+
40663    /// | 1 | Xmm, Xmm, Mem, Imm |
40664    /// | 2 | Xmm, Xmm, Xmm, Imm |
40665    /// | 3 | Ymm, Ymm, Mem, Imm |
40666    /// | 4 | Ymm, Ymm, Ymm, Imm |
40667    /// | 5 | Zmm, Zmm, Mem, Imm |
40668    /// | 6 | Zmm, Zmm, Zmm, Imm |
40669    /// +---+--------------------+
40670    /// ```
40671    #[inline]
40672    pub fn vgf2p8affineinvqb_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40673    where Assembler<'a>: Vgf2p8affineinvqbMaskzEmitter<A, B, C, D> {
40674        <Self as Vgf2p8affineinvqbMaskzEmitter<A, B, C, D>>::vgf2p8affineinvqb_maskz(self, op0, op1, op2, op3);
40675    }
40676    /// `VGF2P8AFFINEQB` (VGF2P8AFFINEQB). 
40677    /// The AFFINEB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
40678    ///
40679    ///
40680    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
40681    ///
40682    /// Supported operand variants:
40683    ///
40684    /// ```text
40685    /// +---+--------------------+
40686    /// | # | Operands           |
40687    /// +---+--------------------+
40688    /// | 1 | Xmm, Xmm, Mem, Imm |
40689    /// | 2 | Xmm, Xmm, Xmm, Imm |
40690    /// | 3 | Ymm, Ymm, Mem, Imm |
40691    /// | 4 | Ymm, Ymm, Ymm, Imm |
40692    /// | 5 | Zmm, Zmm, Mem, Imm |
40693    /// | 6 | Zmm, Zmm, Zmm, Imm |
40694    /// +---+--------------------+
40695    /// ```
40696    #[inline]
40697    pub fn vgf2p8affineqb<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40698    where Assembler<'a>: Vgf2p8affineqbEmitter<A, B, C, D> {
40699        <Self as Vgf2p8affineqbEmitter<A, B, C, D>>::vgf2p8affineqb(self, op0, op1, op2, op3);
40700    }
40701    /// `VGF2P8AFFINEQB_MASK` (VGF2P8AFFINEQB). 
40702    /// The AFFINEB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
40703    ///
40704    ///
40705    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
40706    ///
40707    /// Supported operand variants:
40708    ///
40709    /// ```text
40710    /// +---+--------------------+
40711    /// | # | Operands           |
40712    /// +---+--------------------+
40713    /// | 1 | Xmm, Xmm, Mem, Imm |
40714    /// | 2 | Xmm, Xmm, Xmm, Imm |
40715    /// | 3 | Ymm, Ymm, Mem, Imm |
40716    /// | 4 | Ymm, Ymm, Ymm, Imm |
40717    /// | 5 | Zmm, Zmm, Mem, Imm |
40718    /// | 6 | Zmm, Zmm, Zmm, Imm |
40719    /// +---+--------------------+
40720    /// ```
40721    #[inline]
40722    pub fn vgf2p8affineqb_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40723    where Assembler<'a>: Vgf2p8affineqbMaskEmitter<A, B, C, D> {
40724        <Self as Vgf2p8affineqbMaskEmitter<A, B, C, D>>::vgf2p8affineqb_mask(self, op0, op1, op2, op3);
40725    }
40726    /// `VGF2P8AFFINEQB_MASKZ` (VGF2P8AFFINEQB). 
40727    /// The AFFINEB instruction computes an affine transformation in the Galois Field 28. For this instruction, an affine transformation is defined by A * x + b where “A” is an 8 by 8 bit matrix, and “x” and “b” are 8-bit vectors. One SIMD register (operand 1) holds “x” as either 16, 32 or 64 8-bit vectors. A second SIMD (operand 2) register or memory operand contains 2, 4, or 8 “A” values, which are operated upon by the correspondingly aligned 8 “x” values in the first register. The “b” vector is constant for all calculations and contained in the immediate byte.
40728    ///
40729    ///
40730    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8AFFINEQB.html).
40731    ///
40732    /// Supported operand variants:
40733    ///
40734    /// ```text
40735    /// +---+--------------------+
40736    /// | # | Operands           |
40737    /// +---+--------------------+
40738    /// | 1 | Xmm, Xmm, Mem, Imm |
40739    /// | 2 | Xmm, Xmm, Xmm, Imm |
40740    /// | 3 | Ymm, Ymm, Mem, Imm |
40741    /// | 4 | Ymm, Ymm, Ymm, Imm |
40742    /// | 5 | Zmm, Zmm, Mem, Imm |
40743    /// | 6 | Zmm, Zmm, Zmm, Imm |
40744    /// +---+--------------------+
40745    /// ```
40746    #[inline]
40747    pub fn vgf2p8affineqb_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
40748    where Assembler<'a>: Vgf2p8affineqbMaskzEmitter<A, B, C, D> {
40749        <Self as Vgf2p8affineqbMaskzEmitter<A, B, C, D>>::vgf2p8affineqb_maskz(self, op0, op1, op2, op3);
40750    }
40751    /// `VGF2P8MULB` (VGF2P8MULB). 
40752    /// The instruction multiplies elements in the finite field GF(28), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(28) is represented in polynomial representation with the reduction polynomial x8 + x4 + x3 + x + 1.
40753    ///
40754    ///
40755    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
40756    ///
40757    /// Supported operand variants:
40758    ///
40759    /// ```text
40760    /// +---+---------------+
40761    /// | # | Operands      |
40762    /// +---+---------------+
40763    /// | 1 | Xmm, Xmm, Mem |
40764    /// | 2 | Xmm, Xmm, Xmm |
40765    /// | 3 | Ymm, Ymm, Mem |
40766    /// | 4 | Ymm, Ymm, Ymm |
40767    /// | 5 | Zmm, Zmm, Mem |
40768    /// | 6 | Zmm, Zmm, Zmm |
40769    /// +---+---------------+
40770    /// ```
40771    #[inline]
40772    pub fn vgf2p8mulb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40773    where Assembler<'a>: Vgf2p8mulbEmitter<A, B, C> {
40774        <Self as Vgf2p8mulbEmitter<A, B, C>>::vgf2p8mulb(self, op0, op1, op2);
40775    }
40776    /// `VGF2P8MULB_MASK` (VGF2P8MULB). 
40777    /// The instruction multiplies elements in the finite field GF(28), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(28) is represented in polynomial representation with the reduction polynomial x8 + x4 + x3 + x + 1.
40778    ///
40779    ///
40780    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
40781    ///
40782    /// Supported operand variants:
40783    ///
40784    /// ```text
40785    /// +---+---------------+
40786    /// | # | Operands      |
40787    /// +---+---------------+
40788    /// | 1 | Xmm, Xmm, Mem |
40789    /// | 2 | Xmm, Xmm, Xmm |
40790    /// | 3 | Ymm, Ymm, Mem |
40791    /// | 4 | Ymm, Ymm, Ymm |
40792    /// | 5 | Zmm, Zmm, Mem |
40793    /// | 6 | Zmm, Zmm, Zmm |
40794    /// +---+---------------+
40795    /// ```
40796    #[inline]
40797    pub fn vgf2p8mulb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40798    where Assembler<'a>: Vgf2p8mulbMaskEmitter<A, B, C> {
40799        <Self as Vgf2p8mulbMaskEmitter<A, B, C>>::vgf2p8mulb_mask(self, op0, op1, op2);
40800    }
40801    /// `VGF2P8MULB_MASKZ` (VGF2P8MULB). 
40802    /// The instruction multiplies elements in the finite field GF(28), operating on a byte (field element) in the first source operand and the corresponding byte in a second source operand. The field GF(28) is represented in polynomial representation with the reduction polynomial x8 + x4 + x3 + x + 1.
40803    ///
40804    ///
40805    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/GF2P8MULB.html).
40806    ///
40807    /// Supported operand variants:
40808    ///
40809    /// ```text
40810    /// +---+---------------+
40811    /// | # | Operands      |
40812    /// +---+---------------+
40813    /// | 1 | Xmm, Xmm, Mem |
40814    /// | 2 | Xmm, Xmm, Xmm |
40815    /// | 3 | Ymm, Ymm, Mem |
40816    /// | 4 | Ymm, Ymm, Ymm |
40817    /// | 5 | Zmm, Zmm, Mem |
40818    /// | 6 | Zmm, Zmm, Zmm |
40819    /// +---+---------------+
40820    /// ```
40821    #[inline]
40822    pub fn vgf2p8mulb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40823    where Assembler<'a>: Vgf2p8mulbMaskzEmitter<A, B, C> {
40824        <Self as Vgf2p8mulbMaskzEmitter<A, B, C>>::vgf2p8mulb_maskz(self, op0, op1, op2);
40825    }
40826    /// `VMAXPH`.
40827    ///
40828    /// Supported operand variants:
40829    ///
40830    /// ```text
40831    /// +---+---------------+
40832    /// | # | Operands      |
40833    /// +---+---------------+
40834    /// | 1 | Xmm, Xmm, Mem |
40835    /// | 2 | Xmm, Xmm, Xmm |
40836    /// | 3 | Ymm, Ymm, Mem |
40837    /// | 4 | Ymm, Ymm, Ymm |
40838    /// | 5 | Zmm, Zmm, Mem |
40839    /// | 6 | Zmm, Zmm, Zmm |
40840    /// +---+---------------+
40841    /// ```
40842    #[inline]
40843    pub fn vmaxph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40844    where Assembler<'a>: VmaxphEmitter<A, B, C> {
40845        <Self as VmaxphEmitter<A, B, C>>::vmaxph(self, op0, op1, op2);
40846    }
40847    /// `VMAXPH_MASK`.
40848    ///
40849    /// Supported operand variants:
40850    ///
40851    /// ```text
40852    /// +---+---------------+
40853    /// | # | Operands      |
40854    /// +---+---------------+
40855    /// | 1 | Xmm, Xmm, Mem |
40856    /// | 2 | Xmm, Xmm, Xmm |
40857    /// | 3 | Ymm, Ymm, Mem |
40858    /// | 4 | Ymm, Ymm, Ymm |
40859    /// | 5 | Zmm, Zmm, Mem |
40860    /// | 6 | Zmm, Zmm, Zmm |
40861    /// +---+---------------+
40862    /// ```
40863    #[inline]
40864    pub fn vmaxph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40865    where Assembler<'a>: VmaxphMaskEmitter<A, B, C> {
40866        <Self as VmaxphMaskEmitter<A, B, C>>::vmaxph_mask(self, op0, op1, op2);
40867    }
40868    /// `VMAXPH_MASK_SAE`.
40869    ///
40870    /// Supported operand variants:
40871    ///
40872    /// ```text
40873    /// +---+---------------+
40874    /// | # | Operands      |
40875    /// +---+---------------+
40876    /// | 1 | Zmm, Zmm, Zmm |
40877    /// +---+---------------+
40878    /// ```
40879    #[inline]
40880    pub fn vmaxph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40881    where Assembler<'a>: VmaxphMaskSaeEmitter<A, B, C> {
40882        <Self as VmaxphMaskSaeEmitter<A, B, C>>::vmaxph_mask_sae(self, op0, op1, op2);
40883    }
40884    /// `VMAXPH_MASKZ`.
40885    ///
40886    /// Supported operand variants:
40887    ///
40888    /// ```text
40889    /// +---+---------------+
40890    /// | # | Operands      |
40891    /// +---+---------------+
40892    /// | 1 | Xmm, Xmm, Mem |
40893    /// | 2 | Xmm, Xmm, Xmm |
40894    /// | 3 | Ymm, Ymm, Mem |
40895    /// | 4 | Ymm, Ymm, Ymm |
40896    /// | 5 | Zmm, Zmm, Mem |
40897    /// | 6 | Zmm, Zmm, Zmm |
40898    /// +---+---------------+
40899    /// ```
40900    #[inline]
40901    pub fn vmaxph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40902    where Assembler<'a>: VmaxphMaskzEmitter<A, B, C> {
40903        <Self as VmaxphMaskzEmitter<A, B, C>>::vmaxph_maskz(self, op0, op1, op2);
40904    }
40905    /// `VMAXPH_MASKZ_SAE`.
40906    ///
40907    /// Supported operand variants:
40908    ///
40909    /// ```text
40910    /// +---+---------------+
40911    /// | # | Operands      |
40912    /// +---+---------------+
40913    /// | 1 | Zmm, Zmm, Zmm |
40914    /// +---+---------------+
40915    /// ```
40916    #[inline]
40917    pub fn vmaxph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40918    where Assembler<'a>: VmaxphMaskzSaeEmitter<A, B, C> {
40919        <Self as VmaxphMaskzSaeEmitter<A, B, C>>::vmaxph_maskz_sae(self, op0, op1, op2);
40920    }
40921    /// `VMAXPH_SAE`.
40922    ///
40923    /// Supported operand variants:
40924    ///
40925    /// ```text
40926    /// +---+---------------+
40927    /// | # | Operands      |
40928    /// +---+---------------+
40929    /// | 1 | Zmm, Zmm, Zmm |
40930    /// +---+---------------+
40931    /// ```
40932    #[inline]
40933    pub fn vmaxph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40934    where Assembler<'a>: VmaxphSaeEmitter<A, B, C> {
40935        <Self as VmaxphSaeEmitter<A, B, C>>::vmaxph_sae(self, op0, op1, op2);
40936    }
40937    /// `VMAXSH`.
40938    ///
40939    /// Supported operand variants:
40940    ///
40941    /// ```text
40942    /// +---+---------------+
40943    /// | # | Operands      |
40944    /// +---+---------------+
40945    /// | 1 | Xmm, Xmm, Mem |
40946    /// | 2 | Xmm, Xmm, Xmm |
40947    /// +---+---------------+
40948    /// ```
40949    #[inline]
40950    pub fn vmaxsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40951    where Assembler<'a>: VmaxshEmitter<A, B, C> {
40952        <Self as VmaxshEmitter<A, B, C>>::vmaxsh(self, op0, op1, op2);
40953    }
40954    /// `VMAXSH_MASK`.
40955    ///
40956    /// Supported operand variants:
40957    ///
40958    /// ```text
40959    /// +---+---------------+
40960    /// | # | Operands      |
40961    /// +---+---------------+
40962    /// | 1 | Xmm, Xmm, Mem |
40963    /// | 2 | Xmm, Xmm, Xmm |
40964    /// +---+---------------+
40965    /// ```
40966    #[inline]
40967    pub fn vmaxsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40968    where Assembler<'a>: VmaxshMaskEmitter<A, B, C> {
40969        <Self as VmaxshMaskEmitter<A, B, C>>::vmaxsh_mask(self, op0, op1, op2);
40970    }
40971    /// `VMAXSH_MASK_SAE`.
40972    ///
40973    /// Supported operand variants:
40974    ///
40975    /// ```text
40976    /// +---+---------------+
40977    /// | # | Operands      |
40978    /// +---+---------------+
40979    /// | 1 | Xmm, Xmm, Xmm |
40980    /// +---+---------------+
40981    /// ```
40982    #[inline]
40983    pub fn vmaxsh_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
40984    where Assembler<'a>: VmaxshMaskSaeEmitter<A, B, C> {
40985        <Self as VmaxshMaskSaeEmitter<A, B, C>>::vmaxsh_mask_sae(self, op0, op1, op2);
40986    }
40987    /// `VMAXSH_MASKZ`.
40988    ///
40989    /// Supported operand variants:
40990    ///
40991    /// ```text
40992    /// +---+---------------+
40993    /// | # | Operands      |
40994    /// +---+---------------+
40995    /// | 1 | Xmm, Xmm, Mem |
40996    /// | 2 | Xmm, Xmm, Xmm |
40997    /// +---+---------------+
40998    /// ```
40999    #[inline]
41000    pub fn vmaxsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41001    where Assembler<'a>: VmaxshMaskzEmitter<A, B, C> {
41002        <Self as VmaxshMaskzEmitter<A, B, C>>::vmaxsh_maskz(self, op0, op1, op2);
41003    }
41004    /// `VMAXSH_MASKZ_SAE`.
41005    ///
41006    /// Supported operand variants:
41007    ///
41008    /// ```text
41009    /// +---+---------------+
41010    /// | # | Operands      |
41011    /// +---+---------------+
41012    /// | 1 | Xmm, Xmm, Xmm |
41013    /// +---+---------------+
41014    /// ```
41015    #[inline]
41016    pub fn vmaxsh_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41017    where Assembler<'a>: VmaxshMaskzSaeEmitter<A, B, C> {
41018        <Self as VmaxshMaskzSaeEmitter<A, B, C>>::vmaxsh_maskz_sae(self, op0, op1, op2);
41019    }
41020    /// `VMAXSH_SAE`.
41021    ///
41022    /// Supported operand variants:
41023    ///
41024    /// ```text
41025    /// +---+---------------+
41026    /// | # | Operands      |
41027    /// +---+---------------+
41028    /// | 1 | Xmm, Xmm, Xmm |
41029    /// +---+---------------+
41030    /// ```
41031    #[inline]
41032    pub fn vmaxsh_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41033    where Assembler<'a>: VmaxshSaeEmitter<A, B, C> {
41034        <Self as VmaxshSaeEmitter<A, B, C>>::vmaxsh_sae(self, op0, op1, op2);
41035    }
41036    /// `VMINPH`.
41037    ///
41038    /// Supported operand variants:
41039    ///
41040    /// ```text
41041    /// +---+---------------+
41042    /// | # | Operands      |
41043    /// +---+---------------+
41044    /// | 1 | Xmm, Xmm, Mem |
41045    /// | 2 | Xmm, Xmm, Xmm |
41046    /// | 3 | Ymm, Ymm, Mem |
41047    /// | 4 | Ymm, Ymm, Ymm |
41048    /// | 5 | Zmm, Zmm, Mem |
41049    /// | 6 | Zmm, Zmm, Zmm |
41050    /// +---+---------------+
41051    /// ```
41052    #[inline]
41053    pub fn vminph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41054    where Assembler<'a>: VminphEmitter<A, B, C> {
41055        <Self as VminphEmitter<A, B, C>>::vminph(self, op0, op1, op2);
41056    }
41057    /// `VMINPH_MASK`.
41058    ///
41059    /// Supported operand variants:
41060    ///
41061    /// ```text
41062    /// +---+---------------+
41063    /// | # | Operands      |
41064    /// +---+---------------+
41065    /// | 1 | Xmm, Xmm, Mem |
41066    /// | 2 | Xmm, Xmm, Xmm |
41067    /// | 3 | Ymm, Ymm, Mem |
41068    /// | 4 | Ymm, Ymm, Ymm |
41069    /// | 5 | Zmm, Zmm, Mem |
41070    /// | 6 | Zmm, Zmm, Zmm |
41071    /// +---+---------------+
41072    /// ```
41073    #[inline]
41074    pub fn vminph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41075    where Assembler<'a>: VminphMaskEmitter<A, B, C> {
41076        <Self as VminphMaskEmitter<A, B, C>>::vminph_mask(self, op0, op1, op2);
41077    }
41078    /// `VMINPH_MASK_SAE`.
41079    ///
41080    /// Supported operand variants:
41081    ///
41082    /// ```text
41083    /// +---+---------------+
41084    /// | # | Operands      |
41085    /// +---+---------------+
41086    /// | 1 | Zmm, Zmm, Zmm |
41087    /// +---+---------------+
41088    /// ```
41089    #[inline]
41090    pub fn vminph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41091    where Assembler<'a>: VminphMaskSaeEmitter<A, B, C> {
41092        <Self as VminphMaskSaeEmitter<A, B, C>>::vminph_mask_sae(self, op0, op1, op2);
41093    }
41094    /// `VMINPH_MASKZ`.
41095    ///
41096    /// Supported operand variants:
41097    ///
41098    /// ```text
41099    /// +---+---------------+
41100    /// | # | Operands      |
41101    /// +---+---------------+
41102    /// | 1 | Xmm, Xmm, Mem |
41103    /// | 2 | Xmm, Xmm, Xmm |
41104    /// | 3 | Ymm, Ymm, Mem |
41105    /// | 4 | Ymm, Ymm, Ymm |
41106    /// | 5 | Zmm, Zmm, Mem |
41107    /// | 6 | Zmm, Zmm, Zmm |
41108    /// +---+---------------+
41109    /// ```
41110    #[inline]
41111    pub fn vminph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41112    where Assembler<'a>: VminphMaskzEmitter<A, B, C> {
41113        <Self as VminphMaskzEmitter<A, B, C>>::vminph_maskz(self, op0, op1, op2);
41114    }
41115    /// `VMINPH_MASKZ_SAE`.
41116    ///
41117    /// Supported operand variants:
41118    ///
41119    /// ```text
41120    /// +---+---------------+
41121    /// | # | Operands      |
41122    /// +---+---------------+
41123    /// | 1 | Zmm, Zmm, Zmm |
41124    /// +---+---------------+
41125    /// ```
41126    #[inline]
41127    pub fn vminph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41128    where Assembler<'a>: VminphMaskzSaeEmitter<A, B, C> {
41129        <Self as VminphMaskzSaeEmitter<A, B, C>>::vminph_maskz_sae(self, op0, op1, op2);
41130    }
41131    /// `VMINPH_SAE`.
41132    ///
41133    /// Supported operand variants:
41134    ///
41135    /// ```text
41136    /// +---+---------------+
41137    /// | # | Operands      |
41138    /// +---+---------------+
41139    /// | 1 | Zmm, Zmm, Zmm |
41140    /// +---+---------------+
41141    /// ```
41142    #[inline]
41143    pub fn vminph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41144    where Assembler<'a>: VminphSaeEmitter<A, B, C> {
41145        <Self as VminphSaeEmitter<A, B, C>>::vminph_sae(self, op0, op1, op2);
41146    }
41147    /// `VMINSH`.
41148    ///
41149    /// Supported operand variants:
41150    ///
41151    /// ```text
41152    /// +---+---------------+
41153    /// | # | Operands      |
41154    /// +---+---------------+
41155    /// | 1 | Xmm, Xmm, Mem |
41156    /// | 2 | Xmm, Xmm, Xmm |
41157    /// +---+---------------+
41158    /// ```
41159    #[inline]
41160    pub fn vminsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41161    where Assembler<'a>: VminshEmitter<A, B, C> {
41162        <Self as VminshEmitter<A, B, C>>::vminsh(self, op0, op1, op2);
41163    }
41164    /// `VMINSH_MASK`.
41165    ///
41166    /// Supported operand variants:
41167    ///
41168    /// ```text
41169    /// +---+---------------+
41170    /// | # | Operands      |
41171    /// +---+---------------+
41172    /// | 1 | Xmm, Xmm, Mem |
41173    /// | 2 | Xmm, Xmm, Xmm |
41174    /// +---+---------------+
41175    /// ```
41176    #[inline]
41177    pub fn vminsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41178    where Assembler<'a>: VminshMaskEmitter<A, B, C> {
41179        <Self as VminshMaskEmitter<A, B, C>>::vminsh_mask(self, op0, op1, op2);
41180    }
41181    /// `VMINSH_MASK_SAE`.
41182    ///
41183    /// Supported operand variants:
41184    ///
41185    /// ```text
41186    /// +---+---------------+
41187    /// | # | Operands      |
41188    /// +---+---------------+
41189    /// | 1 | Xmm, Xmm, Xmm |
41190    /// +---+---------------+
41191    /// ```
41192    #[inline]
41193    pub fn vminsh_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41194    where Assembler<'a>: VminshMaskSaeEmitter<A, B, C> {
41195        <Self as VminshMaskSaeEmitter<A, B, C>>::vminsh_mask_sae(self, op0, op1, op2);
41196    }
41197    /// `VMINSH_MASKZ`.
41198    ///
41199    /// Supported operand variants:
41200    ///
41201    /// ```text
41202    /// +---+---------------+
41203    /// | # | Operands      |
41204    /// +---+---------------+
41205    /// | 1 | Xmm, Xmm, Mem |
41206    /// | 2 | Xmm, Xmm, Xmm |
41207    /// +---+---------------+
41208    /// ```
41209    #[inline]
41210    pub fn vminsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41211    where Assembler<'a>: VminshMaskzEmitter<A, B, C> {
41212        <Self as VminshMaskzEmitter<A, B, C>>::vminsh_maskz(self, op0, op1, op2);
41213    }
41214    /// `VMINSH_MASKZ_SAE`.
41215    ///
41216    /// Supported operand variants:
41217    ///
41218    /// ```text
41219    /// +---+---------------+
41220    /// | # | Operands      |
41221    /// +---+---------------+
41222    /// | 1 | Xmm, Xmm, Xmm |
41223    /// +---+---------------+
41224    /// ```
41225    #[inline]
41226    pub fn vminsh_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41227    where Assembler<'a>: VminshMaskzSaeEmitter<A, B, C> {
41228        <Self as VminshMaskzSaeEmitter<A, B, C>>::vminsh_maskz_sae(self, op0, op1, op2);
41229    }
41230    /// `VMINSH_SAE`.
41231    ///
41232    /// Supported operand variants:
41233    ///
41234    /// ```text
41235    /// +---+---------------+
41236    /// | # | Operands      |
41237    /// +---+---------------+
41238    /// | 1 | Xmm, Xmm, Xmm |
41239    /// +---+---------------+
41240    /// ```
41241    #[inline]
41242    pub fn vminsh_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41243    where Assembler<'a>: VminshSaeEmitter<A, B, C> {
41244        <Self as VminshSaeEmitter<A, B, C>>::vminsh_sae(self, op0, op1, op2);
41245    }
41246    /// `VMOVSH`.
41247    ///
41248    /// Supported operand variants:
41249    ///
41250    /// ```text
41251    /// +---+----------+
41252    /// | # | Operands |
41253    /// +---+----------+
41254    /// | 1 | Mem, Xmm |
41255    /// | 2 | Xmm, Mem |
41256    /// +---+----------+
41257    /// ```
41258    #[inline]
41259    pub fn vmovsh_2<A, B>(&mut self, op0: A, op1: B)
41260    where Assembler<'a>: VmovshEmitter_2<A, B> {
41261        <Self as VmovshEmitter_2<A, B>>::vmovsh_2(self, op0, op1);
41262    }
41263    /// `VMOVSH`.
41264    ///
41265    /// Supported operand variants:
41266    ///
41267    /// ```text
41268    /// +---+---------------+
41269    /// | # | Operands      |
41270    /// +---+---------------+
41271    /// | 1 | Xmm, Xmm, Xmm |
41272    /// +---+---------------+
41273    /// ```
41274    #[inline]
41275    pub fn vmovsh_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41276    where Assembler<'a>: VmovshEmitter_3<A, B, C> {
41277        <Self as VmovshEmitter_3<A, B, C>>::vmovsh_3(self, op0, op1, op2);
41278    }
41279    /// `VMOVSH_MASK`.
41280    ///
41281    /// Supported operand variants:
41282    ///
41283    /// ```text
41284    /// +---+----------+
41285    /// | # | Operands |
41286    /// +---+----------+
41287    /// | 1 | Mem, Xmm |
41288    /// | 2 | Xmm, Mem |
41289    /// +---+----------+
41290    /// ```
41291    #[inline]
41292    pub fn vmovsh_mask_2<A, B>(&mut self, op0: A, op1: B)
41293    where Assembler<'a>: VmovshMaskEmitter_2<A, B> {
41294        <Self as VmovshMaskEmitter_2<A, B>>::vmovsh_mask_2(self, op0, op1);
41295    }
41296    /// `VMOVSH_MASK`.
41297    ///
41298    /// Supported operand variants:
41299    ///
41300    /// ```text
41301    /// +---+---------------+
41302    /// | # | Operands      |
41303    /// +---+---------------+
41304    /// | 1 | Xmm, Xmm, Xmm |
41305    /// +---+---------------+
41306    /// ```
41307    #[inline]
41308    pub fn vmovsh_mask_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41309    where Assembler<'a>: VmovshMaskEmitter_3<A, B, C> {
41310        <Self as VmovshMaskEmitter_3<A, B, C>>::vmovsh_mask_3(self, op0, op1, op2);
41311    }
41312    /// `VMOVSH_MASKZ`.
41313    ///
41314    /// Supported operand variants:
41315    ///
41316    /// ```text
41317    /// +---+----------+
41318    /// | # | Operands |
41319    /// +---+----------+
41320    /// | 1 | Xmm, Mem |
41321    /// +---+----------+
41322    /// ```
41323    #[inline]
41324    pub fn vmovsh_maskz_2<A, B>(&mut self, op0: A, op1: B)
41325    where Assembler<'a>: VmovshMaskzEmitter_2<A, B> {
41326        <Self as VmovshMaskzEmitter_2<A, B>>::vmovsh_maskz_2(self, op0, op1);
41327    }
41328    /// `VMOVSH_MASKZ`.
41329    ///
41330    /// Supported operand variants:
41331    ///
41332    /// ```text
41333    /// +---+---------------+
41334    /// | # | Operands      |
41335    /// +---+---------------+
41336    /// | 1 | Xmm, Xmm, Xmm |
41337    /// +---+---------------+
41338    /// ```
41339    #[inline]
41340    pub fn vmovsh_maskz_3<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41341    where Assembler<'a>: VmovshMaskzEmitter_3<A, B, C> {
41342        <Self as VmovshMaskzEmitter_3<A, B, C>>::vmovsh_maskz_3(self, op0, op1, op2);
41343    }
41344    /// `VMOVW_G2X`.
41345    ///
41346    /// Supported operand variants:
41347    ///
41348    /// ```text
41349    /// +---+----------+
41350    /// | # | Operands |
41351    /// +---+----------+
41352    /// | 1 | Xmm, Gpd |
41353    /// | 2 | Xmm, Mem |
41354    /// +---+----------+
41355    /// ```
41356    #[inline]
41357    pub fn vmovw_g2x<A, B>(&mut self, op0: A, op1: B)
41358    where Assembler<'a>: VmovwG2xEmitter<A, B> {
41359        <Self as VmovwG2xEmitter<A, B>>::vmovw_g2x(self, op0, op1);
41360    }
41361    /// `VMOVW_X2G`.
41362    ///
41363    /// Supported operand variants:
41364    ///
41365    /// ```text
41366    /// +---+----------+
41367    /// | # | Operands |
41368    /// +---+----------+
41369    /// | 1 | Gpd, Xmm |
41370    /// | 2 | Mem, Xmm |
41371    /// +---+----------+
41372    /// ```
41373    #[inline]
41374    pub fn vmovw_x2g<A, B>(&mut self, op0: A, op1: B)
41375    where Assembler<'a>: VmovwX2gEmitter<A, B> {
41376        <Self as VmovwX2gEmitter<A, B>>::vmovw_x2g(self, op0, op1);
41377    }
41378    /// `VMULPH`.
41379    ///
41380    /// Supported operand variants:
41381    ///
41382    /// ```text
41383    /// +---+---------------+
41384    /// | # | Operands      |
41385    /// +---+---------------+
41386    /// | 1 | Xmm, Xmm, Mem |
41387    /// | 2 | Xmm, Xmm, Xmm |
41388    /// | 3 | Ymm, Ymm, Mem |
41389    /// | 4 | Ymm, Ymm, Ymm |
41390    /// | 5 | Zmm, Zmm, Mem |
41391    /// | 6 | Zmm, Zmm, Zmm |
41392    /// +---+---------------+
41393    /// ```
41394    #[inline]
41395    pub fn vmulph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41396    where Assembler<'a>: VmulphEmitter<A, B, C> {
41397        <Self as VmulphEmitter<A, B, C>>::vmulph(self, op0, op1, op2);
41398    }
41399    /// `VMULPH_ER`.
41400    ///
41401    /// Supported operand variants:
41402    ///
41403    /// ```text
41404    /// +---+---------------+
41405    /// | # | Operands      |
41406    /// +---+---------------+
41407    /// | 1 | Zmm, Zmm, Zmm |
41408    /// +---+---------------+
41409    /// ```
41410    #[inline]
41411    pub fn vmulph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41412    where Assembler<'a>: VmulphErEmitter<A, B, C> {
41413        <Self as VmulphErEmitter<A, B, C>>::vmulph_er(self, op0, op1, op2);
41414    }
41415    /// `VMULPH_MASK`.
41416    ///
41417    /// Supported operand variants:
41418    ///
41419    /// ```text
41420    /// +---+---------------+
41421    /// | # | Operands      |
41422    /// +---+---------------+
41423    /// | 1 | Xmm, Xmm, Mem |
41424    /// | 2 | Xmm, Xmm, Xmm |
41425    /// | 3 | Ymm, Ymm, Mem |
41426    /// | 4 | Ymm, Ymm, Ymm |
41427    /// | 5 | Zmm, Zmm, Mem |
41428    /// | 6 | Zmm, Zmm, Zmm |
41429    /// +---+---------------+
41430    /// ```
41431    #[inline]
41432    pub fn vmulph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41433    where Assembler<'a>: VmulphMaskEmitter<A, B, C> {
41434        <Self as VmulphMaskEmitter<A, B, C>>::vmulph_mask(self, op0, op1, op2);
41435    }
41436    /// `VMULPH_MASK_ER`.
41437    ///
41438    /// Supported operand variants:
41439    ///
41440    /// ```text
41441    /// +---+---------------+
41442    /// | # | Operands      |
41443    /// +---+---------------+
41444    /// | 1 | Zmm, Zmm, Zmm |
41445    /// +---+---------------+
41446    /// ```
41447    #[inline]
41448    pub fn vmulph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41449    where Assembler<'a>: VmulphMaskErEmitter<A, B, C> {
41450        <Self as VmulphMaskErEmitter<A, B, C>>::vmulph_mask_er(self, op0, op1, op2);
41451    }
41452    /// `VMULPH_MASKZ`.
41453    ///
41454    /// Supported operand variants:
41455    ///
41456    /// ```text
41457    /// +---+---------------+
41458    /// | # | Operands      |
41459    /// +---+---------------+
41460    /// | 1 | Xmm, Xmm, Mem |
41461    /// | 2 | Xmm, Xmm, Xmm |
41462    /// | 3 | Ymm, Ymm, Mem |
41463    /// | 4 | Ymm, Ymm, Ymm |
41464    /// | 5 | Zmm, Zmm, Mem |
41465    /// | 6 | Zmm, Zmm, Zmm |
41466    /// +---+---------------+
41467    /// ```
41468    #[inline]
41469    pub fn vmulph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41470    where Assembler<'a>: VmulphMaskzEmitter<A, B, C> {
41471        <Self as VmulphMaskzEmitter<A, B, C>>::vmulph_maskz(self, op0, op1, op2);
41472    }
41473    /// `VMULPH_MASKZ_ER`.
41474    ///
41475    /// Supported operand variants:
41476    ///
41477    /// ```text
41478    /// +---+---------------+
41479    /// | # | Operands      |
41480    /// +---+---------------+
41481    /// | 1 | Zmm, Zmm, Zmm |
41482    /// +---+---------------+
41483    /// ```
41484    #[inline]
41485    pub fn vmulph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41486    where Assembler<'a>: VmulphMaskzErEmitter<A, B, C> {
41487        <Self as VmulphMaskzErEmitter<A, B, C>>::vmulph_maskz_er(self, op0, op1, op2);
41488    }
41489    /// `VMULSH`.
41490    ///
41491    /// Supported operand variants:
41492    ///
41493    /// ```text
41494    /// +---+---------------+
41495    /// | # | Operands      |
41496    /// +---+---------------+
41497    /// | 1 | Xmm, Xmm, Mem |
41498    /// | 2 | Xmm, Xmm, Xmm |
41499    /// +---+---------------+
41500    /// ```
41501    #[inline]
41502    pub fn vmulsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41503    where Assembler<'a>: VmulshEmitter<A, B, C> {
41504        <Self as VmulshEmitter<A, B, C>>::vmulsh(self, op0, op1, op2);
41505    }
41506    /// `VMULSH_ER`.
41507    ///
41508    /// Supported operand variants:
41509    ///
41510    /// ```text
41511    /// +---+---------------+
41512    /// | # | Operands      |
41513    /// +---+---------------+
41514    /// | 1 | Xmm, Xmm, Xmm |
41515    /// +---+---------------+
41516    /// ```
41517    #[inline]
41518    pub fn vmulsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41519    where Assembler<'a>: VmulshErEmitter<A, B, C> {
41520        <Self as VmulshErEmitter<A, B, C>>::vmulsh_er(self, op0, op1, op2);
41521    }
41522    /// `VMULSH_MASK`.
41523    ///
41524    /// Supported operand variants:
41525    ///
41526    /// ```text
41527    /// +---+---------------+
41528    /// | # | Operands      |
41529    /// +---+---------------+
41530    /// | 1 | Xmm, Xmm, Mem |
41531    /// | 2 | Xmm, Xmm, Xmm |
41532    /// +---+---------------+
41533    /// ```
41534    #[inline]
41535    pub fn vmulsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41536    where Assembler<'a>: VmulshMaskEmitter<A, B, C> {
41537        <Self as VmulshMaskEmitter<A, B, C>>::vmulsh_mask(self, op0, op1, op2);
41538    }
41539    /// `VMULSH_MASK_ER`.
41540    ///
41541    /// Supported operand variants:
41542    ///
41543    /// ```text
41544    /// +---+---------------+
41545    /// | # | Operands      |
41546    /// +---+---------------+
41547    /// | 1 | Xmm, Xmm, Xmm |
41548    /// +---+---------------+
41549    /// ```
41550    #[inline]
41551    pub fn vmulsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41552    where Assembler<'a>: VmulshMaskErEmitter<A, B, C> {
41553        <Self as VmulshMaskErEmitter<A, B, C>>::vmulsh_mask_er(self, op0, op1, op2);
41554    }
41555    /// `VMULSH_MASKZ`.
41556    ///
41557    /// Supported operand variants:
41558    ///
41559    /// ```text
41560    /// +---+---------------+
41561    /// | # | Operands      |
41562    /// +---+---------------+
41563    /// | 1 | Xmm, Xmm, Mem |
41564    /// | 2 | Xmm, Xmm, Xmm |
41565    /// +---+---------------+
41566    /// ```
41567    #[inline]
41568    pub fn vmulsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41569    where Assembler<'a>: VmulshMaskzEmitter<A, B, C> {
41570        <Self as VmulshMaskzEmitter<A, B, C>>::vmulsh_maskz(self, op0, op1, op2);
41571    }
41572    /// `VMULSH_MASKZ_ER`.
41573    ///
41574    /// Supported operand variants:
41575    ///
41576    /// ```text
41577    /// +---+---------------+
41578    /// | # | Operands      |
41579    /// +---+---------------+
41580    /// | 1 | Xmm, Xmm, Xmm |
41581    /// +---+---------------+
41582    /// ```
41583    #[inline]
41584    pub fn vmulsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41585    where Assembler<'a>: VmulshMaskzErEmitter<A, B, C> {
41586        <Self as VmulshMaskzErEmitter<A, B, C>>::vmulsh_maskz_er(self, op0, op1, op2);
41587    }
41588    /// `VPCLMULQDQ` (VPCLMULQDQ). 
41589    /// Performs a carry-less multiplication of two quadwords, selected from the first source and second source operand according to the value of the immediate byte. Bits 4 and 0 are used to select which 64-bit half of each operand to use according to Table 4-13, other bits of the immediate byte are ignored.
41590    ///
41591    ///
41592    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PCLMULQDQ.html).
41593    ///
41594    /// Supported operand variants:
41595    ///
41596    /// ```text
41597    /// +---+--------------------+
41598    /// | # | Operands           |
41599    /// +---+--------------------+
41600    /// | 1 | Xmm, Xmm, Mem, Imm |
41601    /// | 2 | Xmm, Xmm, Xmm, Imm |
41602    /// | 3 | Ymm, Ymm, Mem, Imm |
41603    /// | 4 | Ymm, Ymm, Ymm, Imm |
41604    /// | 5 | Zmm, Zmm, Mem, Imm |
41605    /// | 6 | Zmm, Zmm, Zmm, Imm |
41606    /// +---+--------------------+
41607    /// ```
41608    #[inline]
41609    pub fn vpclmulqdq<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
41610    where Assembler<'a>: VpclmulqdqEmitter<A, B, C, D> {
41611        <Self as VpclmulqdqEmitter<A, B, C, D>>::vpclmulqdq(self, op0, op1, op2, op3);
41612    }
41613    /// `VPDPBSSD`.
41614    ///
41615    /// Supported operand variants:
41616    ///
41617    /// ```text
41618    /// +---+---------------+
41619    /// | # | Operands      |
41620    /// +---+---------------+
41621    /// | 1 | Xmm, Xmm, Mem |
41622    /// | 2 | Xmm, Xmm, Xmm |
41623    /// | 3 | Ymm, Ymm, Mem |
41624    /// | 4 | Ymm, Ymm, Ymm |
41625    /// +---+---------------+
41626    /// ```
41627    #[inline]
41628    pub fn vpdpbssd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41629    where Assembler<'a>: VpdpbssdEmitter<A, B, C> {
41630        <Self as VpdpbssdEmitter<A, B, C>>::vpdpbssd(self, op0, op1, op2);
41631    }
41632    /// `VPDPBSSDS`.
41633    ///
41634    /// Supported operand variants:
41635    ///
41636    /// ```text
41637    /// +---+---------------+
41638    /// | # | Operands      |
41639    /// +---+---------------+
41640    /// | 1 | Xmm, Xmm, Mem |
41641    /// | 2 | Xmm, Xmm, Xmm |
41642    /// | 3 | Ymm, Ymm, Mem |
41643    /// | 4 | Ymm, Ymm, Ymm |
41644    /// +---+---------------+
41645    /// ```
41646    #[inline]
41647    pub fn vpdpbssds<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41648    where Assembler<'a>: VpdpbssdsEmitter<A, B, C> {
41649        <Self as VpdpbssdsEmitter<A, B, C>>::vpdpbssds(self, op0, op1, op2);
41650    }
41651    /// `VPDPBSUD`.
41652    ///
41653    /// Supported operand variants:
41654    ///
41655    /// ```text
41656    /// +---+---------------+
41657    /// | # | Operands      |
41658    /// +---+---------------+
41659    /// | 1 | Xmm, Xmm, Mem |
41660    /// | 2 | Xmm, Xmm, Xmm |
41661    /// | 3 | Ymm, Ymm, Mem |
41662    /// | 4 | Ymm, Ymm, Ymm |
41663    /// +---+---------------+
41664    /// ```
41665    #[inline]
41666    pub fn vpdpbsud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41667    where Assembler<'a>: VpdpbsudEmitter<A, B, C> {
41668        <Self as VpdpbsudEmitter<A, B, C>>::vpdpbsud(self, op0, op1, op2);
41669    }
41670    /// `VPDPBSUDS`.
41671    ///
41672    /// Supported operand variants:
41673    ///
41674    /// ```text
41675    /// +---+---------------+
41676    /// | # | Operands      |
41677    /// +---+---------------+
41678    /// | 1 | Xmm, Xmm, Mem |
41679    /// | 2 | Xmm, Xmm, Xmm |
41680    /// | 3 | Ymm, Ymm, Mem |
41681    /// | 4 | Ymm, Ymm, Ymm |
41682    /// +---+---------------+
41683    /// ```
41684    #[inline]
41685    pub fn vpdpbsuds<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41686    where Assembler<'a>: VpdpbsudsEmitter<A, B, C> {
41687        <Self as VpdpbsudsEmitter<A, B, C>>::vpdpbsuds(self, op0, op1, op2);
41688    }
41689    /// `VPDPBUUD`.
41690    ///
41691    /// Supported operand variants:
41692    ///
41693    /// ```text
41694    /// +---+---------------+
41695    /// | # | Operands      |
41696    /// +---+---------------+
41697    /// | 1 | Xmm, Xmm, Mem |
41698    /// | 2 | Xmm, Xmm, Xmm |
41699    /// | 3 | Ymm, Ymm, Mem |
41700    /// | 4 | Ymm, Ymm, Ymm |
41701    /// +---+---------------+
41702    /// ```
41703    #[inline]
41704    pub fn vpdpbuud<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41705    where Assembler<'a>: VpdpbuudEmitter<A, B, C> {
41706        <Self as VpdpbuudEmitter<A, B, C>>::vpdpbuud(self, op0, op1, op2);
41707    }
41708    /// `VPDPBUUDS`.
41709    ///
41710    /// Supported operand variants:
41711    ///
41712    /// ```text
41713    /// +---+---------------+
41714    /// | # | Operands      |
41715    /// +---+---------------+
41716    /// | 1 | Xmm, Xmm, Mem |
41717    /// | 2 | Xmm, Xmm, Xmm |
41718    /// | 3 | Ymm, Ymm, Mem |
41719    /// | 4 | Ymm, Ymm, Ymm |
41720    /// +---+---------------+
41721    /// ```
41722    #[inline]
41723    pub fn vpdpbuuds<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41724    where Assembler<'a>: VpdpbuudsEmitter<A, B, C> {
41725        <Self as VpdpbuudsEmitter<A, B, C>>::vpdpbuuds(self, op0, op1, op2);
41726    }
41727    /// `VRCPPH`.
41728    ///
41729    /// Supported operand variants:
41730    ///
41731    /// ```text
41732    /// +---+----------+
41733    /// | # | Operands |
41734    /// +---+----------+
41735    /// | 1 | Xmm, Mem |
41736    /// | 2 | Xmm, Xmm |
41737    /// | 3 | Ymm, Mem |
41738    /// | 4 | Ymm, Ymm |
41739    /// | 5 | Zmm, Mem |
41740    /// | 6 | Zmm, Zmm |
41741    /// +---+----------+
41742    /// ```
41743    #[inline]
41744    pub fn vrcpph<A, B>(&mut self, op0: A, op1: B)
41745    where Assembler<'a>: VrcpphEmitter<A, B> {
41746        <Self as VrcpphEmitter<A, B>>::vrcpph(self, op0, op1);
41747    }
41748    /// `VRCPPH_MASK`.
41749    ///
41750    /// Supported operand variants:
41751    ///
41752    /// ```text
41753    /// +---+----------+
41754    /// | # | Operands |
41755    /// +---+----------+
41756    /// | 1 | Xmm, Mem |
41757    /// | 2 | Xmm, Xmm |
41758    /// | 3 | Ymm, Mem |
41759    /// | 4 | Ymm, Ymm |
41760    /// | 5 | Zmm, Mem |
41761    /// | 6 | Zmm, Zmm |
41762    /// +---+----------+
41763    /// ```
41764    #[inline]
41765    pub fn vrcpph_mask<A, B>(&mut self, op0: A, op1: B)
41766    where Assembler<'a>: VrcpphMaskEmitter<A, B> {
41767        <Self as VrcpphMaskEmitter<A, B>>::vrcpph_mask(self, op0, op1);
41768    }
41769    /// `VRCPPH_MASKZ`.
41770    ///
41771    /// Supported operand variants:
41772    ///
41773    /// ```text
41774    /// +---+----------+
41775    /// | # | Operands |
41776    /// +---+----------+
41777    /// | 1 | Xmm, Mem |
41778    /// | 2 | Xmm, Xmm |
41779    /// | 3 | Ymm, Mem |
41780    /// | 4 | Ymm, Ymm |
41781    /// | 5 | Zmm, Mem |
41782    /// | 6 | Zmm, Zmm |
41783    /// +---+----------+
41784    /// ```
41785    #[inline]
41786    pub fn vrcpph_maskz<A, B>(&mut self, op0: A, op1: B)
41787    where Assembler<'a>: VrcpphMaskzEmitter<A, B> {
41788        <Self as VrcpphMaskzEmitter<A, B>>::vrcpph_maskz(self, op0, op1);
41789    }
41790    /// `VRCPSH`.
41791    ///
41792    /// Supported operand variants:
41793    ///
41794    /// ```text
41795    /// +---+---------------+
41796    /// | # | Operands      |
41797    /// +---+---------------+
41798    /// | 1 | Xmm, Xmm, Mem |
41799    /// | 2 | Xmm, Xmm, Xmm |
41800    /// +---+---------------+
41801    /// ```
41802    #[inline]
41803    pub fn vrcpsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41804    where Assembler<'a>: VrcpshEmitter<A, B, C> {
41805        <Self as VrcpshEmitter<A, B, C>>::vrcpsh(self, op0, op1, op2);
41806    }
41807    /// `VRCPSH_MASK`.
41808    ///
41809    /// Supported operand variants:
41810    ///
41811    /// ```text
41812    /// +---+---------------+
41813    /// | # | Operands      |
41814    /// +---+---------------+
41815    /// | 1 | Xmm, Xmm, Mem |
41816    /// | 2 | Xmm, Xmm, Xmm |
41817    /// +---+---------------+
41818    /// ```
41819    #[inline]
41820    pub fn vrcpsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41821    where Assembler<'a>: VrcpshMaskEmitter<A, B, C> {
41822        <Self as VrcpshMaskEmitter<A, B, C>>::vrcpsh_mask(self, op0, op1, op2);
41823    }
41824    /// `VRCPSH_MASKZ`.
41825    ///
41826    /// Supported operand variants:
41827    ///
41828    /// ```text
41829    /// +---+---------------+
41830    /// | # | Operands      |
41831    /// +---+---------------+
41832    /// | 1 | Xmm, Xmm, Mem |
41833    /// | 2 | Xmm, Xmm, Xmm |
41834    /// +---+---------------+
41835    /// ```
41836    #[inline]
41837    pub fn vrcpsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41838    where Assembler<'a>: VrcpshMaskzEmitter<A, B, C> {
41839        <Self as VrcpshMaskzEmitter<A, B, C>>::vrcpsh_maskz(self, op0, op1, op2);
41840    }
41841    /// `VREDUCEPH`.
41842    ///
41843    /// Supported operand variants:
41844    ///
41845    /// ```text
41846    /// +---+---------------+
41847    /// | # | Operands      |
41848    /// +---+---------------+
41849    /// | 1 | Xmm, Mem, Imm |
41850    /// | 2 | Xmm, Xmm, Imm |
41851    /// | 3 | Ymm, Mem, Imm |
41852    /// | 4 | Ymm, Ymm, Imm |
41853    /// | 5 | Zmm, Mem, Imm |
41854    /// | 6 | Zmm, Zmm, Imm |
41855    /// +---+---------------+
41856    /// ```
41857    #[inline]
41858    pub fn vreduceph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41859    where Assembler<'a>: VreducephEmitter<A, B, C> {
41860        <Self as VreducephEmitter<A, B, C>>::vreduceph(self, op0, op1, op2);
41861    }
41862    /// `VREDUCEPH_MASK`.
41863    ///
41864    /// Supported operand variants:
41865    ///
41866    /// ```text
41867    /// +---+---------------+
41868    /// | # | Operands      |
41869    /// +---+---------------+
41870    /// | 1 | Xmm, Mem, Imm |
41871    /// | 2 | Xmm, Xmm, Imm |
41872    /// | 3 | Ymm, Mem, Imm |
41873    /// | 4 | Ymm, Ymm, Imm |
41874    /// | 5 | Zmm, Mem, Imm |
41875    /// | 6 | Zmm, Zmm, Imm |
41876    /// +---+---------------+
41877    /// ```
41878    #[inline]
41879    pub fn vreduceph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41880    where Assembler<'a>: VreducephMaskEmitter<A, B, C> {
41881        <Self as VreducephMaskEmitter<A, B, C>>::vreduceph_mask(self, op0, op1, op2);
41882    }
41883    /// `VREDUCEPH_MASK_SAE`.
41884    ///
41885    /// Supported operand variants:
41886    ///
41887    /// ```text
41888    /// +---+---------------+
41889    /// | # | Operands      |
41890    /// +---+---------------+
41891    /// | 1 | Zmm, Zmm, Imm |
41892    /// +---+---------------+
41893    /// ```
41894    #[inline]
41895    pub fn vreduceph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41896    where Assembler<'a>: VreducephMaskSaeEmitter<A, B, C> {
41897        <Self as VreducephMaskSaeEmitter<A, B, C>>::vreduceph_mask_sae(self, op0, op1, op2);
41898    }
41899    /// `VREDUCEPH_MASKZ`.
41900    ///
41901    /// Supported operand variants:
41902    ///
41903    /// ```text
41904    /// +---+---------------+
41905    /// | # | Operands      |
41906    /// +---+---------------+
41907    /// | 1 | Xmm, Mem, Imm |
41908    /// | 2 | Xmm, Xmm, Imm |
41909    /// | 3 | Ymm, Mem, Imm |
41910    /// | 4 | Ymm, Ymm, Imm |
41911    /// | 5 | Zmm, Mem, Imm |
41912    /// | 6 | Zmm, Zmm, Imm |
41913    /// +---+---------------+
41914    /// ```
41915    #[inline]
41916    pub fn vreduceph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41917    where Assembler<'a>: VreducephMaskzEmitter<A, B, C> {
41918        <Self as VreducephMaskzEmitter<A, B, C>>::vreduceph_maskz(self, op0, op1, op2);
41919    }
41920    /// `VREDUCEPH_MASKZ_SAE`.
41921    ///
41922    /// Supported operand variants:
41923    ///
41924    /// ```text
41925    /// +---+---------------+
41926    /// | # | Operands      |
41927    /// +---+---------------+
41928    /// | 1 | Zmm, Zmm, Imm |
41929    /// +---+---------------+
41930    /// ```
41931    #[inline]
41932    pub fn vreduceph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41933    where Assembler<'a>: VreducephMaskzSaeEmitter<A, B, C> {
41934        <Self as VreducephMaskzSaeEmitter<A, B, C>>::vreduceph_maskz_sae(self, op0, op1, op2);
41935    }
41936    /// `VREDUCEPH_SAE`.
41937    ///
41938    /// Supported operand variants:
41939    ///
41940    /// ```text
41941    /// +---+---------------+
41942    /// | # | Operands      |
41943    /// +---+---------------+
41944    /// | 1 | Zmm, Zmm, Imm |
41945    /// +---+---------------+
41946    /// ```
41947    #[inline]
41948    pub fn vreduceph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
41949    where Assembler<'a>: VreducephSaeEmitter<A, B, C> {
41950        <Self as VreducephSaeEmitter<A, B, C>>::vreduceph_sae(self, op0, op1, op2);
41951    }
41952    /// `VREDUCESH`.
41953    ///
41954    /// Supported operand variants:
41955    ///
41956    /// ```text
41957    /// +---+--------------------+
41958    /// | # | Operands           |
41959    /// +---+--------------------+
41960    /// | 1 | Xmm, Xmm, Mem, Imm |
41961    /// | 2 | Xmm, Xmm, Xmm, Imm |
41962    /// +---+--------------------+
41963    /// ```
41964    #[inline]
41965    pub fn vreducesh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
41966    where Assembler<'a>: VreduceshEmitter<A, B, C, D> {
41967        <Self as VreduceshEmitter<A, B, C, D>>::vreducesh(self, op0, op1, op2, op3);
41968    }
41969    /// `VREDUCESH_MASK`.
41970    ///
41971    /// Supported operand variants:
41972    ///
41973    /// ```text
41974    /// +---+--------------------+
41975    /// | # | Operands           |
41976    /// +---+--------------------+
41977    /// | 1 | Xmm, Xmm, Mem, Imm |
41978    /// | 2 | Xmm, Xmm, Xmm, Imm |
41979    /// +---+--------------------+
41980    /// ```
41981    #[inline]
41982    pub fn vreducesh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
41983    where Assembler<'a>: VreduceshMaskEmitter<A, B, C, D> {
41984        <Self as VreduceshMaskEmitter<A, B, C, D>>::vreducesh_mask(self, op0, op1, op2, op3);
41985    }
41986    /// `VREDUCESH_MASK_SAE`.
41987    ///
41988    /// Supported operand variants:
41989    ///
41990    /// ```text
41991    /// +---+--------------------+
41992    /// | # | Operands           |
41993    /// +---+--------------------+
41994    /// | 1 | Xmm, Xmm, Xmm, Imm |
41995    /// +---+--------------------+
41996    /// ```
41997    #[inline]
41998    pub fn vreducesh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
41999    where Assembler<'a>: VreduceshMaskSaeEmitter<A, B, C, D> {
42000        <Self as VreduceshMaskSaeEmitter<A, B, C, D>>::vreducesh_mask_sae(self, op0, op1, op2, op3);
42001    }
42002    /// `VREDUCESH_MASKZ`.
42003    ///
42004    /// Supported operand variants:
42005    ///
42006    /// ```text
42007    /// +---+--------------------+
42008    /// | # | Operands           |
42009    /// +---+--------------------+
42010    /// | 1 | Xmm, Xmm, Mem, Imm |
42011    /// | 2 | Xmm, Xmm, Xmm, Imm |
42012    /// +---+--------------------+
42013    /// ```
42014    #[inline]
42015    pub fn vreducesh_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42016    where Assembler<'a>: VreduceshMaskzEmitter<A, B, C, D> {
42017        <Self as VreduceshMaskzEmitter<A, B, C, D>>::vreducesh_maskz(self, op0, op1, op2, op3);
42018    }
42019    /// `VREDUCESH_MASKZ_SAE`.
42020    ///
42021    /// Supported operand variants:
42022    ///
42023    /// ```text
42024    /// +---+--------------------+
42025    /// | # | Operands           |
42026    /// +---+--------------------+
42027    /// | 1 | Xmm, Xmm, Xmm, Imm |
42028    /// +---+--------------------+
42029    /// ```
42030    #[inline]
42031    pub fn vreducesh_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42032    where Assembler<'a>: VreduceshMaskzSaeEmitter<A, B, C, D> {
42033        <Self as VreduceshMaskzSaeEmitter<A, B, C, D>>::vreducesh_maskz_sae(self, op0, op1, op2, op3);
42034    }
42035    /// `VREDUCESH_SAE`.
42036    ///
42037    /// Supported operand variants:
42038    ///
42039    /// ```text
42040    /// +---+--------------------+
42041    /// | # | Operands           |
42042    /// +---+--------------------+
42043    /// | 1 | Xmm, Xmm, Xmm, Imm |
42044    /// +---+--------------------+
42045    /// ```
42046    #[inline]
42047    pub fn vreducesh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42048    where Assembler<'a>: VreduceshSaeEmitter<A, B, C, D> {
42049        <Self as VreduceshSaeEmitter<A, B, C, D>>::vreducesh_sae(self, op0, op1, op2, op3);
42050    }
42051    /// `VRNDSCALEPH`.
42052    ///
42053    /// Supported operand variants:
42054    ///
42055    /// ```text
42056    /// +---+---------------+
42057    /// | # | Operands      |
42058    /// +---+---------------+
42059    /// | 1 | Xmm, Mem, Imm |
42060    /// | 2 | Xmm, Xmm, Imm |
42061    /// | 3 | Ymm, Mem, Imm |
42062    /// | 4 | Ymm, Ymm, Imm |
42063    /// | 5 | Zmm, Mem, Imm |
42064    /// | 6 | Zmm, Zmm, Imm |
42065    /// +---+---------------+
42066    /// ```
42067    #[inline]
42068    pub fn vrndscaleph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42069    where Assembler<'a>: VrndscalephEmitter<A, B, C> {
42070        <Self as VrndscalephEmitter<A, B, C>>::vrndscaleph(self, op0, op1, op2);
42071    }
42072    /// `VRNDSCALEPH_MASK`.
42073    ///
42074    /// Supported operand variants:
42075    ///
42076    /// ```text
42077    /// +---+---------------+
42078    /// | # | Operands      |
42079    /// +---+---------------+
42080    /// | 1 | Xmm, Mem, Imm |
42081    /// | 2 | Xmm, Xmm, Imm |
42082    /// | 3 | Ymm, Mem, Imm |
42083    /// | 4 | Ymm, Ymm, Imm |
42084    /// | 5 | Zmm, Mem, Imm |
42085    /// | 6 | Zmm, Zmm, Imm |
42086    /// +---+---------------+
42087    /// ```
42088    #[inline]
42089    pub fn vrndscaleph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42090    where Assembler<'a>: VrndscalephMaskEmitter<A, B, C> {
42091        <Self as VrndscalephMaskEmitter<A, B, C>>::vrndscaleph_mask(self, op0, op1, op2);
42092    }
42093    /// `VRNDSCALEPH_MASK_SAE`.
42094    ///
42095    /// Supported operand variants:
42096    ///
42097    /// ```text
42098    /// +---+---------------+
42099    /// | # | Operands      |
42100    /// +---+---------------+
42101    /// | 1 | Zmm, Zmm, Imm |
42102    /// +---+---------------+
42103    /// ```
42104    #[inline]
42105    pub fn vrndscaleph_mask_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42106    where Assembler<'a>: VrndscalephMaskSaeEmitter<A, B, C> {
42107        <Self as VrndscalephMaskSaeEmitter<A, B, C>>::vrndscaleph_mask_sae(self, op0, op1, op2);
42108    }
42109    /// `VRNDSCALEPH_MASKZ`.
42110    ///
42111    /// Supported operand variants:
42112    ///
42113    /// ```text
42114    /// +---+---------------+
42115    /// | # | Operands      |
42116    /// +---+---------------+
42117    /// | 1 | Xmm, Mem, Imm |
42118    /// | 2 | Xmm, Xmm, Imm |
42119    /// | 3 | Ymm, Mem, Imm |
42120    /// | 4 | Ymm, Ymm, Imm |
42121    /// | 5 | Zmm, Mem, Imm |
42122    /// | 6 | Zmm, Zmm, Imm |
42123    /// +---+---------------+
42124    /// ```
42125    #[inline]
42126    pub fn vrndscaleph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42127    where Assembler<'a>: VrndscalephMaskzEmitter<A, B, C> {
42128        <Self as VrndscalephMaskzEmitter<A, B, C>>::vrndscaleph_maskz(self, op0, op1, op2);
42129    }
42130    /// `VRNDSCALEPH_MASKZ_SAE`.
42131    ///
42132    /// Supported operand variants:
42133    ///
42134    /// ```text
42135    /// +---+---------------+
42136    /// | # | Operands      |
42137    /// +---+---------------+
42138    /// | 1 | Zmm, Zmm, Imm |
42139    /// +---+---------------+
42140    /// ```
42141    #[inline]
42142    pub fn vrndscaleph_maskz_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42143    where Assembler<'a>: VrndscalephMaskzSaeEmitter<A, B, C> {
42144        <Self as VrndscalephMaskzSaeEmitter<A, B, C>>::vrndscaleph_maskz_sae(self, op0, op1, op2);
42145    }
42146    /// `VRNDSCALEPH_SAE`.
42147    ///
42148    /// Supported operand variants:
42149    ///
42150    /// ```text
42151    /// +---+---------------+
42152    /// | # | Operands      |
42153    /// +---+---------------+
42154    /// | 1 | Zmm, Zmm, Imm |
42155    /// +---+---------------+
42156    /// ```
42157    #[inline]
42158    pub fn vrndscaleph_sae<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42159    where Assembler<'a>: VrndscalephSaeEmitter<A, B, C> {
42160        <Self as VrndscalephSaeEmitter<A, B, C>>::vrndscaleph_sae(self, op0, op1, op2);
42161    }
42162    /// `VRNDSCALESH`.
42163    ///
42164    /// Supported operand variants:
42165    ///
42166    /// ```text
42167    /// +---+--------------------+
42168    /// | # | Operands           |
42169    /// +---+--------------------+
42170    /// | 1 | Xmm, Xmm, Mem, Imm |
42171    /// | 2 | Xmm, Xmm, Xmm, Imm |
42172    /// +---+--------------------+
42173    /// ```
42174    #[inline]
42175    pub fn vrndscalesh<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42176    where Assembler<'a>: VrndscaleshEmitter<A, B, C, D> {
42177        <Self as VrndscaleshEmitter<A, B, C, D>>::vrndscalesh(self, op0, op1, op2, op3);
42178    }
42179    /// `VRNDSCALESH_MASK`.
42180    ///
42181    /// Supported operand variants:
42182    ///
42183    /// ```text
42184    /// +---+--------------------+
42185    /// | # | Operands           |
42186    /// +---+--------------------+
42187    /// | 1 | Xmm, Xmm, Mem, Imm |
42188    /// | 2 | Xmm, Xmm, Xmm, Imm |
42189    /// +---+--------------------+
42190    /// ```
42191    #[inline]
42192    pub fn vrndscalesh_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42193    where Assembler<'a>: VrndscaleshMaskEmitter<A, B, C, D> {
42194        <Self as VrndscaleshMaskEmitter<A, B, C, D>>::vrndscalesh_mask(self, op0, op1, op2, op3);
42195    }
42196    /// `VRNDSCALESH_MASK_SAE`.
42197    ///
42198    /// Supported operand variants:
42199    ///
42200    /// ```text
42201    /// +---+--------------------+
42202    /// | # | Operands           |
42203    /// +---+--------------------+
42204    /// | 1 | Xmm, Xmm, Xmm, Imm |
42205    /// +---+--------------------+
42206    /// ```
42207    #[inline]
42208    pub fn vrndscalesh_mask_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42209    where Assembler<'a>: VrndscaleshMaskSaeEmitter<A, B, C, D> {
42210        <Self as VrndscaleshMaskSaeEmitter<A, B, C, D>>::vrndscalesh_mask_sae(self, op0, op1, op2, op3);
42211    }
42212    /// `VRNDSCALESH_MASKZ`.
42213    ///
42214    /// Supported operand variants:
42215    ///
42216    /// ```text
42217    /// +---+--------------------+
42218    /// | # | Operands           |
42219    /// +---+--------------------+
42220    /// | 1 | Xmm, Xmm, Mem, Imm |
42221    /// | 2 | Xmm, Xmm, Xmm, Imm |
42222    /// +---+--------------------+
42223    /// ```
42224    #[inline]
42225    pub fn vrndscalesh_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42226    where Assembler<'a>: VrndscaleshMaskzEmitter<A, B, C, D> {
42227        <Self as VrndscaleshMaskzEmitter<A, B, C, D>>::vrndscalesh_maskz(self, op0, op1, op2, op3);
42228    }
42229    /// `VRNDSCALESH_MASKZ_SAE`.
42230    ///
42231    /// Supported operand variants:
42232    ///
42233    /// ```text
42234    /// +---+--------------------+
42235    /// | # | Operands           |
42236    /// +---+--------------------+
42237    /// | 1 | Xmm, Xmm, Xmm, Imm |
42238    /// +---+--------------------+
42239    /// ```
42240    #[inline]
42241    pub fn vrndscalesh_maskz_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42242    where Assembler<'a>: VrndscaleshMaskzSaeEmitter<A, B, C, D> {
42243        <Self as VrndscaleshMaskzSaeEmitter<A, B, C, D>>::vrndscalesh_maskz_sae(self, op0, op1, op2, op3);
42244    }
42245    /// `VRNDSCALESH_SAE`.
42246    ///
42247    /// Supported operand variants:
42248    ///
42249    /// ```text
42250    /// +---+--------------------+
42251    /// | # | Operands           |
42252    /// +---+--------------------+
42253    /// | 1 | Xmm, Xmm, Xmm, Imm |
42254    /// +---+--------------------+
42255    /// ```
42256    #[inline]
42257    pub fn vrndscalesh_sae<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
42258    where Assembler<'a>: VrndscaleshSaeEmitter<A, B, C, D> {
42259        <Self as VrndscaleshSaeEmitter<A, B, C, D>>::vrndscalesh_sae(self, op0, op1, op2, op3);
42260    }
42261    /// `VRSQRTPH`.
42262    ///
42263    /// Supported operand variants:
42264    ///
42265    /// ```text
42266    /// +---+----------+
42267    /// | # | Operands |
42268    /// +---+----------+
42269    /// | 1 | Xmm, Mem |
42270    /// | 2 | Xmm, Xmm |
42271    /// | 3 | Ymm, Mem |
42272    /// | 4 | Ymm, Ymm |
42273    /// | 5 | Zmm, Mem |
42274    /// | 6 | Zmm, Zmm |
42275    /// +---+----------+
42276    /// ```
42277    #[inline]
42278    pub fn vrsqrtph<A, B>(&mut self, op0: A, op1: B)
42279    where Assembler<'a>: VrsqrtphEmitter<A, B> {
42280        <Self as VrsqrtphEmitter<A, B>>::vrsqrtph(self, op0, op1);
42281    }
42282    /// `VRSQRTPH_MASK`.
42283    ///
42284    /// Supported operand variants:
42285    ///
42286    /// ```text
42287    /// +---+----------+
42288    /// | # | Operands |
42289    /// +---+----------+
42290    /// | 1 | Xmm, Mem |
42291    /// | 2 | Xmm, Xmm |
42292    /// | 3 | Ymm, Mem |
42293    /// | 4 | Ymm, Ymm |
42294    /// | 5 | Zmm, Mem |
42295    /// | 6 | Zmm, Zmm |
42296    /// +---+----------+
42297    /// ```
42298    #[inline]
42299    pub fn vrsqrtph_mask<A, B>(&mut self, op0: A, op1: B)
42300    where Assembler<'a>: VrsqrtphMaskEmitter<A, B> {
42301        <Self as VrsqrtphMaskEmitter<A, B>>::vrsqrtph_mask(self, op0, op1);
42302    }
42303    /// `VRSQRTPH_MASKZ`.
42304    ///
42305    /// Supported operand variants:
42306    ///
42307    /// ```text
42308    /// +---+----------+
42309    /// | # | Operands |
42310    /// +---+----------+
42311    /// | 1 | Xmm, Mem |
42312    /// | 2 | Xmm, Xmm |
42313    /// | 3 | Ymm, Mem |
42314    /// | 4 | Ymm, Ymm |
42315    /// | 5 | Zmm, Mem |
42316    /// | 6 | Zmm, Zmm |
42317    /// +---+----------+
42318    /// ```
42319    #[inline]
42320    pub fn vrsqrtph_maskz<A, B>(&mut self, op0: A, op1: B)
42321    where Assembler<'a>: VrsqrtphMaskzEmitter<A, B> {
42322        <Self as VrsqrtphMaskzEmitter<A, B>>::vrsqrtph_maskz(self, op0, op1);
42323    }
42324    /// `VRSQRTSH`.
42325    ///
42326    /// Supported operand variants:
42327    ///
42328    /// ```text
42329    /// +---+---------------+
42330    /// | # | Operands      |
42331    /// +---+---------------+
42332    /// | 1 | Xmm, Xmm, Mem |
42333    /// | 2 | Xmm, Xmm, Xmm |
42334    /// +---+---------------+
42335    /// ```
42336    #[inline]
42337    pub fn vrsqrtsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42338    where Assembler<'a>: VrsqrtshEmitter<A, B, C> {
42339        <Self as VrsqrtshEmitter<A, B, C>>::vrsqrtsh(self, op0, op1, op2);
42340    }
42341    /// `VRSQRTSH_MASK`.
42342    ///
42343    /// Supported operand variants:
42344    ///
42345    /// ```text
42346    /// +---+---------------+
42347    /// | # | Operands      |
42348    /// +---+---------------+
42349    /// | 1 | Xmm, Xmm, Mem |
42350    /// | 2 | Xmm, Xmm, Xmm |
42351    /// +---+---------------+
42352    /// ```
42353    #[inline]
42354    pub fn vrsqrtsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42355    where Assembler<'a>: VrsqrtshMaskEmitter<A, B, C> {
42356        <Self as VrsqrtshMaskEmitter<A, B, C>>::vrsqrtsh_mask(self, op0, op1, op2);
42357    }
42358    /// `VRSQRTSH_MASKZ`.
42359    ///
42360    /// Supported operand variants:
42361    ///
42362    /// ```text
42363    /// +---+---------------+
42364    /// | # | Operands      |
42365    /// +---+---------------+
42366    /// | 1 | Xmm, Xmm, Mem |
42367    /// | 2 | Xmm, Xmm, Xmm |
42368    /// +---+---------------+
42369    /// ```
42370    #[inline]
42371    pub fn vrsqrtsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42372    where Assembler<'a>: VrsqrtshMaskzEmitter<A, B, C> {
42373        <Self as VrsqrtshMaskzEmitter<A, B, C>>::vrsqrtsh_maskz(self, op0, op1, op2);
42374    }
42375    /// `VSCALEFPH`.
42376    ///
42377    /// Supported operand variants:
42378    ///
42379    /// ```text
42380    /// +---+---------------+
42381    /// | # | Operands      |
42382    /// +---+---------------+
42383    /// | 1 | Xmm, Xmm, Mem |
42384    /// | 2 | Xmm, Xmm, Xmm |
42385    /// | 3 | Ymm, Ymm, Mem |
42386    /// | 4 | Ymm, Ymm, Ymm |
42387    /// | 5 | Zmm, Zmm, Mem |
42388    /// | 6 | Zmm, Zmm, Zmm |
42389    /// +---+---------------+
42390    /// ```
42391    #[inline]
42392    pub fn vscalefph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42393    where Assembler<'a>: VscalefphEmitter<A, B, C> {
42394        <Self as VscalefphEmitter<A, B, C>>::vscalefph(self, op0, op1, op2);
42395    }
42396    /// `VSCALEFPH_ER`.
42397    ///
42398    /// Supported operand variants:
42399    ///
42400    /// ```text
42401    /// +---+---------------+
42402    /// | # | Operands      |
42403    /// +---+---------------+
42404    /// | 1 | Zmm, Zmm, Zmm |
42405    /// +---+---------------+
42406    /// ```
42407    #[inline]
42408    pub fn vscalefph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42409    where Assembler<'a>: VscalefphErEmitter<A, B, C> {
42410        <Self as VscalefphErEmitter<A, B, C>>::vscalefph_er(self, op0, op1, op2);
42411    }
42412    /// `VSCALEFPH_MASK`.
42413    ///
42414    /// Supported operand variants:
42415    ///
42416    /// ```text
42417    /// +---+---------------+
42418    /// | # | Operands      |
42419    /// +---+---------------+
42420    /// | 1 | Xmm, Xmm, Mem |
42421    /// | 2 | Xmm, Xmm, Xmm |
42422    /// | 3 | Ymm, Ymm, Mem |
42423    /// | 4 | Ymm, Ymm, Ymm |
42424    /// | 5 | Zmm, Zmm, Mem |
42425    /// | 6 | Zmm, Zmm, Zmm |
42426    /// +---+---------------+
42427    /// ```
42428    #[inline]
42429    pub fn vscalefph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42430    where Assembler<'a>: VscalefphMaskEmitter<A, B, C> {
42431        <Self as VscalefphMaskEmitter<A, B, C>>::vscalefph_mask(self, op0, op1, op2);
42432    }
42433    /// `VSCALEFPH_MASK_ER`.
42434    ///
42435    /// Supported operand variants:
42436    ///
42437    /// ```text
42438    /// +---+---------------+
42439    /// | # | Operands      |
42440    /// +---+---------------+
42441    /// | 1 | Zmm, Zmm, Zmm |
42442    /// +---+---------------+
42443    /// ```
42444    #[inline]
42445    pub fn vscalefph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42446    where Assembler<'a>: VscalefphMaskErEmitter<A, B, C> {
42447        <Self as VscalefphMaskErEmitter<A, B, C>>::vscalefph_mask_er(self, op0, op1, op2);
42448    }
42449    /// `VSCALEFPH_MASKZ`.
42450    ///
42451    /// Supported operand variants:
42452    ///
42453    /// ```text
42454    /// +---+---------------+
42455    /// | # | Operands      |
42456    /// +---+---------------+
42457    /// | 1 | Xmm, Xmm, Mem |
42458    /// | 2 | Xmm, Xmm, Xmm |
42459    /// | 3 | Ymm, Ymm, Mem |
42460    /// | 4 | Ymm, Ymm, Ymm |
42461    /// | 5 | Zmm, Zmm, Mem |
42462    /// | 6 | Zmm, Zmm, Zmm |
42463    /// +---+---------------+
42464    /// ```
42465    #[inline]
42466    pub fn vscalefph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42467    where Assembler<'a>: VscalefphMaskzEmitter<A, B, C> {
42468        <Self as VscalefphMaskzEmitter<A, B, C>>::vscalefph_maskz(self, op0, op1, op2);
42469    }
42470    /// `VSCALEFPH_MASKZ_ER`.
42471    ///
42472    /// Supported operand variants:
42473    ///
42474    /// ```text
42475    /// +---+---------------+
42476    /// | # | Operands      |
42477    /// +---+---------------+
42478    /// | 1 | Zmm, Zmm, Zmm |
42479    /// +---+---------------+
42480    /// ```
42481    #[inline]
42482    pub fn vscalefph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42483    where Assembler<'a>: VscalefphMaskzErEmitter<A, B, C> {
42484        <Self as VscalefphMaskzErEmitter<A, B, C>>::vscalefph_maskz_er(self, op0, op1, op2);
42485    }
42486    /// `VSCALEFSH`.
42487    ///
42488    /// Supported operand variants:
42489    ///
42490    /// ```text
42491    /// +---+---------------+
42492    /// | # | Operands      |
42493    /// +---+---------------+
42494    /// | 1 | Xmm, Xmm, Mem |
42495    /// | 2 | Xmm, Xmm, Xmm |
42496    /// +---+---------------+
42497    /// ```
42498    #[inline]
42499    pub fn vscalefsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42500    where Assembler<'a>: VscalefshEmitter<A, B, C> {
42501        <Self as VscalefshEmitter<A, B, C>>::vscalefsh(self, op0, op1, op2);
42502    }
42503    /// `VSCALEFSH_ER`.
42504    ///
42505    /// Supported operand variants:
42506    ///
42507    /// ```text
42508    /// +---+---------------+
42509    /// | # | Operands      |
42510    /// +---+---------------+
42511    /// | 1 | Xmm, Xmm, Xmm |
42512    /// +---+---------------+
42513    /// ```
42514    #[inline]
42515    pub fn vscalefsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42516    where Assembler<'a>: VscalefshErEmitter<A, B, C> {
42517        <Self as VscalefshErEmitter<A, B, C>>::vscalefsh_er(self, op0, op1, op2);
42518    }
42519    /// `VSCALEFSH_MASK`.
42520    ///
42521    /// Supported operand variants:
42522    ///
42523    /// ```text
42524    /// +---+---------------+
42525    /// | # | Operands      |
42526    /// +---+---------------+
42527    /// | 1 | Xmm, Xmm, Mem |
42528    /// | 2 | Xmm, Xmm, Xmm |
42529    /// +---+---------------+
42530    /// ```
42531    #[inline]
42532    pub fn vscalefsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42533    where Assembler<'a>: VscalefshMaskEmitter<A, B, C> {
42534        <Self as VscalefshMaskEmitter<A, B, C>>::vscalefsh_mask(self, op0, op1, op2);
42535    }
42536    /// `VSCALEFSH_MASK_ER`.
42537    ///
42538    /// Supported operand variants:
42539    ///
42540    /// ```text
42541    /// +---+---------------+
42542    /// | # | Operands      |
42543    /// +---+---------------+
42544    /// | 1 | Xmm, Xmm, Xmm |
42545    /// +---+---------------+
42546    /// ```
42547    #[inline]
42548    pub fn vscalefsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42549    where Assembler<'a>: VscalefshMaskErEmitter<A, B, C> {
42550        <Self as VscalefshMaskErEmitter<A, B, C>>::vscalefsh_mask_er(self, op0, op1, op2);
42551    }
42552    /// `VSCALEFSH_MASKZ`.
42553    ///
42554    /// Supported operand variants:
42555    ///
42556    /// ```text
42557    /// +---+---------------+
42558    /// | # | Operands      |
42559    /// +---+---------------+
42560    /// | 1 | Xmm, Xmm, Mem |
42561    /// | 2 | Xmm, Xmm, Xmm |
42562    /// +---+---------------+
42563    /// ```
42564    #[inline]
42565    pub fn vscalefsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42566    where Assembler<'a>: VscalefshMaskzEmitter<A, B, C> {
42567        <Self as VscalefshMaskzEmitter<A, B, C>>::vscalefsh_maskz(self, op0, op1, op2);
42568    }
42569    /// `VSCALEFSH_MASKZ_ER`.
42570    ///
42571    /// Supported operand variants:
42572    ///
42573    /// ```text
42574    /// +---+---------------+
42575    /// | # | Operands      |
42576    /// +---+---------------+
42577    /// | 1 | Xmm, Xmm, Xmm |
42578    /// +---+---------------+
42579    /// ```
42580    #[inline]
42581    pub fn vscalefsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42582    where Assembler<'a>: VscalefshMaskzErEmitter<A, B, C> {
42583        <Self as VscalefshMaskzErEmitter<A, B, C>>::vscalefsh_maskz_er(self, op0, op1, op2);
42584    }
42585    /// `VSM4KEY4`.
42586    ///
42587    /// Supported operand variants:
42588    ///
42589    /// ```text
42590    /// +---+---------------+
42591    /// | # | Operands      |
42592    /// +---+---------------+
42593    /// | 1 | Xmm, Xmm, Mem |
42594    /// | 2 | Xmm, Xmm, Xmm |
42595    /// | 3 | Ymm, Ymm, Mem |
42596    /// | 4 | Ymm, Ymm, Ymm |
42597    /// | 5 | Zmm, Zmm, Mem |
42598    /// | 6 | Zmm, Zmm, Zmm |
42599    /// +---+---------------+
42600    /// ```
42601    #[inline]
42602    pub fn vsm4key4<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42603    where Assembler<'a>: Vsm4key4Emitter<A, B, C> {
42604        <Self as Vsm4key4Emitter<A, B, C>>::vsm4key4(self, op0, op1, op2);
42605    }
42606    /// `VSM4RNDS4`.
42607    ///
42608    /// Supported operand variants:
42609    ///
42610    /// ```text
42611    /// +---+---------------+
42612    /// | # | Operands      |
42613    /// +---+---------------+
42614    /// | 1 | Xmm, Xmm, Mem |
42615    /// | 2 | Xmm, Xmm, Xmm |
42616    /// | 3 | Ymm, Ymm, Mem |
42617    /// | 4 | Ymm, Ymm, Ymm |
42618    /// | 5 | Zmm, Zmm, Mem |
42619    /// | 6 | Zmm, Zmm, Zmm |
42620    /// +---+---------------+
42621    /// ```
42622    #[inline]
42623    pub fn vsm4rnds4<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42624    where Assembler<'a>: Vsm4rnds4Emitter<A, B, C> {
42625        <Self as Vsm4rnds4Emitter<A, B, C>>::vsm4rnds4(self, op0, op1, op2);
42626    }
42627    /// `VSQRTPH`.
42628    ///
42629    /// Supported operand variants:
42630    ///
42631    /// ```text
42632    /// +---+----------+
42633    /// | # | Operands |
42634    /// +---+----------+
42635    /// | 1 | Xmm, Mem |
42636    /// | 2 | Xmm, Xmm |
42637    /// | 3 | Ymm, Mem |
42638    /// | 4 | Ymm, Ymm |
42639    /// | 5 | Zmm, Mem |
42640    /// | 6 | Zmm, Zmm |
42641    /// +---+----------+
42642    /// ```
42643    #[inline]
42644    pub fn vsqrtph<A, B>(&mut self, op0: A, op1: B)
42645    where Assembler<'a>: VsqrtphEmitter<A, B> {
42646        <Self as VsqrtphEmitter<A, B>>::vsqrtph(self, op0, op1);
42647    }
42648    /// `VSQRTPH_ER`.
42649    ///
42650    /// Supported operand variants:
42651    ///
42652    /// ```text
42653    /// +---+----------+
42654    /// | # | Operands |
42655    /// +---+----------+
42656    /// | 1 | Zmm, Zmm |
42657    /// +---+----------+
42658    /// ```
42659    #[inline]
42660    pub fn vsqrtph_er<A, B>(&mut self, op0: A, op1: B)
42661    where Assembler<'a>: VsqrtphErEmitter<A, B> {
42662        <Self as VsqrtphErEmitter<A, B>>::vsqrtph_er(self, op0, op1);
42663    }
42664    /// `VSQRTPH_MASK`.
42665    ///
42666    /// Supported operand variants:
42667    ///
42668    /// ```text
42669    /// +---+----------+
42670    /// | # | Operands |
42671    /// +---+----------+
42672    /// | 1 | Xmm, Mem |
42673    /// | 2 | Xmm, Xmm |
42674    /// | 3 | Ymm, Mem |
42675    /// | 4 | Ymm, Ymm |
42676    /// | 5 | Zmm, Mem |
42677    /// | 6 | Zmm, Zmm |
42678    /// +---+----------+
42679    /// ```
42680    #[inline]
42681    pub fn vsqrtph_mask<A, B>(&mut self, op0: A, op1: B)
42682    where Assembler<'a>: VsqrtphMaskEmitter<A, B> {
42683        <Self as VsqrtphMaskEmitter<A, B>>::vsqrtph_mask(self, op0, op1);
42684    }
42685    /// `VSQRTPH_MASK_ER`.
42686    ///
42687    /// Supported operand variants:
42688    ///
42689    /// ```text
42690    /// +---+----------+
42691    /// | # | Operands |
42692    /// +---+----------+
42693    /// | 1 | Zmm, Zmm |
42694    /// +---+----------+
42695    /// ```
42696    #[inline]
42697    pub fn vsqrtph_mask_er<A, B>(&mut self, op0: A, op1: B)
42698    where Assembler<'a>: VsqrtphMaskErEmitter<A, B> {
42699        <Self as VsqrtphMaskErEmitter<A, B>>::vsqrtph_mask_er(self, op0, op1);
42700    }
42701    /// `VSQRTPH_MASKZ`.
42702    ///
42703    /// Supported operand variants:
42704    ///
42705    /// ```text
42706    /// +---+----------+
42707    /// | # | Operands |
42708    /// +---+----------+
42709    /// | 1 | Xmm, Mem |
42710    /// | 2 | Xmm, Xmm |
42711    /// | 3 | Ymm, Mem |
42712    /// | 4 | Ymm, Ymm |
42713    /// | 5 | Zmm, Mem |
42714    /// | 6 | Zmm, Zmm |
42715    /// +---+----------+
42716    /// ```
42717    #[inline]
42718    pub fn vsqrtph_maskz<A, B>(&mut self, op0: A, op1: B)
42719    where Assembler<'a>: VsqrtphMaskzEmitter<A, B> {
42720        <Self as VsqrtphMaskzEmitter<A, B>>::vsqrtph_maskz(self, op0, op1);
42721    }
42722    /// `VSQRTPH_MASKZ_ER`.
42723    ///
42724    /// Supported operand variants:
42725    ///
42726    /// ```text
42727    /// +---+----------+
42728    /// | # | Operands |
42729    /// +---+----------+
42730    /// | 1 | Zmm, Zmm |
42731    /// +---+----------+
42732    /// ```
42733    #[inline]
42734    pub fn vsqrtph_maskz_er<A, B>(&mut self, op0: A, op1: B)
42735    where Assembler<'a>: VsqrtphMaskzErEmitter<A, B> {
42736        <Self as VsqrtphMaskzErEmitter<A, B>>::vsqrtph_maskz_er(self, op0, op1);
42737    }
42738    /// `VSQRTSH`.
42739    ///
42740    /// Supported operand variants:
42741    ///
42742    /// ```text
42743    /// +---+---------------+
42744    /// | # | Operands      |
42745    /// +---+---------------+
42746    /// | 1 | Xmm, Xmm, Mem |
42747    /// | 2 | Xmm, Xmm, Xmm |
42748    /// +---+---------------+
42749    /// ```
42750    #[inline]
42751    pub fn vsqrtsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42752    where Assembler<'a>: VsqrtshEmitter<A, B, C> {
42753        <Self as VsqrtshEmitter<A, B, C>>::vsqrtsh(self, op0, op1, op2);
42754    }
42755    /// `VSQRTSH_ER`.
42756    ///
42757    /// Supported operand variants:
42758    ///
42759    /// ```text
42760    /// +---+---------------+
42761    /// | # | Operands      |
42762    /// +---+---------------+
42763    /// | 1 | Xmm, Xmm, Xmm |
42764    /// +---+---------------+
42765    /// ```
42766    #[inline]
42767    pub fn vsqrtsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42768    where Assembler<'a>: VsqrtshErEmitter<A, B, C> {
42769        <Self as VsqrtshErEmitter<A, B, C>>::vsqrtsh_er(self, op0, op1, op2);
42770    }
42771    /// `VSQRTSH_MASK`.
42772    ///
42773    /// Supported operand variants:
42774    ///
42775    /// ```text
42776    /// +---+---------------+
42777    /// | # | Operands      |
42778    /// +---+---------------+
42779    /// | 1 | Xmm, Xmm, Mem |
42780    /// | 2 | Xmm, Xmm, Xmm |
42781    /// +---+---------------+
42782    /// ```
42783    #[inline]
42784    pub fn vsqrtsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42785    where Assembler<'a>: VsqrtshMaskEmitter<A, B, C> {
42786        <Self as VsqrtshMaskEmitter<A, B, C>>::vsqrtsh_mask(self, op0, op1, op2);
42787    }
42788    /// `VSQRTSH_MASK_ER`.
42789    ///
42790    /// Supported operand variants:
42791    ///
42792    /// ```text
42793    /// +---+---------------+
42794    /// | # | Operands      |
42795    /// +---+---------------+
42796    /// | 1 | Xmm, Xmm, Xmm |
42797    /// +---+---------------+
42798    /// ```
42799    #[inline]
42800    pub fn vsqrtsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42801    where Assembler<'a>: VsqrtshMaskErEmitter<A, B, C> {
42802        <Self as VsqrtshMaskErEmitter<A, B, C>>::vsqrtsh_mask_er(self, op0, op1, op2);
42803    }
42804    /// `VSQRTSH_MASKZ`.
42805    ///
42806    /// Supported operand variants:
42807    ///
42808    /// ```text
42809    /// +---+---------------+
42810    /// | # | Operands      |
42811    /// +---+---------------+
42812    /// | 1 | Xmm, Xmm, Mem |
42813    /// | 2 | Xmm, Xmm, Xmm |
42814    /// +---+---------------+
42815    /// ```
42816    #[inline]
42817    pub fn vsqrtsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42818    where Assembler<'a>: VsqrtshMaskzEmitter<A, B, C> {
42819        <Self as VsqrtshMaskzEmitter<A, B, C>>::vsqrtsh_maskz(self, op0, op1, op2);
42820    }
42821    /// `VSQRTSH_MASKZ_ER`.
42822    ///
42823    /// Supported operand variants:
42824    ///
42825    /// ```text
42826    /// +---+---------------+
42827    /// | # | Operands      |
42828    /// +---+---------------+
42829    /// | 1 | Xmm, Xmm, Xmm |
42830    /// +---+---------------+
42831    /// ```
42832    #[inline]
42833    pub fn vsqrtsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42834    where Assembler<'a>: VsqrtshMaskzErEmitter<A, B, C> {
42835        <Self as VsqrtshMaskzErEmitter<A, B, C>>::vsqrtsh_maskz_er(self, op0, op1, op2);
42836    }
42837    /// `VSUBPH`.
42838    ///
42839    /// Supported operand variants:
42840    ///
42841    /// ```text
42842    /// +---+---------------+
42843    /// | # | Operands      |
42844    /// +---+---------------+
42845    /// | 1 | Xmm, Xmm, Mem |
42846    /// | 2 | Xmm, Xmm, Xmm |
42847    /// | 3 | Ymm, Ymm, Mem |
42848    /// | 4 | Ymm, Ymm, Ymm |
42849    /// | 5 | Zmm, Zmm, Mem |
42850    /// | 6 | Zmm, Zmm, Zmm |
42851    /// +---+---------------+
42852    /// ```
42853    #[inline]
42854    pub fn vsubph<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42855    where Assembler<'a>: VsubphEmitter<A, B, C> {
42856        <Self as VsubphEmitter<A, B, C>>::vsubph(self, op0, op1, op2);
42857    }
42858    /// `VSUBPH_ER`.
42859    ///
42860    /// Supported operand variants:
42861    ///
42862    /// ```text
42863    /// +---+---------------+
42864    /// | # | Operands      |
42865    /// +---+---------------+
42866    /// | 1 | Zmm, Zmm, Zmm |
42867    /// +---+---------------+
42868    /// ```
42869    #[inline]
42870    pub fn vsubph_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42871    where Assembler<'a>: VsubphErEmitter<A, B, C> {
42872        <Self as VsubphErEmitter<A, B, C>>::vsubph_er(self, op0, op1, op2);
42873    }
42874    /// `VSUBPH_MASK`.
42875    ///
42876    /// Supported operand variants:
42877    ///
42878    /// ```text
42879    /// +---+---------------+
42880    /// | # | Operands      |
42881    /// +---+---------------+
42882    /// | 1 | Xmm, Xmm, Mem |
42883    /// | 2 | Xmm, Xmm, Xmm |
42884    /// | 3 | Ymm, Ymm, Mem |
42885    /// | 4 | Ymm, Ymm, Ymm |
42886    /// | 5 | Zmm, Zmm, Mem |
42887    /// | 6 | Zmm, Zmm, Zmm |
42888    /// +---+---------------+
42889    /// ```
42890    #[inline]
42891    pub fn vsubph_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42892    where Assembler<'a>: VsubphMaskEmitter<A, B, C> {
42893        <Self as VsubphMaskEmitter<A, B, C>>::vsubph_mask(self, op0, op1, op2);
42894    }
42895    /// `VSUBPH_MASK_ER`.
42896    ///
42897    /// Supported operand variants:
42898    ///
42899    /// ```text
42900    /// +---+---------------+
42901    /// | # | Operands      |
42902    /// +---+---------------+
42903    /// | 1 | Zmm, Zmm, Zmm |
42904    /// +---+---------------+
42905    /// ```
42906    #[inline]
42907    pub fn vsubph_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42908    where Assembler<'a>: VsubphMaskErEmitter<A, B, C> {
42909        <Self as VsubphMaskErEmitter<A, B, C>>::vsubph_mask_er(self, op0, op1, op2);
42910    }
42911    /// `VSUBPH_MASKZ`.
42912    ///
42913    /// Supported operand variants:
42914    ///
42915    /// ```text
42916    /// +---+---------------+
42917    /// | # | Operands      |
42918    /// +---+---------------+
42919    /// | 1 | Xmm, Xmm, Mem |
42920    /// | 2 | Xmm, Xmm, Xmm |
42921    /// | 3 | Ymm, Ymm, Mem |
42922    /// | 4 | Ymm, Ymm, Ymm |
42923    /// | 5 | Zmm, Zmm, Mem |
42924    /// | 6 | Zmm, Zmm, Zmm |
42925    /// +---+---------------+
42926    /// ```
42927    #[inline]
42928    pub fn vsubph_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42929    where Assembler<'a>: VsubphMaskzEmitter<A, B, C> {
42930        <Self as VsubphMaskzEmitter<A, B, C>>::vsubph_maskz(self, op0, op1, op2);
42931    }
42932    /// `VSUBPH_MASKZ_ER`.
42933    ///
42934    /// Supported operand variants:
42935    ///
42936    /// ```text
42937    /// +---+---------------+
42938    /// | # | Operands      |
42939    /// +---+---------------+
42940    /// | 1 | Zmm, Zmm, Zmm |
42941    /// +---+---------------+
42942    /// ```
42943    #[inline]
42944    pub fn vsubph_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42945    where Assembler<'a>: VsubphMaskzErEmitter<A, B, C> {
42946        <Self as VsubphMaskzErEmitter<A, B, C>>::vsubph_maskz_er(self, op0, op1, op2);
42947    }
42948    /// `VSUBSH`.
42949    ///
42950    /// Supported operand variants:
42951    ///
42952    /// ```text
42953    /// +---+---------------+
42954    /// | # | Operands      |
42955    /// +---+---------------+
42956    /// | 1 | Xmm, Xmm, Mem |
42957    /// | 2 | Xmm, Xmm, Xmm |
42958    /// +---+---------------+
42959    /// ```
42960    #[inline]
42961    pub fn vsubsh<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42962    where Assembler<'a>: VsubshEmitter<A, B, C> {
42963        <Self as VsubshEmitter<A, B, C>>::vsubsh(self, op0, op1, op2);
42964    }
42965    /// `VSUBSH_ER`.
42966    ///
42967    /// Supported operand variants:
42968    ///
42969    /// ```text
42970    /// +---+---------------+
42971    /// | # | Operands      |
42972    /// +---+---------------+
42973    /// | 1 | Xmm, Xmm, Xmm |
42974    /// +---+---------------+
42975    /// ```
42976    #[inline]
42977    pub fn vsubsh_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42978    where Assembler<'a>: VsubshErEmitter<A, B, C> {
42979        <Self as VsubshErEmitter<A, B, C>>::vsubsh_er(self, op0, op1, op2);
42980    }
42981    /// `VSUBSH_MASK`.
42982    ///
42983    /// Supported operand variants:
42984    ///
42985    /// ```text
42986    /// +---+---------------+
42987    /// | # | Operands      |
42988    /// +---+---------------+
42989    /// | 1 | Xmm, Xmm, Mem |
42990    /// | 2 | Xmm, Xmm, Xmm |
42991    /// +---+---------------+
42992    /// ```
42993    #[inline]
42994    pub fn vsubsh_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
42995    where Assembler<'a>: VsubshMaskEmitter<A, B, C> {
42996        <Self as VsubshMaskEmitter<A, B, C>>::vsubsh_mask(self, op0, op1, op2);
42997    }
42998    /// `VSUBSH_MASK_ER`.
42999    ///
43000    /// Supported operand variants:
43001    ///
43002    /// ```text
43003    /// +---+---------------+
43004    /// | # | Operands      |
43005    /// +---+---------------+
43006    /// | 1 | Xmm, Xmm, Xmm |
43007    /// +---+---------------+
43008    /// ```
43009    #[inline]
43010    pub fn vsubsh_mask_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
43011    where Assembler<'a>: VsubshMaskErEmitter<A, B, C> {
43012        <Self as VsubshMaskErEmitter<A, B, C>>::vsubsh_mask_er(self, op0, op1, op2);
43013    }
43014    /// `VSUBSH_MASKZ`.
43015    ///
43016    /// Supported operand variants:
43017    ///
43018    /// ```text
43019    /// +---+---------------+
43020    /// | # | Operands      |
43021    /// +---+---------------+
43022    /// | 1 | Xmm, Xmm, Mem |
43023    /// | 2 | Xmm, Xmm, Xmm |
43024    /// +---+---------------+
43025    /// ```
43026    #[inline]
43027    pub fn vsubsh_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
43028    where Assembler<'a>: VsubshMaskzEmitter<A, B, C> {
43029        <Self as VsubshMaskzEmitter<A, B, C>>::vsubsh_maskz(self, op0, op1, op2);
43030    }
43031    /// `VSUBSH_MASKZ_ER`.
43032    ///
43033    /// Supported operand variants:
43034    ///
43035    /// ```text
43036    /// +---+---------------+
43037    /// | # | Operands      |
43038    /// +---+---------------+
43039    /// | 1 | Xmm, Xmm, Xmm |
43040    /// +---+---------------+
43041    /// ```
43042    #[inline]
43043    pub fn vsubsh_maskz_er<A, B, C>(&mut self, op0: A, op1: B, op2: C)
43044    where Assembler<'a>: VsubshMaskzErEmitter<A, B, C> {
43045        <Self as VsubshMaskzErEmitter<A, B, C>>::vsubsh_maskz_er(self, op0, op1, op2);
43046    }
43047    /// `VUCOMISH`.
43048    ///
43049    /// Supported operand variants:
43050    ///
43051    /// ```text
43052    /// +---+----------+
43053    /// | # | Operands |
43054    /// +---+----------+
43055    /// | 1 | Xmm, Mem |
43056    /// | 2 | Xmm, Xmm |
43057    /// +---+----------+
43058    /// ```
43059    #[inline]
43060    pub fn vucomish<A, B>(&mut self, op0: A, op1: B)
43061    where Assembler<'a>: VucomishEmitter<A, B> {
43062        <Self as VucomishEmitter<A, B>>::vucomish(self, op0, op1);
43063    }
43064    /// `VUCOMISH_SAE`.
43065    ///
43066    /// Supported operand variants:
43067    ///
43068    /// ```text
43069    /// +---+----------+
43070    /// | # | Operands |
43071    /// +---+----------+
43072    /// | 1 | Xmm, Xmm |
43073    /// +---+----------+
43074    /// ```
43075    #[inline]
43076    pub fn vucomish_sae<A, B>(&mut self, op0: A, op1: B)
43077    where Assembler<'a>: VucomishSaeEmitter<A, B> {
43078        <Self as VucomishSaeEmitter<A, B>>::vucomish_sae(self, op0, op1);
43079    }
43080    /// `XCHG` (XCHG). 
43081    /// Exchanges the contents of the destination (first) and source (second) operands. The operands can be two general-purpose registers or a register and a memory location. If a memory operand is referenced, the processor’s locking protocol is automatically implemented for the duration of the exchange operation, regardless of the presence or absence of the LOCK prefix or of the value of the IOPL. (See the LOCK prefix description in this chapter for more information on the locking protocol.)
43082    ///
43083    ///
43084    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XCHG.html).
43085    ///
43086    /// Supported operand variants:
43087    ///
43088    /// ```text
43089    /// +---+--------------+
43090    /// | # | Operands     |
43091    /// +---+--------------+
43092    /// | 1 | GpbLo, GpbLo |
43093    /// | 2 | Gpd, Gpd     |
43094    /// | 3 | Gpq, Gpq     |
43095    /// | 4 | Gpw, Gpw     |
43096    /// | 5 | Mem, GpbLo   |
43097    /// | 6 | Mem, Gpd     |
43098    /// | 7 | Mem, Gpq     |
43099    /// | 8 | Mem, Gpw     |
43100    /// +---+--------------+
43101    /// ```
43102    #[inline]
43103    pub fn xchg<A, B>(&mut self, op0: A, op1: B)
43104    where Assembler<'a>: XchgEmitter<A, B> {
43105        <Self as XchgEmitter<A, B>>::xchg(self, op0, op1);
43106    }
43107    /// `XLATB` (XLATB). 
43108    /// Locates a byte entry in a table in memory, using the contents of the AL register as a table index, then copies the contents of the table entry back into the AL register. The index in the AL register is treated as an unsigned integer. The XLAT and XLATB instructions get the base address of the table in memory from either the DS:EBX or the DS:BX registers (depending on the address-size attribute of the instruction, 32 or 16, respectively). (The DS segment may be overridden with a segment override prefix.)
43109    ///
43110    ///
43111    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XLAT%3AXLATB.html).
43112    ///
43113    /// Supported operand variants:
43114    ///
43115    /// ```text
43116    /// +---+----------+
43117    /// | # | Operands |
43118    /// +---+----------+
43119    /// | 1 | (none)   |
43120    /// +---+----------+
43121    /// ```
43122    #[inline]
43123    pub fn xlatb(&mut self)
43124    where Assembler<'a>: XlatbEmitter {
43125        <Self as XlatbEmitter>::xlatb(self);
43126    }
43127    /// `XOR` (XOR). 
43128    /// Performs a bitwise exclusive OR (XOR) operation on the destination (first) and source (second) operands and stores the result in the destination operand location. The source operand can be an immediate, a register, or a memory location; the destination operand can be a register or a memory location. (However, two memory operands cannot be used in one instruction.) Each bit of the result is 1 if the corresponding bits of the operands are different; each bit is 0 if the corresponding bits are the same.
43129    ///
43130    ///
43131    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/XOR.html).
43132    ///
43133    /// Supported operand variants:
43134    ///
43135    /// ```text
43136    /// +----+--------------+
43137    /// | #  | Operands     |
43138    /// +----+--------------+
43139    /// | 1  | GpbLo, GpbLo |
43140    /// | 2  | GpbLo, Imm   |
43141    /// | 3  | GpbLo, Mem   |
43142    /// | 4  | Gpd, Gpd     |
43143    /// | 5  | Gpd, Imm     |
43144    /// | 6  | Gpd, Mem     |
43145    /// | 7  | Gpq, Gpq     |
43146    /// | 8  | Gpq, Imm     |
43147    /// | 9  | Gpq, Mem     |
43148    /// | 10 | Gpw, Gpw     |
43149    /// | 11 | Gpw, Imm     |
43150    /// | 12 | Gpw, Mem     |
43151    /// | 13 | Mem, GpbLo   |
43152    /// | 14 | Mem, Gpd     |
43153    /// | 15 | Mem, Gpq     |
43154    /// | 16 | Mem, Gpw     |
43155    /// | 17 | Mem, Imm     |
43156    /// +----+--------------+
43157    /// ```
43158    #[inline]
43159    pub fn xor<A, B>(&mut self, op0: A, op1: B)
43160    where Assembler<'a>: XorEmitter<A, B> {
43161        <Self as XorEmitter<A, B>>::xor(self, op0, op1);
43162    }
43163}