asmkit/x86/features/AVX512BW.rs
1use crate::x86::assembler::*;
2use crate::x86::operands::*;
3use super::super::opcodes::*;
4use crate::core::emitter::*;
5use crate::core::operand::*;
6
/// A dummy operand standing in for an absent register slot: `Assembler::emit`
/// always takes four operand arguments, so instructions with fewer operands
/// pass `&NOREG` for the unused positions.
const NOREG: Operand = Operand::new();
9
10/// `KADDD` (KADDD).
11/// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
12///
13///
14/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
15///
16/// Supported operand variants:
17///
18/// ```text
19/// +---+------------------+
20/// | # | Operands |
21/// +---+------------------+
22/// | 1 | KReg, KReg, KReg |
23/// +---+------------------+
24/// ```
25pub trait KadddEmitter<A, B, C> {
26 fn kaddd(&mut self, op0: A, op1: B, op2: C);
27}
28
29impl<'a> KadddEmitter<KReg, KReg, KReg> for Assembler<'a> {
30 fn kaddd(&mut self, op0: KReg, op1: KReg, op2: KReg) {
31 self.emit(KADDDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
32 }
33}
34
35/// `KADDQ` (KADDQ).
36/// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
37///
38///
39/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
40///
41/// Supported operand variants:
42///
43/// ```text
44/// +---+------------------+
45/// | # | Operands |
46/// +---+------------------+
47/// | 1 | KReg, KReg, KReg |
48/// +---+------------------+
49/// ```
50pub trait KaddqEmitter<A, B, C> {
51 fn kaddq(&mut self, op0: A, op1: B, op2: C);
52}
53
54impl<'a> KaddqEmitter<KReg, KReg, KReg> for Assembler<'a> {
55 fn kaddq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
56 self.emit(KADDQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
57 }
58}
59
60/// `KANDD` (KANDD).
61/// Performs a bitwise AND between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
62///
63///
64/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDW%3AKANDB%3AKANDQ%3AKANDD.html).
65///
66/// Supported operand variants:
67///
68/// ```text
69/// +---+------------------+
70/// | # | Operands |
71/// +---+------------------+
72/// | 1 | KReg, KReg, KReg |
73/// +---+------------------+
74/// ```
75pub trait KanddEmitter<A, B, C> {
76 fn kandd(&mut self, op0: A, op1: B, op2: C);
77}
78
79impl<'a> KanddEmitter<KReg, KReg, KReg> for Assembler<'a> {
80 fn kandd(&mut self, op0: KReg, op1: KReg, op2: KReg) {
81 self.emit(KANDDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
82 }
83}
84
85/// `KANDND` (KANDND).
86/// Performs a bitwise AND NOT between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
87///
88///
89/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDNW%3AKANDNB%3AKANDNQ%3AKANDND.html).
90///
91/// Supported operand variants:
92///
93/// ```text
94/// +---+------------------+
95/// | # | Operands |
96/// +---+------------------+
97/// | 1 | KReg, KReg, KReg |
98/// +---+------------------+
99/// ```
100pub trait KandndEmitter<A, B, C> {
101 fn kandnd(&mut self, op0: A, op1: B, op2: C);
102}
103
104impl<'a> KandndEmitter<KReg, KReg, KReg> for Assembler<'a> {
105 fn kandnd(&mut self, op0: KReg, op1: KReg, op2: KReg) {
106 self.emit(KANDNDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
107 }
108}
109
110/// `KANDNQ` (KANDNQ).
111/// Performs a bitwise AND NOT between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
112///
113///
114/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDNW%3AKANDNB%3AKANDNQ%3AKANDND.html).
115///
116/// Supported operand variants:
117///
118/// ```text
119/// +---+------------------+
120/// | # | Operands |
121/// +---+------------------+
122/// | 1 | KReg, KReg, KReg |
123/// +---+------------------+
124/// ```
125pub trait KandnqEmitter<A, B, C> {
126 fn kandnq(&mut self, op0: A, op1: B, op2: C);
127}
128
129impl<'a> KandnqEmitter<KReg, KReg, KReg> for Assembler<'a> {
130 fn kandnq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
131 self.emit(KANDNQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
132 }
133}
134
135/// `KANDQ` (KANDQ).
136/// Performs a bitwise AND between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
137///
138///
139/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDW%3AKANDB%3AKANDQ%3AKANDD.html).
140///
141/// Supported operand variants:
142///
143/// ```text
144/// +---+------------------+
145/// | # | Operands |
146/// +---+------------------+
147/// | 1 | KReg, KReg, KReg |
148/// +---+------------------+
149/// ```
150pub trait KandqEmitter<A, B, C> {
151 fn kandq(&mut self, op0: A, op1: B, op2: C);
152}
153
154impl<'a> KandqEmitter<KReg, KReg, KReg> for Assembler<'a> {
155 fn kandq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
156 self.emit(KANDQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
157 }
158}
159
160/// `KMOVD` (KMOVD).
161/// Copies values from the source operand (second operand) to the destination operand (first operand). The source and destination operands can be mask registers, memory location or general purpose. The instruction cannot be used to transfer data between general purpose registers and or memory locations.
162///
163///
164/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KMOVW%3AKMOVB%3AKMOVQ%3AKMOVD.html).
165///
166/// Supported operand variants:
167///
168/// ```text
169/// +---+------------+
170/// | # | Operands |
171/// +---+------------+
172/// | 1 | Gpd, KReg |
173/// | 2 | KReg, Gpd |
174/// | 3 | KReg, KReg |
175/// | 4 | KReg, Mem |
176/// | 5 | Mem, KReg |
177/// +---+------------+
178/// ```
179pub trait KmovdEmitter<A, B> {
180 fn kmovd(&mut self, op0: A, op1: B);
181}
182
183impl<'a> KmovdEmitter<KReg, KReg> for Assembler<'a> {
184 fn kmovd(&mut self, op0: KReg, op1: KReg) {
185 self.emit(KMOVDKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
186 }
187}
188
189impl<'a> KmovdEmitter<KReg, Mem> for Assembler<'a> {
190 fn kmovd(&mut self, op0: KReg, op1: Mem) {
191 self.emit(KMOVDKM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
192 }
193}
194
195impl<'a> KmovdEmitter<Mem, KReg> for Assembler<'a> {
196 fn kmovd(&mut self, op0: Mem, op1: KReg) {
197 self.emit(KMOVDMK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
198 }
199}
200
201impl<'a> KmovdEmitter<KReg, Gpd> for Assembler<'a> {
202 fn kmovd(&mut self, op0: KReg, op1: Gpd) {
203 self.emit(KMOVDKR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
204 }
205}
206
207impl<'a> KmovdEmitter<Gpd, KReg> for Assembler<'a> {
208 fn kmovd(&mut self, op0: Gpd, op1: KReg) {
209 self.emit(KMOVDRK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
210 }
211}
212
/// `KMOVQ` (KMOVQ).
/// Copies values from the source operand (second operand) to the destination operand (first operand). The source and destination operands can be mask registers, memory location or general purpose. The instruction cannot be used to transfer data between general purpose registers and or memory locations.
///
/// NOTE(review): the general-purpose variants below take `Gpd` (32-bit
/// registers), but `KMOVQ k, r` / `KMOVQ r, k` architecturally requires a
/// 64-bit GPR (VEX.W1 encoding). Presumably the `KMOVQKR`/`KMOVQRK` opcode
/// constants force the correct encoding regardless of the operand type —
/// confirm against the opcode table / code generator.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KMOVW%3AKMOVB%3AKMOVQ%3AKMOVD.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+------------+
/// | # | Operands   |
/// +---+------------+
/// | 1 | Gpd, KReg  |
/// | 2 | KReg, Gpd  |
/// | 3 | KReg, KReg |
/// | 4 | KReg, Mem  |
/// | 5 | Mem, KReg  |
/// +---+------------+
/// ```
pub trait KmovqEmitter<A, B> {
    fn kmovq(&mut self, op0: A, op1: B);
}

impl<'a> KmovqEmitter<KReg, KReg> for Assembler<'a> {
    // KMOVQ k1, k2
    fn kmovq(&mut self, op0: KReg, op1: KReg) {
        self.emit(KMOVQKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> KmovqEmitter<KReg, Mem> for Assembler<'a> {
    // KMOVQ k1, m64
    fn kmovq(&mut self, op0: KReg, op1: Mem) {
        self.emit(KMOVQKM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> KmovqEmitter<Mem, KReg> for Assembler<'a> {
    // KMOVQ m64, k1
    fn kmovq(&mut self, op0: Mem, op1: KReg) {
        self.emit(KMOVQMK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> KmovqEmitter<KReg, Gpd> for Assembler<'a> {
    // KMOVQ k1, r — see NOTE(review) above regarding the Gpd operand width.
    fn kmovq(&mut self, op0: KReg, op1: Gpd) {
        self.emit(KMOVQKR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}

impl<'a> KmovqEmitter<Gpd, KReg> for Assembler<'a> {
    // KMOVQ r, k1 — see NOTE(review) above regarding the Gpd operand width.
    fn kmovq(&mut self, op0: Gpd, op1: KReg) {
        self.emit(KMOVQRK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
    }
}
265
266/// `KNOTD` (KNOTD).
267/// Performs a bitwise NOT of vector mask k2 and writes the result into vector mask k1.
268///
269///
270/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KNOTW%3AKNOTB%3AKNOTQ%3AKNOTD.html).
271///
272/// Supported operand variants:
273///
274/// ```text
275/// +---+------------+
276/// | # | Operands |
277/// +---+------------+
278/// | 1 | KReg, KReg |
279/// +---+------------+
280/// ```
281pub trait KnotdEmitter<A, B> {
282 fn knotd(&mut self, op0: A, op1: B);
283}
284
285impl<'a> KnotdEmitter<KReg, KReg> for Assembler<'a> {
286 fn knotd(&mut self, op0: KReg, op1: KReg) {
287 self.emit(KNOTDKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
288 }
289}
290
291/// `KNOTQ` (KNOTQ).
292/// Performs a bitwise NOT of vector mask k2 and writes the result into vector mask k1.
293///
294///
295/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KNOTW%3AKNOTB%3AKNOTQ%3AKNOTD.html).
296///
297/// Supported operand variants:
298///
299/// ```text
300/// +---+------------+
301/// | # | Operands |
302/// +---+------------+
303/// | 1 | KReg, KReg |
304/// +---+------------+
305/// ```
306pub trait KnotqEmitter<A, B> {
307 fn knotq(&mut self, op0: A, op1: B);
308}
309
310impl<'a> KnotqEmitter<KReg, KReg> for Assembler<'a> {
311 fn knotq(&mut self, op0: KReg, op1: KReg) {
312 self.emit(KNOTQKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
313 }
314}
315
316/// `KORD` (KORD).
317/// Performs a bitwise OR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
318///
319///
320/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORW%3AKORB%3AKORQ%3AKORD.html).
321///
322/// Supported operand variants:
323///
324/// ```text
325/// +---+------------------+
326/// | # | Operands |
327/// +---+------------------+
328/// | 1 | KReg, KReg, KReg |
329/// +---+------------------+
330/// ```
331pub trait KordEmitter<A, B, C> {
332 fn kord(&mut self, op0: A, op1: B, op2: C);
333}
334
335impl<'a> KordEmitter<KReg, KReg, KReg> for Assembler<'a> {
336 fn kord(&mut self, op0: KReg, op1: KReg, op2: KReg) {
337 self.emit(KORDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
338 }
339}
340
341/// `KORQ` (KORQ).
342/// Performs a bitwise OR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
343///
344///
345/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORW%3AKORB%3AKORQ%3AKORD.html).
346///
347/// Supported operand variants:
348///
349/// ```text
350/// +---+------------------+
351/// | # | Operands |
352/// +---+------------------+
353/// | 1 | KReg, KReg, KReg |
354/// +---+------------------+
355/// ```
356pub trait KorqEmitter<A, B, C> {
357 fn korq(&mut self, op0: A, op1: B, op2: C);
358}
359
360impl<'a> KorqEmitter<KReg, KReg, KReg> for Assembler<'a> {
361 fn korq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
362 self.emit(KORQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
363 }
364}
365
366/// `KORTESTD` (KORTESTD).
367/// Performs a bitwise OR between the vector mask register k2, and the vector mask register k1, and sets CF and ZF based on the operation result.
368///
369///
370/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORTESTW%3AKORTESTB%3AKORTESTQ%3AKORTESTD.html).
371///
372/// Supported operand variants:
373///
374/// ```text
375/// +---+------------+
376/// | # | Operands |
377/// +---+------------+
378/// | 1 | KReg, KReg |
379/// +---+------------+
380/// ```
381pub trait KortestdEmitter<A, B> {
382 fn kortestd(&mut self, op0: A, op1: B);
383}
384
385impl<'a> KortestdEmitter<KReg, KReg> for Assembler<'a> {
386 fn kortestd(&mut self, op0: KReg, op1: KReg) {
387 self.emit(KORTESTDKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
388 }
389}
390
391/// `KORTESTQ` (KORTESTQ).
392/// Performs a bitwise OR between the vector mask register k2, and the vector mask register k1, and sets CF and ZF based on the operation result.
393///
394///
395/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORTESTW%3AKORTESTB%3AKORTESTQ%3AKORTESTD.html).
396///
397/// Supported operand variants:
398///
399/// ```text
400/// +---+------------+
401/// | # | Operands |
402/// +---+------------+
403/// | 1 | KReg, KReg |
404/// +---+------------+
405/// ```
406pub trait KortestqEmitter<A, B> {
407 fn kortestq(&mut self, op0: A, op1: B);
408}
409
410impl<'a> KortestqEmitter<KReg, KReg> for Assembler<'a> {
411 fn kortestq(&mut self, op0: KReg, op1: KReg) {
412 self.emit(KORTESTQKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
413 }
414}
415
416/// `KSHIFTLD` (KSHIFTLD).
417/// Shifts 8/16/32/64 bits in the second operand (source operand) left by the count specified in immediate byte and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
418///
419///
420/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTLW%3AKSHIFTLB%3AKSHIFTLQ%3AKSHIFTLD.html).
421///
422/// Supported operand variants:
423///
424/// ```text
425/// +---+-----------------+
426/// | # | Operands |
427/// +---+-----------------+
428/// | 1 | KReg, KReg, Imm |
429/// +---+-----------------+
430/// ```
431pub trait KshiftldEmitter<A, B, C> {
432 fn kshiftld(&mut self, op0: A, op1: B, op2: C);
433}
434
435impl<'a> KshiftldEmitter<KReg, KReg, Imm> for Assembler<'a> {
436 fn kshiftld(&mut self, op0: KReg, op1: KReg, op2: Imm) {
437 self.emit(KSHIFTLDKKI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
438 }
439}
440
441/// `KSHIFTLQ` (KSHIFTLQ).
442/// Shifts 8/16/32/64 bits in the second operand (source operand) left by the count specified in immediate byte and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
443///
444///
445/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTLW%3AKSHIFTLB%3AKSHIFTLQ%3AKSHIFTLD.html).
446///
447/// Supported operand variants:
448///
449/// ```text
450/// +---+-----------------+
451/// | # | Operands |
452/// +---+-----------------+
453/// | 1 | KReg, KReg, Imm |
454/// +---+-----------------+
455/// ```
456pub trait KshiftlqEmitter<A, B, C> {
457 fn kshiftlq(&mut self, op0: A, op1: B, op2: C);
458}
459
460impl<'a> KshiftlqEmitter<KReg, KReg, Imm> for Assembler<'a> {
461 fn kshiftlq(&mut self, op0: KReg, op1: KReg, op2: Imm) {
462 self.emit(KSHIFTLQKKI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
463 }
464}
465
466/// `KSHIFTRD` (KSHIFTRD).
467/// Shifts 8/16/32/64 bits in the second operand (source operand) right by the count specified in immediate and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
468///
469///
470/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTRW%3AKSHIFTRB%3AKSHIFTRQ%3AKSHIFTRD.html).
471///
472/// Supported operand variants:
473///
474/// ```text
475/// +---+-----------------+
476/// | # | Operands |
477/// +---+-----------------+
478/// | 1 | KReg, KReg, Imm |
479/// +---+-----------------+
480/// ```
481pub trait KshiftrdEmitter<A, B, C> {
482 fn kshiftrd(&mut self, op0: A, op1: B, op2: C);
483}
484
485impl<'a> KshiftrdEmitter<KReg, KReg, Imm> for Assembler<'a> {
486 fn kshiftrd(&mut self, op0: KReg, op1: KReg, op2: Imm) {
487 self.emit(KSHIFTRDKKI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
488 }
489}
490
491/// `KSHIFTRQ` (KSHIFTRQ).
492/// Shifts 8/16/32/64 bits in the second operand (source operand) right by the count specified in immediate and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
493///
494///
495/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTRW%3AKSHIFTRB%3AKSHIFTRQ%3AKSHIFTRD.html).
496///
497/// Supported operand variants:
498///
499/// ```text
500/// +---+-----------------+
501/// | # | Operands |
502/// +---+-----------------+
503/// | 1 | KReg, KReg, Imm |
504/// +---+-----------------+
505/// ```
506pub trait KshiftrqEmitter<A, B, C> {
507 fn kshiftrq(&mut self, op0: A, op1: B, op2: C);
508}
509
510impl<'a> KshiftrqEmitter<KReg, KReg, Imm> for Assembler<'a> {
511 fn kshiftrq(&mut self, op0: KReg, op1: KReg, op2: Imm) {
512 self.emit(KSHIFTRQKKI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
513 }
514}
515
516/// `KTESTD` (KTESTD).
517/// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
518///
519///
520/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
521///
522/// Supported operand variants:
523///
524/// ```text
525/// +---+------------+
526/// | # | Operands |
527/// +---+------------+
528/// | 1 | KReg, KReg |
529/// +---+------------+
530/// ```
531pub trait KtestdEmitter<A, B> {
532 fn ktestd(&mut self, op0: A, op1: B);
533}
534
535impl<'a> KtestdEmitter<KReg, KReg> for Assembler<'a> {
536 fn ktestd(&mut self, op0: KReg, op1: KReg) {
537 self.emit(KTESTDKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
538 }
539}
540
541/// `KTESTQ` (KTESTQ).
542/// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
543///
544///
545/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
546///
547/// Supported operand variants:
548///
549/// ```text
550/// +---+------------+
551/// | # | Operands |
552/// +---+------------+
553/// | 1 | KReg, KReg |
554/// +---+------------+
555/// ```
556pub trait KtestqEmitter<A, B> {
557 fn ktestq(&mut self, op0: A, op1: B);
558}
559
560impl<'a> KtestqEmitter<KReg, KReg> for Assembler<'a> {
561 fn ktestq(&mut self, op0: KReg, op1: KReg) {
562 self.emit(KTESTQKK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
563 }
564}
565
566/// `KUNPCKDQ` (KUNPCKDQ).
567/// Unpacks the lower 8/16/32 bits of the second and third operands (source operands) into the low part of the first operand (destination operand), starting from the low bytes. The result is zero-extended in the destination.
568///
569///
570/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KUNPCKBW%3AKUNPCKWD%3AKUNPCKDQ.html).
571///
572/// Supported operand variants:
573///
574/// ```text
575/// +---+------------------+
576/// | # | Operands |
577/// +---+------------------+
578/// | 1 | KReg, KReg, KReg |
579/// +---+------------------+
580/// ```
581pub trait KunpckdqEmitter<A, B, C> {
582 fn kunpckdq(&mut self, op0: A, op1: B, op2: C);
583}
584
585impl<'a> KunpckdqEmitter<KReg, KReg, KReg> for Assembler<'a> {
586 fn kunpckdq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
587 self.emit(KUNPCKDQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
588 }
589}
590
591/// `KUNPCKWD` (KUNPCKWD).
592/// Unpacks the lower 8/16/32 bits of the second and third operands (source operands) into the low part of the first operand (destination operand), starting from the low bytes. The result is zero-extended in the destination.
593///
594///
595/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KUNPCKBW%3AKUNPCKWD%3AKUNPCKDQ.html).
596///
597/// Supported operand variants:
598///
599/// ```text
600/// +---+------------------+
601/// | # | Operands |
602/// +---+------------------+
603/// | 1 | KReg, KReg, KReg |
604/// +---+------------------+
605/// ```
606pub trait KunpckwdEmitter<A, B, C> {
607 fn kunpckwd(&mut self, op0: A, op1: B, op2: C);
608}
609
610impl<'a> KunpckwdEmitter<KReg, KReg, KReg> for Assembler<'a> {
611 fn kunpckwd(&mut self, op0: KReg, op1: KReg, op2: KReg) {
612 self.emit(KUNPCKWDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
613 }
614}
615
616/// `KXNORD` (KXNORD).
617/// Performs a bitwise XNOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
618///
619///
620/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXNORW%3AKXNORB%3AKXNORQ%3AKXNORD.html).
621///
622/// Supported operand variants:
623///
624/// ```text
625/// +---+------------------+
626/// | # | Operands |
627/// +---+------------------+
628/// | 1 | KReg, KReg, KReg |
629/// +---+------------------+
630/// ```
631pub trait KxnordEmitter<A, B, C> {
632 fn kxnord(&mut self, op0: A, op1: B, op2: C);
633}
634
635impl<'a> KxnordEmitter<KReg, KReg, KReg> for Assembler<'a> {
636 fn kxnord(&mut self, op0: KReg, op1: KReg, op2: KReg) {
637 self.emit(KXNORDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
638 }
639}
640
641/// `KXNORQ` (KXNORQ).
642/// Performs a bitwise XNOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
643///
644///
645/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXNORW%3AKXNORB%3AKXNORQ%3AKXNORD.html).
646///
647/// Supported operand variants:
648///
649/// ```text
650/// +---+------------------+
651/// | # | Operands |
652/// +---+------------------+
653/// | 1 | KReg, KReg, KReg |
654/// +---+------------------+
655/// ```
656pub trait KxnorqEmitter<A, B, C> {
657 fn kxnorq(&mut self, op0: A, op1: B, op2: C);
658}
659
660impl<'a> KxnorqEmitter<KReg, KReg, KReg> for Assembler<'a> {
661 fn kxnorq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
662 self.emit(KXNORQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
663 }
664}
665
666/// `KXORD` (KXORD).
667/// Performs a bitwise XOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
668///
669///
670/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXORW%3AKXORB%3AKXORQ%3AKXORD.html).
671///
672/// Supported operand variants:
673///
674/// ```text
675/// +---+------------------+
676/// | # | Operands |
677/// +---+------------------+
678/// | 1 | KReg, KReg, KReg |
679/// +---+------------------+
680/// ```
681pub trait KxordEmitter<A, B, C> {
682 fn kxord(&mut self, op0: A, op1: B, op2: C);
683}
684
685impl<'a> KxordEmitter<KReg, KReg, KReg> for Assembler<'a> {
686 fn kxord(&mut self, op0: KReg, op1: KReg, op2: KReg) {
687 self.emit(KXORDKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
688 }
689}
690
691/// `KXORQ` (KXORQ).
692/// Performs a bitwise XOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
693///
694///
695/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXORW%3AKXORB%3AKXORQ%3AKXORD.html).
696///
697/// Supported operand variants:
698///
699/// ```text
700/// +---+------------------+
701/// | # | Operands |
702/// +---+------------------+
703/// | 1 | KReg, KReg, KReg |
704/// +---+------------------+
705/// ```
706pub trait KxorqEmitter<A, B, C> {
707 fn kxorq(&mut self, op0: A, op1: B, op2: C);
708}
709
710impl<'a> KxorqEmitter<KReg, KReg, KReg> for Assembler<'a> {
711 fn kxorq(&mut self, op0: KReg, op1: KReg, op2: KReg) {
712 self.emit(KXORQKKK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
713 }
714}
715
716/// `VDBPSADBW` (VDBPSADBW).
717/// Compute packed SAD (sum of absolute differences) word results of unsigned bytes from two 32-bit dword elements. Packed SAD word results are calculated in multiples of qword superblocks, producing 4 SAD word results in each 64-bit superblock of the destination register.
718///
719///
720/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VDBPSADBW.html).
721///
722/// Supported operand variants:
723///
724/// ```text
725/// +---+--------------------+
726/// | # | Operands |
727/// +---+--------------------+
728/// | 1 | Xmm, Xmm, Mem, Imm |
729/// | 2 | Xmm, Xmm, Xmm, Imm |
730/// | 3 | Ymm, Ymm, Mem, Imm |
731/// | 4 | Ymm, Ymm, Ymm, Imm |
732/// | 5 | Zmm, Zmm, Mem, Imm |
733/// | 6 | Zmm, Zmm, Zmm, Imm |
734/// +---+--------------------+
735/// ```
736pub trait VdbpsadbwEmitter<A, B, C, D> {
737 fn vdbpsadbw(&mut self, op0: A, op1: B, op2: C, op3: D);
738}
739
740impl<'a> VdbpsadbwEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
741 fn vdbpsadbw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
742 self.emit(VDBPSADBW128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
743 }
744}
745
746impl<'a> VdbpsadbwEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
747 fn vdbpsadbw(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
748 self.emit(VDBPSADBW128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
749 }
750}
751
752impl<'a> VdbpsadbwEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
753 fn vdbpsadbw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
754 self.emit(VDBPSADBW256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
755 }
756}
757
758impl<'a> VdbpsadbwEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
759 fn vdbpsadbw(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
760 self.emit(VDBPSADBW256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
761 }
762}
763
764impl<'a> VdbpsadbwEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
765 fn vdbpsadbw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
766 self.emit(VDBPSADBW512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
767 }
768}
769
770impl<'a> VdbpsadbwEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
771 fn vdbpsadbw(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
772 self.emit(VDBPSADBW512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
773 }
774}
775
776/// `VDBPSADBW_MASK` (VDBPSADBW).
777/// Compute packed SAD (sum of absolute differences) word results of unsigned bytes from two 32-bit dword elements. Packed SAD word results are calculated in multiples of qword superblocks, producing 4 SAD word results in each 64-bit superblock of the destination register.
778///
779///
780/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VDBPSADBW.html).
781///
782/// Supported operand variants:
783///
784/// ```text
785/// +---+--------------------+
786/// | # | Operands |
787/// +---+--------------------+
788/// | 1 | Xmm, Xmm, Mem, Imm |
789/// | 2 | Xmm, Xmm, Xmm, Imm |
790/// | 3 | Ymm, Ymm, Mem, Imm |
791/// | 4 | Ymm, Ymm, Ymm, Imm |
792/// | 5 | Zmm, Zmm, Mem, Imm |
793/// | 6 | Zmm, Zmm, Zmm, Imm |
794/// +---+--------------------+
795/// ```
796pub trait VdbpsadbwMaskEmitter<A, B, C, D> {
797 fn vdbpsadbw_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
798}
799
800impl<'a> VdbpsadbwMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
801 fn vdbpsadbw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
802 self.emit(VDBPSADBW128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
803 }
804}
805
806impl<'a> VdbpsadbwMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
807 fn vdbpsadbw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
808 self.emit(VDBPSADBW128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
809 }
810}
811
812impl<'a> VdbpsadbwMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
813 fn vdbpsadbw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
814 self.emit(VDBPSADBW256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
815 }
816}
817
818impl<'a> VdbpsadbwMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
819 fn vdbpsadbw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
820 self.emit(VDBPSADBW256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
821 }
822}
823
824impl<'a> VdbpsadbwMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
825 fn vdbpsadbw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
826 self.emit(VDBPSADBW512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
827 }
828}
829
830impl<'a> VdbpsadbwMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
831 fn vdbpsadbw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
832 self.emit(VDBPSADBW512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
833 }
834}
835
836/// `VDBPSADBW_MASKZ` (VDBPSADBW).
837/// Compute packed SAD (sum of absolute differences) word results of unsigned bytes from two 32-bit dword elements. Packed SAD word results are calculated in multiples of qword superblocks, producing 4 SAD word results in each 64-bit superblock of the destination register.
838///
839///
840/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VDBPSADBW.html).
841///
842/// Supported operand variants:
843///
844/// ```text
845/// +---+--------------------+
846/// | # | Operands |
847/// +---+--------------------+
848/// | 1 | Xmm, Xmm, Mem, Imm |
849/// | 2 | Xmm, Xmm, Xmm, Imm |
850/// | 3 | Ymm, Ymm, Mem, Imm |
851/// | 4 | Ymm, Ymm, Ymm, Imm |
852/// | 5 | Zmm, Zmm, Mem, Imm |
853/// | 6 | Zmm, Zmm, Zmm, Imm |
854/// +---+--------------------+
855/// ```
856pub trait VdbpsadbwMaskzEmitter<A, B, C, D> {
857 fn vdbpsadbw_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
858}
859
860impl<'a> VdbpsadbwMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
861 fn vdbpsadbw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
862 self.emit(VDBPSADBW128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
863 }
864}
865
866impl<'a> VdbpsadbwMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
867 fn vdbpsadbw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
868 self.emit(VDBPSADBW128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
869 }
870}
871
872impl<'a> VdbpsadbwMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
873 fn vdbpsadbw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
874 self.emit(VDBPSADBW256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
875 }
876}
877
878impl<'a> VdbpsadbwMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
879 fn vdbpsadbw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
880 self.emit(VDBPSADBW256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
881 }
882}
883
884impl<'a> VdbpsadbwMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
885 fn vdbpsadbw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
886 self.emit(VDBPSADBW512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
887 }
888}
889
890impl<'a> VdbpsadbwMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
891 fn vdbpsadbw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
892 self.emit(VDBPSADBW512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
893 }
894}
895
896/// `VMOVDQU16` (VMOVDQU16).
897/// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
898///
899///
900/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
901///
902/// Supported operand variants:
903///
904/// ```text
905/// +---+----------+
906/// | # | Operands |
907/// +---+----------+
908/// | 1 | Mem, Xmm |
909/// | 2 | Mem, Ymm |
910/// | 3 | Mem, Zmm |
911/// | 4 | Xmm, Mem |
912/// | 5 | Xmm, Xmm |
913/// | 6 | Ymm, Mem |
914/// | 7 | Ymm, Ymm |
915/// | 8 | Zmm, Mem |
916/// | 9 | Zmm, Zmm |
917/// +---+----------+
918/// ```
919pub trait Vmovdqu16Emitter<A, B> {
920 fn vmovdqu16(&mut self, op0: A, op1: B);
921}
922
923impl<'a> Vmovdqu16Emitter<Xmm, Xmm> for Assembler<'a> {
924 fn vmovdqu16(&mut self, op0: Xmm, op1: Xmm) {
925 self.emit(VMOVDQU16_128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
926 }
927}
928
929impl<'a> Vmovdqu16Emitter<Xmm, Mem> for Assembler<'a> {
930 fn vmovdqu16(&mut self, op0: Xmm, op1: Mem) {
931 self.emit(VMOVDQU16_128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
932 }
933}
934
935impl<'a> Vmovdqu16Emitter<Ymm, Ymm> for Assembler<'a> {
936 fn vmovdqu16(&mut self, op0: Ymm, op1: Ymm) {
937 self.emit(VMOVDQU16_256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
938 }
939}
940
941impl<'a> Vmovdqu16Emitter<Ymm, Mem> for Assembler<'a> {
942 fn vmovdqu16(&mut self, op0: Ymm, op1: Mem) {
943 self.emit(VMOVDQU16_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
944 }
945}
946
947impl<'a> Vmovdqu16Emitter<Zmm, Zmm> for Assembler<'a> {
948 fn vmovdqu16(&mut self, op0: Zmm, op1: Zmm) {
949 self.emit(VMOVDQU16_512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
950 }
951}
952
953impl<'a> Vmovdqu16Emitter<Zmm, Mem> for Assembler<'a> {
954 fn vmovdqu16(&mut self, op0: Zmm, op1: Mem) {
955 self.emit(VMOVDQU16_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
956 }
957}
958
959impl<'a> Vmovdqu16Emitter<Mem, Xmm> for Assembler<'a> {
960 fn vmovdqu16(&mut self, op0: Mem, op1: Xmm) {
961 self.emit(VMOVDQU16_128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
962 }
963}
964
965impl<'a> Vmovdqu16Emitter<Mem, Ymm> for Assembler<'a> {
966 fn vmovdqu16(&mut self, op0: Mem, op1: Ymm) {
967 self.emit(VMOVDQU16_256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
968 }
969}
970
971impl<'a> Vmovdqu16Emitter<Mem, Zmm> for Assembler<'a> {
972 fn vmovdqu16(&mut self, op0: Mem, op1: Zmm) {
973 self.emit(VMOVDQU16_512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
974 }
975}
976
977/// `VMOVDQU16_MASK` (VMOVDQU16).
978/// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
979///
980///
981/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
982///
983/// Supported operand variants:
984///
985/// ```text
986/// +---+----------+
987/// | # | Operands |
988/// +---+----------+
989/// | 1 | Mem, Xmm |
990/// | 2 | Mem, Ymm |
991/// | 3 | Mem, Zmm |
992/// | 4 | Xmm, Mem |
993/// | 5 | Xmm, Xmm |
994/// | 6 | Ymm, Mem |
995/// | 7 | Ymm, Ymm |
996/// | 8 | Zmm, Mem |
997/// | 9 | Zmm, Zmm |
998/// +---+----------+
999/// ```
1000pub trait Vmovdqu16MaskEmitter<A, B> {
1001 fn vmovdqu16_mask(&mut self, op0: A, op1: B);
1002}
1003
1004impl<'a> Vmovdqu16MaskEmitter<Xmm, Xmm> for Assembler<'a> {
1005 fn vmovdqu16_mask(&mut self, op0: Xmm, op1: Xmm) {
1006 self.emit(VMOVDQU16_128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1007 }
1008}
1009
1010impl<'a> Vmovdqu16MaskEmitter<Xmm, Mem> for Assembler<'a> {
1011 fn vmovdqu16_mask(&mut self, op0: Xmm, op1: Mem) {
1012 self.emit(VMOVDQU16_128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1013 }
1014}
1015
1016impl<'a> Vmovdqu16MaskEmitter<Ymm, Ymm> for Assembler<'a> {
1017 fn vmovdqu16_mask(&mut self, op0: Ymm, op1: Ymm) {
1018 self.emit(VMOVDQU16_256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1019 }
1020}
1021
1022impl<'a> Vmovdqu16MaskEmitter<Ymm, Mem> for Assembler<'a> {
1023 fn vmovdqu16_mask(&mut self, op0: Ymm, op1: Mem) {
1024 self.emit(VMOVDQU16_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1025 }
1026}
1027
1028impl<'a> Vmovdqu16MaskEmitter<Zmm, Zmm> for Assembler<'a> {
1029 fn vmovdqu16_mask(&mut self, op0: Zmm, op1: Zmm) {
1030 self.emit(VMOVDQU16_512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1031 }
1032}
1033
1034impl<'a> Vmovdqu16MaskEmitter<Zmm, Mem> for Assembler<'a> {
1035 fn vmovdqu16_mask(&mut self, op0: Zmm, op1: Mem) {
1036 self.emit(VMOVDQU16_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1037 }
1038}
1039
1040impl<'a> Vmovdqu16MaskEmitter<Mem, Xmm> for Assembler<'a> {
1041 fn vmovdqu16_mask(&mut self, op0: Mem, op1: Xmm) {
1042 self.emit(VMOVDQU16_128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1043 }
1044}
1045
1046impl<'a> Vmovdqu16MaskEmitter<Mem, Ymm> for Assembler<'a> {
1047 fn vmovdqu16_mask(&mut self, op0: Mem, op1: Ymm) {
1048 self.emit(VMOVDQU16_256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1049 }
1050}
1051
1052impl<'a> Vmovdqu16MaskEmitter<Mem, Zmm> for Assembler<'a> {
1053 fn vmovdqu16_mask(&mut self, op0: Mem, op1: Zmm) {
1054 self.emit(VMOVDQU16_512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1055 }
1056}
1057
1058/// `VMOVDQU16_MASKZ` (VMOVDQU16).
1059/// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
1060///
1061///
1062/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
1063///
1064/// Supported operand variants:
1065///
1066/// ```text
1067/// +---+----------+
1068/// | # | Operands |
1069/// +---+----------+
1070/// | 1 | Xmm, Mem |
1071/// | 2 | Xmm, Xmm |
1072/// | 3 | Ymm, Mem |
1073/// | 4 | Ymm, Ymm |
1074/// | 5 | Zmm, Mem |
1075/// | 6 | Zmm, Zmm |
1076/// +---+----------+
1077/// ```
1078pub trait Vmovdqu16MaskzEmitter<A, B> {
1079 fn vmovdqu16_maskz(&mut self, op0: A, op1: B);
1080}
1081
1082impl<'a> Vmovdqu16MaskzEmitter<Xmm, Xmm> for Assembler<'a> {
1083 fn vmovdqu16_maskz(&mut self, op0: Xmm, op1: Xmm) {
1084 self.emit(VMOVDQU16_128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1085 }
1086}
1087
1088impl<'a> Vmovdqu16MaskzEmitter<Xmm, Mem> for Assembler<'a> {
1089 fn vmovdqu16_maskz(&mut self, op0: Xmm, op1: Mem) {
1090 self.emit(VMOVDQU16_128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1091 }
1092}
1093
1094impl<'a> Vmovdqu16MaskzEmitter<Ymm, Ymm> for Assembler<'a> {
1095 fn vmovdqu16_maskz(&mut self, op0: Ymm, op1: Ymm) {
1096 self.emit(VMOVDQU16_256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1097 }
1098}
1099
1100impl<'a> Vmovdqu16MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1101 fn vmovdqu16_maskz(&mut self, op0: Ymm, op1: Mem) {
1102 self.emit(VMOVDQU16_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1103 }
1104}
1105
1106impl<'a> Vmovdqu16MaskzEmitter<Zmm, Zmm> for Assembler<'a> {
1107 fn vmovdqu16_maskz(&mut self, op0: Zmm, op1: Zmm) {
1108 self.emit(VMOVDQU16_512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1109 }
1110}
1111
1112impl<'a> Vmovdqu16MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1113 fn vmovdqu16_maskz(&mut self, op0: Zmm, op1: Mem) {
1114 self.emit(VMOVDQU16_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1115 }
1116}
1117
1118/// `VMOVDQU8` (VMOVDQU8).
1119/// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
1120///
1121///
1122/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
1123///
1124/// Supported operand variants:
1125///
1126/// ```text
1127/// +---+----------+
1128/// | # | Operands |
1129/// +---+----------+
1130/// | 1 | Mem, Xmm |
1131/// | 2 | Mem, Ymm |
1132/// | 3 | Mem, Zmm |
1133/// | 4 | Xmm, Mem |
1134/// | 5 | Xmm, Xmm |
1135/// | 6 | Ymm, Mem |
1136/// | 7 | Ymm, Ymm |
1137/// | 8 | Zmm, Mem |
1138/// | 9 | Zmm, Zmm |
1139/// +---+----------+
1140/// ```
1141pub trait Vmovdqu8Emitter<A, B> {
1142 fn vmovdqu8(&mut self, op0: A, op1: B);
1143}
1144
1145impl<'a> Vmovdqu8Emitter<Xmm, Xmm> for Assembler<'a> {
1146 fn vmovdqu8(&mut self, op0: Xmm, op1: Xmm) {
1147 self.emit(VMOVDQU8_128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1148 }
1149}
1150
1151impl<'a> Vmovdqu8Emitter<Xmm, Mem> for Assembler<'a> {
1152 fn vmovdqu8(&mut self, op0: Xmm, op1: Mem) {
1153 self.emit(VMOVDQU8_128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1154 }
1155}
1156
1157impl<'a> Vmovdqu8Emitter<Ymm, Ymm> for Assembler<'a> {
1158 fn vmovdqu8(&mut self, op0: Ymm, op1: Ymm) {
1159 self.emit(VMOVDQU8_256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1160 }
1161}
1162
1163impl<'a> Vmovdqu8Emitter<Ymm, Mem> for Assembler<'a> {
1164 fn vmovdqu8(&mut self, op0: Ymm, op1: Mem) {
1165 self.emit(VMOVDQU8_256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1166 }
1167}
1168
1169impl<'a> Vmovdqu8Emitter<Zmm, Zmm> for Assembler<'a> {
1170 fn vmovdqu8(&mut self, op0: Zmm, op1: Zmm) {
1171 self.emit(VMOVDQU8_512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1172 }
1173}
1174
1175impl<'a> Vmovdqu8Emitter<Zmm, Mem> for Assembler<'a> {
1176 fn vmovdqu8(&mut self, op0: Zmm, op1: Mem) {
1177 self.emit(VMOVDQU8_512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1178 }
1179}
1180
1181impl<'a> Vmovdqu8Emitter<Mem, Xmm> for Assembler<'a> {
1182 fn vmovdqu8(&mut self, op0: Mem, op1: Xmm) {
1183 self.emit(VMOVDQU8_128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1184 }
1185}
1186
1187impl<'a> Vmovdqu8Emitter<Mem, Ymm> for Assembler<'a> {
1188 fn vmovdqu8(&mut self, op0: Mem, op1: Ymm) {
1189 self.emit(VMOVDQU8_256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1190 }
1191}
1192
1193impl<'a> Vmovdqu8Emitter<Mem, Zmm> for Assembler<'a> {
1194 fn vmovdqu8(&mut self, op0: Mem, op1: Zmm) {
1195 self.emit(VMOVDQU8_512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1196 }
1197}
1198
1199/// `VMOVDQU8_MASK` (VMOVDQU8).
1200/// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
1201///
1202///
1203/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
1204///
1205/// Supported operand variants:
1206///
1207/// ```text
1208/// +---+----------+
1209/// | # | Operands |
1210/// +---+----------+
1211/// | 1 | Mem, Xmm |
1212/// | 2 | Mem, Ymm |
1213/// | 3 | Mem, Zmm |
1214/// | 4 | Xmm, Mem |
1215/// | 5 | Xmm, Xmm |
1216/// | 6 | Ymm, Mem |
1217/// | 7 | Ymm, Ymm |
1218/// | 8 | Zmm, Mem |
1219/// | 9 | Zmm, Zmm |
1220/// +---+----------+
1221/// ```
1222pub trait Vmovdqu8MaskEmitter<A, B> {
1223 fn vmovdqu8_mask(&mut self, op0: A, op1: B);
1224}
1225
1226impl<'a> Vmovdqu8MaskEmitter<Xmm, Xmm> for Assembler<'a> {
1227 fn vmovdqu8_mask(&mut self, op0: Xmm, op1: Xmm) {
1228 self.emit(VMOVDQU8_128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1229 }
1230}
1231
1232impl<'a> Vmovdqu8MaskEmitter<Xmm, Mem> for Assembler<'a> {
1233 fn vmovdqu8_mask(&mut self, op0: Xmm, op1: Mem) {
1234 self.emit(VMOVDQU8_128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1235 }
1236}
1237
1238impl<'a> Vmovdqu8MaskEmitter<Ymm, Ymm> for Assembler<'a> {
1239 fn vmovdqu8_mask(&mut self, op0: Ymm, op1: Ymm) {
1240 self.emit(VMOVDQU8_256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1241 }
1242}
1243
1244impl<'a> Vmovdqu8MaskEmitter<Ymm, Mem> for Assembler<'a> {
1245 fn vmovdqu8_mask(&mut self, op0: Ymm, op1: Mem) {
1246 self.emit(VMOVDQU8_256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1247 }
1248}
1249
1250impl<'a> Vmovdqu8MaskEmitter<Zmm, Zmm> for Assembler<'a> {
1251 fn vmovdqu8_mask(&mut self, op0: Zmm, op1: Zmm) {
1252 self.emit(VMOVDQU8_512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1253 }
1254}
1255
1256impl<'a> Vmovdqu8MaskEmitter<Zmm, Mem> for Assembler<'a> {
1257 fn vmovdqu8_mask(&mut self, op0: Zmm, op1: Mem) {
1258 self.emit(VMOVDQU8_512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1259 }
1260}
1261
1262impl<'a> Vmovdqu8MaskEmitter<Mem, Xmm> for Assembler<'a> {
1263 fn vmovdqu8_mask(&mut self, op0: Mem, op1: Xmm) {
1264 self.emit(VMOVDQU8_128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1265 }
1266}
1267
1268impl<'a> Vmovdqu8MaskEmitter<Mem, Ymm> for Assembler<'a> {
1269 fn vmovdqu8_mask(&mut self, op0: Mem, op1: Ymm) {
1270 self.emit(VMOVDQU8_256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1271 }
1272}
1273
1274impl<'a> Vmovdqu8MaskEmitter<Mem, Zmm> for Assembler<'a> {
1275 fn vmovdqu8_mask(&mut self, op0: Mem, op1: Zmm) {
1276 self.emit(VMOVDQU8_512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1277 }
1278}
1279
1280/// `VMOVDQU8_MASKZ` (VMOVDQU8).
1281/// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
1282///
1283///
1284/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
1285///
1286/// Supported operand variants:
1287///
1288/// ```text
1289/// +---+----------+
1290/// | # | Operands |
1291/// +---+----------+
1292/// | 1 | Xmm, Mem |
1293/// | 2 | Xmm, Xmm |
1294/// | 3 | Ymm, Mem |
1295/// | 4 | Ymm, Ymm |
1296/// | 5 | Zmm, Mem |
1297/// | 6 | Zmm, Zmm |
1298/// +---+----------+
1299/// ```
1300pub trait Vmovdqu8MaskzEmitter<A, B> {
1301 fn vmovdqu8_maskz(&mut self, op0: A, op1: B);
1302}
1303
1304impl<'a> Vmovdqu8MaskzEmitter<Xmm, Xmm> for Assembler<'a> {
1305 fn vmovdqu8_maskz(&mut self, op0: Xmm, op1: Xmm) {
1306 self.emit(VMOVDQU8_128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1307 }
1308}
1309
1310impl<'a> Vmovdqu8MaskzEmitter<Xmm, Mem> for Assembler<'a> {
1311 fn vmovdqu8_maskz(&mut self, op0: Xmm, op1: Mem) {
1312 self.emit(VMOVDQU8_128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1313 }
1314}
1315
1316impl<'a> Vmovdqu8MaskzEmitter<Ymm, Ymm> for Assembler<'a> {
1317 fn vmovdqu8_maskz(&mut self, op0: Ymm, op1: Ymm) {
1318 self.emit(VMOVDQU8_256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1319 }
1320}
1321
1322impl<'a> Vmovdqu8MaskzEmitter<Ymm, Mem> for Assembler<'a> {
1323 fn vmovdqu8_maskz(&mut self, op0: Ymm, op1: Mem) {
1324 self.emit(VMOVDQU8_256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1325 }
1326}
1327
1328impl<'a> Vmovdqu8MaskzEmitter<Zmm, Zmm> for Assembler<'a> {
1329 fn vmovdqu8_maskz(&mut self, op0: Zmm, op1: Zmm) {
1330 self.emit(VMOVDQU8_512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1331 }
1332}
1333
1334impl<'a> Vmovdqu8MaskzEmitter<Zmm, Mem> for Assembler<'a> {
1335 fn vmovdqu8_maskz(&mut self, op0: Zmm, op1: Mem) {
1336 self.emit(VMOVDQU8_512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1337 }
1338}
1339
1340/// `VPABSB` (VPABSB).
1341/// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
1342///
1343///
1344/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
1345///
1346/// Supported operand variants:
1347///
1348/// ```text
1349/// +---+----------+
1350/// | # | Operands |
1351/// +---+----------+
1352/// | 1 | Xmm, Mem |
1353/// | 2 | Xmm, Xmm |
1354/// | 3 | Ymm, Mem |
1355/// | 4 | Ymm, Ymm |
1356/// | 5 | Zmm, Mem |
1357/// | 6 | Zmm, Zmm |
1358/// +---+----------+
1359/// ```
1360pub trait VpabsbEmitter<A, B> {
1361 fn vpabsb(&mut self, op0: A, op1: B);
1362}
1363
1364impl<'a> VpabsbEmitter<Xmm, Xmm> for Assembler<'a> {
1365 fn vpabsb(&mut self, op0: Xmm, op1: Xmm) {
1366 self.emit(VPABSB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1367 }
1368}
1369
1370impl<'a> VpabsbEmitter<Xmm, Mem> for Assembler<'a> {
1371 fn vpabsb(&mut self, op0: Xmm, op1: Mem) {
1372 self.emit(VPABSB128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1373 }
1374}
1375
1376impl<'a> VpabsbEmitter<Ymm, Ymm> for Assembler<'a> {
1377 fn vpabsb(&mut self, op0: Ymm, op1: Ymm) {
1378 self.emit(VPABSB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1379 }
1380}
1381
1382impl<'a> VpabsbEmitter<Ymm, Mem> for Assembler<'a> {
1383 fn vpabsb(&mut self, op0: Ymm, op1: Mem) {
1384 self.emit(VPABSB256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1385 }
1386}
1387
1388impl<'a> VpabsbEmitter<Zmm, Zmm> for Assembler<'a> {
1389 fn vpabsb(&mut self, op0: Zmm, op1: Zmm) {
1390 self.emit(VPABSB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1391 }
1392}
1393
1394impl<'a> VpabsbEmitter<Zmm, Mem> for Assembler<'a> {
1395 fn vpabsb(&mut self, op0: Zmm, op1: Mem) {
1396 self.emit(VPABSB512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1397 }
1398}
1399
1400/// `VPABSB_MASK` (VPABSB).
1401/// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
1402///
1403///
1404/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
1405///
1406/// Supported operand variants:
1407///
1408/// ```text
1409/// +---+----------+
1410/// | # | Operands |
1411/// +---+----------+
1412/// | 1 | Xmm, Mem |
1413/// | 2 | Xmm, Xmm |
1414/// | 3 | Ymm, Mem |
1415/// | 4 | Ymm, Ymm |
1416/// | 5 | Zmm, Mem |
1417/// | 6 | Zmm, Zmm |
1418/// +---+----------+
1419/// ```
1420pub trait VpabsbMaskEmitter<A, B> {
1421 fn vpabsb_mask(&mut self, op0: A, op1: B);
1422}
1423
1424impl<'a> VpabsbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
1425 fn vpabsb_mask(&mut self, op0: Xmm, op1: Xmm) {
1426 self.emit(VPABSB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1427 }
1428}
1429
1430impl<'a> VpabsbMaskEmitter<Xmm, Mem> for Assembler<'a> {
1431 fn vpabsb_mask(&mut self, op0: Xmm, op1: Mem) {
1432 self.emit(VPABSB128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1433 }
1434}
1435
1436impl<'a> VpabsbMaskEmitter<Ymm, Ymm> for Assembler<'a> {
1437 fn vpabsb_mask(&mut self, op0: Ymm, op1: Ymm) {
1438 self.emit(VPABSB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1439 }
1440}
1441
1442impl<'a> VpabsbMaskEmitter<Ymm, Mem> for Assembler<'a> {
1443 fn vpabsb_mask(&mut self, op0: Ymm, op1: Mem) {
1444 self.emit(VPABSB256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1445 }
1446}
1447
1448impl<'a> VpabsbMaskEmitter<Zmm, Zmm> for Assembler<'a> {
1449 fn vpabsb_mask(&mut self, op0: Zmm, op1: Zmm) {
1450 self.emit(VPABSB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1451 }
1452}
1453
1454impl<'a> VpabsbMaskEmitter<Zmm, Mem> for Assembler<'a> {
1455 fn vpabsb_mask(&mut self, op0: Zmm, op1: Mem) {
1456 self.emit(VPABSB512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1457 }
1458}
1459
1460/// `VPABSB_MASKZ` (VPABSB).
1461/// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
1462///
1463///
1464/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
1465///
1466/// Supported operand variants:
1467///
1468/// ```text
1469/// +---+----------+
1470/// | # | Operands |
1471/// +---+----------+
1472/// | 1 | Xmm, Mem |
1473/// | 2 | Xmm, Xmm |
1474/// | 3 | Ymm, Mem |
1475/// | 4 | Ymm, Ymm |
1476/// | 5 | Zmm, Mem |
1477/// | 6 | Zmm, Zmm |
1478/// +---+----------+
1479/// ```
1480pub trait VpabsbMaskzEmitter<A, B> {
1481 fn vpabsb_maskz(&mut self, op0: A, op1: B);
1482}
1483
1484impl<'a> VpabsbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
1485 fn vpabsb_maskz(&mut self, op0: Xmm, op1: Xmm) {
1486 self.emit(VPABSB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1487 }
1488}
1489
1490impl<'a> VpabsbMaskzEmitter<Xmm, Mem> for Assembler<'a> {
1491 fn vpabsb_maskz(&mut self, op0: Xmm, op1: Mem) {
1492 self.emit(VPABSB128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1493 }
1494}
1495
1496impl<'a> VpabsbMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
1497 fn vpabsb_maskz(&mut self, op0: Ymm, op1: Ymm) {
1498 self.emit(VPABSB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1499 }
1500}
1501
1502impl<'a> VpabsbMaskzEmitter<Ymm, Mem> for Assembler<'a> {
1503 fn vpabsb_maskz(&mut self, op0: Ymm, op1: Mem) {
1504 self.emit(VPABSB256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1505 }
1506}
1507
1508impl<'a> VpabsbMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
1509 fn vpabsb_maskz(&mut self, op0: Zmm, op1: Zmm) {
1510 self.emit(VPABSB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1511 }
1512}
1513
1514impl<'a> VpabsbMaskzEmitter<Zmm, Mem> for Assembler<'a> {
1515 fn vpabsb_maskz(&mut self, op0: Zmm, op1: Mem) {
1516 self.emit(VPABSB512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1517 }
1518}
1519
1520/// `VPABSW` (VPABSW).
1521/// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
1522///
1523///
1524/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
1525///
1526/// Supported operand variants:
1527///
1528/// ```text
1529/// +---+----------+
1530/// | # | Operands |
1531/// +---+----------+
1532/// | 1 | Xmm, Mem |
1533/// | 2 | Xmm, Xmm |
1534/// | 3 | Ymm, Mem |
1535/// | 4 | Ymm, Ymm |
1536/// | 5 | Zmm, Mem |
1537/// | 6 | Zmm, Zmm |
1538/// +---+----------+
1539/// ```
1540pub trait VpabswEmitter<A, B> {
1541 fn vpabsw(&mut self, op0: A, op1: B);
1542}
1543
1544impl<'a> VpabswEmitter<Xmm, Xmm> for Assembler<'a> {
1545 fn vpabsw(&mut self, op0: Xmm, op1: Xmm) {
1546 self.emit(VPABSW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1547 }
1548}
1549
1550impl<'a> VpabswEmitter<Xmm, Mem> for Assembler<'a> {
1551 fn vpabsw(&mut self, op0: Xmm, op1: Mem) {
1552 self.emit(VPABSW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1553 }
1554}
1555
1556impl<'a> VpabswEmitter<Ymm, Ymm> for Assembler<'a> {
1557 fn vpabsw(&mut self, op0: Ymm, op1: Ymm) {
1558 self.emit(VPABSW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1559 }
1560}
1561
1562impl<'a> VpabswEmitter<Ymm, Mem> for Assembler<'a> {
1563 fn vpabsw(&mut self, op0: Ymm, op1: Mem) {
1564 self.emit(VPABSW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1565 }
1566}
1567
1568impl<'a> VpabswEmitter<Zmm, Zmm> for Assembler<'a> {
1569 fn vpabsw(&mut self, op0: Zmm, op1: Zmm) {
1570 self.emit(VPABSW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1571 }
1572}
1573
1574impl<'a> VpabswEmitter<Zmm, Mem> for Assembler<'a> {
1575 fn vpabsw(&mut self, op0: Zmm, op1: Mem) {
1576 self.emit(VPABSW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1577 }
1578}
1579
1580/// `VPABSW_MASK` (VPABSW).
1581/// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
1582///
1583///
1584/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
1585///
1586/// Supported operand variants:
1587///
1588/// ```text
1589/// +---+----------+
1590/// | # | Operands |
1591/// +---+----------+
1592/// | 1 | Xmm, Mem |
1593/// | 2 | Xmm, Xmm |
1594/// | 3 | Ymm, Mem |
1595/// | 4 | Ymm, Ymm |
1596/// | 5 | Zmm, Mem |
1597/// | 6 | Zmm, Zmm |
1598/// +---+----------+
1599/// ```
1600pub trait VpabswMaskEmitter<A, B> {
1601 fn vpabsw_mask(&mut self, op0: A, op1: B);
1602}
1603
1604impl<'a> VpabswMaskEmitter<Xmm, Xmm> for Assembler<'a> {
1605 fn vpabsw_mask(&mut self, op0: Xmm, op1: Xmm) {
1606 self.emit(VPABSW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1607 }
1608}
1609
1610impl<'a> VpabswMaskEmitter<Xmm, Mem> for Assembler<'a> {
1611 fn vpabsw_mask(&mut self, op0: Xmm, op1: Mem) {
1612 self.emit(VPABSW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1613 }
1614}
1615
1616impl<'a> VpabswMaskEmitter<Ymm, Ymm> for Assembler<'a> {
1617 fn vpabsw_mask(&mut self, op0: Ymm, op1: Ymm) {
1618 self.emit(VPABSW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1619 }
1620}
1621
1622impl<'a> VpabswMaskEmitter<Ymm, Mem> for Assembler<'a> {
1623 fn vpabsw_mask(&mut self, op0: Ymm, op1: Mem) {
1624 self.emit(VPABSW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1625 }
1626}
1627
1628impl<'a> VpabswMaskEmitter<Zmm, Zmm> for Assembler<'a> {
1629 fn vpabsw_mask(&mut self, op0: Zmm, op1: Zmm) {
1630 self.emit(VPABSW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1631 }
1632}
1633
1634impl<'a> VpabswMaskEmitter<Zmm, Mem> for Assembler<'a> {
1635 fn vpabsw_mask(&mut self, op0: Zmm, op1: Mem) {
1636 self.emit(VPABSW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1637 }
1638}
1639
1640/// `VPABSW_MASKZ` (VPABSW).
1641/// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
1642///
1643///
1644/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
1645///
1646/// Supported operand variants:
1647///
1648/// ```text
1649/// +---+----------+
1650/// | # | Operands |
1651/// +---+----------+
1652/// | 1 | Xmm, Mem |
1653/// | 2 | Xmm, Xmm |
1654/// | 3 | Ymm, Mem |
1655/// | 4 | Ymm, Ymm |
1656/// | 5 | Zmm, Mem |
1657/// | 6 | Zmm, Zmm |
1658/// +---+----------+
1659/// ```
1660pub trait VpabswMaskzEmitter<A, B> {
1661 fn vpabsw_maskz(&mut self, op0: A, op1: B);
1662}
1663
1664impl<'a> VpabswMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
1665 fn vpabsw_maskz(&mut self, op0: Xmm, op1: Xmm) {
1666 self.emit(VPABSW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1667 }
1668}
1669
1670impl<'a> VpabswMaskzEmitter<Xmm, Mem> for Assembler<'a> {
1671 fn vpabsw_maskz(&mut self, op0: Xmm, op1: Mem) {
1672 self.emit(VPABSW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1673 }
1674}
1675
1676impl<'a> VpabswMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
1677 fn vpabsw_maskz(&mut self, op0: Ymm, op1: Ymm) {
1678 self.emit(VPABSW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1679 }
1680}
1681
1682impl<'a> VpabswMaskzEmitter<Ymm, Mem> for Assembler<'a> {
1683 fn vpabsw_maskz(&mut self, op0: Ymm, op1: Mem) {
1684 self.emit(VPABSW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1685 }
1686}
1687
1688impl<'a> VpabswMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
1689 fn vpabsw_maskz(&mut self, op0: Zmm, op1: Zmm) {
1690 self.emit(VPABSW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1691 }
1692}
1693
1694impl<'a> VpabswMaskzEmitter<Zmm, Mem> for Assembler<'a> {
1695 fn vpabsw_maskz(&mut self, op0: Zmm, op1: Mem) {
1696 self.emit(VPABSW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
1697 }
1698}
1699
1700/// `VPACKSSDW` (VPACKSSDW).
1701/// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
1702///
1703///
1704/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
1705///
1706/// Supported operand variants:
1707///
1708/// ```text
1709/// +---+---------------+
1710/// | # | Operands |
1711/// +---+---------------+
1712/// | 1 | Xmm, Xmm, Mem |
1713/// | 2 | Xmm, Xmm, Xmm |
1714/// | 3 | Ymm, Ymm, Mem |
1715/// | 4 | Ymm, Ymm, Ymm |
1716/// | 5 | Zmm, Zmm, Mem |
1717/// | 6 | Zmm, Zmm, Zmm |
1718/// +---+---------------+
1719/// ```
1720pub trait VpackssdwEmitter<A, B, C> {
1721 fn vpackssdw(&mut self, op0: A, op1: B, op2: C);
1722}
1723
1724impl<'a> VpackssdwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1725 fn vpackssdw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1726 self.emit(VPACKSSDW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1727 }
1728}
1729
1730impl<'a> VpackssdwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1731 fn vpackssdw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1732 self.emit(VPACKSSDW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1733 }
1734}
1735
1736impl<'a> VpackssdwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1737 fn vpackssdw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1738 self.emit(VPACKSSDW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1739 }
1740}
1741
1742impl<'a> VpackssdwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1743 fn vpackssdw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1744 self.emit(VPACKSSDW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1745 }
1746}
1747
1748impl<'a> VpackssdwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1749 fn vpackssdw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1750 self.emit(VPACKSSDW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1751 }
1752}
1753
1754impl<'a> VpackssdwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1755 fn vpackssdw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1756 self.emit(VPACKSSDW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1757 }
1758}
1759
1760/// `VPACKSSDW_MASK` (VPACKSSDW).
1761/// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
1762///
1763///
1764/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
1765///
1766/// Supported operand variants:
1767///
1768/// ```text
1769/// +---+---------------+
1770/// | # | Operands |
1771/// +---+---------------+
1772/// | 1 | Xmm, Xmm, Mem |
1773/// | 2 | Xmm, Xmm, Xmm |
1774/// | 3 | Ymm, Ymm, Mem |
1775/// | 4 | Ymm, Ymm, Ymm |
1776/// | 5 | Zmm, Zmm, Mem |
1777/// | 6 | Zmm, Zmm, Zmm |
1778/// +---+---------------+
1779/// ```
1780pub trait VpackssdwMaskEmitter<A, B, C> {
1781 fn vpackssdw_mask(&mut self, op0: A, op1: B, op2: C);
1782}
1783
1784impl<'a> VpackssdwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1785 fn vpackssdw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1786 self.emit(VPACKSSDW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1787 }
1788}
1789
1790impl<'a> VpackssdwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1791 fn vpackssdw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1792 self.emit(VPACKSSDW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1793 }
1794}
1795
1796impl<'a> VpackssdwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1797 fn vpackssdw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1798 self.emit(VPACKSSDW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1799 }
1800}
1801
1802impl<'a> VpackssdwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1803 fn vpackssdw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1804 self.emit(VPACKSSDW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1805 }
1806}
1807
1808impl<'a> VpackssdwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1809 fn vpackssdw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1810 self.emit(VPACKSSDW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1811 }
1812}
1813
1814impl<'a> VpackssdwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1815 fn vpackssdw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1816 self.emit(VPACKSSDW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1817 }
1818}
1819
1820/// `VPACKSSDW_MASKZ` (VPACKSSDW).
1821/// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
1822///
1823///
1824/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
1825///
1826/// Supported operand variants:
1827///
1828/// ```text
1829/// +---+---------------+
1830/// | # | Operands |
1831/// +---+---------------+
1832/// | 1 | Xmm, Xmm, Mem |
1833/// | 2 | Xmm, Xmm, Xmm |
1834/// | 3 | Ymm, Ymm, Mem |
1835/// | 4 | Ymm, Ymm, Ymm |
1836/// | 5 | Zmm, Zmm, Mem |
1837/// | 6 | Zmm, Zmm, Zmm |
1838/// +---+---------------+
1839/// ```
1840pub trait VpackssdwMaskzEmitter<A, B, C> {
1841 fn vpackssdw_maskz(&mut self, op0: A, op1: B, op2: C);
1842}
1843
1844impl<'a> VpackssdwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1845 fn vpackssdw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1846 self.emit(VPACKSSDW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1847 }
1848}
1849
1850impl<'a> VpackssdwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1851 fn vpackssdw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1852 self.emit(VPACKSSDW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1853 }
1854}
1855
1856impl<'a> VpackssdwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1857 fn vpackssdw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1858 self.emit(VPACKSSDW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1859 }
1860}
1861
1862impl<'a> VpackssdwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1863 fn vpackssdw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1864 self.emit(VPACKSSDW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1865 }
1866}
1867
1868impl<'a> VpackssdwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1869 fn vpackssdw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1870 self.emit(VPACKSSDW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1871 }
1872}
1873
1874impl<'a> VpackssdwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1875 fn vpackssdw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1876 self.emit(VPACKSSDW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1877 }
1878}
1879
1880/// `VPACKSSWB` (VPACKSSWB).
1881/// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
1882///
1883///
1884/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
1885///
1886/// Supported operand variants:
1887///
1888/// ```text
1889/// +---+---------------+
1890/// | # | Operands |
1891/// +---+---------------+
1892/// | 1 | Xmm, Xmm, Mem |
1893/// | 2 | Xmm, Xmm, Xmm |
1894/// | 3 | Ymm, Ymm, Mem |
1895/// | 4 | Ymm, Ymm, Ymm |
1896/// | 5 | Zmm, Zmm, Mem |
1897/// | 6 | Zmm, Zmm, Zmm |
1898/// +---+---------------+
1899/// ```
1900pub trait VpacksswbEmitter<A, B, C> {
1901 fn vpacksswb(&mut self, op0: A, op1: B, op2: C);
1902}
1903
1904impl<'a> VpacksswbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1905 fn vpacksswb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1906 self.emit(VPACKSSWB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1907 }
1908}
1909
1910impl<'a> VpacksswbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1911 fn vpacksswb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1912 self.emit(VPACKSSWB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1913 }
1914}
1915
1916impl<'a> VpacksswbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1917 fn vpacksswb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1918 self.emit(VPACKSSWB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1919 }
1920}
1921
1922impl<'a> VpacksswbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1923 fn vpacksswb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1924 self.emit(VPACKSSWB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1925 }
1926}
1927
1928impl<'a> VpacksswbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1929 fn vpacksswb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1930 self.emit(VPACKSSWB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1931 }
1932}
1933
1934impl<'a> VpacksswbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1935 fn vpacksswb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1936 self.emit(VPACKSSWB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1937 }
1938}
1939
1940/// `VPACKSSWB_MASK` (VPACKSSWB).
1941/// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
1942///
1943///
1944/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
1945///
1946/// Supported operand variants:
1947///
1948/// ```text
1949/// +---+---------------+
1950/// | # | Operands |
1951/// +---+---------------+
1952/// | 1 | Xmm, Xmm, Mem |
1953/// | 2 | Xmm, Xmm, Xmm |
1954/// | 3 | Ymm, Ymm, Mem |
1955/// | 4 | Ymm, Ymm, Ymm |
1956/// | 5 | Zmm, Zmm, Mem |
1957/// | 6 | Zmm, Zmm, Zmm |
1958/// +---+---------------+
1959/// ```
1960pub trait VpacksswbMaskEmitter<A, B, C> {
1961 fn vpacksswb_mask(&mut self, op0: A, op1: B, op2: C);
1962}
1963
1964impl<'a> VpacksswbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1965 fn vpacksswb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1966 self.emit(VPACKSSWB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1967 }
1968}
1969
1970impl<'a> VpacksswbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1971 fn vpacksswb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1972 self.emit(VPACKSSWB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1973 }
1974}
1975
1976impl<'a> VpacksswbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1977 fn vpacksswb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1978 self.emit(VPACKSSWB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1979 }
1980}
1981
1982impl<'a> VpacksswbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1983 fn vpacksswb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1984 self.emit(VPACKSSWB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1985 }
1986}
1987
1988impl<'a> VpacksswbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1989 fn vpacksswb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1990 self.emit(VPACKSSWB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1991 }
1992}
1993
1994impl<'a> VpacksswbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1995 fn vpacksswb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1996 self.emit(VPACKSSWB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1997 }
1998}
1999
2000/// `VPACKSSWB_MASKZ` (VPACKSSWB).
2001/// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
2002///
2003///
2004/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
2005///
2006/// Supported operand variants:
2007///
2008/// ```text
2009/// +---+---------------+
2010/// | # | Operands |
2011/// +---+---------------+
2012/// | 1 | Xmm, Xmm, Mem |
2013/// | 2 | Xmm, Xmm, Xmm |
2014/// | 3 | Ymm, Ymm, Mem |
2015/// | 4 | Ymm, Ymm, Ymm |
2016/// | 5 | Zmm, Zmm, Mem |
2017/// | 6 | Zmm, Zmm, Zmm |
2018/// +---+---------------+
2019/// ```
2020pub trait VpacksswbMaskzEmitter<A, B, C> {
2021 fn vpacksswb_maskz(&mut self, op0: A, op1: B, op2: C);
2022}
2023
2024impl<'a> VpacksswbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2025 fn vpacksswb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2026 self.emit(VPACKSSWB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2027 }
2028}
2029
2030impl<'a> VpacksswbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2031 fn vpacksswb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2032 self.emit(VPACKSSWB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2033 }
2034}
2035
2036impl<'a> VpacksswbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2037 fn vpacksswb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2038 self.emit(VPACKSSWB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2039 }
2040}
2041
2042impl<'a> VpacksswbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2043 fn vpacksswb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2044 self.emit(VPACKSSWB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2045 }
2046}
2047
2048impl<'a> VpacksswbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2049 fn vpacksswb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2050 self.emit(VPACKSSWB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2051 }
2052}
2053
2054impl<'a> VpacksswbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2055 fn vpacksswb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2056 self.emit(VPACKSSWB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2057 }
2058}
2059
2060/// `VPACKUSDW` (VPACKUSDW).
2061/// Converts packed signed doubleword integers in the first and second source operands into packed unsigned word integers using unsigned saturation to handle overflow conditions. If the signed doubleword value is beyond the range of an unsigned word (that is, greater than FFFFH or less than 0000H), the saturated unsigned word integer value of FFFFH or 0000H, respectively, is stored in the destination.
2062///
2063///
2064/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSDW.html).
2065///
2066/// Supported operand variants:
2067///
2068/// ```text
2069/// +---+---------------+
2070/// | # | Operands |
2071/// +---+---------------+
2072/// | 1 | Xmm, Xmm, Mem |
2073/// | 2 | Xmm, Xmm, Xmm |
2074/// | 3 | Ymm, Ymm, Mem |
2075/// | 4 | Ymm, Ymm, Ymm |
2076/// | 5 | Zmm, Zmm, Mem |
2077/// | 6 | Zmm, Zmm, Zmm |
2078/// +---+---------------+
2079/// ```
2080pub trait VpackusdwEmitter<A, B, C> {
2081 fn vpackusdw(&mut self, op0: A, op1: B, op2: C);
2082}
2083
2084impl<'a> VpackusdwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2085 fn vpackusdw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2086 self.emit(VPACKUSDW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2087 }
2088}
2089
2090impl<'a> VpackusdwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2091 fn vpackusdw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2092 self.emit(VPACKUSDW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2093 }
2094}
2095
2096impl<'a> VpackusdwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2097 fn vpackusdw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2098 self.emit(VPACKUSDW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2099 }
2100}
2101
2102impl<'a> VpackusdwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2103 fn vpackusdw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2104 self.emit(VPACKUSDW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2105 }
2106}
2107
2108impl<'a> VpackusdwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2109 fn vpackusdw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2110 self.emit(VPACKUSDW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2111 }
2112}
2113
2114impl<'a> VpackusdwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2115 fn vpackusdw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2116 self.emit(VPACKUSDW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2117 }
2118}
2119
2120/// `VPACKUSDW_MASK` (VPACKUSDW).
2121/// Converts packed signed doubleword integers in the first and second source operands into packed unsigned word integers using unsigned saturation to handle overflow conditions. If the signed doubleword value is beyond the range of an unsigned word (that is, greater than FFFFH or less than 0000H), the saturated unsigned word integer value of FFFFH or 0000H, respectively, is stored in the destination.
2122///
2123///
2124/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSDW.html).
2125///
2126/// Supported operand variants:
2127///
2128/// ```text
2129/// +---+---------------+
2130/// | # | Operands |
2131/// +---+---------------+
2132/// | 1 | Xmm, Xmm, Mem |
2133/// | 2 | Xmm, Xmm, Xmm |
2134/// | 3 | Ymm, Ymm, Mem |
2135/// | 4 | Ymm, Ymm, Ymm |
2136/// | 5 | Zmm, Zmm, Mem |
2137/// | 6 | Zmm, Zmm, Zmm |
2138/// +---+---------------+
2139/// ```
2140pub trait VpackusdwMaskEmitter<A, B, C> {
2141 fn vpackusdw_mask(&mut self, op0: A, op1: B, op2: C);
2142}
2143
2144impl<'a> VpackusdwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2145 fn vpackusdw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2146 self.emit(VPACKUSDW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2147 }
2148}
2149
2150impl<'a> VpackusdwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2151 fn vpackusdw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2152 self.emit(VPACKUSDW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2153 }
2154}
2155
2156impl<'a> VpackusdwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2157 fn vpackusdw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2158 self.emit(VPACKUSDW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2159 }
2160}
2161
2162impl<'a> VpackusdwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2163 fn vpackusdw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2164 self.emit(VPACKUSDW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2165 }
2166}
2167
2168impl<'a> VpackusdwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2169 fn vpackusdw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2170 self.emit(VPACKUSDW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2171 }
2172}
2173
2174impl<'a> VpackusdwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2175 fn vpackusdw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2176 self.emit(VPACKUSDW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2177 }
2178}
2179
2180/// `VPACKUSDW_MASKZ` (VPACKUSDW).
2181/// Converts packed signed doubleword integers in the first and second source operands into packed unsigned word integers using unsigned saturation to handle overflow conditions. If the signed doubleword value is beyond the range of an unsigned word (that is, greater than FFFFH or less than 0000H), the saturated unsigned word integer value of FFFFH or 0000H, respectively, is stored in the destination.
2182///
2183///
2184/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSDW.html).
2185///
2186/// Supported operand variants:
2187///
2188/// ```text
2189/// +---+---------------+
2190/// | # | Operands |
2191/// +---+---------------+
2192/// | 1 | Xmm, Xmm, Mem |
2193/// | 2 | Xmm, Xmm, Xmm |
2194/// | 3 | Ymm, Ymm, Mem |
2195/// | 4 | Ymm, Ymm, Ymm |
2196/// | 5 | Zmm, Zmm, Mem |
2197/// | 6 | Zmm, Zmm, Zmm |
2198/// +---+---------------+
2199/// ```
2200pub trait VpackusdwMaskzEmitter<A, B, C> {
2201 fn vpackusdw_maskz(&mut self, op0: A, op1: B, op2: C);
2202}
2203
2204impl<'a> VpackusdwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2205 fn vpackusdw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2206 self.emit(VPACKUSDW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2207 }
2208}
2209
2210impl<'a> VpackusdwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2211 fn vpackusdw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2212 self.emit(VPACKUSDW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2213 }
2214}
2215
2216impl<'a> VpackusdwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2217 fn vpackusdw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2218 self.emit(VPACKUSDW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2219 }
2220}
2221
2222impl<'a> VpackusdwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2223 fn vpackusdw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2224 self.emit(VPACKUSDW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2225 }
2226}
2227
2228impl<'a> VpackusdwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2229 fn vpackusdw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2230 self.emit(VPACKUSDW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2231 }
2232}
2233
2234impl<'a> VpackusdwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2235 fn vpackusdw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2236 self.emit(VPACKUSDW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2237 }
2238}
2239
2240/// `VPACKUSWB` (VPACKUSWB).
2241/// Converts 4, 8, 16, or 32 signed word integers from the destination operand (first operand) and 4, 8, 16, or 32 signed word integers from the source operand (second operand) into 8, 16, 32 or 64 unsigned byte integers and stores the result in the destination operand. (See Figure 4-6 for an example of the packing operation.) If a signed word integer value is beyond the range of an unsigned byte integer (that is, greater than FFH or less than 00H), the saturated unsigned byte integer value of FFH or 00H, respectively, is stored in the destination.
2242///
2243///
2244/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSWB.html).
2245///
2246/// Supported operand variants:
2247///
2248/// ```text
2249/// +---+---------------+
2250/// | # | Operands |
2251/// +---+---------------+
2252/// | 1 | Xmm, Xmm, Mem |
2253/// | 2 | Xmm, Xmm, Xmm |
2254/// | 3 | Ymm, Ymm, Mem |
2255/// | 4 | Ymm, Ymm, Ymm |
2256/// | 5 | Zmm, Zmm, Mem |
2257/// | 6 | Zmm, Zmm, Zmm |
2258/// +---+---------------+
2259/// ```
2260pub trait VpackuswbEmitter<A, B, C> {
2261 fn vpackuswb(&mut self, op0: A, op1: B, op2: C);
2262}
2263
2264impl<'a> VpackuswbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2265 fn vpackuswb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2266 self.emit(VPACKUSWB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2267 }
2268}
2269
2270impl<'a> VpackuswbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2271 fn vpackuswb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2272 self.emit(VPACKUSWB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2273 }
2274}
2275
2276impl<'a> VpackuswbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2277 fn vpackuswb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2278 self.emit(VPACKUSWB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2279 }
2280}
2281
2282impl<'a> VpackuswbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2283 fn vpackuswb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2284 self.emit(VPACKUSWB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2285 }
2286}
2287
2288impl<'a> VpackuswbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2289 fn vpackuswb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2290 self.emit(VPACKUSWB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2291 }
2292}
2293
2294impl<'a> VpackuswbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2295 fn vpackuswb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2296 self.emit(VPACKUSWB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2297 }
2298}
2299
2300/// `VPACKUSWB_MASK` (VPACKUSWB).
2301/// Converts 4, 8, 16, or 32 signed word integers from the destination operand (first operand) and 4, 8, 16, or 32 signed word integers from the source operand (second operand) into 8, 16, 32 or 64 unsigned byte integers and stores the result in the destination operand. (See Figure 4-6 for an example of the packing operation.) If a signed word integer value is beyond the range of an unsigned byte integer (that is, greater than FFH or less than 00H), the saturated unsigned byte integer value of FFH or 00H, respectively, is stored in the destination.
2302///
2303///
2304/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSWB.html).
2305///
2306/// Supported operand variants:
2307///
2308/// ```text
2309/// +---+---------------+
2310/// | # | Operands |
2311/// +---+---------------+
2312/// | 1 | Xmm, Xmm, Mem |
2313/// | 2 | Xmm, Xmm, Xmm |
2314/// | 3 | Ymm, Ymm, Mem |
2315/// | 4 | Ymm, Ymm, Ymm |
2316/// | 5 | Zmm, Zmm, Mem |
2317/// | 6 | Zmm, Zmm, Zmm |
2318/// +---+---------------+
2319/// ```
2320pub trait VpackuswbMaskEmitter<A, B, C> {
2321 fn vpackuswb_mask(&mut self, op0: A, op1: B, op2: C);
2322}
2323
2324impl<'a> VpackuswbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2325 fn vpackuswb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2326 self.emit(VPACKUSWB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2327 }
2328}
2329
2330impl<'a> VpackuswbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2331 fn vpackuswb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2332 self.emit(VPACKUSWB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2333 }
2334}
2335
2336impl<'a> VpackuswbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2337 fn vpackuswb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2338 self.emit(VPACKUSWB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2339 }
2340}
2341
2342impl<'a> VpackuswbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2343 fn vpackuswb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2344 self.emit(VPACKUSWB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2345 }
2346}
2347
2348impl<'a> VpackuswbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2349 fn vpackuswb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2350 self.emit(VPACKUSWB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2351 }
2352}
2353
2354impl<'a> VpackuswbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2355 fn vpackuswb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2356 self.emit(VPACKUSWB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2357 }
2358}
2359
2360/// `VPACKUSWB_MASKZ` (VPACKUSWB).
2361/// Converts 4, 8, 16, or 32 signed word integers from the destination operand (first operand) and 4, 8, 16, or 32 signed word integers from the source operand (second operand) into 8, 16, 32 or 64 unsigned byte integers and stores the result in the destination operand. (See Figure 4-6 for an example of the packing operation.) If a signed word integer value is beyond the range of an unsigned byte integer (that is, greater than FFH or less than 00H), the saturated unsigned byte integer value of FFH or 00H, respectively, is stored in the destination.
2362///
2363///
2364/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSWB.html).
2365///
2366/// Supported operand variants:
2367///
2368/// ```text
2369/// +---+---------------+
2370/// | # | Operands |
2371/// +---+---------------+
2372/// | 1 | Xmm, Xmm, Mem |
2373/// | 2 | Xmm, Xmm, Xmm |
2374/// | 3 | Ymm, Ymm, Mem |
2375/// | 4 | Ymm, Ymm, Ymm |
2376/// | 5 | Zmm, Zmm, Mem |
2377/// | 6 | Zmm, Zmm, Zmm |
2378/// +---+---------------+
2379/// ```
2380pub trait VpackuswbMaskzEmitter<A, B, C> {
2381 fn vpackuswb_maskz(&mut self, op0: A, op1: B, op2: C);
2382}
2383
2384impl<'a> VpackuswbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2385 fn vpackuswb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2386 self.emit(VPACKUSWB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2387 }
2388}
2389
2390impl<'a> VpackuswbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2391 fn vpackuswb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2392 self.emit(VPACKUSWB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2393 }
2394}
2395
2396impl<'a> VpackuswbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2397 fn vpackuswb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2398 self.emit(VPACKUSWB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2399 }
2400}
2401
2402impl<'a> VpackuswbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2403 fn vpackuswb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2404 self.emit(VPACKUSWB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2405 }
2406}
2407
2408impl<'a> VpackuswbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2409 fn vpackuswb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2410 self.emit(VPACKUSWB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2411 }
2412}
2413
2414impl<'a> VpackuswbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2415 fn vpackuswb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2416 self.emit(VPACKUSWB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2417 }
2418}
2419
2420/// `VPADDB` (VPADDB).
2421/// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
2422///
2423///
2424/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
2425///
2426/// Supported operand variants:
2427///
2428/// ```text
2429/// +---+---------------+
2430/// | # | Operands |
2431/// +---+---------------+
2432/// | 1 | Xmm, Xmm, Mem |
2433/// | 2 | Xmm, Xmm, Xmm |
2434/// | 3 | Ymm, Ymm, Mem |
2435/// | 4 | Ymm, Ymm, Ymm |
2436/// | 5 | Zmm, Zmm, Mem |
2437/// | 6 | Zmm, Zmm, Zmm |
2438/// +---+---------------+
2439/// ```
2440pub trait VpaddbEmitter<A, B, C> {
2441 fn vpaddb(&mut self, op0: A, op1: B, op2: C);
2442}
2443
2444impl<'a> VpaddbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2445 fn vpaddb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2446 self.emit(VPADDB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2447 }
2448}
2449
2450impl<'a> VpaddbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2451 fn vpaddb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2452 self.emit(VPADDB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2453 }
2454}
2455
2456impl<'a> VpaddbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2457 fn vpaddb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2458 self.emit(VPADDB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2459 }
2460}
2461
2462impl<'a> VpaddbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2463 fn vpaddb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2464 self.emit(VPADDB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2465 }
2466}
2467
2468impl<'a> VpaddbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2469 fn vpaddb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2470 self.emit(VPADDB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2471 }
2472}
2473
2474impl<'a> VpaddbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2475 fn vpaddb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2476 self.emit(VPADDB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2477 }
2478}
2479
2480/// `VPADDB_MASK` (VPADDB).
2481/// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
2482///
2483///
2484/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
2485///
2486/// Supported operand variants:
2487///
2488/// ```text
2489/// +---+---------------+
2490/// | # | Operands |
2491/// +---+---------------+
2492/// | 1 | Xmm, Xmm, Mem |
2493/// | 2 | Xmm, Xmm, Xmm |
2494/// | 3 | Ymm, Ymm, Mem |
2495/// | 4 | Ymm, Ymm, Ymm |
2496/// | 5 | Zmm, Zmm, Mem |
2497/// | 6 | Zmm, Zmm, Zmm |
2498/// +---+---------------+
2499/// ```
2500pub trait VpaddbMaskEmitter<A, B, C> {
2501 fn vpaddb_mask(&mut self, op0: A, op1: B, op2: C);
2502}
2503
2504impl<'a> VpaddbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2505 fn vpaddb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2506 self.emit(VPADDB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2507 }
2508}
2509
2510impl<'a> VpaddbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2511 fn vpaddb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2512 self.emit(VPADDB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2513 }
2514}
2515
2516impl<'a> VpaddbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2517 fn vpaddb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2518 self.emit(VPADDB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2519 }
2520}
2521
2522impl<'a> VpaddbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2523 fn vpaddb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2524 self.emit(VPADDB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2525 }
2526}
2527
2528impl<'a> VpaddbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2529 fn vpaddb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2530 self.emit(VPADDB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2531 }
2532}
2533
2534impl<'a> VpaddbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2535 fn vpaddb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2536 self.emit(VPADDB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2537 }
2538}
2539
2540/// `VPADDB_MASKZ` (VPADDB).
2541/// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
2542///
2543///
2544/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
2545///
2546/// Supported operand variants:
2547///
2548/// ```text
2549/// +---+---------------+
2550/// | # | Operands |
2551/// +---+---------------+
2552/// | 1 | Xmm, Xmm, Mem |
2553/// | 2 | Xmm, Xmm, Xmm |
2554/// | 3 | Ymm, Ymm, Mem |
2555/// | 4 | Ymm, Ymm, Ymm |
2556/// | 5 | Zmm, Zmm, Mem |
2557/// | 6 | Zmm, Zmm, Zmm |
2558/// +---+---------------+
2559/// ```
2560pub trait VpaddbMaskzEmitter<A, B, C> {
2561 fn vpaddb_maskz(&mut self, op0: A, op1: B, op2: C);
2562}
2563
2564impl<'a> VpaddbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2565 fn vpaddb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2566 self.emit(VPADDB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2567 }
2568}
2569
2570impl<'a> VpaddbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2571 fn vpaddb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2572 self.emit(VPADDB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2573 }
2574}
2575
2576impl<'a> VpaddbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2577 fn vpaddb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2578 self.emit(VPADDB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2579 }
2580}
2581
2582impl<'a> VpaddbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2583 fn vpaddb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2584 self.emit(VPADDB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2585 }
2586}
2587
2588impl<'a> VpaddbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2589 fn vpaddb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2590 self.emit(VPADDB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2591 }
2592}
2593
2594impl<'a> VpaddbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2595 fn vpaddb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2596 self.emit(VPADDB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2597 }
2598}
2599
2600/// `VPADDSB` (VPADDSB).
2601/// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
2602///
2603///
2604/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
2605///
2606/// Supported operand variants:
2607///
2608/// ```text
2609/// +---+---------------+
2610/// | # | Operands |
2611/// +---+---------------+
2612/// | 1 | Xmm, Xmm, Mem |
2613/// | 2 | Xmm, Xmm, Xmm |
2614/// | 3 | Ymm, Ymm, Mem |
2615/// | 4 | Ymm, Ymm, Ymm |
2616/// | 5 | Zmm, Zmm, Mem |
2617/// | 6 | Zmm, Zmm, Zmm |
2618/// +---+---------------+
2619/// ```
2620pub trait VpaddsbEmitter<A, B, C> {
2621 fn vpaddsb(&mut self, op0: A, op1: B, op2: C);
2622}
2623
2624impl<'a> VpaddsbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2625 fn vpaddsb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2626 self.emit(VPADDSB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2627 }
2628}
2629
2630impl<'a> VpaddsbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2631 fn vpaddsb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2632 self.emit(VPADDSB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2633 }
2634}
2635
2636impl<'a> VpaddsbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2637 fn vpaddsb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2638 self.emit(VPADDSB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2639 }
2640}
2641
2642impl<'a> VpaddsbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2643 fn vpaddsb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2644 self.emit(VPADDSB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2645 }
2646}
2647
2648impl<'a> VpaddsbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2649 fn vpaddsb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2650 self.emit(VPADDSB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2651 }
2652}
2653
2654impl<'a> VpaddsbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2655 fn vpaddsb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2656 self.emit(VPADDSB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2657 }
2658}
2659
2660/// `VPADDSB_MASK` (VPADDSB).
2661/// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
2662///
2663///
2664/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
2665///
2666/// Supported operand variants:
2667///
2668/// ```text
2669/// +---+---------------+
2670/// | # | Operands |
2671/// +---+---------------+
2672/// | 1 | Xmm, Xmm, Mem |
2673/// | 2 | Xmm, Xmm, Xmm |
2674/// | 3 | Ymm, Ymm, Mem |
2675/// | 4 | Ymm, Ymm, Ymm |
2676/// | 5 | Zmm, Zmm, Mem |
2677/// | 6 | Zmm, Zmm, Zmm |
2678/// +---+---------------+
2679/// ```
2680pub trait VpaddsbMaskEmitter<A, B, C> {
2681 fn vpaddsb_mask(&mut self, op0: A, op1: B, op2: C);
2682}
2683
2684impl<'a> VpaddsbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2685 fn vpaddsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2686 self.emit(VPADDSB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2687 }
2688}
2689
2690impl<'a> VpaddsbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2691 fn vpaddsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2692 self.emit(VPADDSB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2693 }
2694}
2695
2696impl<'a> VpaddsbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2697 fn vpaddsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2698 self.emit(VPADDSB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2699 }
2700}
2701
2702impl<'a> VpaddsbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2703 fn vpaddsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2704 self.emit(VPADDSB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2705 }
2706}
2707
2708impl<'a> VpaddsbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2709 fn vpaddsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2710 self.emit(VPADDSB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2711 }
2712}
2713
2714impl<'a> VpaddsbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2715 fn vpaddsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2716 self.emit(VPADDSB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2717 }
2718}
2719
2720/// `VPADDSB_MASKZ` (VPADDSB).
2721/// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
2722///
2723///
2724/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
2725///
2726/// Supported operand variants:
2727///
2728/// ```text
2729/// +---+---------------+
2730/// | # | Operands |
2731/// +---+---------------+
2732/// | 1 | Xmm, Xmm, Mem |
2733/// | 2 | Xmm, Xmm, Xmm |
2734/// | 3 | Ymm, Ymm, Mem |
2735/// | 4 | Ymm, Ymm, Ymm |
2736/// | 5 | Zmm, Zmm, Mem |
2737/// | 6 | Zmm, Zmm, Zmm |
2738/// +---+---------------+
2739/// ```
2740pub trait VpaddsbMaskzEmitter<A, B, C> {
2741 fn vpaddsb_maskz(&mut self, op0: A, op1: B, op2: C);
2742}
2743
2744impl<'a> VpaddsbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2745 fn vpaddsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2746 self.emit(VPADDSB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2747 }
2748}
2749
2750impl<'a> VpaddsbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2751 fn vpaddsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2752 self.emit(VPADDSB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2753 }
2754}
2755
2756impl<'a> VpaddsbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2757 fn vpaddsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2758 self.emit(VPADDSB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2759 }
2760}
2761
2762impl<'a> VpaddsbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2763 fn vpaddsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2764 self.emit(VPADDSB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2765 }
2766}
2767
2768impl<'a> VpaddsbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2769 fn vpaddsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2770 self.emit(VPADDSB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2771 }
2772}
2773
2774impl<'a> VpaddsbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2775 fn vpaddsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2776 self.emit(VPADDSB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2777 }
2778}
2779
2780/// `VPADDSW` (VPADDSW).
2781/// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
2782///
2783///
2784/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
2785///
2786/// Supported operand variants:
2787///
2788/// ```text
2789/// +---+---------------+
2790/// | # | Operands |
2791/// +---+---------------+
2792/// | 1 | Xmm, Xmm, Mem |
2793/// | 2 | Xmm, Xmm, Xmm |
2794/// | 3 | Ymm, Ymm, Mem |
2795/// | 4 | Ymm, Ymm, Ymm |
2796/// | 5 | Zmm, Zmm, Mem |
2797/// | 6 | Zmm, Zmm, Zmm |
2798/// +---+---------------+
2799/// ```
2800pub trait VpaddswEmitter<A, B, C> {
2801 fn vpaddsw(&mut self, op0: A, op1: B, op2: C);
2802}
2803
2804impl<'a> VpaddswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2805 fn vpaddsw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2806 self.emit(VPADDSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2807 }
2808}
2809
2810impl<'a> VpaddswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2811 fn vpaddsw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2812 self.emit(VPADDSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2813 }
2814}
2815
2816impl<'a> VpaddswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2817 fn vpaddsw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2818 self.emit(VPADDSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2819 }
2820}
2821
2822impl<'a> VpaddswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2823 fn vpaddsw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2824 self.emit(VPADDSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2825 }
2826}
2827
2828impl<'a> VpaddswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2829 fn vpaddsw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2830 self.emit(VPADDSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2831 }
2832}
2833
2834impl<'a> VpaddswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2835 fn vpaddsw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2836 self.emit(VPADDSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2837 }
2838}
2839
2840/// `VPADDSW_MASK` (VPADDSW).
2841/// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
2842///
2843///
2844/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
2845///
2846/// Supported operand variants:
2847///
2848/// ```text
2849/// +---+---------------+
2850/// | # | Operands |
2851/// +---+---------------+
2852/// | 1 | Xmm, Xmm, Mem |
2853/// | 2 | Xmm, Xmm, Xmm |
2854/// | 3 | Ymm, Ymm, Mem |
2855/// | 4 | Ymm, Ymm, Ymm |
2856/// | 5 | Zmm, Zmm, Mem |
2857/// | 6 | Zmm, Zmm, Zmm |
2858/// +---+---------------+
2859/// ```
2860pub trait VpaddswMaskEmitter<A, B, C> {
2861 fn vpaddsw_mask(&mut self, op0: A, op1: B, op2: C);
2862}
2863
2864impl<'a> VpaddswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2865 fn vpaddsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2866 self.emit(VPADDSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2867 }
2868}
2869
2870impl<'a> VpaddswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2871 fn vpaddsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2872 self.emit(VPADDSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2873 }
2874}
2875
2876impl<'a> VpaddswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2877 fn vpaddsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2878 self.emit(VPADDSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2879 }
2880}
2881
2882impl<'a> VpaddswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2883 fn vpaddsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2884 self.emit(VPADDSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2885 }
2886}
2887
2888impl<'a> VpaddswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2889 fn vpaddsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2890 self.emit(VPADDSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2891 }
2892}
2893
2894impl<'a> VpaddswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2895 fn vpaddsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2896 self.emit(VPADDSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2897 }
2898}
2899
2900/// `VPADDSW_MASKZ` (VPADDSW).
2901/// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
2902///
2903///
2904/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
2905///
2906/// Supported operand variants:
2907///
2908/// ```text
2909/// +---+---------------+
2910/// | # | Operands |
2911/// +---+---------------+
2912/// | 1 | Xmm, Xmm, Mem |
2913/// | 2 | Xmm, Xmm, Xmm |
2914/// | 3 | Ymm, Ymm, Mem |
2915/// | 4 | Ymm, Ymm, Ymm |
2916/// | 5 | Zmm, Zmm, Mem |
2917/// | 6 | Zmm, Zmm, Zmm |
2918/// +---+---------------+
2919/// ```
2920pub trait VpaddswMaskzEmitter<A, B, C> {
2921 fn vpaddsw_maskz(&mut self, op0: A, op1: B, op2: C);
2922}
2923
2924impl<'a> VpaddswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2925 fn vpaddsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2926 self.emit(VPADDSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2927 }
2928}
2929
2930impl<'a> VpaddswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2931 fn vpaddsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2932 self.emit(VPADDSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2933 }
2934}
2935
2936impl<'a> VpaddswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2937 fn vpaddsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2938 self.emit(VPADDSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2939 }
2940}
2941
2942impl<'a> VpaddswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2943 fn vpaddsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2944 self.emit(VPADDSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2945 }
2946}
2947
2948impl<'a> VpaddswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2949 fn vpaddsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2950 self.emit(VPADDSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2951 }
2952}
2953
2954impl<'a> VpaddswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2955 fn vpaddsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2956 self.emit(VPADDSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2957 }
2958}
2959
2960/// `VPADDUSB` (VPADDUSB).
2961/// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
2962///
2963///
2964/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
2965///
2966/// Supported operand variants:
2967///
2968/// ```text
2969/// +---+---------------+
2970/// | # | Operands |
2971/// +---+---------------+
2972/// | 1 | Xmm, Xmm, Mem |
2973/// | 2 | Xmm, Xmm, Xmm |
2974/// | 3 | Ymm, Ymm, Mem |
2975/// | 4 | Ymm, Ymm, Ymm |
2976/// | 5 | Zmm, Zmm, Mem |
2977/// | 6 | Zmm, Zmm, Zmm |
2978/// +---+---------------+
2979/// ```
2980pub trait VpaddusbEmitter<A, B, C> {
2981 fn vpaddusb(&mut self, op0: A, op1: B, op2: C);
2982}
2983
2984impl<'a> VpaddusbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2985 fn vpaddusb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2986 self.emit(VPADDUSB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2987 }
2988}
2989
2990impl<'a> VpaddusbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2991 fn vpaddusb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2992 self.emit(VPADDUSB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2993 }
2994}
2995
2996impl<'a> VpaddusbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2997 fn vpaddusb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2998 self.emit(VPADDUSB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2999 }
3000}
3001
3002impl<'a> VpaddusbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3003 fn vpaddusb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3004 self.emit(VPADDUSB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3005 }
3006}
3007
3008impl<'a> VpaddusbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3009 fn vpaddusb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3010 self.emit(VPADDUSB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3011 }
3012}
3013
3014impl<'a> VpaddusbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3015 fn vpaddusb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3016 self.emit(VPADDUSB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3017 }
3018}
3019
3020/// `VPADDUSB_MASK` (VPADDUSB).
3021/// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
3022///
3023///
3024/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
3025///
3026/// Supported operand variants:
3027///
3028/// ```text
3029/// +---+---------------+
3030/// | # | Operands |
3031/// +---+---------------+
3032/// | 1 | Xmm, Xmm, Mem |
3033/// | 2 | Xmm, Xmm, Xmm |
3034/// | 3 | Ymm, Ymm, Mem |
3035/// | 4 | Ymm, Ymm, Ymm |
3036/// | 5 | Zmm, Zmm, Mem |
3037/// | 6 | Zmm, Zmm, Zmm |
3038/// +---+---------------+
3039/// ```
3040pub trait VpaddusbMaskEmitter<A, B, C> {
3041 fn vpaddusb_mask(&mut self, op0: A, op1: B, op2: C);
3042}
3043
3044impl<'a> VpaddusbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3045 fn vpaddusb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3046 self.emit(VPADDUSB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3047 }
3048}
3049
3050impl<'a> VpaddusbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3051 fn vpaddusb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3052 self.emit(VPADDUSB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3053 }
3054}
3055
3056impl<'a> VpaddusbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3057 fn vpaddusb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3058 self.emit(VPADDUSB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3059 }
3060}
3061
3062impl<'a> VpaddusbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3063 fn vpaddusb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3064 self.emit(VPADDUSB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3065 }
3066}
3067
3068impl<'a> VpaddusbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3069 fn vpaddusb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3070 self.emit(VPADDUSB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3071 }
3072}
3073
3074impl<'a> VpaddusbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3075 fn vpaddusb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3076 self.emit(VPADDUSB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3077 }
3078}
3079
3080/// `VPADDUSB_MASKZ` (VPADDUSB).
3081/// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
3082///
3083///
3084/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
3085///
3086/// Supported operand variants:
3087///
3088/// ```text
3089/// +---+---------------+
3090/// | # | Operands |
3091/// +---+---------------+
3092/// | 1 | Xmm, Xmm, Mem |
3093/// | 2 | Xmm, Xmm, Xmm |
3094/// | 3 | Ymm, Ymm, Mem |
3095/// | 4 | Ymm, Ymm, Ymm |
3096/// | 5 | Zmm, Zmm, Mem |
3097/// | 6 | Zmm, Zmm, Zmm |
3098/// +---+---------------+
3099/// ```
3100pub trait VpaddusbMaskzEmitter<A, B, C> {
3101 fn vpaddusb_maskz(&mut self, op0: A, op1: B, op2: C);
3102}
3103
3104impl<'a> VpaddusbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3105 fn vpaddusb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3106 self.emit(VPADDUSB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3107 }
3108}
3109
3110impl<'a> VpaddusbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3111 fn vpaddusb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3112 self.emit(VPADDUSB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3113 }
3114}
3115
3116impl<'a> VpaddusbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3117 fn vpaddusb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3118 self.emit(VPADDUSB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3119 }
3120}
3121
3122impl<'a> VpaddusbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3123 fn vpaddusb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3124 self.emit(VPADDUSB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3125 }
3126}
3127
3128impl<'a> VpaddusbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3129 fn vpaddusb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3130 self.emit(VPADDUSB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3131 }
3132}
3133
3134impl<'a> VpaddusbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3135 fn vpaddusb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3136 self.emit(VPADDUSB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3137 }
3138}
3139
3140/// `VPADDUSW` (VPADDUSW).
3141/// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
3142///
3143///
3144/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
3145///
3146/// Supported operand variants:
3147///
3148/// ```text
3149/// +---+---------------+
3150/// | # | Operands |
3151/// +---+---------------+
3152/// | 1 | Xmm, Xmm, Mem |
3153/// | 2 | Xmm, Xmm, Xmm |
3154/// | 3 | Ymm, Ymm, Mem |
3155/// | 4 | Ymm, Ymm, Ymm |
3156/// | 5 | Zmm, Zmm, Mem |
3157/// | 6 | Zmm, Zmm, Zmm |
3158/// +---+---------------+
3159/// ```
3160pub trait VpadduswEmitter<A, B, C> {
3161 fn vpaddusw(&mut self, op0: A, op1: B, op2: C);
3162}
3163
3164impl<'a> VpadduswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3165 fn vpaddusw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3166 self.emit(VPADDUSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3167 }
3168}
3169
3170impl<'a> VpadduswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3171 fn vpaddusw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3172 self.emit(VPADDUSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3173 }
3174}
3175
3176impl<'a> VpadduswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3177 fn vpaddusw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3178 self.emit(VPADDUSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3179 }
3180}
3181
3182impl<'a> VpadduswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3183 fn vpaddusw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3184 self.emit(VPADDUSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3185 }
3186}
3187
3188impl<'a> VpadduswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3189 fn vpaddusw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3190 self.emit(VPADDUSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3191 }
3192}
3193
3194impl<'a> VpadduswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3195 fn vpaddusw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3196 self.emit(VPADDUSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3197 }
3198}
3199
3200/// `VPADDUSW_MASK` (VPADDUSW).
3201/// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
3202///
3203///
3204/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
3205///
3206/// Supported operand variants:
3207///
3208/// ```text
3209/// +---+---------------+
3210/// | # | Operands |
3211/// +---+---------------+
3212/// | 1 | Xmm, Xmm, Mem |
3213/// | 2 | Xmm, Xmm, Xmm |
3214/// | 3 | Ymm, Ymm, Mem |
3215/// | 4 | Ymm, Ymm, Ymm |
3216/// | 5 | Zmm, Zmm, Mem |
3217/// | 6 | Zmm, Zmm, Zmm |
3218/// +---+---------------+
3219/// ```
3220pub trait VpadduswMaskEmitter<A, B, C> {
3221 fn vpaddusw_mask(&mut self, op0: A, op1: B, op2: C);
3222}
3223
3224impl<'a> VpadduswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3225 fn vpaddusw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3226 self.emit(VPADDUSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3227 }
3228}
3229
3230impl<'a> VpadduswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3231 fn vpaddusw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3232 self.emit(VPADDUSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3233 }
3234}
3235
3236impl<'a> VpadduswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3237 fn vpaddusw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3238 self.emit(VPADDUSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3239 }
3240}
3241
3242impl<'a> VpadduswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3243 fn vpaddusw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3244 self.emit(VPADDUSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3245 }
3246}
3247
3248impl<'a> VpadduswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3249 fn vpaddusw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3250 self.emit(VPADDUSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3251 }
3252}
3253
3254impl<'a> VpadduswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3255 fn vpaddusw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3256 self.emit(VPADDUSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3257 }
3258}
3259
3260/// `VPADDUSW_MASKZ` (VPADDUSW).
3261/// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
3262///
3263///
3264/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
3265///
3266/// Supported operand variants:
3267///
3268/// ```text
3269/// +---+---------------+
3270/// | # | Operands |
3271/// +---+---------------+
3272/// | 1 | Xmm, Xmm, Mem |
3273/// | 2 | Xmm, Xmm, Xmm |
3274/// | 3 | Ymm, Ymm, Mem |
3275/// | 4 | Ymm, Ymm, Ymm |
3276/// | 5 | Zmm, Zmm, Mem |
3277/// | 6 | Zmm, Zmm, Zmm |
3278/// +---+---------------+
3279/// ```
3280pub trait VpadduswMaskzEmitter<A, B, C> {
3281 fn vpaddusw_maskz(&mut self, op0: A, op1: B, op2: C);
3282}
3283
3284impl<'a> VpadduswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3285 fn vpaddusw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3286 self.emit(VPADDUSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3287 }
3288}
3289
3290impl<'a> VpadduswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3291 fn vpaddusw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3292 self.emit(VPADDUSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3293 }
3294}
3295
3296impl<'a> VpadduswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3297 fn vpaddusw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3298 self.emit(VPADDUSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3299 }
3300}
3301
3302impl<'a> VpadduswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3303 fn vpaddusw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3304 self.emit(VPADDUSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3305 }
3306}
3307
3308impl<'a> VpadduswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3309 fn vpaddusw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3310 self.emit(VPADDUSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3311 }
3312}
3313
3314impl<'a> VpadduswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3315 fn vpaddusw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3316 self.emit(VPADDUSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3317 }
3318}
3319
3320/// `VPADDW` (VPADDW).
3321/// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
3322///
3323///
3324/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
3325///
3326/// Supported operand variants:
3327///
3328/// ```text
3329/// +---+---------------+
3330/// | # | Operands |
3331/// +---+---------------+
3332/// | 1 | Xmm, Xmm, Mem |
3333/// | 2 | Xmm, Xmm, Xmm |
3334/// | 3 | Ymm, Ymm, Mem |
3335/// | 4 | Ymm, Ymm, Ymm |
3336/// | 5 | Zmm, Zmm, Mem |
3337/// | 6 | Zmm, Zmm, Zmm |
3338/// +---+---------------+
3339/// ```
3340pub trait VpaddwEmitter<A, B, C> {
3341 fn vpaddw(&mut self, op0: A, op1: B, op2: C);
3342}
3343
3344impl<'a> VpaddwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3345 fn vpaddw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3346 self.emit(VPADDW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3347 }
3348}
3349
3350impl<'a> VpaddwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3351 fn vpaddw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3352 self.emit(VPADDW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3353 }
3354}
3355
3356impl<'a> VpaddwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3357 fn vpaddw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3358 self.emit(VPADDW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3359 }
3360}
3361
3362impl<'a> VpaddwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3363 fn vpaddw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3364 self.emit(VPADDW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3365 }
3366}
3367
3368impl<'a> VpaddwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3369 fn vpaddw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3370 self.emit(VPADDW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3371 }
3372}
3373
3374impl<'a> VpaddwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3375 fn vpaddw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3376 self.emit(VPADDW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3377 }
3378}
3379
3380/// `VPADDW_MASK` (VPADDW).
3381/// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
3382///
3383///
3384/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
3385///
3386/// Supported operand variants:
3387///
3388/// ```text
3389/// +---+---------------+
3390/// | # | Operands |
3391/// +---+---------------+
3392/// | 1 | Xmm, Xmm, Mem |
3393/// | 2 | Xmm, Xmm, Xmm |
3394/// | 3 | Ymm, Ymm, Mem |
3395/// | 4 | Ymm, Ymm, Ymm |
3396/// | 5 | Zmm, Zmm, Mem |
3397/// | 6 | Zmm, Zmm, Zmm |
3398/// +---+---------------+
3399/// ```
3400pub trait VpaddwMaskEmitter<A, B, C> {
3401 fn vpaddw_mask(&mut self, op0: A, op1: B, op2: C);
3402}
3403
3404impl<'a> VpaddwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3405 fn vpaddw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3406 self.emit(VPADDW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3407 }
3408}
3409
3410impl<'a> VpaddwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3411 fn vpaddw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3412 self.emit(VPADDW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3413 }
3414}
3415
3416impl<'a> VpaddwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3417 fn vpaddw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3418 self.emit(VPADDW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3419 }
3420}
3421
3422impl<'a> VpaddwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3423 fn vpaddw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3424 self.emit(VPADDW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3425 }
3426}
3427
3428impl<'a> VpaddwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3429 fn vpaddw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3430 self.emit(VPADDW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3431 }
3432}
3433
3434impl<'a> VpaddwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3435 fn vpaddw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3436 self.emit(VPADDW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3437 }
3438}
3439
3440/// `VPADDW_MASKZ` (VPADDW).
3441/// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
3442///
3443///
3444/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
3445///
3446/// Supported operand variants:
3447///
3448/// ```text
3449/// +---+---------------+
3450/// | # | Operands |
3451/// +---+---------------+
3452/// | 1 | Xmm, Xmm, Mem |
3453/// | 2 | Xmm, Xmm, Xmm |
3454/// | 3 | Ymm, Ymm, Mem |
3455/// | 4 | Ymm, Ymm, Ymm |
3456/// | 5 | Zmm, Zmm, Mem |
3457/// | 6 | Zmm, Zmm, Zmm |
3458/// +---+---------------+
3459/// ```
3460pub trait VpaddwMaskzEmitter<A, B, C> {
3461 fn vpaddw_maskz(&mut self, op0: A, op1: B, op2: C);
3462}
3463
3464impl<'a> VpaddwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3465 fn vpaddw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3466 self.emit(VPADDW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3467 }
3468}
3469
3470impl<'a> VpaddwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3471 fn vpaddw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3472 self.emit(VPADDW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3473 }
3474}
3475
3476impl<'a> VpaddwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3477 fn vpaddw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3478 self.emit(VPADDW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3479 }
3480}
3481
3482impl<'a> VpaddwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3483 fn vpaddw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3484 self.emit(VPADDW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3485 }
3486}
3487
3488impl<'a> VpaddwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3489 fn vpaddw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3490 self.emit(VPADDW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3491 }
3492}
3493
3494impl<'a> VpaddwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3495 fn vpaddw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3496 self.emit(VPADDW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3497 }
3498}
3499
3500/// `VPALIGNR` (VPALIGNR).
3501/// (V)PALIGNR concatenates the destination operand (the first operand) and the source operand (the second operand) into an intermediate composite, shifts the composite at byte granularity to the right by a constant immediate, and extracts the right-aligned result into the destination. The first and the second operands can be an MMX, XMM or a YMM register. The immediate value is considered unsigned. Immediate shift counts larger than the 2L (i.e., 32 for 128-bit operands, or 16 for 64-bit operands) produce a zero result. Both operands can be MMX registers, XMM registers or YMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
3502///
3503///
3504/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PALIGNR.html).
3505///
3506/// Supported operand variants:
3507///
3508/// ```text
3509/// +---+--------------------+
3510/// | # | Operands |
3511/// +---+--------------------+
3512/// | 1 | Xmm, Xmm, Mem, Imm |
3513/// | 2 | Xmm, Xmm, Xmm, Imm |
3514/// | 3 | Ymm, Ymm, Mem, Imm |
3515/// | 4 | Ymm, Ymm, Ymm, Imm |
3516/// | 5 | Zmm, Zmm, Mem, Imm |
3517/// | 6 | Zmm, Zmm, Zmm, Imm |
3518/// +---+--------------------+
3519/// ```
3520pub trait VpalignrEmitter<A, B, C, D> {
3521 fn vpalignr(&mut self, op0: A, op1: B, op2: C, op3: D);
3522}
3523
3524impl<'a> VpalignrEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
3525 fn vpalignr(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
3526 self.emit(VPALIGNR128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3527 }
3528}
3529
3530impl<'a> VpalignrEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
3531 fn vpalignr(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
3532 self.emit(VPALIGNR128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3533 }
3534}
3535
3536impl<'a> VpalignrEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
3537 fn vpalignr(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
3538 self.emit(VPALIGNR256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3539 }
3540}
3541
3542impl<'a> VpalignrEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
3543 fn vpalignr(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
3544 self.emit(VPALIGNR256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3545 }
3546}
3547
3548impl<'a> VpalignrEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
3549 fn vpalignr(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
3550 self.emit(VPALIGNR512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3551 }
3552}
3553
3554impl<'a> VpalignrEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3555 fn vpalignr(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3556 self.emit(VPALIGNR512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3557 }
3558}
3559
3560/// `VPALIGNR_MASK` (VPALIGNR).
3561/// (V)PALIGNR concatenates the destination operand (the first operand) and the source operand (the second operand) into an intermediate composite, shifts the composite at byte granularity to the right by a constant immediate, and extracts the right-aligned result into the destination. The first and the second operands can be an MMX, XMM or a YMM register. The immediate value is considered unsigned. Immediate shift counts larger than the 2L (i.e., 32 for 128-bit operands, or 16 for 64-bit operands) produce a zero result. Both operands can be MMX registers, XMM registers or YMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
3562///
3563///
3564/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PALIGNR.html).
3565///
3566/// Supported operand variants:
3567///
3568/// ```text
3569/// +---+--------------------+
3570/// | # | Operands |
3571/// +---+--------------------+
3572/// | 1 | Xmm, Xmm, Mem, Imm |
3573/// | 2 | Xmm, Xmm, Xmm, Imm |
3574/// | 3 | Ymm, Ymm, Mem, Imm |
3575/// | 4 | Ymm, Ymm, Ymm, Imm |
3576/// | 5 | Zmm, Zmm, Mem, Imm |
3577/// | 6 | Zmm, Zmm, Zmm, Imm |
3578/// +---+--------------------+
3579/// ```
3580pub trait VpalignrMaskEmitter<A, B, C, D> {
3581 fn vpalignr_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
3582}
3583
3584impl<'a> VpalignrMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
3585 fn vpalignr_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
3586 self.emit(VPALIGNR128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3587 }
3588}
3589
3590impl<'a> VpalignrMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
3591 fn vpalignr_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
3592 self.emit(VPALIGNR128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3593 }
3594}
3595
3596impl<'a> VpalignrMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
3597 fn vpalignr_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
3598 self.emit(VPALIGNR256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3599 }
3600}
3601
3602impl<'a> VpalignrMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
3603 fn vpalignr_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
3604 self.emit(VPALIGNR256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3605 }
3606}
3607
3608impl<'a> VpalignrMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
3609 fn vpalignr_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
3610 self.emit(VPALIGNR512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3611 }
3612}
3613
3614impl<'a> VpalignrMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3615 fn vpalignr_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3616 self.emit(VPALIGNR512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3617 }
3618}
3619
3620/// `VPALIGNR_MASKZ` (VPALIGNR).
3621/// (V)PALIGNR concatenates the destination operand (the first operand) and the source operand (the second operand) into an intermediate composite, shifts the composite at byte granularity to the right by a constant immediate, and extracts the right-aligned result into the destination. The first and the second operands can be an MMX, XMM or a YMM register. The immediate value is considered unsigned. Immediate shift counts larger than the 2L (i.e., 32 for 128-bit operands, or 16 for 64-bit operands) produce a zero result. Both operands can be MMX registers, XMM registers or YMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
3622///
3623///
3624/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PALIGNR.html).
3625///
3626/// Supported operand variants:
3627///
3628/// ```text
3629/// +---+--------------------+
3630/// | # | Operands |
3631/// +---+--------------------+
3632/// | 1 | Xmm, Xmm, Mem, Imm |
3633/// | 2 | Xmm, Xmm, Xmm, Imm |
3634/// | 3 | Ymm, Ymm, Mem, Imm |
3635/// | 4 | Ymm, Ymm, Ymm, Imm |
3636/// | 5 | Zmm, Zmm, Mem, Imm |
3637/// | 6 | Zmm, Zmm, Zmm, Imm |
3638/// +---+--------------------+
3639/// ```
3640pub trait VpalignrMaskzEmitter<A, B, C, D> {
3641 fn vpalignr_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
3642}
3643
3644impl<'a> VpalignrMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
3645 fn vpalignr_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
3646 self.emit(VPALIGNR128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3647 }
3648}
3649
3650impl<'a> VpalignrMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
3651 fn vpalignr_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
3652 self.emit(VPALIGNR128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3653 }
3654}
3655
3656impl<'a> VpalignrMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
3657 fn vpalignr_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
3658 self.emit(VPALIGNR256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3659 }
3660}
3661
3662impl<'a> VpalignrMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
3663 fn vpalignr_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
3664 self.emit(VPALIGNR256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3665 }
3666}
3667
3668impl<'a> VpalignrMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
3669 fn vpalignr_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
3670 self.emit(VPALIGNR512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3671 }
3672}
3673
3674impl<'a> VpalignrMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
3675 fn vpalignr_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
3676 self.emit(VPALIGNR512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
3677 }
3678}
3679
3680/// `VPAVGB` (VPAVGB).
3681/// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
3682///
3683///
3684/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
3685///
3686/// Supported operand variants:
3687///
3688/// ```text
3689/// +---+---------------+
3690/// | # | Operands |
3691/// +---+---------------+
3692/// | 1 | Xmm, Xmm, Mem |
3693/// | 2 | Xmm, Xmm, Xmm |
3694/// | 3 | Ymm, Ymm, Mem |
3695/// | 4 | Ymm, Ymm, Ymm |
3696/// | 5 | Zmm, Zmm, Mem |
3697/// | 6 | Zmm, Zmm, Zmm |
3698/// +---+---------------+
3699/// ```
3700pub trait VpavgbEmitter<A, B, C> {
3701 fn vpavgb(&mut self, op0: A, op1: B, op2: C);
3702}
3703
3704impl<'a> VpavgbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3705 fn vpavgb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3706 self.emit(VPAVGB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3707 }
3708}
3709
3710impl<'a> VpavgbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3711 fn vpavgb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3712 self.emit(VPAVGB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3713 }
3714}
3715
3716impl<'a> VpavgbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3717 fn vpavgb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3718 self.emit(VPAVGB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3719 }
3720}
3721
3722impl<'a> VpavgbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3723 fn vpavgb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3724 self.emit(VPAVGB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3725 }
3726}
3727
3728impl<'a> VpavgbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3729 fn vpavgb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3730 self.emit(VPAVGB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3731 }
3732}
3733
3734impl<'a> VpavgbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3735 fn vpavgb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3736 self.emit(VPAVGB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3737 }
3738}
3739
3740/// `VPAVGB_MASK` (VPAVGB).
3741/// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
3742///
3743///
3744/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
3745///
3746/// Supported operand variants:
3747///
3748/// ```text
3749/// +---+---------------+
3750/// | # | Operands |
3751/// +---+---------------+
3752/// | 1 | Xmm, Xmm, Mem |
3753/// | 2 | Xmm, Xmm, Xmm |
3754/// | 3 | Ymm, Ymm, Mem |
3755/// | 4 | Ymm, Ymm, Ymm |
3756/// | 5 | Zmm, Zmm, Mem |
3757/// | 6 | Zmm, Zmm, Zmm |
3758/// +---+---------------+
3759/// ```
3760pub trait VpavgbMaskEmitter<A, B, C> {
3761 fn vpavgb_mask(&mut self, op0: A, op1: B, op2: C);
3762}
3763
3764impl<'a> VpavgbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3765 fn vpavgb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3766 self.emit(VPAVGB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3767 }
3768}
3769
3770impl<'a> VpavgbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3771 fn vpavgb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3772 self.emit(VPAVGB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3773 }
3774}
3775
3776impl<'a> VpavgbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3777 fn vpavgb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3778 self.emit(VPAVGB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3779 }
3780}
3781
3782impl<'a> VpavgbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3783 fn vpavgb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3784 self.emit(VPAVGB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3785 }
3786}
3787
3788impl<'a> VpavgbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3789 fn vpavgb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3790 self.emit(VPAVGB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3791 }
3792}
3793
3794impl<'a> VpavgbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3795 fn vpavgb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3796 self.emit(VPAVGB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3797 }
3798}
3799
3800/// `VPAVGB_MASKZ` (VPAVGB).
3801/// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
3802///
3803///
3804/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
3805///
3806/// Supported operand variants:
3807///
3808/// ```text
3809/// +---+---------------+
3810/// | # | Operands |
3811/// +---+---------------+
3812/// | 1 | Xmm, Xmm, Mem |
3813/// | 2 | Xmm, Xmm, Xmm |
3814/// | 3 | Ymm, Ymm, Mem |
3815/// | 4 | Ymm, Ymm, Ymm |
3816/// | 5 | Zmm, Zmm, Mem |
3817/// | 6 | Zmm, Zmm, Zmm |
3818/// +---+---------------+
3819/// ```
3820pub trait VpavgbMaskzEmitter<A, B, C> {
3821 fn vpavgb_maskz(&mut self, op0: A, op1: B, op2: C);
3822}
3823
3824impl<'a> VpavgbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3825 fn vpavgb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3826 self.emit(VPAVGB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3827 }
3828}
3829
3830impl<'a> VpavgbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3831 fn vpavgb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3832 self.emit(VPAVGB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3833 }
3834}
3835
3836impl<'a> VpavgbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3837 fn vpavgb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3838 self.emit(VPAVGB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3839 }
3840}
3841
3842impl<'a> VpavgbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3843 fn vpavgb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3844 self.emit(VPAVGB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3845 }
3846}
3847
3848impl<'a> VpavgbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3849 fn vpavgb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3850 self.emit(VPAVGB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3851 }
3852}
3853
3854impl<'a> VpavgbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3855 fn vpavgb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3856 self.emit(VPAVGB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3857 }
3858}
3859
3860/// `VPAVGW` (VPAVGW).
3861/// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
3862///
3863///
3864/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
3865///
3866/// Supported operand variants:
3867///
3868/// ```text
3869/// +---+---------------+
3870/// | # | Operands |
3871/// +---+---------------+
3872/// | 1 | Xmm, Xmm, Mem |
3873/// | 2 | Xmm, Xmm, Xmm |
3874/// | 3 | Ymm, Ymm, Mem |
3875/// | 4 | Ymm, Ymm, Ymm |
3876/// | 5 | Zmm, Zmm, Mem |
3877/// | 6 | Zmm, Zmm, Zmm |
3878/// +---+---------------+
3879/// ```
3880pub trait VpavgwEmitter<A, B, C> {
3881 fn vpavgw(&mut self, op0: A, op1: B, op2: C);
3882}
3883
3884impl<'a> VpavgwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3885 fn vpavgw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3886 self.emit(VPAVGW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3887 }
3888}
3889
3890impl<'a> VpavgwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3891 fn vpavgw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3892 self.emit(VPAVGW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3893 }
3894}
3895
3896impl<'a> VpavgwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3897 fn vpavgw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3898 self.emit(VPAVGW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3899 }
3900}
3901
3902impl<'a> VpavgwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3903 fn vpavgw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3904 self.emit(VPAVGW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3905 }
3906}
3907
3908impl<'a> VpavgwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3909 fn vpavgw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3910 self.emit(VPAVGW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3911 }
3912}
3913
3914impl<'a> VpavgwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3915 fn vpavgw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3916 self.emit(VPAVGW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3917 }
3918}
3919
3920/// `VPAVGW_MASK` (VPAVGW).
3921/// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
3922///
3923///
3924/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
3925///
3926/// Supported operand variants:
3927///
3928/// ```text
3929/// +---+---------------+
3930/// | # | Operands |
3931/// +---+---------------+
3932/// | 1 | Xmm, Xmm, Mem |
3933/// | 2 | Xmm, Xmm, Xmm |
3934/// | 3 | Ymm, Ymm, Mem |
3935/// | 4 | Ymm, Ymm, Ymm |
3936/// | 5 | Zmm, Zmm, Mem |
3937/// | 6 | Zmm, Zmm, Zmm |
3938/// +---+---------------+
3939/// ```
3940pub trait VpavgwMaskEmitter<A, B, C> {
3941 fn vpavgw_mask(&mut self, op0: A, op1: B, op2: C);
3942}
3943
3944impl<'a> VpavgwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
3945 fn vpavgw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
3946 self.emit(VPAVGW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3947 }
3948}
3949
3950impl<'a> VpavgwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
3951 fn vpavgw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
3952 self.emit(VPAVGW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3953 }
3954}
3955
3956impl<'a> VpavgwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
3957 fn vpavgw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
3958 self.emit(VPAVGW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3959 }
3960}
3961
3962impl<'a> VpavgwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
3963 fn vpavgw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
3964 self.emit(VPAVGW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3965 }
3966}
3967
3968impl<'a> VpavgwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
3969 fn vpavgw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
3970 self.emit(VPAVGW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3971 }
3972}
3973
3974impl<'a> VpavgwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
3975 fn vpavgw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
3976 self.emit(VPAVGW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
3977 }
3978}
3979
3980/// `VPAVGW_MASKZ` (VPAVGW).
3981/// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
3982///
3983///
3984/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
3985///
3986/// Supported operand variants:
3987///
3988/// ```text
3989/// +---+---------------+
3990/// | # | Operands |
3991/// +---+---------------+
3992/// | 1 | Xmm, Xmm, Mem |
3993/// | 2 | Xmm, Xmm, Xmm |
3994/// | 3 | Ymm, Ymm, Mem |
3995/// | 4 | Ymm, Ymm, Ymm |
3996/// | 5 | Zmm, Zmm, Mem |
3997/// | 6 | Zmm, Zmm, Zmm |
3998/// +---+---------------+
3999/// ```
4000pub trait VpavgwMaskzEmitter<A, B, C> {
4001 fn vpavgw_maskz(&mut self, op0: A, op1: B, op2: C);
4002}
4003
4004impl<'a> VpavgwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4005 fn vpavgw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4006 self.emit(VPAVGW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4007 }
4008}
4009
4010impl<'a> VpavgwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4011 fn vpavgw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4012 self.emit(VPAVGW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4013 }
4014}
4015
4016impl<'a> VpavgwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4017 fn vpavgw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4018 self.emit(VPAVGW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4019 }
4020}
4021
4022impl<'a> VpavgwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4023 fn vpavgw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4024 self.emit(VPAVGW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4025 }
4026}
4027
4028impl<'a> VpavgwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4029 fn vpavgw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4030 self.emit(VPAVGW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4031 }
4032}
4033
4034impl<'a> VpavgwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4035 fn vpavgw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4036 self.emit(VPAVGW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4037 }
4038}
4039
4040/// `VPBLENDMB` (VPBLENDMB).
4041/// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
4042///
4043///
4044/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
4045///
4046/// Supported operand variants:
4047///
4048/// ```text
4049/// +---+---------------+
4050/// | # | Operands |
4051/// +---+---------------+
4052/// | 1 | Xmm, Xmm, Mem |
4053/// | 2 | Xmm, Xmm, Xmm |
4054/// | 3 | Ymm, Ymm, Mem |
4055/// | 4 | Ymm, Ymm, Ymm |
4056/// | 5 | Zmm, Zmm, Mem |
4057/// | 6 | Zmm, Zmm, Zmm |
4058/// +---+---------------+
4059/// ```
4060pub trait VpblendmbEmitter<A, B, C> {
4061 fn vpblendmb(&mut self, op0: A, op1: B, op2: C);
4062}
4063
4064impl<'a> VpblendmbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4065 fn vpblendmb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4066 self.emit(VPBLENDMB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4067 }
4068}
4069
4070impl<'a> VpblendmbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4071 fn vpblendmb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4072 self.emit(VPBLENDMB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4073 }
4074}
4075
4076impl<'a> VpblendmbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4077 fn vpblendmb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4078 self.emit(VPBLENDMB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4079 }
4080}
4081
4082impl<'a> VpblendmbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4083 fn vpblendmb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4084 self.emit(VPBLENDMB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4085 }
4086}
4087
4088impl<'a> VpblendmbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4089 fn vpblendmb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4090 self.emit(VPBLENDMB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4091 }
4092}
4093
4094impl<'a> VpblendmbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4095 fn vpblendmb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4096 self.emit(VPBLENDMB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4097 }
4098}
4099
4100/// `VPBLENDMB_MASK` (VPBLENDMB).
4101/// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
4102///
4103///
4104/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
4105///
4106/// Supported operand variants:
4107///
4108/// ```text
4109/// +---+---------------+
4110/// | # | Operands |
4111/// +---+---------------+
4112/// | 1 | Xmm, Xmm, Mem |
4113/// | 2 | Xmm, Xmm, Xmm |
4114/// | 3 | Ymm, Ymm, Mem |
4115/// | 4 | Ymm, Ymm, Ymm |
4116/// | 5 | Zmm, Zmm, Mem |
4117/// | 6 | Zmm, Zmm, Zmm |
4118/// +---+---------------+
4119/// ```
4120pub trait VpblendmbMaskEmitter<A, B, C> {
4121 fn vpblendmb_mask(&mut self, op0: A, op1: B, op2: C);
4122}
4123
4124impl<'a> VpblendmbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4125 fn vpblendmb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4126 self.emit(VPBLENDMB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4127 }
4128}
4129
4130impl<'a> VpblendmbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4131 fn vpblendmb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4132 self.emit(VPBLENDMB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4133 }
4134}
4135
4136impl<'a> VpblendmbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4137 fn vpblendmb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4138 self.emit(VPBLENDMB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4139 }
4140}
4141
4142impl<'a> VpblendmbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4143 fn vpblendmb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4144 self.emit(VPBLENDMB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4145 }
4146}
4147
4148impl<'a> VpblendmbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4149 fn vpblendmb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4150 self.emit(VPBLENDMB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4151 }
4152}
4153
4154impl<'a> VpblendmbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4155 fn vpblendmb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4156 self.emit(VPBLENDMB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4157 }
4158}
4159
4160/// `VPBLENDMB_MASKZ` (VPBLENDMB).
4161/// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
4162///
4163///
4164/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
4165///
4166/// Supported operand variants:
4167///
4168/// ```text
4169/// +---+---------------+
4170/// | # | Operands |
4171/// +---+---------------+
4172/// | 1 | Xmm, Xmm, Mem |
4173/// | 2 | Xmm, Xmm, Xmm |
4174/// | 3 | Ymm, Ymm, Mem |
4175/// | 4 | Ymm, Ymm, Ymm |
4176/// | 5 | Zmm, Zmm, Mem |
4177/// | 6 | Zmm, Zmm, Zmm |
4178/// +---+---------------+
4179/// ```
4180pub trait VpblendmbMaskzEmitter<A, B, C> {
4181 fn vpblendmb_maskz(&mut self, op0: A, op1: B, op2: C);
4182}
4183
4184impl<'a> VpblendmbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4185 fn vpblendmb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4186 self.emit(VPBLENDMB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4187 }
4188}
4189
4190impl<'a> VpblendmbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4191 fn vpblendmb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4192 self.emit(VPBLENDMB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4193 }
4194}
4195
4196impl<'a> VpblendmbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4197 fn vpblendmb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4198 self.emit(VPBLENDMB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4199 }
4200}
4201
4202impl<'a> VpblendmbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4203 fn vpblendmb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4204 self.emit(VPBLENDMB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4205 }
4206}
4207
4208impl<'a> VpblendmbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4209 fn vpblendmb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4210 self.emit(VPBLENDMB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4211 }
4212}
4213
4214impl<'a> VpblendmbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4215 fn vpblendmb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4216 self.emit(VPBLENDMB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4217 }
4218}
4219
4220/// `VPBLENDMW` (VPBLENDMW).
4221/// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
4222///
4223///
4224/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
4225///
4226/// Supported operand variants:
4227///
4228/// ```text
4229/// +---+---------------+
4230/// | # | Operands |
4231/// +---+---------------+
4232/// | 1 | Xmm, Xmm, Mem |
4233/// | 2 | Xmm, Xmm, Xmm |
4234/// | 3 | Ymm, Ymm, Mem |
4235/// | 4 | Ymm, Ymm, Ymm |
4236/// | 5 | Zmm, Zmm, Mem |
4237/// | 6 | Zmm, Zmm, Zmm |
4238/// +---+---------------+
4239/// ```
4240pub trait VpblendmwEmitter<A, B, C> {
4241 fn vpblendmw(&mut self, op0: A, op1: B, op2: C);
4242}
4243
4244impl<'a> VpblendmwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4245 fn vpblendmw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4246 self.emit(VPBLENDMW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4247 }
4248}
4249
4250impl<'a> VpblendmwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4251 fn vpblendmw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4252 self.emit(VPBLENDMW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4253 }
4254}
4255
4256impl<'a> VpblendmwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4257 fn vpblendmw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4258 self.emit(VPBLENDMW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4259 }
4260}
4261
4262impl<'a> VpblendmwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4263 fn vpblendmw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4264 self.emit(VPBLENDMW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4265 }
4266}
4267
4268impl<'a> VpblendmwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4269 fn vpblendmw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4270 self.emit(VPBLENDMW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4271 }
4272}
4273
4274impl<'a> VpblendmwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4275 fn vpblendmw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4276 self.emit(VPBLENDMW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4277 }
4278}
4279
4280/// `VPBLENDMW_MASK` (VPBLENDMW).
4281/// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
4282///
4283///
4284/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
4285///
4286/// Supported operand variants:
4287///
4288/// ```text
4289/// +---+---------------+
4290/// | # | Operands |
4291/// +---+---------------+
4292/// | 1 | Xmm, Xmm, Mem |
4293/// | 2 | Xmm, Xmm, Xmm |
4294/// | 3 | Ymm, Ymm, Mem |
4295/// | 4 | Ymm, Ymm, Ymm |
4296/// | 5 | Zmm, Zmm, Mem |
4297/// | 6 | Zmm, Zmm, Zmm |
4298/// +---+---------------+
4299/// ```
4300pub trait VpblendmwMaskEmitter<A, B, C> {
4301 fn vpblendmw_mask(&mut self, op0: A, op1: B, op2: C);
4302}
4303
4304impl<'a> VpblendmwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4305 fn vpblendmw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4306 self.emit(VPBLENDMW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4307 }
4308}
4309
4310impl<'a> VpblendmwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4311 fn vpblendmw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4312 self.emit(VPBLENDMW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4313 }
4314}
4315
4316impl<'a> VpblendmwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4317 fn vpblendmw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4318 self.emit(VPBLENDMW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4319 }
4320}
4321
4322impl<'a> VpblendmwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4323 fn vpblendmw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4324 self.emit(VPBLENDMW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4325 }
4326}
4327
4328impl<'a> VpblendmwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4329 fn vpblendmw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4330 self.emit(VPBLENDMW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4331 }
4332}
4333
4334impl<'a> VpblendmwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4335 fn vpblendmw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4336 self.emit(VPBLENDMW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4337 }
4338}
4339
4340/// `VPBLENDMW_MASKZ` (VPBLENDMW).
4341/// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
4342///
4343///
4344/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
4345///
4346/// Supported operand variants:
4347///
4348/// ```text
4349/// +---+---------------+
4350/// | # | Operands |
4351/// +---+---------------+
4352/// | 1 | Xmm, Xmm, Mem |
4353/// | 2 | Xmm, Xmm, Xmm |
4354/// | 3 | Ymm, Ymm, Mem |
4355/// | 4 | Ymm, Ymm, Ymm |
4356/// | 5 | Zmm, Zmm, Mem |
4357/// | 6 | Zmm, Zmm, Zmm |
4358/// +---+---------------+
4359/// ```
4360pub trait VpblendmwMaskzEmitter<A, B, C> {
4361 fn vpblendmw_maskz(&mut self, op0: A, op1: B, op2: C);
4362}
4363
4364impl<'a> VpblendmwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
4365 fn vpblendmw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
4366 self.emit(VPBLENDMW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4367 }
4368}
4369
4370impl<'a> VpblendmwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
4371 fn vpblendmw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
4372 self.emit(VPBLENDMW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4373 }
4374}
4375
4376impl<'a> VpblendmwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
4377 fn vpblendmw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
4378 self.emit(VPBLENDMW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4379 }
4380}
4381
4382impl<'a> VpblendmwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
4383 fn vpblendmw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
4384 self.emit(VPBLENDMW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4385 }
4386}
4387
4388impl<'a> VpblendmwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
4389 fn vpblendmw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
4390 self.emit(VPBLENDMW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4391 }
4392}
4393
4394impl<'a> VpblendmwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
4395 fn vpblendmw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
4396 self.emit(VPBLENDMW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
4397 }
4398}
4399
4400/// `VPBROADCASTB` (VPBROADCASTB).
4401/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
4402///
4403///
4404/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
4405///
4406/// Supported operand variants:
4407///
4408/// ```text
4409/// +---+----------+
4410/// | # | Operands |
4411/// +---+----------+
4412/// | 1 | Xmm, Mem |
4413/// | 2 | Xmm, Xmm |
4414/// | 3 | Ymm, Mem |
4415/// | 4 | Ymm, Xmm |
4416/// | 5 | Zmm, Mem |
4417/// | 6 | Zmm, Xmm |
4418/// +---+----------+
4419/// ```
4420pub trait VpbroadcastbEmitter<A, B> {
4421 fn vpbroadcastb(&mut self, op0: A, op1: B);
4422}
4423
4424impl<'a> VpbroadcastbEmitter<Xmm, Xmm> for Assembler<'a> {
4425 fn vpbroadcastb(&mut self, op0: Xmm, op1: Xmm) {
4426 self.emit(VPBROADCASTB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4427 }
4428}
4429
4430impl<'a> VpbroadcastbEmitter<Xmm, Mem> for Assembler<'a> {
4431 fn vpbroadcastb(&mut self, op0: Xmm, op1: Mem) {
4432 self.emit(VPBROADCASTB128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4433 }
4434}
4435
4436impl<'a> VpbroadcastbEmitter<Ymm, Xmm> for Assembler<'a> {
4437 fn vpbroadcastb(&mut self, op0: Ymm, op1: Xmm) {
4438 self.emit(VPBROADCASTB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4439 }
4440}
4441
4442impl<'a> VpbroadcastbEmitter<Ymm, Mem> for Assembler<'a> {
4443 fn vpbroadcastb(&mut self, op0: Ymm, op1: Mem) {
4444 self.emit(VPBROADCASTB256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4445 }
4446}
4447
4448impl<'a> VpbroadcastbEmitter<Zmm, Xmm> for Assembler<'a> {
4449 fn vpbroadcastb(&mut self, op0: Zmm, op1: Xmm) {
4450 self.emit(VPBROADCASTB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4451 }
4452}
4453
4454impl<'a> VpbroadcastbEmitter<Zmm, Mem> for Assembler<'a> {
4455 fn vpbroadcastb(&mut self, op0: Zmm, op1: Mem) {
4456 self.emit(VPBROADCASTB512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4457 }
4458}
4459
4460/// `VPBROADCASTB_GP`.
4461///
4462/// Supported operand variants:
4463///
4464/// ```text
4465/// +---+----------+
4466/// | # | Operands |
4467/// +---+----------+
4468/// | 1 | Xmm, Gpd |
4469/// | 2 | Ymm, Gpd |
4470/// | 3 | Zmm, Gpd |
4471/// +---+----------+
4472/// ```
4473pub trait VpbroadcastbGpEmitter<A, B> {
4474 fn vpbroadcastb_gp(&mut self, op0: A, op1: B);
4475}
4476
4477impl<'a> VpbroadcastbGpEmitter<Xmm, Gpd> for Assembler<'a> {
4478 fn vpbroadcastb_gp(&mut self, op0: Xmm, op1: Gpd) {
4479 self.emit(VPBROADCASTB_GP128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4480 }
4481}
4482
4483impl<'a> VpbroadcastbGpEmitter<Ymm, Gpd> for Assembler<'a> {
4484 fn vpbroadcastb_gp(&mut self, op0: Ymm, op1: Gpd) {
4485 self.emit(VPBROADCASTB_GP256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4486 }
4487}
4488
4489impl<'a> VpbroadcastbGpEmitter<Zmm, Gpd> for Assembler<'a> {
4490 fn vpbroadcastb_gp(&mut self, op0: Zmm, op1: Gpd) {
4491 self.emit(VPBROADCASTB_GP512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4492 }
4493}
4494
4495/// `VPBROADCASTB_GP_MASK`.
4496///
4497/// Supported operand variants:
4498///
4499/// ```text
4500/// +---+----------+
4501/// | # | Operands |
4502/// +---+----------+
4503/// | 1 | Xmm, Gpd |
4504/// | 2 | Ymm, Gpd |
4505/// | 3 | Zmm, Gpd |
4506/// +---+----------+
4507/// ```
4508pub trait VpbroadcastbGpMaskEmitter<A, B> {
4509 fn vpbroadcastb_gp_mask(&mut self, op0: A, op1: B);
4510}
4511
4512impl<'a> VpbroadcastbGpMaskEmitter<Xmm, Gpd> for Assembler<'a> {
4513 fn vpbroadcastb_gp_mask(&mut self, op0: Xmm, op1: Gpd) {
4514 self.emit(VPBROADCASTB_GP128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4515 }
4516}
4517
4518impl<'a> VpbroadcastbGpMaskEmitter<Ymm, Gpd> for Assembler<'a> {
4519 fn vpbroadcastb_gp_mask(&mut self, op0: Ymm, op1: Gpd) {
4520 self.emit(VPBROADCASTB_GP256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4521 }
4522}
4523
4524impl<'a> VpbroadcastbGpMaskEmitter<Zmm, Gpd> for Assembler<'a> {
4525 fn vpbroadcastb_gp_mask(&mut self, op0: Zmm, op1: Gpd) {
4526 self.emit(VPBROADCASTB_GP512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4527 }
4528}
4529
4530/// `VPBROADCASTB_GP_MASKZ`.
4531///
4532/// Supported operand variants:
4533///
4534/// ```text
4535/// +---+----------+
4536/// | # | Operands |
4537/// +---+----------+
4538/// | 1 | Xmm, Gpd |
4539/// | 2 | Ymm, Gpd |
4540/// | 3 | Zmm, Gpd |
4541/// +---+----------+
4542/// ```
4543pub trait VpbroadcastbGpMaskzEmitter<A, B> {
4544 fn vpbroadcastb_gp_maskz(&mut self, op0: A, op1: B);
4545}
4546
4547impl<'a> VpbroadcastbGpMaskzEmitter<Xmm, Gpd> for Assembler<'a> {
4548 fn vpbroadcastb_gp_maskz(&mut self, op0: Xmm, op1: Gpd) {
4549 self.emit(VPBROADCASTB_GP128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4550 }
4551}
4552
4553impl<'a> VpbroadcastbGpMaskzEmitter<Ymm, Gpd> for Assembler<'a> {
4554 fn vpbroadcastb_gp_maskz(&mut self, op0: Ymm, op1: Gpd) {
4555 self.emit(VPBROADCASTB_GP256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4556 }
4557}
4558
4559impl<'a> VpbroadcastbGpMaskzEmitter<Zmm, Gpd> for Assembler<'a> {
4560 fn vpbroadcastb_gp_maskz(&mut self, op0: Zmm, op1: Gpd) {
4561 self.emit(VPBROADCASTB_GP512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4562 }
4563}
4564
4565/// `VPBROADCASTB_MASK` (VPBROADCASTB).
4566/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
4567///
4568///
4569/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
4570///
4571/// Supported operand variants:
4572///
4573/// ```text
4574/// +---+----------+
4575/// | # | Operands |
4576/// +---+----------+
4577/// | 1 | Xmm, Mem |
4578/// | 2 | Xmm, Xmm |
4579/// | 3 | Ymm, Mem |
4580/// | 4 | Ymm, Xmm |
4581/// | 5 | Zmm, Mem |
4582/// | 6 | Zmm, Xmm |
4583/// +---+----------+
4584/// ```
4585pub trait VpbroadcastbMaskEmitter<A, B> {
4586 fn vpbroadcastb_mask(&mut self, op0: A, op1: B);
4587}
4588
4589impl<'a> VpbroadcastbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
4590 fn vpbroadcastb_mask(&mut self, op0: Xmm, op1: Xmm) {
4591 self.emit(VPBROADCASTB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4592 }
4593}
4594
4595impl<'a> VpbroadcastbMaskEmitter<Xmm, Mem> for Assembler<'a> {
4596 fn vpbroadcastb_mask(&mut self, op0: Xmm, op1: Mem) {
4597 self.emit(VPBROADCASTB128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4598 }
4599}
4600
4601impl<'a> VpbroadcastbMaskEmitter<Ymm, Xmm> for Assembler<'a> {
4602 fn vpbroadcastb_mask(&mut self, op0: Ymm, op1: Xmm) {
4603 self.emit(VPBROADCASTB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4604 }
4605}
4606
4607impl<'a> VpbroadcastbMaskEmitter<Ymm, Mem> for Assembler<'a> {
4608 fn vpbroadcastb_mask(&mut self, op0: Ymm, op1: Mem) {
4609 self.emit(VPBROADCASTB256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4610 }
4611}
4612
4613impl<'a> VpbroadcastbMaskEmitter<Zmm, Xmm> for Assembler<'a> {
4614 fn vpbroadcastb_mask(&mut self, op0: Zmm, op1: Xmm) {
4615 self.emit(VPBROADCASTB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4616 }
4617}
4618
4619impl<'a> VpbroadcastbMaskEmitter<Zmm, Mem> for Assembler<'a> {
4620 fn vpbroadcastb_mask(&mut self, op0: Zmm, op1: Mem) {
4621 self.emit(VPBROADCASTB512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4622 }
4623}
4624
4625/// `VPBROADCASTB_MASKZ` (VPBROADCASTB).
4626/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
4627///
4628///
4629/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
4630///
4631/// Supported operand variants:
4632///
4633/// ```text
4634/// +---+----------+
4635/// | # | Operands |
4636/// +---+----------+
4637/// | 1 | Xmm, Mem |
4638/// | 2 | Xmm, Xmm |
4639/// | 3 | Ymm, Mem |
4640/// | 4 | Ymm, Xmm |
4641/// | 5 | Zmm, Mem |
4642/// | 6 | Zmm, Xmm |
4643/// +---+----------+
4644/// ```
4645pub trait VpbroadcastbMaskzEmitter<A, B> {
4646 fn vpbroadcastb_maskz(&mut self, op0: A, op1: B);
4647}
4648
4649impl<'a> VpbroadcastbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
4650 fn vpbroadcastb_maskz(&mut self, op0: Xmm, op1: Xmm) {
4651 self.emit(VPBROADCASTB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4652 }
4653}
4654
4655impl<'a> VpbroadcastbMaskzEmitter<Xmm, Mem> for Assembler<'a> {
4656 fn vpbroadcastb_maskz(&mut self, op0: Xmm, op1: Mem) {
4657 self.emit(VPBROADCASTB128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4658 }
4659}
4660
4661impl<'a> VpbroadcastbMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
4662 fn vpbroadcastb_maskz(&mut self, op0: Ymm, op1: Xmm) {
4663 self.emit(VPBROADCASTB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4664 }
4665}
4666
4667impl<'a> VpbroadcastbMaskzEmitter<Ymm, Mem> for Assembler<'a> {
4668 fn vpbroadcastb_maskz(&mut self, op0: Ymm, op1: Mem) {
4669 self.emit(VPBROADCASTB256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4670 }
4671}
4672
4673impl<'a> VpbroadcastbMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
4674 fn vpbroadcastb_maskz(&mut self, op0: Zmm, op1: Xmm) {
4675 self.emit(VPBROADCASTB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4676 }
4677}
4678
4679impl<'a> VpbroadcastbMaskzEmitter<Zmm, Mem> for Assembler<'a> {
4680 fn vpbroadcastb_maskz(&mut self, op0: Zmm, op1: Mem) {
4681 self.emit(VPBROADCASTB512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4682 }
4683}
4684
4685/// `VPBROADCASTW` (VPBROADCASTW).
4686/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
4687///
4688///
4689/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
4690///
4691/// Supported operand variants:
4692///
4693/// ```text
4694/// +---+----------+
4695/// | # | Operands |
4696/// +---+----------+
4697/// | 1 | Xmm, Mem |
4698/// | 2 | Xmm, Xmm |
4699/// | 3 | Ymm, Mem |
4700/// | 4 | Ymm, Xmm |
4701/// | 5 | Zmm, Mem |
4702/// | 6 | Zmm, Xmm |
4703/// +---+----------+
4704/// ```
4705pub trait VpbroadcastwEmitter<A, B> {
4706 fn vpbroadcastw(&mut self, op0: A, op1: B);
4707}
4708
4709impl<'a> VpbroadcastwEmitter<Xmm, Xmm> for Assembler<'a> {
4710 fn vpbroadcastw(&mut self, op0: Xmm, op1: Xmm) {
4711 self.emit(VPBROADCASTW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4712 }
4713}
4714
4715impl<'a> VpbroadcastwEmitter<Xmm, Mem> for Assembler<'a> {
4716 fn vpbroadcastw(&mut self, op0: Xmm, op1: Mem) {
4717 self.emit(VPBROADCASTW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4718 }
4719}
4720
4721impl<'a> VpbroadcastwEmitter<Ymm, Xmm> for Assembler<'a> {
4722 fn vpbroadcastw(&mut self, op0: Ymm, op1: Xmm) {
4723 self.emit(VPBROADCASTW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4724 }
4725}
4726
4727impl<'a> VpbroadcastwEmitter<Ymm, Mem> for Assembler<'a> {
4728 fn vpbroadcastw(&mut self, op0: Ymm, op1: Mem) {
4729 self.emit(VPBROADCASTW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4730 }
4731}
4732
4733impl<'a> VpbroadcastwEmitter<Zmm, Xmm> for Assembler<'a> {
4734 fn vpbroadcastw(&mut self, op0: Zmm, op1: Xmm) {
4735 self.emit(VPBROADCASTW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4736 }
4737}
4738
4739impl<'a> VpbroadcastwEmitter<Zmm, Mem> for Assembler<'a> {
4740 fn vpbroadcastw(&mut self, op0: Zmm, op1: Mem) {
4741 self.emit(VPBROADCASTW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4742 }
4743}
4744
4745/// `VPBROADCASTW_GP`.
4746///
4747/// Supported operand variants:
4748///
4749/// ```text
4750/// +---+----------+
4751/// | # | Operands |
4752/// +---+----------+
4753/// | 1 | Xmm, Gpd |
4754/// | 2 | Ymm, Gpd |
4755/// | 3 | Zmm, Gpd |
4756/// +---+----------+
4757/// ```
4758pub trait VpbroadcastwGpEmitter<A, B> {
4759 fn vpbroadcastw_gp(&mut self, op0: A, op1: B);
4760}
4761
4762impl<'a> VpbroadcastwGpEmitter<Xmm, Gpd> for Assembler<'a> {
4763 fn vpbroadcastw_gp(&mut self, op0: Xmm, op1: Gpd) {
4764 self.emit(VPBROADCASTW_GP128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4765 }
4766}
4767
4768impl<'a> VpbroadcastwGpEmitter<Ymm, Gpd> for Assembler<'a> {
4769 fn vpbroadcastw_gp(&mut self, op0: Ymm, op1: Gpd) {
4770 self.emit(VPBROADCASTW_GP256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4771 }
4772}
4773
4774impl<'a> VpbroadcastwGpEmitter<Zmm, Gpd> for Assembler<'a> {
4775 fn vpbroadcastw_gp(&mut self, op0: Zmm, op1: Gpd) {
4776 self.emit(VPBROADCASTW_GP512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4777 }
4778}
4779
4780/// `VPBROADCASTW_GP_MASK`.
4781///
4782/// Supported operand variants:
4783///
4784/// ```text
4785/// +---+----------+
4786/// | # | Operands |
4787/// +---+----------+
4788/// | 1 | Xmm, Gpd |
4789/// | 2 | Ymm, Gpd |
4790/// | 3 | Zmm, Gpd |
4791/// +---+----------+
4792/// ```
4793pub trait VpbroadcastwGpMaskEmitter<A, B> {
4794 fn vpbroadcastw_gp_mask(&mut self, op0: A, op1: B);
4795}
4796
4797impl<'a> VpbroadcastwGpMaskEmitter<Xmm, Gpd> for Assembler<'a> {
4798 fn vpbroadcastw_gp_mask(&mut self, op0: Xmm, op1: Gpd) {
4799 self.emit(VPBROADCASTW_GP128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4800 }
4801}
4802
4803impl<'a> VpbroadcastwGpMaskEmitter<Ymm, Gpd> for Assembler<'a> {
4804 fn vpbroadcastw_gp_mask(&mut self, op0: Ymm, op1: Gpd) {
4805 self.emit(VPBROADCASTW_GP256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4806 }
4807}
4808
4809impl<'a> VpbroadcastwGpMaskEmitter<Zmm, Gpd> for Assembler<'a> {
4810 fn vpbroadcastw_gp_mask(&mut self, op0: Zmm, op1: Gpd) {
4811 self.emit(VPBROADCASTW_GP512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4812 }
4813}
4814
4815/// `VPBROADCASTW_GP_MASKZ`.
4816///
4817/// Supported operand variants:
4818///
4819/// ```text
4820/// +---+----------+
4821/// | # | Operands |
4822/// +---+----------+
4823/// | 1 | Xmm, Gpd |
4824/// | 2 | Ymm, Gpd |
4825/// | 3 | Zmm, Gpd |
4826/// +---+----------+
4827/// ```
4828pub trait VpbroadcastwGpMaskzEmitter<A, B> {
4829 fn vpbroadcastw_gp_maskz(&mut self, op0: A, op1: B);
4830}
4831
4832impl<'a> VpbroadcastwGpMaskzEmitter<Xmm, Gpd> for Assembler<'a> {
4833 fn vpbroadcastw_gp_maskz(&mut self, op0: Xmm, op1: Gpd) {
4834 self.emit(VPBROADCASTW_GP128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4835 }
4836}
4837
4838impl<'a> VpbroadcastwGpMaskzEmitter<Ymm, Gpd> for Assembler<'a> {
4839 fn vpbroadcastw_gp_maskz(&mut self, op0: Ymm, op1: Gpd) {
4840 self.emit(VPBROADCASTW_GP256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4841 }
4842}
4843
4844impl<'a> VpbroadcastwGpMaskzEmitter<Zmm, Gpd> for Assembler<'a> {
4845 fn vpbroadcastw_gp_maskz(&mut self, op0: Zmm, op1: Gpd) {
4846 self.emit(VPBROADCASTW_GP512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4847 }
4848}
4849
4850/// `VPBROADCASTW_MASK` (VPBROADCASTW).
4851/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
4852///
4853///
4854/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
4855///
4856/// Supported operand variants:
4857///
4858/// ```text
4859/// +---+----------+
4860/// | # | Operands |
4861/// +---+----------+
4862/// | 1 | Xmm, Mem |
4863/// | 2 | Xmm, Xmm |
4864/// | 3 | Ymm, Mem |
4865/// | 4 | Ymm, Xmm |
4866/// | 5 | Zmm, Mem |
4867/// | 6 | Zmm, Xmm |
4868/// +---+----------+
4869/// ```
4870pub trait VpbroadcastwMaskEmitter<A, B> {
4871 fn vpbroadcastw_mask(&mut self, op0: A, op1: B);
4872}
4873
4874impl<'a> VpbroadcastwMaskEmitter<Xmm, Xmm> for Assembler<'a> {
4875 fn vpbroadcastw_mask(&mut self, op0: Xmm, op1: Xmm) {
4876 self.emit(VPBROADCASTW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4877 }
4878}
4879
4880impl<'a> VpbroadcastwMaskEmitter<Xmm, Mem> for Assembler<'a> {
4881 fn vpbroadcastw_mask(&mut self, op0: Xmm, op1: Mem) {
4882 self.emit(VPBROADCASTW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4883 }
4884}
4885
4886impl<'a> VpbroadcastwMaskEmitter<Ymm, Xmm> for Assembler<'a> {
4887 fn vpbroadcastw_mask(&mut self, op0: Ymm, op1: Xmm) {
4888 self.emit(VPBROADCASTW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4889 }
4890}
4891
4892impl<'a> VpbroadcastwMaskEmitter<Ymm, Mem> for Assembler<'a> {
4893 fn vpbroadcastw_mask(&mut self, op0: Ymm, op1: Mem) {
4894 self.emit(VPBROADCASTW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4895 }
4896}
4897
4898impl<'a> VpbroadcastwMaskEmitter<Zmm, Xmm> for Assembler<'a> {
4899 fn vpbroadcastw_mask(&mut self, op0: Zmm, op1: Xmm) {
4900 self.emit(VPBROADCASTW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4901 }
4902}
4903
4904impl<'a> VpbroadcastwMaskEmitter<Zmm, Mem> for Assembler<'a> {
4905 fn vpbroadcastw_mask(&mut self, op0: Zmm, op1: Mem) {
4906 self.emit(VPBROADCASTW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4907 }
4908}
4909
4910/// `VPBROADCASTW_MASKZ` (VPBROADCASTW).
4911/// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
4912///
4913///
4914/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
4915///
4916/// Supported operand variants:
4917///
4918/// ```text
4919/// +---+----------+
4920/// | # | Operands |
4921/// +---+----------+
4922/// | 1 | Xmm, Mem |
4923/// | 2 | Xmm, Xmm |
4924/// | 3 | Ymm, Mem |
4925/// | 4 | Ymm, Xmm |
4926/// | 5 | Zmm, Mem |
4927/// | 6 | Zmm, Xmm |
4928/// +---+----------+
4929/// ```
4930pub trait VpbroadcastwMaskzEmitter<A, B> {
4931 fn vpbroadcastw_maskz(&mut self, op0: A, op1: B);
4932}
4933
4934impl<'a> VpbroadcastwMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
4935 fn vpbroadcastw_maskz(&mut self, op0: Xmm, op1: Xmm) {
4936 self.emit(VPBROADCASTW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4937 }
4938}
4939
4940impl<'a> VpbroadcastwMaskzEmitter<Xmm, Mem> for Assembler<'a> {
4941 fn vpbroadcastw_maskz(&mut self, op0: Xmm, op1: Mem) {
4942 self.emit(VPBROADCASTW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4943 }
4944}
4945
4946impl<'a> VpbroadcastwMaskzEmitter<Ymm, Xmm> for Assembler<'a> {
4947 fn vpbroadcastw_maskz(&mut self, op0: Ymm, op1: Xmm) {
4948 self.emit(VPBROADCASTW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4949 }
4950}
4951
4952impl<'a> VpbroadcastwMaskzEmitter<Ymm, Mem> for Assembler<'a> {
4953 fn vpbroadcastw_maskz(&mut self, op0: Ymm, op1: Mem) {
4954 self.emit(VPBROADCASTW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4955 }
4956}
4957
4958impl<'a> VpbroadcastwMaskzEmitter<Zmm, Xmm> for Assembler<'a> {
4959 fn vpbroadcastw_maskz(&mut self, op0: Zmm, op1: Xmm) {
4960 self.emit(VPBROADCASTW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4961 }
4962}
4963
4964impl<'a> VpbroadcastwMaskzEmitter<Zmm, Mem> for Assembler<'a> {
4965 fn vpbroadcastw_maskz(&mut self, op0: Zmm, op1: Mem) {
4966 self.emit(VPBROADCASTW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
4967 }
4968}
4969
4970/// `VPCMPB` (VPCMPB).
4971/// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
4972///
4973///
4974/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
4975///
4976/// Supported operand variants:
4977///
4978/// ```text
4979/// +---+---------------------+
4980/// | # | Operands |
4981/// +---+---------------------+
4982/// | 1 | KReg, Xmm, Mem, Imm |
4983/// | 2 | KReg, Xmm, Xmm, Imm |
4984/// | 3 | KReg, Ymm, Mem, Imm |
4985/// | 4 | KReg, Ymm, Ymm, Imm |
4986/// | 5 | KReg, Zmm, Mem, Imm |
4987/// | 6 | KReg, Zmm, Zmm, Imm |
4988/// +---+---------------------+
4989/// ```
4990pub trait VpcmpbEmitter<A, B, C, D> {
4991 fn vpcmpb(&mut self, op0: A, op1: B, op2: C, op3: D);
4992}
4993
4994impl<'a> VpcmpbEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
4995 fn vpcmpb(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
4996 self.emit(VPCMPB128KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
4997 }
4998}
4999
5000impl<'a> VpcmpbEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5001 fn vpcmpb(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5002 self.emit(VPCMPB128KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5003 }
5004}
5005
5006impl<'a> VpcmpbEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5007 fn vpcmpb(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5008 self.emit(VPCMPB256KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5009 }
5010}
5011
5012impl<'a> VpcmpbEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5013 fn vpcmpb(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5014 self.emit(VPCMPB256KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5015 }
5016}
5017
5018impl<'a> VpcmpbEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5019 fn vpcmpb(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5020 self.emit(VPCMPB512KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5021 }
5022}
5023
5024impl<'a> VpcmpbEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5025 fn vpcmpb(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5026 self.emit(VPCMPB512KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5027 }
5028}
5029
5030/// `VPCMPB_MASK` (VPCMPB).
5031/// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5032///
5033///
5034/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
5035///
5036/// Supported operand variants:
5037///
5038/// ```text
5039/// +---+---------------------+
5040/// | # | Operands |
5041/// +---+---------------------+
5042/// | 1 | KReg, Xmm, Mem, Imm |
5043/// | 2 | KReg, Xmm, Xmm, Imm |
5044/// | 3 | KReg, Ymm, Mem, Imm |
5045/// | 4 | KReg, Ymm, Ymm, Imm |
5046/// | 5 | KReg, Zmm, Mem, Imm |
5047/// | 6 | KReg, Zmm, Zmm, Imm |
5048/// +---+---------------------+
5049/// ```
5050pub trait VpcmpbMaskEmitter<A, B, C, D> {
5051 fn vpcmpb_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5052}
5053
5054impl<'a> VpcmpbMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5055 fn vpcmpb_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5056 self.emit(VPCMPB128KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5057 }
5058}
5059
5060impl<'a> VpcmpbMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5061 fn vpcmpb_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5062 self.emit(VPCMPB128KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5063 }
5064}
5065
5066impl<'a> VpcmpbMaskEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5067 fn vpcmpb_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5068 self.emit(VPCMPB256KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5069 }
5070}
5071
5072impl<'a> VpcmpbMaskEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5073 fn vpcmpb_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5074 self.emit(VPCMPB256KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5075 }
5076}
5077
5078impl<'a> VpcmpbMaskEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5079 fn vpcmpb_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5080 self.emit(VPCMPB512KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5081 }
5082}
5083
5084impl<'a> VpcmpbMaskEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5085 fn vpcmpb_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5086 self.emit(VPCMPB512KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5087 }
5088}
5089
5090/// `VPCMPUB` (VPCMPUB).
5091/// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5092///
5093///
5094/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
5095///
5096/// Supported operand variants:
5097///
5098/// ```text
5099/// +---+---------------------+
5100/// | # | Operands |
5101/// +---+---------------------+
5102/// | 1 | KReg, Xmm, Mem, Imm |
5103/// | 2 | KReg, Xmm, Xmm, Imm |
5104/// | 3 | KReg, Ymm, Mem, Imm |
5105/// | 4 | KReg, Ymm, Ymm, Imm |
5106/// | 5 | KReg, Zmm, Mem, Imm |
5107/// | 6 | KReg, Zmm, Zmm, Imm |
5108/// +---+---------------------+
5109/// ```
5110pub trait VpcmpubEmitter<A, B, C, D> {
5111 fn vpcmpub(&mut self, op0: A, op1: B, op2: C, op3: D);
5112}
5113
5114impl<'a> VpcmpubEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5115 fn vpcmpub(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5116 self.emit(VPCMPUB128KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5117 }
5118}
5119
5120impl<'a> VpcmpubEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5121 fn vpcmpub(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5122 self.emit(VPCMPUB128KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5123 }
5124}
5125
5126impl<'a> VpcmpubEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5127 fn vpcmpub(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5128 self.emit(VPCMPUB256KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5129 }
5130}
5131
5132impl<'a> VpcmpubEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5133 fn vpcmpub(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5134 self.emit(VPCMPUB256KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5135 }
5136}
5137
5138impl<'a> VpcmpubEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5139 fn vpcmpub(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5140 self.emit(VPCMPUB512KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5141 }
5142}
5143
5144impl<'a> VpcmpubEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5145 fn vpcmpub(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5146 self.emit(VPCMPUB512KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5147 }
5148}
5149
5150/// `VPCMPUB_MASK` (VPCMPUB).
5151/// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5152///
5153///
5154/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
5155///
5156/// Supported operand variants:
5157///
5158/// ```text
5159/// +---+---------------------+
5160/// | # | Operands |
5161/// +---+---------------------+
5162/// | 1 | KReg, Xmm, Mem, Imm |
5163/// | 2 | KReg, Xmm, Xmm, Imm |
5164/// | 3 | KReg, Ymm, Mem, Imm |
5165/// | 4 | KReg, Ymm, Ymm, Imm |
5166/// | 5 | KReg, Zmm, Mem, Imm |
5167/// | 6 | KReg, Zmm, Zmm, Imm |
5168/// +---+---------------------+
5169/// ```
5170pub trait VpcmpubMaskEmitter<A, B, C, D> {
5171 fn vpcmpub_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5172}
5173
5174impl<'a> VpcmpubMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5175 fn vpcmpub_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5176 self.emit(VPCMPUB128KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5177 }
5178}
5179
5180impl<'a> VpcmpubMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5181 fn vpcmpub_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5182 self.emit(VPCMPUB128KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5183 }
5184}
5185
5186impl<'a> VpcmpubMaskEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5187 fn vpcmpub_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5188 self.emit(VPCMPUB256KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5189 }
5190}
5191
5192impl<'a> VpcmpubMaskEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5193 fn vpcmpub_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5194 self.emit(VPCMPUB256KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5195 }
5196}
5197
5198impl<'a> VpcmpubMaskEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5199 fn vpcmpub_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5200 self.emit(VPCMPUB512KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5201 }
5202}
5203
5204impl<'a> VpcmpubMaskEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5205 fn vpcmpub_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5206 self.emit(VPCMPUB512KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5207 }
5208}
5209
5210/// `VPCMPUW` (VPCMPUW).
5211/// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5212///
5213///
5214/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
5215///
5216/// Supported operand variants:
5217///
5218/// ```text
5219/// +---+---------------------+
5220/// | # | Operands |
5221/// +---+---------------------+
5222/// | 1 | KReg, Xmm, Mem, Imm |
5223/// | 2 | KReg, Xmm, Xmm, Imm |
5224/// | 3 | KReg, Ymm, Mem, Imm |
5225/// | 4 | KReg, Ymm, Ymm, Imm |
5226/// | 5 | KReg, Zmm, Mem, Imm |
5227/// | 6 | KReg, Zmm, Zmm, Imm |
5228/// +---+---------------------+
5229/// ```
5230pub trait VpcmpuwEmitter<A, B, C, D> {
5231 fn vpcmpuw(&mut self, op0: A, op1: B, op2: C, op3: D);
5232}
5233
5234impl<'a> VpcmpuwEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5235 fn vpcmpuw(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5236 self.emit(VPCMPUW128KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5237 }
5238}
5239
5240impl<'a> VpcmpuwEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5241 fn vpcmpuw(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5242 self.emit(VPCMPUW128KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5243 }
5244}
5245
5246impl<'a> VpcmpuwEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5247 fn vpcmpuw(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5248 self.emit(VPCMPUW256KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5249 }
5250}
5251
5252impl<'a> VpcmpuwEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5253 fn vpcmpuw(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5254 self.emit(VPCMPUW256KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5255 }
5256}
5257
5258impl<'a> VpcmpuwEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5259 fn vpcmpuw(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5260 self.emit(VPCMPUW512KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5261 }
5262}
5263
5264impl<'a> VpcmpuwEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5265 fn vpcmpuw(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5266 self.emit(VPCMPUW512KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5267 }
5268}
5269
5270/// `VPCMPUW_MASK` (VPCMPUW).
5271/// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5272///
5273///
5274/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
5275///
5276/// Supported operand variants:
5277///
5278/// ```text
5279/// +---+---------------------+
5280/// | # | Operands |
5281/// +---+---------------------+
5282/// | 1 | KReg, Xmm, Mem, Imm |
5283/// | 2 | KReg, Xmm, Xmm, Imm |
5284/// | 3 | KReg, Ymm, Mem, Imm |
5285/// | 4 | KReg, Ymm, Ymm, Imm |
5286/// | 5 | KReg, Zmm, Mem, Imm |
5287/// | 6 | KReg, Zmm, Zmm, Imm |
5288/// +---+---------------------+
5289/// ```
5290pub trait VpcmpuwMaskEmitter<A, B, C, D> {
5291 fn vpcmpuw_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5292}
5293
5294impl<'a> VpcmpuwMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5295 fn vpcmpuw_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5296 self.emit(VPCMPUW128KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5297 }
5298}
5299
5300impl<'a> VpcmpuwMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5301 fn vpcmpuw_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5302 self.emit(VPCMPUW128KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5303 }
5304}
5305
5306impl<'a> VpcmpuwMaskEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5307 fn vpcmpuw_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5308 self.emit(VPCMPUW256KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5309 }
5310}
5311
5312impl<'a> VpcmpuwMaskEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5313 fn vpcmpuw_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5314 self.emit(VPCMPUW256KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5315 }
5316}
5317
5318impl<'a> VpcmpuwMaskEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5319 fn vpcmpuw_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5320 self.emit(VPCMPUW512KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5321 }
5322}
5323
5324impl<'a> VpcmpuwMaskEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5325 fn vpcmpuw_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5326 self.emit(VPCMPUW512KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5327 }
5328}
5329
5330/// `VPCMPW` (VPCMPW).
5331/// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5332///
5333///
5334/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
5335///
5336/// Supported operand variants:
5337///
5338/// ```text
5339/// +---+---------------------+
5340/// | # | Operands |
5341/// +---+---------------------+
5342/// | 1 | KReg, Xmm, Mem, Imm |
5343/// | 2 | KReg, Xmm, Xmm, Imm |
5344/// | 3 | KReg, Ymm, Mem, Imm |
5345/// | 4 | KReg, Ymm, Ymm, Imm |
5346/// | 5 | KReg, Zmm, Mem, Imm |
5347/// | 6 | KReg, Zmm, Zmm, Imm |
5348/// +---+---------------------+
5349/// ```
5350pub trait VpcmpwEmitter<A, B, C, D> {
5351 fn vpcmpw(&mut self, op0: A, op1: B, op2: C, op3: D);
5352}
5353
5354impl<'a> VpcmpwEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5355 fn vpcmpw(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5356 self.emit(VPCMPW128KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5357 }
5358}
5359
5360impl<'a> VpcmpwEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5361 fn vpcmpw(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5362 self.emit(VPCMPW128KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5363 }
5364}
5365
5366impl<'a> VpcmpwEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5367 fn vpcmpw(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5368 self.emit(VPCMPW256KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5369 }
5370}
5371
5372impl<'a> VpcmpwEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5373 fn vpcmpw(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5374 self.emit(VPCMPW256KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5375 }
5376}
5377
5378impl<'a> VpcmpwEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5379 fn vpcmpw(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5380 self.emit(VPCMPW512KRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5381 }
5382}
5383
5384impl<'a> VpcmpwEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5385 fn vpcmpw(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5386 self.emit(VPCMPW512KRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5387 }
5388}
5389
5390/// `VPCMPW_MASK` (VPCMPW).
5391/// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
5392///
5393///
5394/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
5395///
5396/// Supported operand variants:
5397///
5398/// ```text
5399/// +---+---------------------+
5400/// | # | Operands |
5401/// +---+---------------------+
5402/// | 1 | KReg, Xmm, Mem, Imm |
5403/// | 2 | KReg, Xmm, Xmm, Imm |
5404/// | 3 | KReg, Ymm, Mem, Imm |
5405/// | 4 | KReg, Ymm, Ymm, Imm |
5406/// | 5 | KReg, Zmm, Mem, Imm |
5407/// | 6 | KReg, Zmm, Zmm, Imm |
5408/// +---+---------------------+
5409/// ```
5410pub trait VpcmpwMaskEmitter<A, B, C, D> {
5411 fn vpcmpw_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
5412}
5413
5414impl<'a> VpcmpwMaskEmitter<KReg, Xmm, Xmm, Imm> for Assembler<'a> {
5415 fn vpcmpw_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm, op3: Imm) {
5416 self.emit(VPCMPW128KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5417 }
5418}
5419
5420impl<'a> VpcmpwMaskEmitter<KReg, Xmm, Mem, Imm> for Assembler<'a> {
5421 fn vpcmpw_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem, op3: Imm) {
5422 self.emit(VPCMPW128KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5423 }
5424}
5425
5426impl<'a> VpcmpwMaskEmitter<KReg, Ymm, Ymm, Imm> for Assembler<'a> {
5427 fn vpcmpw_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm, op3: Imm) {
5428 self.emit(VPCMPW256KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5429 }
5430}
5431
5432impl<'a> VpcmpwMaskEmitter<KReg, Ymm, Mem, Imm> for Assembler<'a> {
5433 fn vpcmpw_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem, op3: Imm) {
5434 self.emit(VPCMPW256KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5435 }
5436}
5437
5438impl<'a> VpcmpwMaskEmitter<KReg, Zmm, Zmm, Imm> for Assembler<'a> {
5439 fn vpcmpw_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm, op3: Imm) {
5440 self.emit(VPCMPW512KRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5441 }
5442}
5443
5444impl<'a> VpcmpwMaskEmitter<KReg, Zmm, Mem, Imm> for Assembler<'a> {
5445 fn vpcmpw_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem, op3: Imm) {
5446 self.emit(VPCMPW512KRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
5447 }
5448}
5449
5450/// `VPERMI2W` (VPERMI2W).
5451/// Permutes 16-bit/32-bit/64-bit values in the second operand (the first source operand) and the third operand (the second source operand) using indices in the first operand to select elements from the second and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
5452///
5453///
5454/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMI2W%3AVPERMI2D%3AVPERMI2Q%3AVPERMI2PS%3AVPERMI2PD.html).
5455///
5456/// Supported operand variants:
5457///
5458/// ```text
5459/// +---+---------------+
5460/// | # | Operands |
5461/// +---+---------------+
5462/// | 1 | Xmm, Xmm, Mem |
5463/// | 2 | Xmm, Xmm, Xmm |
5464/// | 3 | Ymm, Ymm, Mem |
5465/// | 4 | Ymm, Ymm, Ymm |
5466/// | 5 | Zmm, Zmm, Mem |
5467/// | 6 | Zmm, Zmm, Zmm |
5468/// +---+---------------+
5469/// ```
5470pub trait Vpermi2wEmitter<A, B, C> {
5471 fn vpermi2w(&mut self, op0: A, op1: B, op2: C);
5472}
5473
5474impl<'a> Vpermi2wEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5475 fn vpermi2w(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5476 self.emit(VPERMI2W128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5477 }
5478}
5479
5480impl<'a> Vpermi2wEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5481 fn vpermi2w(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5482 self.emit(VPERMI2W128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5483 }
5484}
5485
5486impl<'a> Vpermi2wEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5487 fn vpermi2w(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5488 self.emit(VPERMI2W256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5489 }
5490}
5491
5492impl<'a> Vpermi2wEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5493 fn vpermi2w(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5494 self.emit(VPERMI2W256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5495 }
5496}
5497
5498impl<'a> Vpermi2wEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5499 fn vpermi2w(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5500 self.emit(VPERMI2W512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5501 }
5502}
5503
5504impl<'a> Vpermi2wEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5505 fn vpermi2w(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5506 self.emit(VPERMI2W512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5507 }
5508}
5509
5510/// `VPERMI2W_MASK` (VPERMI2W).
5511/// Permutes 16-bit/32-bit/64-bit values in the second operand (the first source operand) and the third operand (the second source operand) using indices in the first operand to select elements from the second and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
5512///
5513///
5514/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMI2W%3AVPERMI2D%3AVPERMI2Q%3AVPERMI2PS%3AVPERMI2PD.html).
5515///
5516/// Supported operand variants:
5517///
5518/// ```text
5519/// +---+---------------+
5520/// | # | Operands |
5521/// +---+---------------+
5522/// | 1 | Xmm, Xmm, Mem |
5523/// | 2 | Xmm, Xmm, Xmm |
5524/// | 3 | Ymm, Ymm, Mem |
5525/// | 4 | Ymm, Ymm, Ymm |
5526/// | 5 | Zmm, Zmm, Mem |
5527/// | 6 | Zmm, Zmm, Zmm |
5528/// +---+---------------+
5529/// ```
5530pub trait Vpermi2wMaskEmitter<A, B, C> {
5531 fn vpermi2w_mask(&mut self, op0: A, op1: B, op2: C);
5532}
5533
5534impl<'a> Vpermi2wMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5535 fn vpermi2w_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5536 self.emit(VPERMI2W128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5537 }
5538}
5539
5540impl<'a> Vpermi2wMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5541 fn vpermi2w_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5542 self.emit(VPERMI2W128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5543 }
5544}
5545
5546impl<'a> Vpermi2wMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5547 fn vpermi2w_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5548 self.emit(VPERMI2W256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5549 }
5550}
5551
5552impl<'a> Vpermi2wMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5553 fn vpermi2w_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5554 self.emit(VPERMI2W256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5555 }
5556}
5557
5558impl<'a> Vpermi2wMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5559 fn vpermi2w_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5560 self.emit(VPERMI2W512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5561 }
5562}
5563
5564impl<'a> Vpermi2wMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5565 fn vpermi2w_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5566 self.emit(VPERMI2W512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5567 }
5568}
5569
5570/// `VPERMI2W_MASKZ` (VPERMI2W).
5571/// Permutes 16-bit/32-bit/64-bit values in the second operand (the first source operand) and the third operand (the second source operand) using indices in the first operand to select elements from the second and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
5572///
5573///
5574/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMI2W%3AVPERMI2D%3AVPERMI2Q%3AVPERMI2PS%3AVPERMI2PD.html).
5575///
5576/// Supported operand variants:
5577///
5578/// ```text
5579/// +---+---------------+
5580/// | # | Operands |
5581/// +---+---------------+
5582/// | 1 | Xmm, Xmm, Mem |
5583/// | 2 | Xmm, Xmm, Xmm |
5584/// | 3 | Ymm, Ymm, Mem |
5585/// | 4 | Ymm, Ymm, Ymm |
5586/// | 5 | Zmm, Zmm, Mem |
5587/// | 6 | Zmm, Zmm, Zmm |
5588/// +---+---------------+
5589/// ```
5590pub trait Vpermi2wMaskzEmitter<A, B, C> {
5591 fn vpermi2w_maskz(&mut self, op0: A, op1: B, op2: C);
5592}
5593
5594impl<'a> Vpermi2wMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5595 fn vpermi2w_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5596 self.emit(VPERMI2W128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5597 }
5598}
5599
5600impl<'a> Vpermi2wMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5601 fn vpermi2w_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5602 self.emit(VPERMI2W128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5603 }
5604}
5605
5606impl<'a> Vpermi2wMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5607 fn vpermi2w_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5608 self.emit(VPERMI2W256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5609 }
5610}
5611
5612impl<'a> Vpermi2wMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5613 fn vpermi2w_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5614 self.emit(VPERMI2W256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5615 }
5616}
5617
5618impl<'a> Vpermi2wMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5619 fn vpermi2w_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5620 self.emit(VPERMI2W512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5621 }
5622}
5623
5624impl<'a> Vpermi2wMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5625 fn vpermi2w_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5626 self.emit(VPERMI2W512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5627 }
5628}
5629
5630/// `VPERMT2W` (VPERMT2W).
5631/// Permutes 16-bit/32-bit/64-bit values in the first operand and the third operand (the second source operand) using indices in the second operand (the first source operand) to select elements from the first and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
5632///
5633///
5634/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMT2W%3AVPERMT2D%3AVPERMT2Q%3AVPERMT2PS%3AVPERMT2PD.html).
5635///
5636/// Supported operand variants:
5637///
5638/// ```text
5639/// +---+---------------+
5640/// | # | Operands |
5641/// +---+---------------+
5642/// | 1 | Xmm, Xmm, Mem |
5643/// | 2 | Xmm, Xmm, Xmm |
5644/// | 3 | Ymm, Ymm, Mem |
5645/// | 4 | Ymm, Ymm, Ymm |
5646/// | 5 | Zmm, Zmm, Mem |
5647/// | 6 | Zmm, Zmm, Zmm |
5648/// +---+---------------+
5649/// ```
5650pub trait Vpermt2wEmitter<A, B, C> {
5651 fn vpermt2w(&mut self, op0: A, op1: B, op2: C);
5652}
5653
5654impl<'a> Vpermt2wEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5655 fn vpermt2w(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5656 self.emit(VPERMT2W128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5657 }
5658}
5659
5660impl<'a> Vpermt2wEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5661 fn vpermt2w(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5662 self.emit(VPERMT2W128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5663 }
5664}
5665
5666impl<'a> Vpermt2wEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5667 fn vpermt2w(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5668 self.emit(VPERMT2W256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5669 }
5670}
5671
5672impl<'a> Vpermt2wEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5673 fn vpermt2w(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5674 self.emit(VPERMT2W256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5675 }
5676}
5677
5678impl<'a> Vpermt2wEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5679 fn vpermt2w(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5680 self.emit(VPERMT2W512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5681 }
5682}
5683
5684impl<'a> Vpermt2wEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5685 fn vpermt2w(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5686 self.emit(VPERMT2W512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5687 }
5688}
5689
5690/// `VPERMT2W_MASK` (VPERMT2W).
5691/// Permutes 16-bit/32-bit/64-bit values in the first operand and the third operand (the second source operand) using indices in the second operand (the first source operand) to select elements from the first and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
5692///
5693///
5694/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMT2W%3AVPERMT2D%3AVPERMT2Q%3AVPERMT2PS%3AVPERMT2PD.html).
5695///
5696/// Supported operand variants:
5697///
5698/// ```text
5699/// +---+---------------+
5700/// | # | Operands |
5701/// +---+---------------+
5702/// | 1 | Xmm, Xmm, Mem |
5703/// | 2 | Xmm, Xmm, Xmm |
5704/// | 3 | Ymm, Ymm, Mem |
5705/// | 4 | Ymm, Ymm, Ymm |
5706/// | 5 | Zmm, Zmm, Mem |
5707/// | 6 | Zmm, Zmm, Zmm |
5708/// +---+---------------+
5709/// ```
5710pub trait Vpermt2wMaskEmitter<A, B, C> {
5711 fn vpermt2w_mask(&mut self, op0: A, op1: B, op2: C);
5712}
5713
5714impl<'a> Vpermt2wMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5715 fn vpermt2w_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5716 self.emit(VPERMT2W128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5717 }
5718}
5719
5720impl<'a> Vpermt2wMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5721 fn vpermt2w_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5722 self.emit(VPERMT2W128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5723 }
5724}
5725
5726impl<'a> Vpermt2wMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5727 fn vpermt2w_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5728 self.emit(VPERMT2W256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5729 }
5730}
5731
5732impl<'a> Vpermt2wMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5733 fn vpermt2w_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5734 self.emit(VPERMT2W256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5735 }
5736}
5737
5738impl<'a> Vpermt2wMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5739 fn vpermt2w_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5740 self.emit(VPERMT2W512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5741 }
5742}
5743
5744impl<'a> Vpermt2wMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5745 fn vpermt2w_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5746 self.emit(VPERMT2W512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5747 }
5748}
5749
5750/// `VPERMT2W_MASKZ` (VPERMT2W).
5751/// Permutes 16-bit/32-bit/64-bit values in the first operand and the third operand (the second source operand) using indices in the second operand (the first source operand) to select elements from the first and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
5752///
5753///
5754/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMT2W%3AVPERMT2D%3AVPERMT2Q%3AVPERMT2PS%3AVPERMT2PD.html).
5755///
5756/// Supported operand variants:
5757///
5758/// ```text
5759/// +---+---------------+
5760/// | # | Operands |
5761/// +---+---------------+
5762/// | 1 | Xmm, Xmm, Mem |
5763/// | 2 | Xmm, Xmm, Xmm |
5764/// | 3 | Ymm, Ymm, Mem |
5765/// | 4 | Ymm, Ymm, Ymm |
5766/// | 5 | Zmm, Zmm, Mem |
5767/// | 6 | Zmm, Zmm, Zmm |
5768/// +---+---------------+
5769/// ```
5770pub trait Vpermt2wMaskzEmitter<A, B, C> {
5771 fn vpermt2w_maskz(&mut self, op0: A, op1: B, op2: C);
5772}
5773
5774impl<'a> Vpermt2wMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5775 fn vpermt2w_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5776 self.emit(VPERMT2W128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5777 }
5778}
5779
5780impl<'a> Vpermt2wMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5781 fn vpermt2w_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5782 self.emit(VPERMT2W128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5783 }
5784}
5785
5786impl<'a> Vpermt2wMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5787 fn vpermt2w_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5788 self.emit(VPERMT2W256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5789 }
5790}
5791
5792impl<'a> Vpermt2wMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5793 fn vpermt2w_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5794 self.emit(VPERMT2W256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5795 }
5796}
5797
5798impl<'a> Vpermt2wMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5799 fn vpermt2w_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5800 self.emit(VPERMT2W512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5801 }
5802}
5803
5804impl<'a> Vpermt2wMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5805 fn vpermt2w_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5806 self.emit(VPERMT2W512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5807 }
5808}
5809
5810/// `VPERMW` (VPERMW).
5811/// Copies doublewords (or words) from the second source operand (the third operand) to the destination operand (the first operand) according to the indices in the first source operand (the second operand). Note that this instruction permits a doubleword (word) in the source operand to be copied to more than one location in the destination operand.
5812///
5813///
5814/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMD%3AVPERMW.html).
5815///
5816/// Supported operand variants:
5817///
5818/// ```text
5819/// +---+---------------+
5820/// | # | Operands |
5821/// +---+---------------+
5822/// | 1 | Xmm, Xmm, Mem |
5823/// | 2 | Xmm, Xmm, Xmm |
5824/// | 3 | Ymm, Ymm, Mem |
5825/// | 4 | Ymm, Ymm, Ymm |
5826/// | 5 | Zmm, Zmm, Mem |
5827/// | 6 | Zmm, Zmm, Zmm |
5828/// +---+---------------+
5829/// ```
5830pub trait VpermwEmitter<A, B, C> {
5831 fn vpermw(&mut self, op0: A, op1: B, op2: C);
5832}
5833
5834impl<'a> VpermwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5835 fn vpermw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5836 self.emit(VPERMW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5837 }
5838}
5839
5840impl<'a> VpermwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5841 fn vpermw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5842 self.emit(VPERMW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5843 }
5844}
5845
5846impl<'a> VpermwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5847 fn vpermw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5848 self.emit(VPERMW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5849 }
5850}
5851
5852impl<'a> VpermwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5853 fn vpermw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5854 self.emit(VPERMW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5855 }
5856}
5857
5858impl<'a> VpermwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5859 fn vpermw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5860 self.emit(VPERMW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5861 }
5862}
5863
5864impl<'a> VpermwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5865 fn vpermw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5866 self.emit(VPERMW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5867 }
5868}
5869
5870/// `VPERMW_MASK` (VPERMW).
5871/// Copies doublewords (or words) from the second source operand (the third operand) to the destination operand (the first operand) according to the indices in the first source operand (the second operand). Note that this instruction permits a doubleword (word) in the source operand to be copied to more than one location in the destination operand.
5872///
5873///
5874/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMD%3AVPERMW.html).
5875///
5876/// Supported operand variants:
5877///
5878/// ```text
5879/// +---+---------------+
5880/// | # | Operands |
5881/// +---+---------------+
5882/// | 1 | Xmm, Xmm, Mem |
5883/// | 2 | Xmm, Xmm, Xmm |
5884/// | 3 | Ymm, Ymm, Mem |
5885/// | 4 | Ymm, Ymm, Ymm |
5886/// | 5 | Zmm, Zmm, Mem |
5887/// | 6 | Zmm, Zmm, Zmm |
5888/// +---+---------------+
5889/// ```
5890pub trait VpermwMaskEmitter<A, B, C> {
5891 fn vpermw_mask(&mut self, op0: A, op1: B, op2: C);
5892}
5893
5894impl<'a> VpermwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5895 fn vpermw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5896 self.emit(VPERMW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5897 }
5898}
5899
5900impl<'a> VpermwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5901 fn vpermw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5902 self.emit(VPERMW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5903 }
5904}
5905
5906impl<'a> VpermwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5907 fn vpermw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5908 self.emit(VPERMW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5909 }
5910}
5911
5912impl<'a> VpermwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5913 fn vpermw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5914 self.emit(VPERMW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5915 }
5916}
5917
5918impl<'a> VpermwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5919 fn vpermw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5920 self.emit(VPERMW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5921 }
5922}
5923
5924impl<'a> VpermwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5925 fn vpermw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5926 self.emit(VPERMW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5927 }
5928}
5929
5930/// `VPERMW_MASKZ` (VPERMW).
5931/// Copies doublewords (or words) from the second source operand (the third operand) to the destination operand (the first operand) according to the indices in the first source operand (the second operand). Note that this instruction permits a doubleword (word) in the source operand to be copied to more than one location in the destination operand.
5932///
5933///
5934/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMD%3AVPERMW.html).
5935///
5936/// Supported operand variants:
5937///
5938/// ```text
5939/// +---+---------------+
5940/// | # | Operands |
5941/// +---+---------------+
5942/// | 1 | Xmm, Xmm, Mem |
5943/// | 2 | Xmm, Xmm, Xmm |
5944/// | 3 | Ymm, Ymm, Mem |
5945/// | 4 | Ymm, Ymm, Ymm |
5946/// | 5 | Zmm, Zmm, Mem |
5947/// | 6 | Zmm, Zmm, Zmm |
5948/// +---+---------------+
5949/// ```
5950pub trait VpermwMaskzEmitter<A, B, C> {
5951 fn vpermw_maskz(&mut self, op0: A, op1: B, op2: C);
5952}
5953
5954impl<'a> VpermwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
5955 fn vpermw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
5956 self.emit(VPERMW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5957 }
5958}
5959
5960impl<'a> VpermwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
5961 fn vpermw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
5962 self.emit(VPERMW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5963 }
5964}
5965
5966impl<'a> VpermwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
5967 fn vpermw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
5968 self.emit(VPERMW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5969 }
5970}
5971
5972impl<'a> VpermwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
5973 fn vpermw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
5974 self.emit(VPERMW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5975 }
5976}
5977
5978impl<'a> VpermwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
5979 fn vpermw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
5980 self.emit(VPERMW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5981 }
5982}
5983
5984impl<'a> VpermwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
5985 fn vpermw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
5986 self.emit(VPERMW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
5987 }
5988}
5989
5990/// `VPEXTRB` (VPEXTRB).
5991/// Extract a byte/dword/qword integer value from the source XMM register at a byte/dword/qword offset determined from imm8[3:0]. The destination can be a register or byte/dword/qword memory location. If the destination is a register, the upper bits of the register are zero extended.
5992///
5993///
5994/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PEXTRB%3APEXTRD%3APEXTRQ.html).
5995///
5996/// Supported operand variants:
5997///
5998/// ```text
5999/// +---+---------------+
6000/// | # | Operands |
6001/// +---+---------------+
6002/// | 1 | Gpd, Xmm, Imm |
6003/// | 2 | Mem, Xmm, Imm |
6004/// +---+---------------+
6005/// ```
6006pub trait VpextrbEmitter<A, B, C> {
6007 fn vpextrb(&mut self, op0: A, op1: B, op2: C);
6008}
6009
6010impl<'a> VpextrbEmitter<Mem, Xmm, Imm> for Assembler<'a> {
6011 fn vpextrb(&mut self, op0: Mem, op1: Xmm, op2: Imm) {
6012 self.emit(VPEXTRBMRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6013 }
6014}
6015
6016impl<'a> VpextrbEmitter<Gpd, Xmm, Imm> for Assembler<'a> {
6017 fn vpextrb(&mut self, op0: Gpd, op1: Xmm, op2: Imm) {
6018 self.emit(VPEXTRBRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6019 }
6020}
6021
6022/// `VPEXTRW` (VPEXTRW).
6023/// Copies the word in the source operand (second operand) specified by the count operand (third operand) to the destination operand (first operand). The source operand can be an MMX technology register or an XMM register. The destination operand can be the low word of a general-purpose register or a 16-bit memory address. The count operand is an 8-bit immediate. When specifying a word location in an MMX technology register, the 2 least-significant bits of the count operand specify the location; for an XMM register, the 3 least-significant bits specify the location. The content of the destination register above bit 16 is cleared (set to all 0s).
6024///
6025///
6026/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PEXTRW.html).
6027///
6028/// Supported operand variants:
6029///
6030/// ```text
6031/// +---+---------------+
6032/// | # | Operands |
6033/// +---+---------------+
6034/// | 1 | Gpd, Xmm, Imm |
6035/// | 2 | Mem, Xmm, Imm |
6036/// +---+---------------+
6037/// ```
6038pub trait VpextrwEmitter<A, B, C> {
6039 fn vpextrw(&mut self, op0: A, op1: B, op2: C);
6040}
6041
6042impl<'a> VpextrwEmitter<Gpd, Xmm, Imm> for Assembler<'a> {
6043 fn vpextrw(&mut self, op0: Gpd, op1: Xmm, op2: Imm) {
6044 self.emit(VPEXTRWRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6045 }
6046}
6047
6048impl<'a> VpextrwEmitter<Mem, Xmm, Imm> for Assembler<'a> {
6049 fn vpextrw(&mut self, op0: Mem, op1: Xmm, op2: Imm) {
6050 self.emit(VPEXTRWMRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6051 }
6052}
6053
/// `VPINSRB` (VPINSRB).
/// Copies a byte from the third operand (a general-purpose register or a memory
/// location) and inserts it into the destination XMM register (first operand) at the
/// byte offset selected by the immediate (fourth operand); the remaining bytes are
/// copied from the first source operand (second operand).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PINSRB%3APINSRD%3APINSRQ.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Gpd, Imm |
/// | 2 | Xmm, Xmm, Mem, Imm |
/// +---+--------------------+
/// ```
pub trait VpinsrbEmitter<A, B, C, D> {
    fn vpinsrb(&mut self, op0: A, op1: B, op2: C, op3: D);
}

impl<'a> VpinsrbEmitter<Xmm, Xmm, Gpd, Imm> for Assembler<'a> {
    // Register-source form.
    fn vpinsrb(&mut self, op0: Xmm, op1: Xmm, op2: Gpd, op3: Imm) {
        self.emit(VPINSRBRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
    }
}

impl<'a> VpinsrbEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
    // Memory-source form.
    fn vpinsrb(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
        self.emit(VPINSRBRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
    }
}
6081
/// `VPINSRW` (VPINSRW).
/// Copies a word from the third operand (the low word of a general-purpose register
/// or a 16-bit memory location) and inserts it into the destination XMM register
/// (first operand) at the word position selected by the immediate (fourth operand);
/// the remaining words are copied from the first source operand (second operand).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PINSRW.html).
///
/// Supported operand variants:
///
/// ```text
/// +---+--------------------+
/// | # | Operands |
/// +---+--------------------+
/// | 1 | Xmm, Xmm, Gpd, Imm |
/// | 2 | Xmm, Xmm, Mem, Imm |
/// +---+--------------------+
/// ```
pub trait VpinsrwEmitter<A, B, C, D> {
    fn vpinsrw(&mut self, op0: A, op1: B, op2: C, op3: D);
}

impl<'a> VpinsrwEmitter<Xmm, Xmm, Gpd, Imm> for Assembler<'a> {
    // Register-source form.
    fn vpinsrw(&mut self, op0: Xmm, op1: Xmm, op2: Gpd, op3: Imm) {
        self.emit(VPINSRWRRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
    }
}

impl<'a> VpinsrwEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
    // Memory-source form.
    fn vpinsrw(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
        self.emit(VPINSRWRRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
    }
}
6109
6110/// `VPMADDUBSW` (VPMADDUBSW).
6111/// (V)PMADDUBSW multiplies vertically each unsigned byte of the destination operand (first operand) with the corresponding signed byte of the source operand (second operand), producing intermediate signed 16-bit integers. Each adjacent pair of signed words is added and the saturated result is packed to the destination operand. For example, the lowest-order bytes (bits 7-0) in the source and destination operands are multiplied and the intermediate signed word result is added with the corresponding intermediate result from the 2nd lowest-order bytes (bits 15-8) of the operands; the sign-saturated result is stored in the lowest word of the destination register (15-0). The same operation is performed on the other pairs of adjacent bytes. Both operands can be MMX register or XMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
6112///
6113///
6114/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDUBSW.html).
6115///
6116/// Supported operand variants:
6117///
6118/// ```text
6119/// +---+---------------+
6120/// | # | Operands |
6121/// +---+---------------+
6122/// | 1 | Xmm, Xmm, Mem |
6123/// | 2 | Xmm, Xmm, Xmm |
6124/// | 3 | Ymm, Ymm, Mem |
6125/// | 4 | Ymm, Ymm, Ymm |
6126/// | 5 | Zmm, Zmm, Mem |
6127/// | 6 | Zmm, Zmm, Zmm |
6128/// +---+---------------+
6129/// ```
6130pub trait VpmaddubswEmitter<A, B, C> {
6131 fn vpmaddubsw(&mut self, op0: A, op1: B, op2: C);
6132}
6133
6134impl<'a> VpmaddubswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6135 fn vpmaddubsw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6136 self.emit(VPMADDUBSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6137 }
6138}
6139
6140impl<'a> VpmaddubswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6141 fn vpmaddubsw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6142 self.emit(VPMADDUBSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6143 }
6144}
6145
6146impl<'a> VpmaddubswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6147 fn vpmaddubsw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6148 self.emit(VPMADDUBSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6149 }
6150}
6151
6152impl<'a> VpmaddubswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6153 fn vpmaddubsw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6154 self.emit(VPMADDUBSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6155 }
6156}
6157
6158impl<'a> VpmaddubswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6159 fn vpmaddubsw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6160 self.emit(VPMADDUBSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6161 }
6162}
6163
6164impl<'a> VpmaddubswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6165 fn vpmaddubsw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6166 self.emit(VPMADDUBSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6167 }
6168}
6169
6170/// `VPMADDUBSW_MASK` (VPMADDUBSW).
6171/// (V)PMADDUBSW multiplies vertically each unsigned byte of the destination operand (first operand) with the corresponding signed byte of the source operand (second operand), producing intermediate signed 16-bit integers. Each adjacent pair of signed words is added and the saturated result is packed to the destination operand. For example, the lowest-order bytes (bits 7-0) in the source and destination operands are multiplied and the intermediate signed word result is added with the corresponding intermediate result from the 2nd lowest-order bytes (bits 15-8) of the operands; the sign-saturated result is stored in the lowest word of the destination register (15-0). The same operation is performed on the other pairs of adjacent bytes. Both operands can be MMX register or XMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
6172///
6173///
6174/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDUBSW.html).
6175///
6176/// Supported operand variants:
6177///
6178/// ```text
6179/// +---+---------------+
6180/// | # | Operands |
6181/// +---+---------------+
6182/// | 1 | Xmm, Xmm, Mem |
6183/// | 2 | Xmm, Xmm, Xmm |
6184/// | 3 | Ymm, Ymm, Mem |
6185/// | 4 | Ymm, Ymm, Ymm |
6186/// | 5 | Zmm, Zmm, Mem |
6187/// | 6 | Zmm, Zmm, Zmm |
6188/// +---+---------------+
6189/// ```
6190pub trait VpmaddubswMaskEmitter<A, B, C> {
6191 fn vpmaddubsw_mask(&mut self, op0: A, op1: B, op2: C);
6192}
6193
6194impl<'a> VpmaddubswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6195 fn vpmaddubsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6196 self.emit(VPMADDUBSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6197 }
6198}
6199
6200impl<'a> VpmaddubswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6201 fn vpmaddubsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6202 self.emit(VPMADDUBSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6203 }
6204}
6205
6206impl<'a> VpmaddubswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6207 fn vpmaddubsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6208 self.emit(VPMADDUBSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6209 }
6210}
6211
6212impl<'a> VpmaddubswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6213 fn vpmaddubsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6214 self.emit(VPMADDUBSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6215 }
6216}
6217
6218impl<'a> VpmaddubswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6219 fn vpmaddubsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6220 self.emit(VPMADDUBSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6221 }
6222}
6223
6224impl<'a> VpmaddubswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6225 fn vpmaddubsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6226 self.emit(VPMADDUBSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6227 }
6228}
6229
6230/// `VPMADDUBSW_MASKZ` (VPMADDUBSW).
6231/// (V)PMADDUBSW multiplies vertically each unsigned byte of the destination operand (first operand) with the corresponding signed byte of the source operand (second operand), producing intermediate signed 16-bit integers. Each adjacent pair of signed words is added and the saturated result is packed to the destination operand. For example, the lowest-order bytes (bits 7-0) in the source and destination operands are multiplied and the intermediate signed word result is added with the corresponding intermediate result from the 2nd lowest-order bytes (bits 15-8) of the operands; the sign-saturated result is stored in the lowest word of the destination register (15-0). The same operation is performed on the other pairs of adjacent bytes. Both operands can be MMX register or XMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
6232///
6233///
6234/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDUBSW.html).
6235///
6236/// Supported operand variants:
6237///
6238/// ```text
6239/// +---+---------------+
6240/// | # | Operands |
6241/// +---+---------------+
6242/// | 1 | Xmm, Xmm, Mem |
6243/// | 2 | Xmm, Xmm, Xmm |
6244/// | 3 | Ymm, Ymm, Mem |
6245/// | 4 | Ymm, Ymm, Ymm |
6246/// | 5 | Zmm, Zmm, Mem |
6247/// | 6 | Zmm, Zmm, Zmm |
6248/// +---+---------------+
6249/// ```
6250pub trait VpmaddubswMaskzEmitter<A, B, C> {
6251 fn vpmaddubsw_maskz(&mut self, op0: A, op1: B, op2: C);
6252}
6253
6254impl<'a> VpmaddubswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6255 fn vpmaddubsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6256 self.emit(VPMADDUBSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6257 }
6258}
6259
6260impl<'a> VpmaddubswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6261 fn vpmaddubsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6262 self.emit(VPMADDUBSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6263 }
6264}
6265
6266impl<'a> VpmaddubswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6267 fn vpmaddubsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6268 self.emit(VPMADDUBSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6269 }
6270}
6271
6272impl<'a> VpmaddubswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6273 fn vpmaddubsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6274 self.emit(VPMADDUBSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6275 }
6276}
6277
6278impl<'a> VpmaddubswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6279 fn vpmaddubsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6280 self.emit(VPMADDUBSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6281 }
6282}
6283
6284impl<'a> VpmaddubswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6285 fn vpmaddubsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6286 self.emit(VPMADDUBSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6287 }
6288}
6289
6290/// `VPMADDWD` (VPMADDWD).
6291/// Multiplies the individual signed words of the destination operand (first operand) by the corresponding signed words of the source operand (second operand), producing temporary signed, doubleword results. The adjacent double-word results are then summed and stored in the destination operand. For example, the corresponding low-order words (15-0) and (31-16) in the source and destination operands are multiplied by one another and the double-word results are added together and stored in the low doubleword of the destination register (31-0). The same operation is performed on the other pairs of adjacent words. (Figure 4-11 shows this operation when using 64-bit operands).
6292///
6293///
6294/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDWD.html).
6295///
6296/// Supported operand variants:
6297///
6298/// ```text
6299/// +---+---------------+
6300/// | # | Operands |
6301/// +---+---------------+
6302/// | 1 | Xmm, Xmm, Mem |
6303/// | 2 | Xmm, Xmm, Xmm |
6304/// | 3 | Ymm, Ymm, Mem |
6305/// | 4 | Ymm, Ymm, Ymm |
6306/// | 5 | Zmm, Zmm, Mem |
6307/// | 6 | Zmm, Zmm, Zmm |
6308/// +---+---------------+
6309/// ```
6310pub trait VpmaddwdEmitter<A, B, C> {
6311 fn vpmaddwd(&mut self, op0: A, op1: B, op2: C);
6312}
6313
6314impl<'a> VpmaddwdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6315 fn vpmaddwd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6316 self.emit(VPMADDWD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6317 }
6318}
6319
6320impl<'a> VpmaddwdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6321 fn vpmaddwd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6322 self.emit(VPMADDWD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6323 }
6324}
6325
6326impl<'a> VpmaddwdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6327 fn vpmaddwd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6328 self.emit(VPMADDWD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6329 }
6330}
6331
6332impl<'a> VpmaddwdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6333 fn vpmaddwd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6334 self.emit(VPMADDWD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6335 }
6336}
6337
6338impl<'a> VpmaddwdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6339 fn vpmaddwd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6340 self.emit(VPMADDWD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6341 }
6342}
6343
6344impl<'a> VpmaddwdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6345 fn vpmaddwd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6346 self.emit(VPMADDWD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6347 }
6348}
6349
6350/// `VPMADDWD_MASK` (VPMADDWD).
6351/// Multiplies the individual signed words of the destination operand (first operand) by the corresponding signed words of the source operand (second operand), producing temporary signed, doubleword results. The adjacent double-word results are then summed and stored in the destination operand. For example, the corresponding low-order words (15-0) and (31-16) in the source and destination operands are multiplied by one another and the double-word results are added together and stored in the low doubleword of the destination register (31-0). The same operation is performed on the other pairs of adjacent words. (Figure 4-11 shows this operation when using 64-bit operands).
6352///
6353///
6354/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDWD.html).
6355///
6356/// Supported operand variants:
6357///
6358/// ```text
6359/// +---+---------------+
6360/// | # | Operands |
6361/// +---+---------------+
6362/// | 1 | Xmm, Xmm, Mem |
6363/// | 2 | Xmm, Xmm, Xmm |
6364/// | 3 | Ymm, Ymm, Mem |
6365/// | 4 | Ymm, Ymm, Ymm |
6366/// | 5 | Zmm, Zmm, Mem |
6367/// | 6 | Zmm, Zmm, Zmm |
6368/// +---+---------------+
6369/// ```
6370pub trait VpmaddwdMaskEmitter<A, B, C> {
6371 fn vpmaddwd_mask(&mut self, op0: A, op1: B, op2: C);
6372}
6373
6374impl<'a> VpmaddwdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6375 fn vpmaddwd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6376 self.emit(VPMADDWD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6377 }
6378}
6379
6380impl<'a> VpmaddwdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6381 fn vpmaddwd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6382 self.emit(VPMADDWD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6383 }
6384}
6385
6386impl<'a> VpmaddwdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6387 fn vpmaddwd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6388 self.emit(VPMADDWD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6389 }
6390}
6391
6392impl<'a> VpmaddwdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6393 fn vpmaddwd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6394 self.emit(VPMADDWD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6395 }
6396}
6397
6398impl<'a> VpmaddwdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6399 fn vpmaddwd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6400 self.emit(VPMADDWD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6401 }
6402}
6403
6404impl<'a> VpmaddwdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6405 fn vpmaddwd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6406 self.emit(VPMADDWD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6407 }
6408}
6409
6410/// `VPMADDWD_MASKZ` (VPMADDWD).
6411/// Multiplies the individual signed words of the destination operand (first operand) by the corresponding signed words of the source operand (second operand), producing temporary signed, doubleword results. The adjacent double-word results are then summed and stored in the destination operand. For example, the corresponding low-order words (15-0) and (31-16) in the source and destination operands are multiplied by one another and the double-word results are added together and stored in the low doubleword of the destination register (31-0). The same operation is performed on the other pairs of adjacent words. (Figure 4-11 shows this operation when using 64-bit operands).
6412///
6413///
6414/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDWD.html).
6415///
6416/// Supported operand variants:
6417///
6418/// ```text
6419/// +---+---------------+
6420/// | # | Operands |
6421/// +---+---------------+
6422/// | 1 | Xmm, Xmm, Mem |
6423/// | 2 | Xmm, Xmm, Xmm |
6424/// | 3 | Ymm, Ymm, Mem |
6425/// | 4 | Ymm, Ymm, Ymm |
6426/// | 5 | Zmm, Zmm, Mem |
6427/// | 6 | Zmm, Zmm, Zmm |
6428/// +---+---------------+
6429/// ```
6430pub trait VpmaddwdMaskzEmitter<A, B, C> {
6431 fn vpmaddwd_maskz(&mut self, op0: A, op1: B, op2: C);
6432}
6433
6434impl<'a> VpmaddwdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6435 fn vpmaddwd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6436 self.emit(VPMADDWD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6437 }
6438}
6439
6440impl<'a> VpmaddwdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6441 fn vpmaddwd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6442 self.emit(VPMADDWD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6443 }
6444}
6445
6446impl<'a> VpmaddwdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6447 fn vpmaddwd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6448 self.emit(VPMADDWD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6449 }
6450}
6451
6452impl<'a> VpmaddwdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6453 fn vpmaddwd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6454 self.emit(VPMADDWD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6455 }
6456}
6457
6458impl<'a> VpmaddwdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6459 fn vpmaddwd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6460 self.emit(VPMADDWD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6461 }
6462}
6463
6464impl<'a> VpmaddwdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6465 fn vpmaddwd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6466 self.emit(VPMADDWD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6467 }
6468}
6469
6470/// `VPMAXSB` (VPMAXSB).
6471/// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6472///
6473///
6474/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
6475///
6476/// Supported operand variants:
6477///
6478/// ```text
6479/// +---+---------------+
6480/// | # | Operands |
6481/// +---+---------------+
6482/// | 1 | Xmm, Xmm, Mem |
6483/// | 2 | Xmm, Xmm, Xmm |
6484/// | 3 | Ymm, Ymm, Mem |
6485/// | 4 | Ymm, Ymm, Ymm |
6486/// | 5 | Zmm, Zmm, Mem |
6487/// | 6 | Zmm, Zmm, Zmm |
6488/// +---+---------------+
6489/// ```
6490pub trait VpmaxsbEmitter<A, B, C> {
6491 fn vpmaxsb(&mut self, op0: A, op1: B, op2: C);
6492}
6493
6494impl<'a> VpmaxsbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6495 fn vpmaxsb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6496 self.emit(VPMAXSB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6497 }
6498}
6499
6500impl<'a> VpmaxsbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6501 fn vpmaxsb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6502 self.emit(VPMAXSB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6503 }
6504}
6505
6506impl<'a> VpmaxsbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6507 fn vpmaxsb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6508 self.emit(VPMAXSB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6509 }
6510}
6511
6512impl<'a> VpmaxsbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6513 fn vpmaxsb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6514 self.emit(VPMAXSB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6515 }
6516}
6517
6518impl<'a> VpmaxsbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6519 fn vpmaxsb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6520 self.emit(VPMAXSB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6521 }
6522}
6523
6524impl<'a> VpmaxsbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6525 fn vpmaxsb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6526 self.emit(VPMAXSB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6527 }
6528}
6529
6530/// `VPMAXSB_MASK` (VPMAXSB).
6531/// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6532///
6533///
6534/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
6535///
6536/// Supported operand variants:
6537///
6538/// ```text
6539/// +---+---------------+
6540/// | # | Operands |
6541/// +---+---------------+
6542/// | 1 | Xmm, Xmm, Mem |
6543/// | 2 | Xmm, Xmm, Xmm |
6544/// | 3 | Ymm, Ymm, Mem |
6545/// | 4 | Ymm, Ymm, Ymm |
6546/// | 5 | Zmm, Zmm, Mem |
6547/// | 6 | Zmm, Zmm, Zmm |
6548/// +---+---------------+
6549/// ```
6550pub trait VpmaxsbMaskEmitter<A, B, C> {
6551 fn vpmaxsb_mask(&mut self, op0: A, op1: B, op2: C);
6552}
6553
6554impl<'a> VpmaxsbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6555 fn vpmaxsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6556 self.emit(VPMAXSB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6557 }
6558}
6559
6560impl<'a> VpmaxsbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6561 fn vpmaxsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6562 self.emit(VPMAXSB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6563 }
6564}
6565
6566impl<'a> VpmaxsbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6567 fn vpmaxsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6568 self.emit(VPMAXSB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6569 }
6570}
6571
6572impl<'a> VpmaxsbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6573 fn vpmaxsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6574 self.emit(VPMAXSB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6575 }
6576}
6577
6578impl<'a> VpmaxsbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6579 fn vpmaxsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6580 self.emit(VPMAXSB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6581 }
6582}
6583
6584impl<'a> VpmaxsbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6585 fn vpmaxsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6586 self.emit(VPMAXSB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6587 }
6588}
6589
6590/// `VPMAXSB_MASKZ` (VPMAXSB).
6591/// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6592///
6593///
6594/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
6595///
6596/// Supported operand variants:
6597///
6598/// ```text
6599/// +---+---------------+
6600/// | # | Operands |
6601/// +---+---------------+
6602/// | 1 | Xmm, Xmm, Mem |
6603/// | 2 | Xmm, Xmm, Xmm |
6604/// | 3 | Ymm, Ymm, Mem |
6605/// | 4 | Ymm, Ymm, Ymm |
6606/// | 5 | Zmm, Zmm, Mem |
6607/// | 6 | Zmm, Zmm, Zmm |
6608/// +---+---------------+
6609/// ```
6610pub trait VpmaxsbMaskzEmitter<A, B, C> {
6611 fn vpmaxsb_maskz(&mut self, op0: A, op1: B, op2: C);
6612}
6613
6614impl<'a> VpmaxsbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6615 fn vpmaxsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6616 self.emit(VPMAXSB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6617 }
6618}
6619
6620impl<'a> VpmaxsbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6621 fn vpmaxsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6622 self.emit(VPMAXSB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6623 }
6624}
6625
6626impl<'a> VpmaxsbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6627 fn vpmaxsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6628 self.emit(VPMAXSB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6629 }
6630}
6631
6632impl<'a> VpmaxsbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6633 fn vpmaxsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6634 self.emit(VPMAXSB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6635 }
6636}
6637
6638impl<'a> VpmaxsbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6639 fn vpmaxsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6640 self.emit(VPMAXSB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6641 }
6642}
6643
6644impl<'a> VpmaxsbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6645 fn vpmaxsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6646 self.emit(VPMAXSB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6647 }
6648}
6649
6650/// `VPMAXSW` (VPMAXSW).
6651/// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6652///
6653///
6654/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
6655///
6656/// Supported operand variants:
6657///
6658/// ```text
6659/// +---+---------------+
6660/// | # | Operands |
6661/// +---+---------------+
6662/// | 1 | Xmm, Xmm, Mem |
6663/// | 2 | Xmm, Xmm, Xmm |
6664/// | 3 | Ymm, Ymm, Mem |
6665/// | 4 | Ymm, Ymm, Ymm |
6666/// | 5 | Zmm, Zmm, Mem |
6667/// | 6 | Zmm, Zmm, Zmm |
6668/// +---+---------------+
6669/// ```
6670pub trait VpmaxswEmitter<A, B, C> {
6671 fn vpmaxsw(&mut self, op0: A, op1: B, op2: C);
6672}
6673
6674impl<'a> VpmaxswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6675 fn vpmaxsw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6676 self.emit(VPMAXSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6677 }
6678}
6679
6680impl<'a> VpmaxswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6681 fn vpmaxsw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6682 self.emit(VPMAXSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6683 }
6684}
6685
6686impl<'a> VpmaxswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6687 fn vpmaxsw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6688 self.emit(VPMAXSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6689 }
6690}
6691
6692impl<'a> VpmaxswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6693 fn vpmaxsw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6694 self.emit(VPMAXSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6695 }
6696}
6697
6698impl<'a> VpmaxswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6699 fn vpmaxsw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6700 self.emit(VPMAXSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6701 }
6702}
6703
6704impl<'a> VpmaxswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6705 fn vpmaxsw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6706 self.emit(VPMAXSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6707 }
6708}
6709
6710/// `VPMAXSW_MASK` (VPMAXSW).
6711/// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6712///
6713///
6714/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
6715///
6716/// Supported operand variants:
6717///
6718/// ```text
6719/// +---+---------------+
6720/// | # | Operands |
6721/// +---+---------------+
6722/// | 1 | Xmm, Xmm, Mem |
6723/// | 2 | Xmm, Xmm, Xmm |
6724/// | 3 | Ymm, Ymm, Mem |
6725/// | 4 | Ymm, Ymm, Ymm |
6726/// | 5 | Zmm, Zmm, Mem |
6727/// | 6 | Zmm, Zmm, Zmm |
6728/// +---+---------------+
6729/// ```
6730pub trait VpmaxswMaskEmitter<A, B, C> {
6731 fn vpmaxsw_mask(&mut self, op0: A, op1: B, op2: C);
6732}
6733
6734impl<'a> VpmaxswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6735 fn vpmaxsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6736 self.emit(VPMAXSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6737 }
6738}
6739
6740impl<'a> VpmaxswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6741 fn vpmaxsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6742 self.emit(VPMAXSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6743 }
6744}
6745
6746impl<'a> VpmaxswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6747 fn vpmaxsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6748 self.emit(VPMAXSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6749 }
6750}
6751
6752impl<'a> VpmaxswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6753 fn vpmaxsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6754 self.emit(VPMAXSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6755 }
6756}
6757
6758impl<'a> VpmaxswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6759 fn vpmaxsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6760 self.emit(VPMAXSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6761 }
6762}
6763
6764impl<'a> VpmaxswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6765 fn vpmaxsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6766 self.emit(VPMAXSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6767 }
6768}
6769
6770/// `VPMAXSW_MASKZ` (VPMAXSW).
6771/// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6772///
6773///
6774/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
6775///
6776/// Supported operand variants:
6777///
6778/// ```text
6779/// +---+---------------+
6780/// | # | Operands |
6781/// +---+---------------+
6782/// | 1 | Xmm, Xmm, Mem |
6783/// | 2 | Xmm, Xmm, Xmm |
6784/// | 3 | Ymm, Ymm, Mem |
6785/// | 4 | Ymm, Ymm, Ymm |
6786/// | 5 | Zmm, Zmm, Mem |
6787/// | 6 | Zmm, Zmm, Zmm |
6788/// +---+---------------+
6789/// ```
6790pub trait VpmaxswMaskzEmitter<A, B, C> {
6791 fn vpmaxsw_maskz(&mut self, op0: A, op1: B, op2: C);
6792}
6793
6794impl<'a> VpmaxswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6795 fn vpmaxsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6796 self.emit(VPMAXSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6797 }
6798}
6799
6800impl<'a> VpmaxswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6801 fn vpmaxsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6802 self.emit(VPMAXSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6803 }
6804}
6805
6806impl<'a> VpmaxswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6807 fn vpmaxsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6808 self.emit(VPMAXSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6809 }
6810}
6811
6812impl<'a> VpmaxswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6813 fn vpmaxsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6814 self.emit(VPMAXSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6815 }
6816}
6817
6818impl<'a> VpmaxswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6819 fn vpmaxsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6820 self.emit(VPMAXSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6821 }
6822}
6823
6824impl<'a> VpmaxswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6825 fn vpmaxsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6826 self.emit(VPMAXSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6827 }
6828}
6829
6830/// `VPMAXUB` (VPMAXUB).
6831/// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6832///
6833///
6834/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
6835///
6836/// Supported operand variants:
6837///
6838/// ```text
6839/// +---+---------------+
6840/// | # | Operands |
6841/// +---+---------------+
6842/// | 1 | Xmm, Xmm, Mem |
6843/// | 2 | Xmm, Xmm, Xmm |
6844/// | 3 | Ymm, Ymm, Mem |
6845/// | 4 | Ymm, Ymm, Ymm |
6846/// | 5 | Zmm, Zmm, Mem |
6847/// | 6 | Zmm, Zmm, Zmm |
6848/// +---+---------------+
6849/// ```
6850pub trait VpmaxubEmitter<A, B, C> {
6851 fn vpmaxub(&mut self, op0: A, op1: B, op2: C);
6852}
6853
6854impl<'a> VpmaxubEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6855 fn vpmaxub(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6856 self.emit(VPMAXUB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6857 }
6858}
6859
6860impl<'a> VpmaxubEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6861 fn vpmaxub(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6862 self.emit(VPMAXUB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6863 }
6864}
6865
6866impl<'a> VpmaxubEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6867 fn vpmaxub(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6868 self.emit(VPMAXUB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6869 }
6870}
6871
6872impl<'a> VpmaxubEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6873 fn vpmaxub(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6874 self.emit(VPMAXUB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6875 }
6876}
6877
6878impl<'a> VpmaxubEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6879 fn vpmaxub(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6880 self.emit(VPMAXUB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6881 }
6882}
6883
6884impl<'a> VpmaxubEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6885 fn vpmaxub(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6886 self.emit(VPMAXUB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6887 }
6888}
6889
6890/// `VPMAXUB_MASK` (VPMAXUB).
6891/// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6892///
6893///
6894/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
6895///
6896/// Supported operand variants:
6897///
6898/// ```text
6899/// +---+---------------+
6900/// | # | Operands |
6901/// +---+---------------+
6902/// | 1 | Xmm, Xmm, Mem |
6903/// | 2 | Xmm, Xmm, Xmm |
6904/// | 3 | Ymm, Ymm, Mem |
6905/// | 4 | Ymm, Ymm, Ymm |
6906/// | 5 | Zmm, Zmm, Mem |
6907/// | 6 | Zmm, Zmm, Zmm |
6908/// +---+---------------+
6909/// ```
6910pub trait VpmaxubMaskEmitter<A, B, C> {
6911 fn vpmaxub_mask(&mut self, op0: A, op1: B, op2: C);
6912}
6913
6914impl<'a> VpmaxubMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6915 fn vpmaxub_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6916 self.emit(VPMAXUB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6917 }
6918}
6919
6920impl<'a> VpmaxubMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6921 fn vpmaxub_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6922 self.emit(VPMAXUB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6923 }
6924}
6925
6926impl<'a> VpmaxubMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6927 fn vpmaxub_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6928 self.emit(VPMAXUB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6929 }
6930}
6931
6932impl<'a> VpmaxubMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6933 fn vpmaxub_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6934 self.emit(VPMAXUB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6935 }
6936}
6937
6938impl<'a> VpmaxubMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6939 fn vpmaxub_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
6940 self.emit(VPMAXUB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6941 }
6942}
6943
6944impl<'a> VpmaxubMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
6945 fn vpmaxub_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
6946 self.emit(VPMAXUB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6947 }
6948}
6949
6950/// `VPMAXUB_MASKZ` (VPMAXUB).
6951/// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
6952///
6953///
6954/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
6955///
6956/// Supported operand variants:
6957///
6958/// ```text
6959/// +---+---------------+
6960/// | # | Operands |
6961/// +---+---------------+
6962/// | 1 | Xmm, Xmm, Mem |
6963/// | 2 | Xmm, Xmm, Xmm |
6964/// | 3 | Ymm, Ymm, Mem |
6965/// | 4 | Ymm, Ymm, Ymm |
6966/// | 5 | Zmm, Zmm, Mem |
6967/// | 6 | Zmm, Zmm, Zmm |
6968/// +---+---------------+
6969/// ```
6970pub trait VpmaxubMaskzEmitter<A, B, C> {
6971 fn vpmaxub_maskz(&mut self, op0: A, op1: B, op2: C);
6972}
6973
6974impl<'a> VpmaxubMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
6975 fn vpmaxub_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
6976 self.emit(VPMAXUB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6977 }
6978}
6979
6980impl<'a> VpmaxubMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
6981 fn vpmaxub_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
6982 self.emit(VPMAXUB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6983 }
6984}
6985
6986impl<'a> VpmaxubMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
6987 fn vpmaxub_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
6988 self.emit(VPMAXUB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6989 }
6990}
6991
6992impl<'a> VpmaxubMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
6993 fn vpmaxub_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
6994 self.emit(VPMAXUB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
6995 }
6996}
6997
6998impl<'a> VpmaxubMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
6999 fn vpmaxub_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7000 self.emit(VPMAXUB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7001 }
7002}
7003
7004impl<'a> VpmaxubMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7005 fn vpmaxub_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7006 self.emit(VPMAXUB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7007 }
7008}
7009
7010/// `VPMAXUW` (VPMAXUW).
7011/// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
7012///
7013///
7014/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
7015///
7016/// Supported operand variants:
7017///
7018/// ```text
7019/// +---+---------------+
7020/// | # | Operands |
7021/// +---+---------------+
7022/// | 1 | Xmm, Xmm, Mem |
7023/// | 2 | Xmm, Xmm, Xmm |
7024/// | 3 | Ymm, Ymm, Mem |
7025/// | 4 | Ymm, Ymm, Ymm |
7026/// | 5 | Zmm, Zmm, Mem |
7027/// | 6 | Zmm, Zmm, Zmm |
7028/// +---+---------------+
7029/// ```
7030pub trait VpmaxuwEmitter<A, B, C> {
7031 fn vpmaxuw(&mut self, op0: A, op1: B, op2: C);
7032}
7033
7034impl<'a> VpmaxuwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7035 fn vpmaxuw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7036 self.emit(VPMAXUW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7037 }
7038}
7039
7040impl<'a> VpmaxuwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7041 fn vpmaxuw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7042 self.emit(VPMAXUW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7043 }
7044}
7045
7046impl<'a> VpmaxuwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7047 fn vpmaxuw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7048 self.emit(VPMAXUW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7049 }
7050}
7051
7052impl<'a> VpmaxuwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7053 fn vpmaxuw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7054 self.emit(VPMAXUW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7055 }
7056}
7057
7058impl<'a> VpmaxuwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7059 fn vpmaxuw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7060 self.emit(VPMAXUW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7061 }
7062}
7063
7064impl<'a> VpmaxuwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7065 fn vpmaxuw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7066 self.emit(VPMAXUW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7067 }
7068}
7069
7070/// `VPMAXUW_MASK` (VPMAXUW).
7071/// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
7072///
7073///
7074/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
7075///
7076/// Supported operand variants:
7077///
7078/// ```text
7079/// +---+---------------+
7080/// | # | Operands |
7081/// +---+---------------+
7082/// | 1 | Xmm, Xmm, Mem |
7083/// | 2 | Xmm, Xmm, Xmm |
7084/// | 3 | Ymm, Ymm, Mem |
7085/// | 4 | Ymm, Ymm, Ymm |
7086/// | 5 | Zmm, Zmm, Mem |
7087/// | 6 | Zmm, Zmm, Zmm |
7088/// +---+---------------+
7089/// ```
7090pub trait VpmaxuwMaskEmitter<A, B, C> {
7091 fn vpmaxuw_mask(&mut self, op0: A, op1: B, op2: C);
7092}
7093
7094impl<'a> VpmaxuwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7095 fn vpmaxuw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7096 self.emit(VPMAXUW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7097 }
7098}
7099
7100impl<'a> VpmaxuwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7101 fn vpmaxuw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7102 self.emit(VPMAXUW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7103 }
7104}
7105
7106impl<'a> VpmaxuwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7107 fn vpmaxuw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7108 self.emit(VPMAXUW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7109 }
7110}
7111
7112impl<'a> VpmaxuwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7113 fn vpmaxuw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7114 self.emit(VPMAXUW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7115 }
7116}
7117
7118impl<'a> VpmaxuwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7119 fn vpmaxuw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7120 self.emit(VPMAXUW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7121 }
7122}
7123
7124impl<'a> VpmaxuwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7125 fn vpmaxuw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7126 self.emit(VPMAXUW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7127 }
7128}
7129
7130/// `VPMAXUW_MASKZ` (VPMAXUW).
7131/// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
7132///
7133///
7134/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
7135///
7136/// Supported operand variants:
7137///
7138/// ```text
7139/// +---+---------------+
7140/// | # | Operands |
7141/// +---+---------------+
7142/// | 1 | Xmm, Xmm, Mem |
7143/// | 2 | Xmm, Xmm, Xmm |
7144/// | 3 | Ymm, Ymm, Mem |
7145/// | 4 | Ymm, Ymm, Ymm |
7146/// | 5 | Zmm, Zmm, Mem |
7147/// | 6 | Zmm, Zmm, Zmm |
7148/// +---+---------------+
7149/// ```
7150pub trait VpmaxuwMaskzEmitter<A, B, C> {
7151 fn vpmaxuw_maskz(&mut self, op0: A, op1: B, op2: C);
7152}
7153
7154impl<'a> VpmaxuwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7155 fn vpmaxuw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7156 self.emit(VPMAXUW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7157 }
7158}
7159
7160impl<'a> VpmaxuwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7161 fn vpmaxuw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7162 self.emit(VPMAXUW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7163 }
7164}
7165
7166impl<'a> VpmaxuwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7167 fn vpmaxuw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7168 self.emit(VPMAXUW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7169 }
7170}
7171
7172impl<'a> VpmaxuwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7173 fn vpmaxuw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7174 self.emit(VPMAXUW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7175 }
7176}
7177
7178impl<'a> VpmaxuwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7179 fn vpmaxuw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7180 self.emit(VPMAXUW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7181 }
7182}
7183
7184impl<'a> VpmaxuwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7185 fn vpmaxuw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7186 self.emit(VPMAXUW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7187 }
7188}
7189
7190/// `VPMINSB` (VPMINSB).
7191/// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7192///
7193///
7194/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
7195///
7196/// Supported operand variants:
7197///
7198/// ```text
7199/// +---+---------------+
7200/// | # | Operands |
7201/// +---+---------------+
7202/// | 1 | Xmm, Xmm, Mem |
7203/// | 2 | Xmm, Xmm, Xmm |
7204/// | 3 | Ymm, Ymm, Mem |
7205/// | 4 | Ymm, Ymm, Ymm |
7206/// | 5 | Zmm, Zmm, Mem |
7207/// | 6 | Zmm, Zmm, Zmm |
7208/// +---+---------------+
7209/// ```
7210pub trait VpminsbEmitter<A, B, C> {
7211 fn vpminsb(&mut self, op0: A, op1: B, op2: C);
7212}
7213
7214impl<'a> VpminsbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7215 fn vpminsb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7216 self.emit(VPMINSB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7217 }
7218}
7219
7220impl<'a> VpminsbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7221 fn vpminsb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7222 self.emit(VPMINSB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7223 }
7224}
7225
7226impl<'a> VpminsbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7227 fn vpminsb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7228 self.emit(VPMINSB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7229 }
7230}
7231
7232impl<'a> VpminsbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7233 fn vpminsb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7234 self.emit(VPMINSB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7235 }
7236}
7237
7238impl<'a> VpminsbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7239 fn vpminsb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7240 self.emit(VPMINSB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7241 }
7242}
7243
7244impl<'a> VpminsbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7245 fn vpminsb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7246 self.emit(VPMINSB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7247 }
7248}
7249
7250/// `VPMINSB_MASK` (VPMINSB).
7251/// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7252///
7253///
7254/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
7255///
7256/// Supported operand variants:
7257///
7258/// ```text
7259/// +---+---------------+
7260/// | # | Operands |
7261/// +---+---------------+
7262/// | 1 | Xmm, Xmm, Mem |
7263/// | 2 | Xmm, Xmm, Xmm |
7264/// | 3 | Ymm, Ymm, Mem |
7265/// | 4 | Ymm, Ymm, Ymm |
7266/// | 5 | Zmm, Zmm, Mem |
7267/// | 6 | Zmm, Zmm, Zmm |
7268/// +---+---------------+
7269/// ```
7270pub trait VpminsbMaskEmitter<A, B, C> {
7271 fn vpminsb_mask(&mut self, op0: A, op1: B, op2: C);
7272}
7273
7274impl<'a> VpminsbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7275 fn vpminsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7276 self.emit(VPMINSB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7277 }
7278}
7279
7280impl<'a> VpminsbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7281 fn vpminsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7282 self.emit(VPMINSB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7283 }
7284}
7285
7286impl<'a> VpminsbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7287 fn vpminsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7288 self.emit(VPMINSB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7289 }
7290}
7291
7292impl<'a> VpminsbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7293 fn vpminsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7294 self.emit(VPMINSB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7295 }
7296}
7297
7298impl<'a> VpminsbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7299 fn vpminsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7300 self.emit(VPMINSB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7301 }
7302}
7303
7304impl<'a> VpminsbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7305 fn vpminsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7306 self.emit(VPMINSB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7307 }
7308}
7309
7310/// `VPMINSB_MASKZ` (VPMINSB).
7311/// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7312///
7313///
7314/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
7315///
7316/// Supported operand variants:
7317///
7318/// ```text
7319/// +---+---------------+
7320/// | # | Operands |
7321/// +---+---------------+
7322/// | 1 | Xmm, Xmm, Mem |
7323/// | 2 | Xmm, Xmm, Xmm |
7324/// | 3 | Ymm, Ymm, Mem |
7325/// | 4 | Ymm, Ymm, Ymm |
7326/// | 5 | Zmm, Zmm, Mem |
7327/// | 6 | Zmm, Zmm, Zmm |
7328/// +---+---------------+
7329/// ```
7330pub trait VpminsbMaskzEmitter<A, B, C> {
7331 fn vpminsb_maskz(&mut self, op0: A, op1: B, op2: C);
7332}
7333
7334impl<'a> VpminsbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7335 fn vpminsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7336 self.emit(VPMINSB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7337 }
7338}
7339
7340impl<'a> VpminsbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7341 fn vpminsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7342 self.emit(VPMINSB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7343 }
7344}
7345
7346impl<'a> VpminsbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7347 fn vpminsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7348 self.emit(VPMINSB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7349 }
7350}
7351
7352impl<'a> VpminsbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7353 fn vpminsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7354 self.emit(VPMINSB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7355 }
7356}
7357
7358impl<'a> VpminsbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7359 fn vpminsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7360 self.emit(VPMINSB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7361 }
7362}
7363
7364impl<'a> VpminsbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7365 fn vpminsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7366 self.emit(VPMINSB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7367 }
7368}
7369
7370/// `VPMINSW` (VPMINSW).
7371/// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7372///
7373///
7374/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
7375///
7376/// Supported operand variants:
7377///
7378/// ```text
7379/// +---+---------------+
7380/// | # | Operands |
7381/// +---+---------------+
7382/// | 1 | Xmm, Xmm, Mem |
7383/// | 2 | Xmm, Xmm, Xmm |
7384/// | 3 | Ymm, Ymm, Mem |
7385/// | 4 | Ymm, Ymm, Ymm |
7386/// | 5 | Zmm, Zmm, Mem |
7387/// | 6 | Zmm, Zmm, Zmm |
7388/// +---+---------------+
7389/// ```
7390pub trait VpminswEmitter<A, B, C> {
7391 fn vpminsw(&mut self, op0: A, op1: B, op2: C);
7392}
7393
7394impl<'a> VpminswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7395 fn vpminsw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7396 self.emit(VPMINSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7397 }
7398}
7399
7400impl<'a> VpminswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7401 fn vpminsw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7402 self.emit(VPMINSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7403 }
7404}
7405
7406impl<'a> VpminswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7407 fn vpminsw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7408 self.emit(VPMINSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7409 }
7410}
7411
7412impl<'a> VpminswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7413 fn vpminsw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7414 self.emit(VPMINSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7415 }
7416}
7417
7418impl<'a> VpminswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7419 fn vpminsw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7420 self.emit(VPMINSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7421 }
7422}
7423
7424impl<'a> VpminswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7425 fn vpminsw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7426 self.emit(VPMINSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7427 }
7428}
7429
7430/// `VPMINSW_MASK` (VPMINSW).
7431/// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7432///
7433///
7434/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
7435///
7436/// Supported operand variants:
7437///
7438/// ```text
7439/// +---+---------------+
7440/// | # | Operands |
7441/// +---+---------------+
7442/// | 1 | Xmm, Xmm, Mem |
7443/// | 2 | Xmm, Xmm, Xmm |
7444/// | 3 | Ymm, Ymm, Mem |
7445/// | 4 | Ymm, Ymm, Ymm |
7446/// | 5 | Zmm, Zmm, Mem |
7447/// | 6 | Zmm, Zmm, Zmm |
7448/// +---+---------------+
7449/// ```
7450pub trait VpminswMaskEmitter<A, B, C> {
7451 fn vpminsw_mask(&mut self, op0: A, op1: B, op2: C);
7452}
7453
7454impl<'a> VpminswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7455 fn vpminsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7456 self.emit(VPMINSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7457 }
7458}
7459
7460impl<'a> VpminswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7461 fn vpminsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7462 self.emit(VPMINSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7463 }
7464}
7465
7466impl<'a> VpminswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7467 fn vpminsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7468 self.emit(VPMINSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7469 }
7470}
7471
7472impl<'a> VpminswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7473 fn vpminsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7474 self.emit(VPMINSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7475 }
7476}
7477
7478impl<'a> VpminswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7479 fn vpminsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7480 self.emit(VPMINSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7481 }
7482}
7483
7484impl<'a> VpminswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7485 fn vpminsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7486 self.emit(VPMINSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7487 }
7488}
7489
7490/// `VPMINSW_MASKZ` (VPMINSW).
7491/// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7492///
7493///
7494/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
7495///
7496/// Supported operand variants:
7497///
7498/// ```text
7499/// +---+---------------+
7500/// | # | Operands |
7501/// +---+---------------+
7502/// | 1 | Xmm, Xmm, Mem |
7503/// | 2 | Xmm, Xmm, Xmm |
7504/// | 3 | Ymm, Ymm, Mem |
7505/// | 4 | Ymm, Ymm, Ymm |
7506/// | 5 | Zmm, Zmm, Mem |
7507/// | 6 | Zmm, Zmm, Zmm |
7508/// +---+---------------+
7509/// ```
7510pub trait VpminswMaskzEmitter<A, B, C> {
7511 fn vpminsw_maskz(&mut self, op0: A, op1: B, op2: C);
7512}
7513
7514impl<'a> VpminswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7515 fn vpminsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7516 self.emit(VPMINSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7517 }
7518}
7519
7520impl<'a> VpminswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7521 fn vpminsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7522 self.emit(VPMINSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7523 }
7524}
7525
7526impl<'a> VpminswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7527 fn vpminsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7528 self.emit(VPMINSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7529 }
7530}
7531
7532impl<'a> VpminswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7533 fn vpminsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7534 self.emit(VPMINSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7535 }
7536}
7537
7538impl<'a> VpminswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7539 fn vpminsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7540 self.emit(VPMINSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7541 }
7542}
7543
7544impl<'a> VpminswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7545 fn vpminsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7546 self.emit(VPMINSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7547 }
7548}
7549
7550/// `VPMINUB` (VPMINUB).
7551/// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7552///
7553///
7554/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
7555///
7556/// Supported operand variants:
7557///
7558/// ```text
7559/// +---+---------------+
7560/// | # | Operands |
7561/// +---+---------------+
7562/// | 1 | Xmm, Xmm, Mem |
7563/// | 2 | Xmm, Xmm, Xmm |
7564/// | 3 | Ymm, Ymm, Mem |
7565/// | 4 | Ymm, Ymm, Ymm |
7566/// | 5 | Zmm, Zmm, Mem |
7567/// | 6 | Zmm, Zmm, Zmm |
7568/// +---+---------------+
7569/// ```
7570pub trait VpminubEmitter<A, B, C> {
7571 fn vpminub(&mut self, op0: A, op1: B, op2: C);
7572}
7573
7574impl<'a> VpminubEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7575 fn vpminub(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7576 self.emit(VPMINUB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7577 }
7578}
7579
7580impl<'a> VpminubEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7581 fn vpminub(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7582 self.emit(VPMINUB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7583 }
7584}
7585
7586impl<'a> VpminubEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7587 fn vpminub(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7588 self.emit(VPMINUB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7589 }
7590}
7591
7592impl<'a> VpminubEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7593 fn vpminub(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7594 self.emit(VPMINUB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7595 }
7596}
7597
7598impl<'a> VpminubEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7599 fn vpminub(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7600 self.emit(VPMINUB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7601 }
7602}
7603
7604impl<'a> VpminubEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7605 fn vpminub(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7606 self.emit(VPMINUB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7607 }
7608}
7609
7610/// `VPMINUB_MASK` (VPMINUB).
7611/// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7612///
7613///
7614/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
7615///
7616/// Supported operand variants:
7617///
7618/// ```text
7619/// +---+---------------+
7620/// | # | Operands |
7621/// +---+---------------+
7622/// | 1 | Xmm, Xmm, Mem |
7623/// | 2 | Xmm, Xmm, Xmm |
7624/// | 3 | Ymm, Ymm, Mem |
7625/// | 4 | Ymm, Ymm, Ymm |
7626/// | 5 | Zmm, Zmm, Mem |
7627/// | 6 | Zmm, Zmm, Zmm |
7628/// +---+---------------+
7629/// ```
7630pub trait VpminubMaskEmitter<A, B, C> {
7631 fn vpminub_mask(&mut self, op0: A, op1: B, op2: C);
7632}
7633
7634impl<'a> VpminubMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7635 fn vpminub_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7636 self.emit(VPMINUB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7637 }
7638}
7639
7640impl<'a> VpminubMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7641 fn vpminub_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7642 self.emit(VPMINUB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7643 }
7644}
7645
7646impl<'a> VpminubMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7647 fn vpminub_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7648 self.emit(VPMINUB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7649 }
7650}
7651
7652impl<'a> VpminubMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7653 fn vpminub_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7654 self.emit(VPMINUB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7655 }
7656}
7657
7658impl<'a> VpminubMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7659 fn vpminub_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7660 self.emit(VPMINUB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7661 }
7662}
7663
7664impl<'a> VpminubMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7665 fn vpminub_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7666 self.emit(VPMINUB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7667 }
7668}
7669
7670/// `VPMINUB_MASKZ` (VPMINUB).
7671/// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7672///
7673///
7674/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
7675///
7676/// Supported operand variants:
7677///
7678/// ```text
7679/// +---+---------------+
7680/// | # | Operands |
7681/// +---+---------------+
7682/// | 1 | Xmm, Xmm, Mem |
7683/// | 2 | Xmm, Xmm, Xmm |
7684/// | 3 | Ymm, Ymm, Mem |
7685/// | 4 | Ymm, Ymm, Ymm |
7686/// | 5 | Zmm, Zmm, Mem |
7687/// | 6 | Zmm, Zmm, Zmm |
7688/// +---+---------------+
7689/// ```
7690pub trait VpminubMaskzEmitter<A, B, C> {
7691 fn vpminub_maskz(&mut self, op0: A, op1: B, op2: C);
7692}
7693
7694impl<'a> VpminubMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7695 fn vpminub_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7696 self.emit(VPMINUB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7697 }
7698}
7699
7700impl<'a> VpminubMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7701 fn vpminub_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7702 self.emit(VPMINUB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7703 }
7704}
7705
7706impl<'a> VpminubMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7707 fn vpminub_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7708 self.emit(VPMINUB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7709 }
7710}
7711
7712impl<'a> VpminubMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7713 fn vpminub_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7714 self.emit(VPMINUB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7715 }
7716}
7717
7718impl<'a> VpminubMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7719 fn vpminub_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7720 self.emit(VPMINUB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7721 }
7722}
7723
7724impl<'a> VpminubMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7725 fn vpminub_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7726 self.emit(VPMINUB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7727 }
7728}
7729
7730/// `VPMINUW` (VPMINUW).
7731/// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7732///
7733///
7734/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
7735///
7736/// Supported operand variants:
7737///
7738/// ```text
7739/// +---+---------------+
7740/// | # | Operands |
7741/// +---+---------------+
7742/// | 1 | Xmm, Xmm, Mem |
7743/// | 2 | Xmm, Xmm, Xmm |
7744/// | 3 | Ymm, Ymm, Mem |
7745/// | 4 | Ymm, Ymm, Ymm |
7746/// | 5 | Zmm, Zmm, Mem |
7747/// | 6 | Zmm, Zmm, Zmm |
7748/// +---+---------------+
7749/// ```
7750pub trait VpminuwEmitter<A, B, C> {
7751 fn vpminuw(&mut self, op0: A, op1: B, op2: C);
7752}
7753
7754impl<'a> VpminuwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7755 fn vpminuw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7756 self.emit(VPMINUW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7757 }
7758}
7759
7760impl<'a> VpminuwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7761 fn vpminuw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7762 self.emit(VPMINUW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7763 }
7764}
7765
7766impl<'a> VpminuwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7767 fn vpminuw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7768 self.emit(VPMINUW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7769 }
7770}
7771
7772impl<'a> VpminuwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7773 fn vpminuw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7774 self.emit(VPMINUW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7775 }
7776}
7777
7778impl<'a> VpminuwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7779 fn vpminuw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7780 self.emit(VPMINUW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7781 }
7782}
7783
7784impl<'a> VpminuwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7785 fn vpminuw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7786 self.emit(VPMINUW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7787 }
7788}
7789
7790/// `VPMINUW_MASK` (VPMINUW).
7791/// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7792///
7793///
7794/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
7795///
7796/// Supported operand variants:
7797///
7798/// ```text
7799/// +---+---------------+
7800/// | # | Operands |
7801/// +---+---------------+
7802/// | 1 | Xmm, Xmm, Mem |
7803/// | 2 | Xmm, Xmm, Xmm |
7804/// | 3 | Ymm, Ymm, Mem |
7805/// | 4 | Ymm, Ymm, Ymm |
7806/// | 5 | Zmm, Zmm, Mem |
7807/// | 6 | Zmm, Zmm, Zmm |
7808/// +---+---------------+
7809/// ```
7810pub trait VpminuwMaskEmitter<A, B, C> {
7811 fn vpminuw_mask(&mut self, op0: A, op1: B, op2: C);
7812}
7813
7814impl<'a> VpminuwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7815 fn vpminuw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7816 self.emit(VPMINUW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7817 }
7818}
7819
7820impl<'a> VpminuwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7821 fn vpminuw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7822 self.emit(VPMINUW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7823 }
7824}
7825
7826impl<'a> VpminuwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7827 fn vpminuw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7828 self.emit(VPMINUW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7829 }
7830}
7831
7832impl<'a> VpminuwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7833 fn vpminuw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7834 self.emit(VPMINUW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7835 }
7836}
7837
7838impl<'a> VpminuwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7839 fn vpminuw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7840 self.emit(VPMINUW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7841 }
7842}
7843
7844impl<'a> VpminuwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7845 fn vpminuw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7846 self.emit(VPMINUW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7847 }
7848}
7849
7850/// `VPMINUW_MASKZ` (VPMINUW).
7851/// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
7852///
7853///
7854/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
7855///
7856/// Supported operand variants:
7857///
7858/// ```text
7859/// +---+---------------+
7860/// | # | Operands |
7861/// +---+---------------+
7862/// | 1 | Xmm, Xmm, Mem |
7863/// | 2 | Xmm, Xmm, Xmm |
7864/// | 3 | Ymm, Ymm, Mem |
7865/// | 4 | Ymm, Ymm, Ymm |
7866/// | 5 | Zmm, Zmm, Mem |
7867/// | 6 | Zmm, Zmm, Zmm |
7868/// +---+---------------+
7869/// ```
7870pub trait VpminuwMaskzEmitter<A, B, C> {
7871 fn vpminuw_maskz(&mut self, op0: A, op1: B, op2: C);
7872}
7873
7874impl<'a> VpminuwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
7875 fn vpminuw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
7876 self.emit(VPMINUW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7877 }
7878}
7879
7880impl<'a> VpminuwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
7881 fn vpminuw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
7882 self.emit(VPMINUW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7883 }
7884}
7885
7886impl<'a> VpminuwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
7887 fn vpminuw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
7888 self.emit(VPMINUW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7889 }
7890}
7891
7892impl<'a> VpminuwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
7893 fn vpminuw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
7894 self.emit(VPMINUW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7895 }
7896}
7897
7898impl<'a> VpminuwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
7899 fn vpminuw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
7900 self.emit(VPMINUW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7901 }
7902}
7903
7904impl<'a> VpminuwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
7905 fn vpminuw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
7906 self.emit(VPMINUW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
7907 }
7908}
7909
7910/// `VPMOVB2M` (VPMOVB2M).
7911/// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
7912///
7913///
7914/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
7915///
7916/// Supported operand variants:
7917///
7918/// ```text
7919/// +---+-----------+
7920/// | # | Operands |
7921/// +---+-----------+
7922/// | 1 | KReg, Xmm |
7923/// | 2 | KReg, Ymm |
7924/// | 3 | KReg, Zmm |
7925/// +---+-----------+
7926/// ```
7927pub trait Vpmovb2mEmitter<A, B> {
7928 fn vpmovb2m(&mut self, op0: A, op1: B);
7929}
7930
7931impl<'a> Vpmovb2mEmitter<KReg, Xmm> for Assembler<'a> {
7932 fn vpmovb2m(&mut self, op0: KReg, op1: Xmm) {
7933 self.emit(VPMOVB2M128KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7934 }
7935}
7936
7937impl<'a> Vpmovb2mEmitter<KReg, Ymm> for Assembler<'a> {
7938 fn vpmovb2m(&mut self, op0: KReg, op1: Ymm) {
7939 self.emit(VPMOVB2M256KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7940 }
7941}
7942
7943impl<'a> Vpmovb2mEmitter<KReg, Zmm> for Assembler<'a> {
7944 fn vpmovb2m(&mut self, op0: KReg, op1: Zmm) {
7945 self.emit(VPMOVB2M512KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7946 }
7947}
7948
7949/// `VPMOVM2B` (VPMOVM2B).
7950/// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
7951///
7952///
7953/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
7954///
7955/// Supported operand variants:
7956///
7957/// ```text
7958/// +---+-----------+
7959/// | # | Operands |
7960/// +---+-----------+
7961/// | 1 | Xmm, KReg |
7962/// | 2 | Ymm, KReg |
7963/// | 3 | Zmm, KReg |
7964/// +---+-----------+
7965/// ```
7966pub trait Vpmovm2bEmitter<A, B> {
7967 fn vpmovm2b(&mut self, op0: A, op1: B);
7968}
7969
7970impl<'a> Vpmovm2bEmitter<Xmm, KReg> for Assembler<'a> {
7971 fn vpmovm2b(&mut self, op0: Xmm, op1: KReg) {
7972 self.emit(VPMOVM2B128RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7973 }
7974}
7975
7976impl<'a> Vpmovm2bEmitter<Ymm, KReg> for Assembler<'a> {
7977 fn vpmovm2b(&mut self, op0: Ymm, op1: KReg) {
7978 self.emit(VPMOVM2B256RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7979 }
7980}
7981
7982impl<'a> Vpmovm2bEmitter<Zmm, KReg> for Assembler<'a> {
7983 fn vpmovm2b(&mut self, op0: Zmm, op1: KReg) {
7984 self.emit(VPMOVM2B512RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
7985 }
7986}
7987
7988/// `VPMOVM2W` (VPMOVM2W).
7989/// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
7990///
7991///
7992/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
7993///
7994/// Supported operand variants:
7995///
7996/// ```text
7997/// +---+-----------+
7998/// | # | Operands |
7999/// +---+-----------+
8000/// | 1 | Xmm, KReg |
8001/// | 2 | Ymm, KReg |
8002/// | 3 | Zmm, KReg |
8003/// +---+-----------+
8004/// ```
8005pub trait Vpmovm2wEmitter<A, B> {
8006 fn vpmovm2w(&mut self, op0: A, op1: B);
8007}
8008
8009impl<'a> Vpmovm2wEmitter<Xmm, KReg> for Assembler<'a> {
8010 fn vpmovm2w(&mut self, op0: Xmm, op1: KReg) {
8011 self.emit(VPMOVM2W128RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8012 }
8013}
8014
8015impl<'a> Vpmovm2wEmitter<Ymm, KReg> for Assembler<'a> {
8016 fn vpmovm2w(&mut self, op0: Ymm, op1: KReg) {
8017 self.emit(VPMOVM2W256RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8018 }
8019}
8020
8021impl<'a> Vpmovm2wEmitter<Zmm, KReg> for Assembler<'a> {
8022 fn vpmovm2w(&mut self, op0: Zmm, op1: KReg) {
8023 self.emit(VPMOVM2W512RK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8024 }
8025}
8026
8027/// `VPMOVSWB` (VPMOVSWB).
8028/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8029///
8030///
8031/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8032///
8033/// Supported operand variants:
8034///
8035/// ```text
8036/// +---+----------+
8037/// | # | Operands |
8038/// +---+----------+
8039/// | 1 | Mem, Xmm |
8040/// | 2 | Mem, Ymm |
8041/// | 3 | Mem, Zmm |
8042/// | 4 | Xmm, Xmm |
8043/// | 5 | Xmm, Ymm |
8044/// | 6 | Ymm, Zmm |
8045/// +---+----------+
8046/// ```
8047pub trait VpmovswbEmitter<A, B> {
8048 fn vpmovswb(&mut self, op0: A, op1: B);
8049}
8050
8051impl<'a> VpmovswbEmitter<Xmm, Xmm> for Assembler<'a> {
8052 fn vpmovswb(&mut self, op0: Xmm, op1: Xmm) {
8053 self.emit(VPMOVSWB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8054 }
8055}
8056
8057impl<'a> VpmovswbEmitter<Mem, Xmm> for Assembler<'a> {
8058 fn vpmovswb(&mut self, op0: Mem, op1: Xmm) {
8059 self.emit(VPMOVSWB128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8060 }
8061}
8062
8063impl<'a> VpmovswbEmitter<Xmm, Ymm> for Assembler<'a> {
8064 fn vpmovswb(&mut self, op0: Xmm, op1: Ymm) {
8065 self.emit(VPMOVSWB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8066 }
8067}
8068
8069impl<'a> VpmovswbEmitter<Mem, Ymm> for Assembler<'a> {
8070 fn vpmovswb(&mut self, op0: Mem, op1: Ymm) {
8071 self.emit(VPMOVSWB256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8072 }
8073}
8074
8075impl<'a> VpmovswbEmitter<Ymm, Zmm> for Assembler<'a> {
8076 fn vpmovswb(&mut self, op0: Ymm, op1: Zmm) {
8077 self.emit(VPMOVSWB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8078 }
8079}
8080
8081impl<'a> VpmovswbEmitter<Mem, Zmm> for Assembler<'a> {
8082 fn vpmovswb(&mut self, op0: Mem, op1: Zmm) {
8083 self.emit(VPMOVSWB512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8084 }
8085}
8086
8087/// `VPMOVSWB_MASK` (VPMOVSWB).
8088/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8089///
8090///
8091/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8092///
8093/// Supported operand variants:
8094///
8095/// ```text
8096/// +---+----------+
8097/// | # | Operands |
8098/// +---+----------+
8099/// | 1 | Mem, Xmm |
8100/// | 2 | Mem, Ymm |
8101/// | 3 | Mem, Zmm |
8102/// | 4 | Xmm, Xmm |
8103/// | 5 | Xmm, Ymm |
8104/// | 6 | Ymm, Zmm |
8105/// +---+----------+
8106/// ```
8107pub trait VpmovswbMaskEmitter<A, B> {
8108 fn vpmovswb_mask(&mut self, op0: A, op1: B);
8109}
8110
8111impl<'a> VpmovswbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
8112 fn vpmovswb_mask(&mut self, op0: Xmm, op1: Xmm) {
8113 self.emit(VPMOVSWB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8114 }
8115}
8116
8117impl<'a> VpmovswbMaskEmitter<Mem, Xmm> for Assembler<'a> {
8118 fn vpmovswb_mask(&mut self, op0: Mem, op1: Xmm) {
8119 self.emit(VPMOVSWB128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8120 }
8121}
8122
8123impl<'a> VpmovswbMaskEmitter<Xmm, Ymm> for Assembler<'a> {
8124 fn vpmovswb_mask(&mut self, op0: Xmm, op1: Ymm) {
8125 self.emit(VPMOVSWB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8126 }
8127}
8128
8129impl<'a> VpmovswbMaskEmitter<Mem, Ymm> for Assembler<'a> {
8130 fn vpmovswb_mask(&mut self, op0: Mem, op1: Ymm) {
8131 self.emit(VPMOVSWB256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8132 }
8133}
8134
8135impl<'a> VpmovswbMaskEmitter<Ymm, Zmm> for Assembler<'a> {
8136 fn vpmovswb_mask(&mut self, op0: Ymm, op1: Zmm) {
8137 self.emit(VPMOVSWB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8138 }
8139}
8140
8141impl<'a> VpmovswbMaskEmitter<Mem, Zmm> for Assembler<'a> {
8142 fn vpmovswb_mask(&mut self, op0: Mem, op1: Zmm) {
8143 self.emit(VPMOVSWB512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8144 }
8145}
8146
8147/// `VPMOVSWB_MASKZ` (VPMOVSWB).
8148/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8149///
8150///
8151/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8152///
8153/// Supported operand variants:
8154///
8155/// ```text
8156/// +---+----------+
8157/// | # | Operands |
8158/// +---+----------+
8159/// | 1 | Xmm, Xmm |
8160/// | 2 | Xmm, Ymm |
8161/// | 3 | Ymm, Zmm |
8162/// +---+----------+
8163/// ```
8164pub trait VpmovswbMaskzEmitter<A, B> {
8165 fn vpmovswb_maskz(&mut self, op0: A, op1: B);
8166}
8167
8168impl<'a> VpmovswbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
8169 fn vpmovswb_maskz(&mut self, op0: Xmm, op1: Xmm) {
8170 self.emit(VPMOVSWB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8171 }
8172}
8173
8174impl<'a> VpmovswbMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
8175 fn vpmovswb_maskz(&mut self, op0: Xmm, op1: Ymm) {
8176 self.emit(VPMOVSWB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8177 }
8178}
8179
8180impl<'a> VpmovswbMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
8181 fn vpmovswb_maskz(&mut self, op0: Ymm, op1: Zmm) {
8182 self.emit(VPMOVSWB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8183 }
8184}
8185
8186/// `VPMOVUSWB` (VPMOVUSWB).
8187/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8188///
8189///
8190/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8191///
8192/// Supported operand variants:
8193///
8194/// ```text
8195/// +---+----------+
8196/// | # | Operands |
8197/// +---+----------+
8198/// | 1 | Mem, Xmm |
8199/// | 2 | Mem, Ymm |
8200/// | 3 | Mem, Zmm |
8201/// | 4 | Xmm, Xmm |
8202/// | 5 | Xmm, Ymm |
8203/// | 6 | Ymm, Zmm |
8204/// +---+----------+
8205/// ```
8206pub trait VpmovuswbEmitter<A, B> {
8207 fn vpmovuswb(&mut self, op0: A, op1: B);
8208}
8209
8210impl<'a> VpmovuswbEmitter<Xmm, Xmm> for Assembler<'a> {
8211 fn vpmovuswb(&mut self, op0: Xmm, op1: Xmm) {
8212 self.emit(VPMOVUSWB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8213 }
8214}
8215
8216impl<'a> VpmovuswbEmitter<Mem, Xmm> for Assembler<'a> {
8217 fn vpmovuswb(&mut self, op0: Mem, op1: Xmm) {
8218 self.emit(VPMOVUSWB128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8219 }
8220}
8221
8222impl<'a> VpmovuswbEmitter<Xmm, Ymm> for Assembler<'a> {
8223 fn vpmovuswb(&mut self, op0: Xmm, op1: Ymm) {
8224 self.emit(VPMOVUSWB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8225 }
8226}
8227
8228impl<'a> VpmovuswbEmitter<Mem, Ymm> for Assembler<'a> {
8229 fn vpmovuswb(&mut self, op0: Mem, op1: Ymm) {
8230 self.emit(VPMOVUSWB256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8231 }
8232}
8233
8234impl<'a> VpmovuswbEmitter<Ymm, Zmm> for Assembler<'a> {
8235 fn vpmovuswb(&mut self, op0: Ymm, op1: Zmm) {
8236 self.emit(VPMOVUSWB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8237 }
8238}
8239
8240impl<'a> VpmovuswbEmitter<Mem, Zmm> for Assembler<'a> {
8241 fn vpmovuswb(&mut self, op0: Mem, op1: Zmm) {
8242 self.emit(VPMOVUSWB512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8243 }
8244}
8245
8246/// `VPMOVUSWB_MASK` (VPMOVUSWB).
8247/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8248///
8249///
8250/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8251///
8252/// Supported operand variants:
8253///
8254/// ```text
8255/// +---+----------+
8256/// | # | Operands |
8257/// +---+----------+
8258/// | 1 | Mem, Xmm |
8259/// | 2 | Mem, Ymm |
8260/// | 3 | Mem, Zmm |
8261/// | 4 | Xmm, Xmm |
8262/// | 5 | Xmm, Ymm |
8263/// | 6 | Ymm, Zmm |
8264/// +---+----------+
8265/// ```
8266pub trait VpmovuswbMaskEmitter<A, B> {
8267 fn vpmovuswb_mask(&mut self, op0: A, op1: B);
8268}
8269
8270impl<'a> VpmovuswbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
8271 fn vpmovuswb_mask(&mut self, op0: Xmm, op1: Xmm) {
8272 self.emit(VPMOVUSWB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8273 }
8274}
8275
8276impl<'a> VpmovuswbMaskEmitter<Mem, Xmm> for Assembler<'a> {
8277 fn vpmovuswb_mask(&mut self, op0: Mem, op1: Xmm) {
8278 self.emit(VPMOVUSWB128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8279 }
8280}
8281
8282impl<'a> VpmovuswbMaskEmitter<Xmm, Ymm> for Assembler<'a> {
8283 fn vpmovuswb_mask(&mut self, op0: Xmm, op1: Ymm) {
8284 self.emit(VPMOVUSWB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8285 }
8286}
8287
8288impl<'a> VpmovuswbMaskEmitter<Mem, Ymm> for Assembler<'a> {
8289 fn vpmovuswb_mask(&mut self, op0: Mem, op1: Ymm) {
8290 self.emit(VPMOVUSWB256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8291 }
8292}
8293
8294impl<'a> VpmovuswbMaskEmitter<Ymm, Zmm> for Assembler<'a> {
8295 fn vpmovuswb_mask(&mut self, op0: Ymm, op1: Zmm) {
8296 self.emit(VPMOVUSWB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8297 }
8298}
8299
8300impl<'a> VpmovuswbMaskEmitter<Mem, Zmm> for Assembler<'a> {
8301 fn vpmovuswb_mask(&mut self, op0: Mem, op1: Zmm) {
8302 self.emit(VPMOVUSWB512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8303 }
8304}
8305
8306/// `VPMOVUSWB_MASKZ` (VPMOVUSWB).
8307/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8308///
8309///
8310/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8311///
8312/// Supported operand variants:
8313///
8314/// ```text
8315/// +---+----------+
8316/// | # | Operands |
8317/// +---+----------+
8318/// | 1 | Xmm, Xmm |
8319/// | 2 | Xmm, Ymm |
8320/// | 3 | Ymm, Zmm |
8321/// +---+----------+
8322/// ```
8323pub trait VpmovuswbMaskzEmitter<A, B> {
8324 fn vpmovuswb_maskz(&mut self, op0: A, op1: B);
8325}
8326
8327impl<'a> VpmovuswbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
8328 fn vpmovuswb_maskz(&mut self, op0: Xmm, op1: Xmm) {
8329 self.emit(VPMOVUSWB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8330 }
8331}
8332
8333impl<'a> VpmovuswbMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
8334 fn vpmovuswb_maskz(&mut self, op0: Xmm, op1: Ymm) {
8335 self.emit(VPMOVUSWB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8336 }
8337}
8338
8339impl<'a> VpmovuswbMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
8340 fn vpmovuswb_maskz(&mut self, op0: Ymm, op1: Zmm) {
8341 self.emit(VPMOVUSWB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8342 }
8343}
8344
8345/// `VPMOVW2M` (VPMOVW2M).
8346/// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
8347///
8348///
8349/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
8350///
8351/// Supported operand variants:
8352///
8353/// ```text
8354/// +---+-----------+
8355/// | # | Operands |
8356/// +---+-----------+
8357/// | 1 | KReg, Xmm |
8358/// | 2 | KReg, Ymm |
8359/// | 3 | KReg, Zmm |
8360/// +---+-----------+
8361/// ```
8362pub trait Vpmovw2mEmitter<A, B> {
8363 fn vpmovw2m(&mut self, op0: A, op1: B);
8364}
8365
8366impl<'a> Vpmovw2mEmitter<KReg, Xmm> for Assembler<'a> {
8367 fn vpmovw2m(&mut self, op0: KReg, op1: Xmm) {
8368 self.emit(VPMOVW2M128KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8369 }
8370}
8371
8372impl<'a> Vpmovw2mEmitter<KReg, Ymm> for Assembler<'a> {
8373 fn vpmovw2m(&mut self, op0: KReg, op1: Ymm) {
8374 self.emit(VPMOVW2M256KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8375 }
8376}
8377
8378impl<'a> Vpmovw2mEmitter<KReg, Zmm> for Assembler<'a> {
8379 fn vpmovw2m(&mut self, op0: KReg, op1: Zmm) {
8380 self.emit(VPMOVW2M512KR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8381 }
8382}
8383
8384/// `VPMOVWB` (VPMOVWB).
8385/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8386///
8387///
8388/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8389///
8390/// Supported operand variants:
8391///
8392/// ```text
8393/// +---+----------+
8394/// | # | Operands |
8395/// +---+----------+
8396/// | 1 | Mem, Xmm |
8397/// | 2 | Mem, Ymm |
8398/// | 3 | Mem, Zmm |
8399/// | 4 | Xmm, Xmm |
8400/// | 5 | Xmm, Ymm |
8401/// | 6 | Ymm, Zmm |
8402/// +---+----------+
8403/// ```
8404pub trait VpmovwbEmitter<A, B> {
8405 fn vpmovwb(&mut self, op0: A, op1: B);
8406}
8407
8408impl<'a> VpmovwbEmitter<Xmm, Xmm> for Assembler<'a> {
8409 fn vpmovwb(&mut self, op0: Xmm, op1: Xmm) {
8410 self.emit(VPMOVWB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8411 }
8412}
8413
8414impl<'a> VpmovwbEmitter<Mem, Xmm> for Assembler<'a> {
8415 fn vpmovwb(&mut self, op0: Mem, op1: Xmm) {
8416 self.emit(VPMOVWB128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8417 }
8418}
8419
8420impl<'a> VpmovwbEmitter<Xmm, Ymm> for Assembler<'a> {
8421 fn vpmovwb(&mut self, op0: Xmm, op1: Ymm) {
8422 self.emit(VPMOVWB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8423 }
8424}
8425
8426impl<'a> VpmovwbEmitter<Mem, Ymm> for Assembler<'a> {
8427 fn vpmovwb(&mut self, op0: Mem, op1: Ymm) {
8428 self.emit(VPMOVWB256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8429 }
8430}
8431
8432impl<'a> VpmovwbEmitter<Ymm, Zmm> for Assembler<'a> {
8433 fn vpmovwb(&mut self, op0: Ymm, op1: Zmm) {
8434 self.emit(VPMOVWB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8435 }
8436}
8437
8438impl<'a> VpmovwbEmitter<Mem, Zmm> for Assembler<'a> {
8439 fn vpmovwb(&mut self, op0: Mem, op1: Zmm) {
8440 self.emit(VPMOVWB512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8441 }
8442}
8443
8444/// `VPMOVWB_MASK` (VPMOVWB).
8445/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8446///
8447///
8448/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8449///
8450/// Supported operand variants:
8451///
8452/// ```text
8453/// +---+----------+
8454/// | # | Operands |
8455/// +---+----------+
8456/// | 1 | Mem, Xmm |
8457/// | 2 | Mem, Ymm |
8458/// | 3 | Mem, Zmm |
8459/// | 4 | Xmm, Xmm |
8460/// | 5 | Xmm, Ymm |
8461/// | 6 | Ymm, Zmm |
8462/// +---+----------+
8463/// ```
8464pub trait VpmovwbMaskEmitter<A, B> {
8465 fn vpmovwb_mask(&mut self, op0: A, op1: B);
8466}
8467
8468impl<'a> VpmovwbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
8469 fn vpmovwb_mask(&mut self, op0: Xmm, op1: Xmm) {
8470 self.emit(VPMOVWB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8471 }
8472}
8473
8474impl<'a> VpmovwbMaskEmitter<Mem, Xmm> for Assembler<'a> {
8475 fn vpmovwb_mask(&mut self, op0: Mem, op1: Xmm) {
8476 self.emit(VPMOVWB128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8477 }
8478}
8479
8480impl<'a> VpmovwbMaskEmitter<Xmm, Ymm> for Assembler<'a> {
8481 fn vpmovwb_mask(&mut self, op0: Xmm, op1: Ymm) {
8482 self.emit(VPMOVWB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8483 }
8484}
8485
8486impl<'a> VpmovwbMaskEmitter<Mem, Ymm> for Assembler<'a> {
8487 fn vpmovwb_mask(&mut self, op0: Mem, op1: Ymm) {
8488 self.emit(VPMOVWB256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8489 }
8490}
8491
8492impl<'a> VpmovwbMaskEmitter<Ymm, Zmm> for Assembler<'a> {
8493 fn vpmovwb_mask(&mut self, op0: Ymm, op1: Zmm) {
8494 self.emit(VPMOVWB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8495 }
8496}
8497
8498impl<'a> VpmovwbMaskEmitter<Mem, Zmm> for Assembler<'a> {
8499 fn vpmovwb_mask(&mut self, op0: Mem, op1: Zmm) {
8500 self.emit(VPMOVWB512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8501 }
8502}
8503
8504/// `VPMOVWB_MASKZ` (VPMOVWB).
8505/// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
8506///
8507///
8508/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
8509///
8510/// Supported operand variants:
8511///
8512/// ```text
8513/// +---+----------+
8514/// | # | Operands |
8515/// +---+----------+
8516/// | 1 | Xmm, Xmm |
8517/// | 2 | Xmm, Ymm |
8518/// | 3 | Ymm, Zmm |
8519/// +---+----------+
8520/// ```
8521pub trait VpmovwbMaskzEmitter<A, B> {
8522 fn vpmovwb_maskz(&mut self, op0: A, op1: B);
8523}
8524
8525impl<'a> VpmovwbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
8526 fn vpmovwb_maskz(&mut self, op0: Xmm, op1: Xmm) {
8527 self.emit(VPMOVWB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8528 }
8529}
8530
8531impl<'a> VpmovwbMaskzEmitter<Xmm, Ymm> for Assembler<'a> {
8532 fn vpmovwb_maskz(&mut self, op0: Xmm, op1: Ymm) {
8533 self.emit(VPMOVWB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8534 }
8535}
8536
8537impl<'a> VpmovwbMaskzEmitter<Ymm, Zmm> for Assembler<'a> {
8538 fn vpmovwb_maskz(&mut self, op0: Ymm, op1: Zmm) {
8539 self.emit(VPMOVWB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
8540 }
8541}
8542
8543/// `VPMULHRSW` (VPMULHRSW).
8544/// PMULHRSW multiplies vertically each signed 16-bit integer from the destination operand (first operand) with the corresponding signed 16-bit integer of the source operand (second operand), producing intermediate, signed 32-bit integers. Each intermediate 32-bit integer is truncated to the 18 most significant bits. Rounding is always performed by adding 1 to the least significant bit of the 18-bit intermediate result. The final result is obtained by selecting the 16 bits immediately to the right of the most significant bit of each 18-bit intermediate result and packed to the destination operand.
8545///
8546///
8547/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHRSW.html).
8548///
8549/// Supported operand variants:
8550///
8551/// ```text
8552/// +---+---------------+
8553/// | # | Operands |
8554/// +---+---------------+
8555/// | 1 | Xmm, Xmm, Mem |
8556/// | 2 | Xmm, Xmm, Xmm |
8557/// | 3 | Ymm, Ymm, Mem |
8558/// | 4 | Ymm, Ymm, Ymm |
8559/// | 5 | Zmm, Zmm, Mem |
8560/// | 6 | Zmm, Zmm, Zmm |
8561/// +---+---------------+
8562/// ```
8563pub trait VpmulhrswEmitter<A, B, C> {
8564 fn vpmulhrsw(&mut self, op0: A, op1: B, op2: C);
8565}
8566
8567impl<'a> VpmulhrswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8568 fn vpmulhrsw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8569 self.emit(VPMULHRSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8570 }
8571}
8572
8573impl<'a> VpmulhrswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8574 fn vpmulhrsw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8575 self.emit(VPMULHRSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8576 }
8577}
8578
8579impl<'a> VpmulhrswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8580 fn vpmulhrsw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8581 self.emit(VPMULHRSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8582 }
8583}
8584
8585impl<'a> VpmulhrswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8586 fn vpmulhrsw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8587 self.emit(VPMULHRSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8588 }
8589}
8590
8591impl<'a> VpmulhrswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8592 fn vpmulhrsw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8593 self.emit(VPMULHRSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8594 }
8595}
8596
8597impl<'a> VpmulhrswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8598 fn vpmulhrsw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8599 self.emit(VPMULHRSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8600 }
8601}
8602
8603/// `VPMULHRSW_MASK` (VPMULHRSW).
8604/// PMULHRSW multiplies vertically each signed 16-bit integer from the destination operand (first operand) with the corresponding signed 16-bit integer of the source operand (second operand), producing intermediate, signed 32-bit integers. Each intermediate 32-bit integer is truncated to the 18 most significant bits. Rounding is always performed by adding 1 to the least significant bit of the 18-bit intermediate result. The final result is obtained by selecting the 16 bits immediately to the right of the most significant bit of each 18-bit intermediate result and packed to the destination operand.
8605///
8606///
8607/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHRSW.html).
8608///
8609/// Supported operand variants:
8610///
8611/// ```text
8612/// +---+---------------+
8613/// | # | Operands |
8614/// +---+---------------+
8615/// | 1 | Xmm, Xmm, Mem |
8616/// | 2 | Xmm, Xmm, Xmm |
8617/// | 3 | Ymm, Ymm, Mem |
8618/// | 4 | Ymm, Ymm, Ymm |
8619/// | 5 | Zmm, Zmm, Mem |
8620/// | 6 | Zmm, Zmm, Zmm |
8621/// +---+---------------+
8622/// ```
8623pub trait VpmulhrswMaskEmitter<A, B, C> {
8624 fn vpmulhrsw_mask(&mut self, op0: A, op1: B, op2: C);
8625}
8626
8627impl<'a> VpmulhrswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8628 fn vpmulhrsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8629 self.emit(VPMULHRSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8630 }
8631}
8632
8633impl<'a> VpmulhrswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8634 fn vpmulhrsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8635 self.emit(VPMULHRSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8636 }
8637}
8638
8639impl<'a> VpmulhrswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8640 fn vpmulhrsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8641 self.emit(VPMULHRSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8642 }
8643}
8644
8645impl<'a> VpmulhrswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8646 fn vpmulhrsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8647 self.emit(VPMULHRSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8648 }
8649}
8650
8651impl<'a> VpmulhrswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8652 fn vpmulhrsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8653 self.emit(VPMULHRSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8654 }
8655}
8656
8657impl<'a> VpmulhrswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8658 fn vpmulhrsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8659 self.emit(VPMULHRSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8660 }
8661}
8662
8663/// `VPMULHRSW_MASKZ` (VPMULHRSW).
8664/// PMULHRSW multiplies vertically each signed 16-bit integer from the destination operand (first operand) with the corresponding signed 16-bit integer of the source operand (second operand), producing intermediate, signed 32-bit integers. Each intermediate 32-bit integer is truncated to the 18 most significant bits. Rounding is always performed by adding 1 to the least significant bit of the 18-bit intermediate result. The final result is obtained by selecting the 16 bits immediately to the right of the most significant bit of each 18-bit intermediate result and packed to the destination operand.
8665///
8666///
8667/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHRSW.html).
8668///
8669/// Supported operand variants:
8670///
8671/// ```text
8672/// +---+---------------+
8673/// | # | Operands |
8674/// +---+---------------+
8675/// | 1 | Xmm, Xmm, Mem |
8676/// | 2 | Xmm, Xmm, Xmm |
8677/// | 3 | Ymm, Ymm, Mem |
8678/// | 4 | Ymm, Ymm, Ymm |
8679/// | 5 | Zmm, Zmm, Mem |
8680/// | 6 | Zmm, Zmm, Zmm |
8681/// +---+---------------+
8682/// ```
8683pub trait VpmulhrswMaskzEmitter<A, B, C> {
8684 fn vpmulhrsw_maskz(&mut self, op0: A, op1: B, op2: C);
8685}
8686
8687impl<'a> VpmulhrswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8688 fn vpmulhrsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8689 self.emit(VPMULHRSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8690 }
8691}
8692
8693impl<'a> VpmulhrswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8694 fn vpmulhrsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8695 self.emit(VPMULHRSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8696 }
8697}
8698
8699impl<'a> VpmulhrswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8700 fn vpmulhrsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8701 self.emit(VPMULHRSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8702 }
8703}
8704
8705impl<'a> VpmulhrswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8706 fn vpmulhrsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8707 self.emit(VPMULHRSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8708 }
8709}
8710
8711impl<'a> VpmulhrswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8712 fn vpmulhrsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8713 self.emit(VPMULHRSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8714 }
8715}
8716
8717impl<'a> VpmulhrswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8718 fn vpmulhrsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8719 self.emit(VPMULHRSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8720 }
8721}
8722
8723/// `VPMULHUW` (VPMULHUW).
8724/// Performs a SIMD unsigned multiply of the packed unsigned word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each 32-bit intermediate results in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
8725///
8726///
8727/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHUW.html).
8728///
8729/// Supported operand variants:
8730///
8731/// ```text
8732/// +---+---------------+
8733/// | # | Operands |
8734/// +---+---------------+
8735/// | 1 | Xmm, Xmm, Mem |
8736/// | 2 | Xmm, Xmm, Xmm |
8737/// | 3 | Ymm, Ymm, Mem |
8738/// | 4 | Ymm, Ymm, Ymm |
8739/// | 5 | Zmm, Zmm, Mem |
8740/// | 6 | Zmm, Zmm, Zmm |
8741/// +---+---------------+
8742/// ```
8743pub trait VpmulhuwEmitter<A, B, C> {
8744 fn vpmulhuw(&mut self, op0: A, op1: B, op2: C);
8745}
8746
8747impl<'a> VpmulhuwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8748 fn vpmulhuw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8749 self.emit(VPMULHUW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8750 }
8751}
8752
8753impl<'a> VpmulhuwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8754 fn vpmulhuw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8755 self.emit(VPMULHUW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8756 }
8757}
8758
8759impl<'a> VpmulhuwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8760 fn vpmulhuw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8761 self.emit(VPMULHUW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8762 }
8763}
8764
8765impl<'a> VpmulhuwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8766 fn vpmulhuw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8767 self.emit(VPMULHUW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8768 }
8769}
8770
8771impl<'a> VpmulhuwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8772 fn vpmulhuw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8773 self.emit(VPMULHUW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8774 }
8775}
8776
8777impl<'a> VpmulhuwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8778 fn vpmulhuw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8779 self.emit(VPMULHUW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8780 }
8781}
8782
/// `VPMULHUW_MASK` (VPMULHUW).
/// Packed unsigned-word high multiply: multiplies corresponding unsigned
/// 16-bit words of the two source operands and stores the high 16 bits of
/// each 32-bit intermediate product in the destination operand.
///
/// NOTE(review): the `_mask` suffix selects the `*_MASK` opcode variants;
/// this presumably encodes EVEX merge-masking (`{k}`), but the mask register
/// is not one of the visible operands here — it appears to be supplied
/// elsewhere by the assembler. TODO confirm against `emit`'s opcode handling.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHUW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmulhuwMaskEmitter<A, B, C> {
    /// Emits a masked `vpmulhuw` with the given destination and two sources.
    fn vpmulhuw_mask(&mut self, op0: A, op1: B, op2: C);
}
8806
8807impl<'a> VpmulhuwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8808 fn vpmulhuw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8809 self.emit(VPMULHUW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8810 }
8811}
8812
8813impl<'a> VpmulhuwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8814 fn vpmulhuw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8815 self.emit(VPMULHUW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8816 }
8817}
8818
8819impl<'a> VpmulhuwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8820 fn vpmulhuw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8821 self.emit(VPMULHUW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8822 }
8823}
8824
8825impl<'a> VpmulhuwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8826 fn vpmulhuw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8827 self.emit(VPMULHUW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8828 }
8829}
8830
8831impl<'a> VpmulhuwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8832 fn vpmulhuw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8833 self.emit(VPMULHUW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8834 }
8835}
8836
8837impl<'a> VpmulhuwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8838 fn vpmulhuw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8839 self.emit(VPMULHUW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8840 }
8841}
8842
/// `VPMULHUW_MASKZ` (VPMULHUW).
/// Packed unsigned-word high multiply: multiplies corresponding unsigned
/// 16-bit words of the two source operands and stores the high 16 bits of
/// each 32-bit intermediate product in the destination operand.
///
/// NOTE(review): the `_maskz` suffix selects the `*_MASKZ` opcode variants;
/// this presumably encodes EVEX zero-masking (`{k}{z}`), but the mask
/// register is not one of the visible operands here — it appears to be
/// supplied elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHUW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmulhuwMaskzEmitter<A, B, C> {
    /// Emits a zero-masked `vpmulhuw` with the given destination and two sources.
    fn vpmulhuw_maskz(&mut self, op0: A, op1: B, op2: C);
}
8866
8867impl<'a> VpmulhuwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8868 fn vpmulhuw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8869 self.emit(VPMULHUW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8870 }
8871}
8872
8873impl<'a> VpmulhuwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8874 fn vpmulhuw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8875 self.emit(VPMULHUW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8876 }
8877}
8878
8879impl<'a> VpmulhuwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8880 fn vpmulhuw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8881 self.emit(VPMULHUW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8882 }
8883}
8884
8885impl<'a> VpmulhuwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8886 fn vpmulhuw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8887 self.emit(VPMULHUW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8888 }
8889}
8890
8891impl<'a> VpmulhuwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8892 fn vpmulhuw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8893 self.emit(VPMULHUW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8894 }
8895}
8896
8897impl<'a> VpmulhuwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8898 fn vpmulhuw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8899 self.emit(VPMULHUW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8900 }
8901}
8902
/// `VPMULHW` (VPMULHW).
/// Packed signed-word high multiply: multiplies corresponding signed 16-bit
/// words of the two source operands and stores the high 16 bits of each
/// 32-bit intermediate product in the destination operand.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmulhwEmitter<A, B, C> {
    /// Emits `vpmulhw` with the given destination and two sources.
    fn vpmulhw(&mut self, op0: A, op1: B, op2: C);
}
8926
8927impl<'a> VpmulhwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8928 fn vpmulhw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8929 self.emit(VPMULHW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8930 }
8931}
8932
8933impl<'a> VpmulhwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8934 fn vpmulhw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8935 self.emit(VPMULHW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8936 }
8937}
8938
8939impl<'a> VpmulhwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
8940 fn vpmulhw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
8941 self.emit(VPMULHW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8942 }
8943}
8944
8945impl<'a> VpmulhwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
8946 fn vpmulhw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
8947 self.emit(VPMULHW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8948 }
8949}
8950
8951impl<'a> VpmulhwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
8952 fn vpmulhw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
8953 self.emit(VPMULHW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8954 }
8955}
8956
8957impl<'a> VpmulhwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
8958 fn vpmulhw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
8959 self.emit(VPMULHW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8960 }
8961}
8962
/// `VPMULHW_MASK` (VPMULHW).
/// Packed signed-word high multiply: multiplies corresponding signed 16-bit
/// words of the two source operands and stores the high 16 bits of each
/// 32-bit intermediate product in the destination operand.
///
/// NOTE(review): the `_mask` suffix selects the `*_MASK` opcode variants;
/// this presumably encodes EVEX merge-masking (`{k}`), but the mask register
/// is not one of the visible operands here — it appears to be supplied
/// elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmulhwMaskEmitter<A, B, C> {
    /// Emits a masked `vpmulhw` with the given destination and two sources.
    fn vpmulhw_mask(&mut self, op0: A, op1: B, op2: C);
}
8986
8987impl<'a> VpmulhwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
8988 fn vpmulhw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
8989 self.emit(VPMULHW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8990 }
8991}
8992
8993impl<'a> VpmulhwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
8994 fn vpmulhw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
8995 self.emit(VPMULHW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
8996 }
8997}
8998
8999impl<'a> VpmulhwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9000 fn vpmulhw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9001 self.emit(VPMULHW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9002 }
9003}
9004
9005impl<'a> VpmulhwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9006 fn vpmulhw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9007 self.emit(VPMULHW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9008 }
9009}
9010
9011impl<'a> VpmulhwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9012 fn vpmulhw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9013 self.emit(VPMULHW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9014 }
9015}
9016
9017impl<'a> VpmulhwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9018 fn vpmulhw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9019 self.emit(VPMULHW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9020 }
9021}
9022
/// `VPMULHW_MASKZ` (VPMULHW).
/// Packed signed-word high multiply: multiplies corresponding signed 16-bit
/// words of the two source operands and stores the high 16 bits of each
/// 32-bit intermediate product in the destination operand.
///
/// NOTE(review): the `_maskz` suffix selects the `*_MASKZ` opcode variants;
/// this presumably encodes EVEX zero-masking (`{k}{z}`), but the mask
/// register is not one of the visible operands here — it appears to be
/// supplied elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmulhwMaskzEmitter<A, B, C> {
    /// Emits a zero-masked `vpmulhw` with the given destination and two sources.
    fn vpmulhw_maskz(&mut self, op0: A, op1: B, op2: C);
}
9046
9047impl<'a> VpmulhwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9048 fn vpmulhw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9049 self.emit(VPMULHW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9050 }
9051}
9052
9053impl<'a> VpmulhwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9054 fn vpmulhw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9055 self.emit(VPMULHW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9056 }
9057}
9058
9059impl<'a> VpmulhwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9060 fn vpmulhw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9061 self.emit(VPMULHW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9062 }
9063}
9064
9065impl<'a> VpmulhwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9066 fn vpmulhw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9067 self.emit(VPMULHW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9068 }
9069}
9070
9071impl<'a> VpmulhwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9072 fn vpmulhw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9073 self.emit(VPMULHW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9074 }
9075}
9076
9077impl<'a> VpmulhwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9078 fn vpmulhw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9079 self.emit(VPMULHW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9080 }
9081}
9082
/// `VPMULLW` (VPMULLW).
/// Packed signed-word low multiply: multiplies corresponding signed 16-bit
/// words of the two source operands and stores the low 16 bits of each
/// 32-bit intermediate product in the destination operand.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmullwEmitter<A, B, C> {
    /// Emits `vpmullw` with the given destination and two sources.
    fn vpmullw(&mut self, op0: A, op1: B, op2: C);
}
9106
9107impl<'a> VpmullwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9108 fn vpmullw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9109 self.emit(VPMULLW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9110 }
9111}
9112
9113impl<'a> VpmullwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9114 fn vpmullw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9115 self.emit(VPMULLW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9116 }
9117}
9118
9119impl<'a> VpmullwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9120 fn vpmullw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9121 self.emit(VPMULLW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9122 }
9123}
9124
9125impl<'a> VpmullwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9126 fn vpmullw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9127 self.emit(VPMULLW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9128 }
9129}
9130
9131impl<'a> VpmullwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9132 fn vpmullw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9133 self.emit(VPMULLW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9134 }
9135}
9136
9137impl<'a> VpmullwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9138 fn vpmullw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9139 self.emit(VPMULLW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9140 }
9141}
9142
/// `VPMULLW_MASK` (VPMULLW).
/// Packed signed-word low multiply: multiplies corresponding signed 16-bit
/// words of the two source operands and stores the low 16 bits of each
/// 32-bit intermediate product in the destination operand.
///
/// NOTE(review): the `_mask` suffix selects the `*_MASK` opcode variants;
/// this presumably encodes EVEX merge-masking (`{k}`), but the mask register
/// is not one of the visible operands here — it appears to be supplied
/// elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmullwMaskEmitter<A, B, C> {
    /// Emits a masked `vpmullw` with the given destination and two sources.
    fn vpmullw_mask(&mut self, op0: A, op1: B, op2: C);
}
9166
9167impl<'a> VpmullwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9168 fn vpmullw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9169 self.emit(VPMULLW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9170 }
9171}
9172
9173impl<'a> VpmullwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9174 fn vpmullw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9175 self.emit(VPMULLW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9176 }
9177}
9178
9179impl<'a> VpmullwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9180 fn vpmullw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9181 self.emit(VPMULLW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9182 }
9183}
9184
9185impl<'a> VpmullwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9186 fn vpmullw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9187 self.emit(VPMULLW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9188 }
9189}
9190
9191impl<'a> VpmullwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9192 fn vpmullw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9193 self.emit(VPMULLW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9194 }
9195}
9196
9197impl<'a> VpmullwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9198 fn vpmullw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9199 self.emit(VPMULLW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9200 }
9201}
9202
/// `VPMULLW_MASKZ` (VPMULLW).
/// Packed signed-word low multiply: multiplies corresponding signed 16-bit
/// words of the two source operands and stores the low 16 bits of each
/// 32-bit intermediate product in the destination operand.
///
/// NOTE(review): the `_maskz` suffix selects the `*_MASKZ` opcode variants;
/// this presumably encodes EVEX zero-masking (`{k}{z}`), but the mask
/// register is not one of the visible operands here — it appears to be
/// supplied elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpmullwMaskzEmitter<A, B, C> {
    /// Emits a zero-masked `vpmullw` with the given destination and two sources.
    fn vpmullw_maskz(&mut self, op0: A, op1: B, op2: C);
}
9226
9227impl<'a> VpmullwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9228 fn vpmullw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9229 self.emit(VPMULLW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9230 }
9231}
9232
9233impl<'a> VpmullwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9234 fn vpmullw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9235 self.emit(VPMULLW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9236 }
9237}
9238
9239impl<'a> VpmullwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9240 fn vpmullw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9241 self.emit(VPMULLW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9242 }
9243}
9244
9245impl<'a> VpmullwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9246 fn vpmullw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9247 self.emit(VPMULLW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9248 }
9249}
9250
9251impl<'a> VpmullwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9252 fn vpmullw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9253 self.emit(VPMULLW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9254 }
9255}
9256
9257impl<'a> VpmullwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9258 fn vpmullw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9259 self.emit(VPMULLW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9260 }
9261}
9262
/// `VPSADBW` (VPSADBW).
/// Sum of absolute differences: computes the absolute differences of
/// corresponding unsigned bytes of the two source operands, then sums groups
/// of 8 differences into unsigned word results stored in the destination.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSADBW.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpsadbwEmitter<A, B, C> {
    /// Emits `vpsadbw` with the given destination and two sources.
    fn vpsadbw(&mut self, op0: A, op1: B, op2: C);
}
9286
9287impl<'a> VpsadbwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9288 fn vpsadbw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9289 self.emit(VPSADBW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9290 }
9291}
9292
9293impl<'a> VpsadbwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9294 fn vpsadbw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9295 self.emit(VPSADBW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9296 }
9297}
9298
9299impl<'a> VpsadbwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9300 fn vpsadbw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9301 self.emit(VPSADBW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9302 }
9303}
9304
9305impl<'a> VpsadbwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9306 fn vpsadbw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9307 self.emit(VPSADBW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9308 }
9309}
9310
9311impl<'a> VpsadbwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9312 fn vpsadbw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9313 self.emit(VPSADBW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9314 }
9315}
9316
9317impl<'a> VpsadbwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9318 fn vpsadbw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9319 self.emit(VPSADBW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9320 }
9321}
9322
/// `VPSHUFB` (VPSHUFB).
/// Packed byte shuffle: permutes bytes of the first source according to the
/// shuffle-control bytes of the second source. For each control byte, if its
/// most significant bit (bit 7) is set the result byte is zero; otherwise the
/// low bits select the source byte to copy (4 bits for 128-bit lanes).
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFB.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpshufbEmitter<A, B, C> {
    /// Emits `vpshufb` with the given destination, source, and control mask.
    fn vpshufb(&mut self, op0: A, op1: B, op2: C);
}
9346
9347impl<'a> VpshufbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9348 fn vpshufb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9349 self.emit(VPSHUFB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9350 }
9351}
9352
9353impl<'a> VpshufbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9354 fn vpshufb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9355 self.emit(VPSHUFB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9356 }
9357}
9358
9359impl<'a> VpshufbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9360 fn vpshufb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9361 self.emit(VPSHUFB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9362 }
9363}
9364
9365impl<'a> VpshufbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9366 fn vpshufb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9367 self.emit(VPSHUFB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9368 }
9369}
9370
9371impl<'a> VpshufbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9372 fn vpshufb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9373 self.emit(VPSHUFB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9374 }
9375}
9376
9377impl<'a> VpshufbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9378 fn vpshufb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9379 self.emit(VPSHUFB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9380 }
9381}
9382
/// `VPSHUFB_MASK` (VPSHUFB).
/// Packed byte shuffle: permutes bytes of the first source according to the
/// shuffle-control bytes of the second source. For each control byte, if its
/// most significant bit (bit 7) is set the result byte is zero; otherwise the
/// low bits select the source byte to copy.
///
/// NOTE(review): the `_mask` suffix selects the `*_MASK` opcode variants;
/// this presumably encodes EVEX merge-masking (`{k}`), but the mask register
/// is not one of the visible operands here — it appears to be supplied
/// elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFB.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpshufbMaskEmitter<A, B, C> {
    /// Emits a masked `vpshufb` with the given destination, source, and control mask.
    fn vpshufb_mask(&mut self, op0: A, op1: B, op2: C);
}
9406
9407impl<'a> VpshufbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9408 fn vpshufb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9409 self.emit(VPSHUFB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9410 }
9411}
9412
9413impl<'a> VpshufbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9414 fn vpshufb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9415 self.emit(VPSHUFB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9416 }
9417}
9418
9419impl<'a> VpshufbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9420 fn vpshufb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9421 self.emit(VPSHUFB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9422 }
9423}
9424
9425impl<'a> VpshufbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9426 fn vpshufb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9427 self.emit(VPSHUFB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9428 }
9429}
9430
9431impl<'a> VpshufbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9432 fn vpshufb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9433 self.emit(VPSHUFB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9434 }
9435}
9436
9437impl<'a> VpshufbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9438 fn vpshufb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9439 self.emit(VPSHUFB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9440 }
9441}
9442
/// `VPSHUFB_MASKZ` (VPSHUFB).
/// Packed byte shuffle: permutes bytes of the first source according to the
/// shuffle-control bytes of the second source. For each control byte, if its
/// most significant bit (bit 7) is set the result byte is zero; otherwise the
/// low bits select the source byte to copy.
///
/// NOTE(review): the `_maskz` suffix selects the `*_MASKZ` opcode variants;
/// this presumably encodes EVEX zero-masking (`{k}{z}`), but the mask
/// register is not one of the visible operands here — it appears to be
/// supplied elsewhere by the assembler. TODO confirm.
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFB.html).
///
/// Supported operand variants: `(Xmm, Xmm, Xmm|Mem)`, `(Ymm, Ymm, Ymm|Mem)`,
/// `(Zmm, Zmm, Zmm|Mem)`.
pub trait VpshufbMaskzEmitter<A, B, C> {
    /// Emits a zero-masked `vpshufb` with the given destination, source, and control mask.
    fn vpshufb_maskz(&mut self, op0: A, op1: B, op2: C);
}
9466
9467impl<'a> VpshufbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9468 fn vpshufb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9469 self.emit(VPSHUFB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9470 }
9471}
9472
9473impl<'a> VpshufbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9474 fn vpshufb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9475 self.emit(VPSHUFB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9476 }
9477}
9478
9479impl<'a> VpshufbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9480 fn vpshufb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9481 self.emit(VPSHUFB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9482 }
9483}
9484
9485impl<'a> VpshufbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9486 fn vpshufb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9487 self.emit(VPSHUFB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9488 }
9489}
9490
9491impl<'a> VpshufbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9492 fn vpshufb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9493 self.emit(VPSHUFB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9494 }
9495}
9496
9497impl<'a> VpshufbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9498 fn vpshufb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9499 self.emit(VPSHUFB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9500 }
9501}
9502
/// `VPSHUFHW` (VPSHUFHW).
/// Shuffles the words in the high quadword of each 128-bit lane of the source
/// operand into the high quadword of the destination, as selected by the
/// immediate operand; the low quadword of each lane is copied through
/// unchanged. Each 2-bit field of the immediate selects one word (index 0–3)
/// of the source lane's high quadword for the corresponding destination word.
/// (The original comment read "words (0, 1, 2 or 3, 4)" — a 2-bit selector
/// can only address words 0–3; the trailing ", 4" looks like an extraction
/// artifact.)
///
/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFHW.html).
///
/// Supported operand variants: `(Xmm, Xmm|Mem, Imm)`, `(Ymm, Ymm|Mem, Imm)`,
/// `(Zmm, Zmm|Mem, Imm)`.
pub trait VpshufhwEmitter<A, B, C> {
    /// Emits `vpshufhw` with the given destination, source, and imm8 selector.
    fn vpshufhw(&mut self, op0: A, op1: B, op2: C);
}
9526
9527impl<'a> VpshufhwEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9528 fn vpshufhw(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9529 self.emit(VPSHUFHW128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9530 }
9531}
9532
9533impl<'a> VpshufhwEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9534 fn vpshufhw(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9535 self.emit(VPSHUFHW128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9536 }
9537}
9538
9539impl<'a> VpshufhwEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9540 fn vpshufhw(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9541 self.emit(VPSHUFHW256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9542 }
9543}
9544
9545impl<'a> VpshufhwEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9546 fn vpshufhw(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9547 self.emit(VPSHUFHW256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9548 }
9549}
9550
9551impl<'a> VpshufhwEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9552 fn vpshufhw(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9553 self.emit(VPSHUFHW512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9554 }
9555}
9556
9557impl<'a> VpshufhwEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9558 fn vpshufhw(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9559 self.emit(VPSHUFHW512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9560 }
9561}
9562
9563/// `VPSHUFHW_MASK` (VPSHUFHW).
9564/// Copies words from the high quadword of a 128-bit lane of the source operand and inserts them in the high quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. This 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the high quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3, 4) from the high quadword of the source operand to be copied to the destination operand. The low quadword of the source operand is copied to the low quadword of the destination operand, for each 128-bit lane.
9565///
9566///
9567/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFHW.html).
9568///
9569/// Supported operand variants:
9570///
9571/// ```text
9572/// +---+---------------+
9573/// | # | Operands |
9574/// +---+---------------+
9575/// | 1 | Xmm, Mem, Imm |
9576/// | 2 | Xmm, Xmm, Imm |
9577/// | 3 | Ymm, Mem, Imm |
9578/// | 4 | Ymm, Ymm, Imm |
9579/// | 5 | Zmm, Mem, Imm |
9580/// | 6 | Zmm, Zmm, Imm |
9581/// +---+---------------+
9582/// ```
9583pub trait VpshufhwMaskEmitter<A, B, C> {
9584 fn vpshufhw_mask(&mut self, op0: A, op1: B, op2: C);
9585}
9586
9587impl<'a> VpshufhwMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9588 fn vpshufhw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9589 self.emit(VPSHUFHW128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9590 }
9591}
9592
9593impl<'a> VpshufhwMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9594 fn vpshufhw_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9595 self.emit(VPSHUFHW128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9596 }
9597}
9598
9599impl<'a> VpshufhwMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9600 fn vpshufhw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9601 self.emit(VPSHUFHW256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9602 }
9603}
9604
9605impl<'a> VpshufhwMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9606 fn vpshufhw_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9607 self.emit(VPSHUFHW256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9608 }
9609}
9610
9611impl<'a> VpshufhwMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9612 fn vpshufhw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9613 self.emit(VPSHUFHW512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9614 }
9615}
9616
9617impl<'a> VpshufhwMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9618 fn vpshufhw_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9619 self.emit(VPSHUFHW512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9620 }
9621}
9622
9623/// `VPSHUFHW_MASKZ` (VPSHUFHW).
9624/// Copies words from the high quadword of a 128-bit lane of the source operand and inserts them in the high quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. This 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the high quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3, 4) from the high quadword of the source operand to be copied to the destination operand. The low quadword of the source operand is copied to the low quadword of the destination operand, for each 128-bit lane.
9625///
9626///
9627/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFHW.html).
9628///
9629/// Supported operand variants:
9630///
9631/// ```text
9632/// +---+---------------+
9633/// | # | Operands |
9634/// +---+---------------+
9635/// | 1 | Xmm, Mem, Imm |
9636/// | 2 | Xmm, Xmm, Imm |
9637/// | 3 | Ymm, Mem, Imm |
9638/// | 4 | Ymm, Ymm, Imm |
9639/// | 5 | Zmm, Mem, Imm |
9640/// | 6 | Zmm, Zmm, Imm |
9641/// +---+---------------+
9642/// ```
9643pub trait VpshufhwMaskzEmitter<A, B, C> {
9644 fn vpshufhw_maskz(&mut self, op0: A, op1: B, op2: C);
9645}
9646
9647impl<'a> VpshufhwMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9648 fn vpshufhw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9649 self.emit(VPSHUFHW128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9650 }
9651}
9652
9653impl<'a> VpshufhwMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9654 fn vpshufhw_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9655 self.emit(VPSHUFHW128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9656 }
9657}
9658
9659impl<'a> VpshufhwMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9660 fn vpshufhw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9661 self.emit(VPSHUFHW256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9662 }
9663}
9664
9665impl<'a> VpshufhwMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9666 fn vpshufhw_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9667 self.emit(VPSHUFHW256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9668 }
9669}
9670
9671impl<'a> VpshufhwMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9672 fn vpshufhw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9673 self.emit(VPSHUFHW512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9674 }
9675}
9676
9677impl<'a> VpshufhwMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9678 fn vpshufhw_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9679 self.emit(VPSHUFHW512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9680 }
9681}
9682
9683/// `VPSHUFLW` (VPSHUFLW).
9684/// Copies words from the low quadword of a 128-bit lane of the source operand and inserts them in the low quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. The 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the low quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3) from the low quadword of the source operand to be copied to the destination operand. The high quadword of the source operand is copied to the high quadword of the destination operand, for each 128-bit lane.
9685///
9686///
9687/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFLW.html).
9688///
9689/// Supported operand variants:
9690///
9691/// ```text
9692/// +---+---------------+
9693/// | # | Operands |
9694/// +---+---------------+
9695/// | 1 | Xmm, Mem, Imm |
9696/// | 2 | Xmm, Xmm, Imm |
9697/// | 3 | Ymm, Mem, Imm |
9698/// | 4 | Ymm, Ymm, Imm |
9699/// | 5 | Zmm, Mem, Imm |
9700/// | 6 | Zmm, Zmm, Imm |
9701/// +---+---------------+
9702/// ```
9703pub trait VpshuflwEmitter<A, B, C> {
9704 fn vpshuflw(&mut self, op0: A, op1: B, op2: C);
9705}
9706
9707impl<'a> VpshuflwEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9708 fn vpshuflw(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9709 self.emit(VPSHUFLW128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9710 }
9711}
9712
9713impl<'a> VpshuflwEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9714 fn vpshuflw(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9715 self.emit(VPSHUFLW128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9716 }
9717}
9718
9719impl<'a> VpshuflwEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9720 fn vpshuflw(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9721 self.emit(VPSHUFLW256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9722 }
9723}
9724
9725impl<'a> VpshuflwEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9726 fn vpshuflw(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9727 self.emit(VPSHUFLW256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9728 }
9729}
9730
9731impl<'a> VpshuflwEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9732 fn vpshuflw(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9733 self.emit(VPSHUFLW512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9734 }
9735}
9736
9737impl<'a> VpshuflwEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9738 fn vpshuflw(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9739 self.emit(VPSHUFLW512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9740 }
9741}
9742
9743/// `VPSHUFLW_MASK` (VPSHUFLW).
9744/// Copies words from the low quadword of a 128-bit lane of the source operand and inserts them in the low quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. The 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the low quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3) from the low quadword of the source operand to be copied to the destination operand. The high quadword of the source operand is copied to the high quadword of the destination operand, for each 128-bit lane.
9745///
9746///
9747/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFLW.html).
9748///
9749/// Supported operand variants:
9750///
9751/// ```text
9752/// +---+---------------+
9753/// | # | Operands |
9754/// +---+---------------+
9755/// | 1 | Xmm, Mem, Imm |
9756/// | 2 | Xmm, Xmm, Imm |
9757/// | 3 | Ymm, Mem, Imm |
9758/// | 4 | Ymm, Ymm, Imm |
9759/// | 5 | Zmm, Mem, Imm |
9760/// | 6 | Zmm, Zmm, Imm |
9761/// +---+---------------+
9762/// ```
9763pub trait VpshuflwMaskEmitter<A, B, C> {
9764 fn vpshuflw_mask(&mut self, op0: A, op1: B, op2: C);
9765}
9766
9767impl<'a> VpshuflwMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9768 fn vpshuflw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9769 self.emit(VPSHUFLW128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9770 }
9771}
9772
9773impl<'a> VpshuflwMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9774 fn vpshuflw_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9775 self.emit(VPSHUFLW128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9776 }
9777}
9778
9779impl<'a> VpshuflwMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9780 fn vpshuflw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9781 self.emit(VPSHUFLW256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9782 }
9783}
9784
9785impl<'a> VpshuflwMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9786 fn vpshuflw_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9787 self.emit(VPSHUFLW256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9788 }
9789}
9790
9791impl<'a> VpshuflwMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9792 fn vpshuflw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9793 self.emit(VPSHUFLW512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9794 }
9795}
9796
9797impl<'a> VpshuflwMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9798 fn vpshuflw_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9799 self.emit(VPSHUFLW512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9800 }
9801}
9802
9803/// `VPSHUFLW_MASKZ` (VPSHUFLW).
9804/// Copies words from the low quadword of a 128-bit lane of the source operand and inserts them in the low quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. The 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the low quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3) from the low quadword of the source operand to be copied to the destination operand. The high quadword of the source operand is copied to the high quadword of the destination operand, for each 128-bit lane.
9805///
9806///
9807/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFLW.html).
9808///
9809/// Supported operand variants:
9810///
9811/// ```text
9812/// +---+---------------+
9813/// | # | Operands |
9814/// +---+---------------+
9815/// | 1 | Xmm, Mem, Imm |
9816/// | 2 | Xmm, Xmm, Imm |
9817/// | 3 | Ymm, Mem, Imm |
9818/// | 4 | Ymm, Ymm, Imm |
9819/// | 5 | Zmm, Mem, Imm |
9820/// | 6 | Zmm, Zmm, Imm |
9821/// +---+---------------+
9822/// ```
9823pub trait VpshuflwMaskzEmitter<A, B, C> {
9824 fn vpshuflw_maskz(&mut self, op0: A, op1: B, op2: C);
9825}
9826
9827impl<'a> VpshuflwMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9828 fn vpshuflw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9829 self.emit(VPSHUFLW128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9830 }
9831}
9832
9833impl<'a> VpshuflwMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9834 fn vpshuflw_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9835 self.emit(VPSHUFLW128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9836 }
9837}
9838
9839impl<'a> VpshuflwMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9840 fn vpshuflw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9841 self.emit(VPSHUFLW256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9842 }
9843}
9844
9845impl<'a> VpshuflwMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9846 fn vpshuflw_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9847 self.emit(VPSHUFLW256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9848 }
9849}
9850
9851impl<'a> VpshuflwMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9852 fn vpshuflw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9853 self.emit(VPSHUFLW512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9854 }
9855}
9856
9857impl<'a> VpshuflwMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9858 fn vpshuflw_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9859 self.emit(VPSHUFLW512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9860 }
9861}
9862
9863/// `VPSLLDQ` (VPSLLDQ).
9864/// Shifts the destination operand (first operand) to the left by the number of bytes specified in the count operand (second operand). The empty low-order bytes are cleared (set to all 0s). If the value specified by the count operand is greater than 15, the destination operand is set to all 0s. The count operand is an 8-bit immediate.
9865///
9866///
9867/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLDQ.html).
9868///
9869/// Supported operand variants:
9870///
9871/// ```text
9872/// +---+---------------+
9873/// | # | Operands |
9874/// +---+---------------+
9875/// | 1 | Xmm, Mem, Imm |
9876/// | 2 | Xmm, Xmm, Imm |
9877/// | 3 | Ymm, Mem, Imm |
9878/// | 4 | Ymm, Ymm, Imm |
9879/// | 5 | Zmm, Mem, Imm |
9880/// | 6 | Zmm, Zmm, Imm |
9881/// +---+---------------+
9882/// ```
9883pub trait VpslldqEmitter<A, B, C> {
9884 fn vpslldq(&mut self, op0: A, op1: B, op2: C);
9885}
9886
9887impl<'a> VpslldqEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
9888 fn vpslldq(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
9889 self.emit(VPSLLDQ128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9890 }
9891}
9892
9893impl<'a> VpslldqEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
9894 fn vpslldq(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
9895 self.emit(VPSLLDQ256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9896 }
9897}
9898
9899impl<'a> VpslldqEmitter<Xmm, Mem, Imm> for Assembler<'a> {
9900 fn vpslldq(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
9901 self.emit(VPSLLDQ128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9902 }
9903}
9904
9905impl<'a> VpslldqEmitter<Ymm, Mem, Imm> for Assembler<'a> {
9906 fn vpslldq(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
9907 self.emit(VPSLLDQ256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9908 }
9909}
9910
9911impl<'a> VpslldqEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
9912 fn vpslldq(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
9913 self.emit(VPSLLDQ512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9914 }
9915}
9916
9917impl<'a> VpslldqEmitter<Zmm, Mem, Imm> for Assembler<'a> {
9918 fn vpslldq(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
9919 self.emit(VPSLLDQ512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9920 }
9921}
9922
9923/// `VPSLLVW` (VPSLLVW).
9924/// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the left by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0).
9925///
9926///
9927/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSLLVW%3AVPSLLVD%3AVPSLLVQ.html).
9928///
9929/// Supported operand variants:
9930///
9931/// ```text
9932/// +---+---------------+
9933/// | # | Operands |
9934/// +---+---------------+
9935/// | 1 | Xmm, Xmm, Mem |
9936/// | 2 | Xmm, Xmm, Xmm |
9937/// | 3 | Ymm, Ymm, Mem |
9938/// | 4 | Ymm, Ymm, Ymm |
9939/// | 5 | Zmm, Zmm, Mem |
9940/// | 6 | Zmm, Zmm, Zmm |
9941/// +---+---------------+
9942/// ```
9943pub trait VpsllvwEmitter<A, B, C> {
9944 fn vpsllvw(&mut self, op0: A, op1: B, op2: C);
9945}
9946
9947impl<'a> VpsllvwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
9948 fn vpsllvw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
9949 self.emit(VPSLLVW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9950 }
9951}
9952
9953impl<'a> VpsllvwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
9954 fn vpsllvw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
9955 self.emit(VPSLLVW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9956 }
9957}
9958
9959impl<'a> VpsllvwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
9960 fn vpsllvw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
9961 self.emit(VPSLLVW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9962 }
9963}
9964
9965impl<'a> VpsllvwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
9966 fn vpsllvw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
9967 self.emit(VPSLLVW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9968 }
9969}
9970
9971impl<'a> VpsllvwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
9972 fn vpsllvw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
9973 self.emit(VPSLLVW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9974 }
9975}
9976
9977impl<'a> VpsllvwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
9978 fn vpsllvw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
9979 self.emit(VPSLLVW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
9980 }
9981}
9982
9983/// `VPSLLVW_MASK` (VPSLLVW).
9984/// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the left by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0).
9985///
9986///
9987/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSLLVW%3AVPSLLVD%3AVPSLLVQ.html).
9988///
9989/// Supported operand variants:
9990///
9991/// ```text
9992/// +---+---------------+
9993/// | # | Operands |
9994/// +---+---------------+
9995/// | 1 | Xmm, Xmm, Mem |
9996/// | 2 | Xmm, Xmm, Xmm |
9997/// | 3 | Ymm, Ymm, Mem |
9998/// | 4 | Ymm, Ymm, Ymm |
9999/// | 5 | Zmm, Zmm, Mem |
10000/// | 6 | Zmm, Zmm, Zmm |
10001/// +---+---------------+
10002/// ```
10003pub trait VpsllvwMaskEmitter<A, B, C> {
10004 fn vpsllvw_mask(&mut self, op0: A, op1: B, op2: C);
10005}
10006
10007impl<'a> VpsllvwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10008 fn vpsllvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10009 self.emit(VPSLLVW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10010 }
10011}
10012
10013impl<'a> VpsllvwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10014 fn vpsllvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10015 self.emit(VPSLLVW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10016 }
10017}
10018
10019impl<'a> VpsllvwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
10020 fn vpsllvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
10021 self.emit(VPSLLVW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10022 }
10023}
10024
10025impl<'a> VpsllvwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10026 fn vpsllvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10027 self.emit(VPSLLVW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10028 }
10029}
10030
10031impl<'a> VpsllvwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
10032 fn vpsllvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
10033 self.emit(VPSLLVW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10034 }
10035}
10036
10037impl<'a> VpsllvwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10038 fn vpsllvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10039 self.emit(VPSLLVW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10040 }
10041}
10042
10043/// `VPSLLVW_MASKZ` (VPSLLVW).
10044/// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the left by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0).
10045///
10046///
10047/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSLLVW%3AVPSLLVD%3AVPSLLVQ.html).
10048///
10049/// Supported operand variants:
10050///
10051/// ```text
10052/// +---+---------------+
10053/// | # | Operands |
10054/// +---+---------------+
10055/// | 1 | Xmm, Xmm, Mem |
10056/// | 2 | Xmm, Xmm, Xmm |
10057/// | 3 | Ymm, Ymm, Mem |
10058/// | 4 | Ymm, Ymm, Ymm |
10059/// | 5 | Zmm, Zmm, Mem |
10060/// | 6 | Zmm, Zmm, Zmm |
10061/// +---+---------------+
10062/// ```
10063pub trait VpsllvwMaskzEmitter<A, B, C> {
10064 fn vpsllvw_maskz(&mut self, op0: A, op1: B, op2: C);
10065}
10066
10067impl<'a> VpsllvwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10068 fn vpsllvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10069 self.emit(VPSLLVW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10070 }
10071}
10072
10073impl<'a> VpsllvwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10074 fn vpsllvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10075 self.emit(VPSLLVW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10076 }
10077}
10078
10079impl<'a> VpsllvwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
10080 fn vpsllvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
10081 self.emit(VPSLLVW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10082 }
10083}
10084
10085impl<'a> VpsllvwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10086 fn vpsllvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10087 self.emit(VPSLLVW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10088 }
10089}
10090
10091impl<'a> VpsllvwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
10092 fn vpsllvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
10093 self.emit(VPSLLVW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10094 }
10095}
10096
10097impl<'a> VpsllvwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10098 fn vpsllvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10099 self.emit(VPSLLVW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10100 }
10101}
10102
10103/// `VPSLLW` (VPSLLW).
10104/// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the left by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-17 gives an example of shifting words in a 64-bit operand.
10105///
10106///
10107/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLW%3APSLLD%3APSLLQ.html).
10108///
10109/// Supported operand variants:
10110///
10111/// ```text
10112/// +----+---------------+
10113/// | # | Operands |
10114/// +----+---------------+
10115/// | 1 | Xmm, Mem, Imm |
10116/// | 2 | Xmm, Xmm, Imm |
10117/// | 3 | Xmm, Xmm, Mem |
10118/// | 4 | Xmm, Xmm, Xmm |
10119/// | 5 | Ymm, Mem, Imm |
10120/// | 6 | Ymm, Ymm, Imm |
10121/// | 7 | Ymm, Ymm, Mem |
10122/// | 8 | Ymm, Ymm, Xmm |
10123/// | 9 | Zmm, Mem, Imm |
10124/// | 10 | Zmm, Zmm, Imm |
10125/// | 11 | Zmm, Zmm, Mem |
10126/// | 12 | Zmm, Zmm, Xmm |
10127/// +----+---------------+
10128/// ```
10129pub trait VpsllwEmitter<A, B, C> {
10130 fn vpsllw(&mut self, op0: A, op1: B, op2: C);
10131}
10132
10133impl<'a> VpsllwEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10134 fn vpsllw(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10135 self.emit(VPSLLW128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10136 }
10137}
10138
10139impl<'a> VpsllwEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10140 fn vpsllw(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10141 self.emit(VPSLLW256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10142 }
10143}
10144
10145impl<'a> VpsllwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10146 fn vpsllw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10147 self.emit(VPSLLW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10148 }
10149}
10150
10151impl<'a> VpsllwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10152 fn vpsllw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10153 self.emit(VPSLLW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10154 }
10155}
10156
10157impl<'a> VpsllwEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
10158 fn vpsllw(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
10159 self.emit(VPSLLW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10160 }
10161}
10162
10163impl<'a> VpsllwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10164 fn vpsllw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10165 self.emit(VPSLLW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10166 }
10167}
10168
10169impl<'a> VpsllwEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10170 fn vpsllw(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10171 self.emit(VPSLLW128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10172 }
10173}
10174
10175impl<'a> VpsllwEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10176 fn vpsllw(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10177 self.emit(VPSLLW256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10178 }
10179}
10180
10181impl<'a> VpsllwEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10182 fn vpsllw(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10183 self.emit(VPSLLW512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10184 }
10185}
10186
10187impl<'a> VpsllwEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10188 fn vpsllw(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10189 self.emit(VPSLLW512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10190 }
10191}
10192
10193impl<'a> VpsllwEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
10194 fn vpsllw(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
10195 self.emit(VPSLLW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10196 }
10197}
10198
10199impl<'a> VpsllwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10200 fn vpsllw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10201 self.emit(VPSLLW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10202 }
10203}
10204
10205/// `VPSLLW_MASK` (VPSLLW).
10206/// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the left by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-17 gives an example of shifting words in a 64-bit operand.
10207///
10208///
10209/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLW%3APSLLD%3APSLLQ.html).
10210///
10211/// Supported operand variants:
10212///
10213/// ```text
10214/// +----+---------------+
10215/// | # | Operands |
10216/// +----+---------------+
10217/// | 1 | Xmm, Mem, Imm |
10218/// | 2 | Xmm, Xmm, Imm |
10219/// | 3 | Xmm, Xmm, Mem |
10220/// | 4 | Xmm, Xmm, Xmm |
10221/// | 5 | Ymm, Mem, Imm |
10222/// | 6 | Ymm, Ymm, Imm |
10223/// | 7 | Ymm, Ymm, Mem |
10224/// | 8 | Ymm, Ymm, Xmm |
10225/// | 9 | Zmm, Mem, Imm |
10226/// | 10 | Zmm, Zmm, Imm |
10227/// | 11 | Zmm, Zmm, Mem |
10228/// | 12 | Zmm, Zmm, Xmm |
10229/// +----+---------------+
10230/// ```
10231pub trait VpsllwMaskEmitter<A, B, C> {
10232 fn vpsllw_mask(&mut self, op0: A, op1: B, op2: C);
10233}
10234
10235impl<'a> VpsllwMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10236 fn vpsllw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10237 self.emit(VPSLLW128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10238 }
10239}
10240
10241impl<'a> VpsllwMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10242 fn vpsllw_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10243 self.emit(VPSLLW128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10244 }
10245}
10246
10247impl<'a> VpsllwMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10248 fn vpsllw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10249 self.emit(VPSLLW256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10250 }
10251}
10252
10253impl<'a> VpsllwMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10254 fn vpsllw_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10255 self.emit(VPSLLW256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10256 }
10257}
10258
10259impl<'a> VpsllwMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10260 fn vpsllw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10261 self.emit(VPSLLW512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10262 }
10263}
10264
10265impl<'a> VpsllwMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10266 fn vpsllw_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10267 self.emit(VPSLLW512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10268 }
10269}
10270
10271impl<'a> VpsllwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10272 fn vpsllw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10273 self.emit(VPSLLW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10274 }
10275}
10276
10277impl<'a> VpsllwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10278 fn vpsllw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10279 self.emit(VPSLLW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10280 }
10281}
10282
10283impl<'a> VpsllwMaskEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
10284 fn vpsllw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
10285 self.emit(VPSLLW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10286 }
10287}
10288
10289impl<'a> VpsllwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10290 fn vpsllw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10291 self.emit(VPSLLW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10292 }
10293}
10294
10295impl<'a> VpsllwMaskEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
10296 fn vpsllw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
10297 self.emit(VPSLLW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10298 }
10299}
10300
10301impl<'a> VpsllwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10302 fn vpsllw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10303 self.emit(VPSLLW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10304 }
10305}
10306
10307/// `VPSLLW_MASKZ` (VPSLLW).
10308/// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the left by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-17 gives an example of shifting words in a 64-bit operand.
10309///
10310///
10311/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLW%3APSLLD%3APSLLQ.html).
10312///
10313/// Supported operand variants:
10314///
10315/// ```text
10316/// +----+---------------+
10317/// | # | Operands |
10318/// +----+---------------+
10319/// | 1 | Xmm, Mem, Imm |
10320/// | 2 | Xmm, Xmm, Imm |
10321/// | 3 | Xmm, Xmm, Mem |
10322/// | 4 | Xmm, Xmm, Xmm |
10323/// | 5 | Ymm, Mem, Imm |
10324/// | 6 | Ymm, Ymm, Imm |
10325/// | 7 | Ymm, Ymm, Mem |
10326/// | 8 | Ymm, Ymm, Xmm |
10327/// | 9 | Zmm, Mem, Imm |
10328/// | 10 | Zmm, Zmm, Imm |
10329/// | 11 | Zmm, Zmm, Mem |
10330/// | 12 | Zmm, Zmm, Xmm |
10331/// +----+---------------+
10332/// ```
10333pub trait VpsllwMaskzEmitter<A, B, C> {
10334 fn vpsllw_maskz(&mut self, op0: A, op1: B, op2: C);
10335}
10336
10337impl<'a> VpsllwMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10338 fn vpsllw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10339 self.emit(VPSLLW128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10340 }
10341}
10342
10343impl<'a> VpsllwMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10344 fn vpsllw_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10345 self.emit(VPSLLW128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10346 }
10347}
10348
10349impl<'a> VpsllwMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10350 fn vpsllw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10351 self.emit(VPSLLW256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10352 }
10353}
10354
10355impl<'a> VpsllwMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10356 fn vpsllw_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10357 self.emit(VPSLLW256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10358 }
10359}
10360
10361impl<'a> VpsllwMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10362 fn vpsllw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10363 self.emit(VPSLLW512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10364 }
10365}
10366
10367impl<'a> VpsllwMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10368 fn vpsllw_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10369 self.emit(VPSLLW512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10370 }
10371}
10372
10373impl<'a> VpsllwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10374 fn vpsllw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10375 self.emit(VPSLLW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10376 }
10377}
10378
10379impl<'a> VpsllwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10380 fn vpsllw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10381 self.emit(VPSLLW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10382 }
10383}
10384
10385impl<'a> VpsllwMaskzEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
10386 fn vpsllw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
10387 self.emit(VPSLLW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10388 }
10389}
10390
10391impl<'a> VpsllwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10392 fn vpsllw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10393 self.emit(VPSLLW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10394 }
10395}
10396
10397impl<'a> VpsllwMaskzEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
10398 fn vpsllw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
10399 self.emit(VPSLLW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10400 }
10401}
10402
10403impl<'a> VpsllwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10404 fn vpsllw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10405 self.emit(VPSLLW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10406 }
10407}
10408
10409/// `VPSRAVW` (VPSRAVW).
10410/// Shifts the bits in the individual data elements (word/doublewords/quadword) in the first source operand (the second operand) to the right by the number of bits specified in the count value of respective data elements in the second source operand (the third operand). As the bits in the data elements are shifted right, the empty high-order bits are set to the MSB (sign extension).
10411///
10412///
10413/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRAVW%3AVPSRAVD%3AVPSRAVQ.html).
10414///
10415/// Supported operand variants:
10416///
10417/// ```text
10418/// +---+---------------+
10419/// | # | Operands |
10420/// +---+---------------+
10421/// | 1 | Xmm, Xmm, Mem |
10422/// | 2 | Xmm, Xmm, Xmm |
10423/// | 3 | Ymm, Ymm, Mem |
10424/// | 4 | Ymm, Ymm, Ymm |
10425/// | 5 | Zmm, Zmm, Mem |
10426/// | 6 | Zmm, Zmm, Zmm |
10427/// +---+---------------+
10428/// ```
10429pub trait VpsravwEmitter<A, B, C> {
10430 fn vpsravw(&mut self, op0: A, op1: B, op2: C);
10431}
10432
10433impl<'a> VpsravwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10434 fn vpsravw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10435 self.emit(VPSRAVW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10436 }
10437}
10438
10439impl<'a> VpsravwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10440 fn vpsravw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10441 self.emit(VPSRAVW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10442 }
10443}
10444
10445impl<'a> VpsravwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
10446 fn vpsravw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
10447 self.emit(VPSRAVW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10448 }
10449}
10450
10451impl<'a> VpsravwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10452 fn vpsravw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10453 self.emit(VPSRAVW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10454 }
10455}
10456
10457impl<'a> VpsravwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
10458 fn vpsravw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
10459 self.emit(VPSRAVW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10460 }
10461}
10462
10463impl<'a> VpsravwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10464 fn vpsravw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10465 self.emit(VPSRAVW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10466 }
10467}
10468
10469/// `VPSRAVW_MASK` (VPSRAVW).
10470/// Shifts the bits in the individual data elements (word/doublewords/quadword) in the first source operand (the second operand) to the right by the number of bits specified in the count value of respective data elements in the second source operand (the third operand). As the bits in the data elements are shifted right, the empty high-order bits are set to the MSB (sign extension).
10471///
10472///
10473/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRAVW%3AVPSRAVD%3AVPSRAVQ.html).
10474///
10475/// Supported operand variants:
10476///
10477/// ```text
10478/// +---+---------------+
10479/// | # | Operands |
10480/// +---+---------------+
10481/// | 1 | Xmm, Xmm, Mem |
10482/// | 2 | Xmm, Xmm, Xmm |
10483/// | 3 | Ymm, Ymm, Mem |
10484/// | 4 | Ymm, Ymm, Ymm |
10485/// | 5 | Zmm, Zmm, Mem |
10486/// | 6 | Zmm, Zmm, Zmm |
10487/// +---+---------------+
10488/// ```
10489pub trait VpsravwMaskEmitter<A, B, C> {
10490 fn vpsravw_mask(&mut self, op0: A, op1: B, op2: C);
10491}
10492
10493impl<'a> VpsravwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10494 fn vpsravw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10495 self.emit(VPSRAVW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10496 }
10497}
10498
10499impl<'a> VpsravwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10500 fn vpsravw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10501 self.emit(VPSRAVW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10502 }
10503}
10504
10505impl<'a> VpsravwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
10506 fn vpsravw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
10507 self.emit(VPSRAVW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10508 }
10509}
10510
10511impl<'a> VpsravwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10512 fn vpsravw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10513 self.emit(VPSRAVW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10514 }
10515}
10516
10517impl<'a> VpsravwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
10518 fn vpsravw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
10519 self.emit(VPSRAVW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10520 }
10521}
10522
10523impl<'a> VpsravwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10524 fn vpsravw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10525 self.emit(VPSRAVW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10526 }
10527}
10528
10529/// `VPSRAVW_MASKZ` (VPSRAVW).
10530/// Shifts the bits in the individual data elements (word/doublewords/quadword) in the first source operand (the second operand) to the right by the number of bits specified in the count value of respective data elements in the second source operand (the third operand). As the bits in the data elements are shifted right, the empty high-order bits are set to the MSB (sign extension).
10531///
10532///
10533/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRAVW%3AVPSRAVD%3AVPSRAVQ.html).
10534///
10535/// Supported operand variants:
10536///
10537/// ```text
10538/// +---+---------------+
10539/// | # | Operands |
10540/// +---+---------------+
10541/// | 1 | Xmm, Xmm, Mem |
10542/// | 2 | Xmm, Xmm, Xmm |
10543/// | 3 | Ymm, Ymm, Mem |
10544/// | 4 | Ymm, Ymm, Ymm |
10545/// | 5 | Zmm, Zmm, Mem |
10546/// | 6 | Zmm, Zmm, Zmm |
10547/// +---+---------------+
10548/// ```
10549pub trait VpsravwMaskzEmitter<A, B, C> {
10550 fn vpsravw_maskz(&mut self, op0: A, op1: B, op2: C);
10551}
10552
10553impl<'a> VpsravwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10554 fn vpsravw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10555 self.emit(VPSRAVW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10556 }
10557}
10558
10559impl<'a> VpsravwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10560 fn vpsravw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10561 self.emit(VPSRAVW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10562 }
10563}
10564
10565impl<'a> VpsravwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
10566 fn vpsravw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
10567 self.emit(VPSRAVW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10568 }
10569}
10570
10571impl<'a> VpsravwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10572 fn vpsravw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10573 self.emit(VPSRAVW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10574 }
10575}
10576
10577impl<'a> VpsravwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
10578 fn vpsravw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
10579 self.emit(VPSRAVW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10580 }
10581}
10582
10583impl<'a> VpsravwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10584 fn vpsravw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10585 self.emit(VPSRAVW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10586 }
10587}
10588
10589/// `VPSRAW` (VPSRAW).
10590/// Shifts the bits in the individual data elements (words, doublewords or quadwords) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are filled with the initial value of the sign bit of the data element. If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for quadwords), each destination data element is filled with the initial value of the sign bit of the element. (Figure 4-18 gives an example of shifting words in a 64-bit operand.)
10591///
10592///
10593/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRAW%3APSRAD%3APSRAQ.html).
10594///
10595/// Supported operand variants:
10596///
10597/// ```text
10598/// +----+---------------+
10599/// | # | Operands |
10600/// +----+---------------+
10601/// | 1 | Xmm, Mem, Imm |
10602/// | 2 | Xmm, Xmm, Imm |
10603/// | 3 | Xmm, Xmm, Mem |
10604/// | 4 | Xmm, Xmm, Xmm |
10605/// | 5 | Ymm, Mem, Imm |
10606/// | 6 | Ymm, Ymm, Imm |
10607/// | 7 | Ymm, Ymm, Mem |
10608/// | 8 | Ymm, Ymm, Xmm |
10609/// | 9 | Zmm, Mem, Imm |
10610/// | 10 | Zmm, Zmm, Imm |
10611/// | 11 | Zmm, Zmm, Mem |
10612/// | 12 | Zmm, Zmm, Xmm |
10613/// +----+---------------+
10614/// ```
10615pub trait VpsrawEmitter<A, B, C> {
10616 fn vpsraw(&mut self, op0: A, op1: B, op2: C);
10617}
10618
10619impl<'a> VpsrawEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10620 fn vpsraw(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10621 self.emit(VPSRAW128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10622 }
10623}
10624
10625impl<'a> VpsrawEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10626 fn vpsraw(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10627 self.emit(VPSRAW256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10628 }
10629}
10630
10631impl<'a> VpsrawEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10632 fn vpsraw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10633 self.emit(VPSRAW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10634 }
10635}
10636
10637impl<'a> VpsrawEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10638 fn vpsraw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10639 self.emit(VPSRAW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10640 }
10641}
10642
10643impl<'a> VpsrawEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
10644 fn vpsraw(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
10645 self.emit(VPSRAW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10646 }
10647}
10648
10649impl<'a> VpsrawEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10650 fn vpsraw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10651 self.emit(VPSRAW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10652 }
10653}
10654
10655impl<'a> VpsrawEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10656 fn vpsraw(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10657 self.emit(VPSRAW128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10658 }
10659}
10660
10661impl<'a> VpsrawEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10662 fn vpsraw(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10663 self.emit(VPSRAW256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10664 }
10665}
10666
10667impl<'a> VpsrawEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10668 fn vpsraw(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10669 self.emit(VPSRAW512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10670 }
10671}
10672
10673impl<'a> VpsrawEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10674 fn vpsraw(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10675 self.emit(VPSRAW512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10676 }
10677}
10678
10679impl<'a> VpsrawEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
10680 fn vpsraw(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
10681 self.emit(VPSRAW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10682 }
10683}
10684
10685impl<'a> VpsrawEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10686 fn vpsraw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10687 self.emit(VPSRAW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10688 }
10689}
10690
10691/// `VPSRAW_MASK` (VPSRAW).
10692/// Shifts the bits in the individual data elements (words, doublewords or quadwords) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are filled with the initial value of the sign bit of the data element. If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for quadwords), each destination data element is filled with the initial value of the sign bit of the element. (Figure 4-18 gives an example of shifting words in a 64-bit operand.)
10693///
10694///
10695/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRAW%3APSRAD%3APSRAQ.html).
10696///
10697/// Supported operand variants:
10698///
10699/// ```text
10700/// +----+---------------+
10701/// | # | Operands |
10702/// +----+---------------+
10703/// | 1 | Xmm, Mem, Imm |
10704/// | 2 | Xmm, Xmm, Imm |
10705/// | 3 | Xmm, Xmm, Mem |
10706/// | 4 | Xmm, Xmm, Xmm |
10707/// | 5 | Ymm, Mem, Imm |
10708/// | 6 | Ymm, Ymm, Imm |
10709/// | 7 | Ymm, Ymm, Mem |
10710/// | 8 | Ymm, Ymm, Xmm |
10711/// | 9 | Zmm, Mem, Imm |
10712/// | 10 | Zmm, Zmm, Imm |
10713/// | 11 | Zmm, Zmm, Mem |
10714/// | 12 | Zmm, Zmm, Xmm |
10715/// +----+---------------+
10716/// ```
10717pub trait VpsrawMaskEmitter<A, B, C> {
10718 fn vpsraw_mask(&mut self, op0: A, op1: B, op2: C);
10719}
10720
10721impl<'a> VpsrawMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10722 fn vpsraw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10723 self.emit(VPSRAW128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10724 }
10725}
10726
10727impl<'a> VpsrawMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10728 fn vpsraw_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10729 self.emit(VPSRAW128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10730 }
10731}
10732
10733impl<'a> VpsrawMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10734 fn vpsraw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10735 self.emit(VPSRAW256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10736 }
10737}
10738
10739impl<'a> VpsrawMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10740 fn vpsraw_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10741 self.emit(VPSRAW256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10742 }
10743}
10744
10745impl<'a> VpsrawMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10746 fn vpsraw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10747 self.emit(VPSRAW512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10748 }
10749}
10750
10751impl<'a> VpsrawMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10752 fn vpsraw_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10753 self.emit(VPSRAW512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10754 }
10755}
10756
10757impl<'a> VpsrawMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10758 fn vpsraw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10759 self.emit(VPSRAW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10760 }
10761}
10762
10763impl<'a> VpsrawMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10764 fn vpsraw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10765 self.emit(VPSRAW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10766 }
10767}
10768
10769impl<'a> VpsrawMaskEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
10770 fn vpsraw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
10771 self.emit(VPSRAW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10772 }
10773}
10774
10775impl<'a> VpsrawMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10776 fn vpsraw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10777 self.emit(VPSRAW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10778 }
10779}
10780
10781impl<'a> VpsrawMaskEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
10782 fn vpsraw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
10783 self.emit(VPSRAW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10784 }
10785}
10786
10787impl<'a> VpsrawMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10788 fn vpsraw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10789 self.emit(VPSRAW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10790 }
10791}
10792
10793/// `VPSRAW_MASKZ` (VPSRAW).
10794/// Shifts the bits in the individual data elements (words, doublewords or quadwords) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are filled with the initial value of the sign bit of the data element. If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for quadwords), each destination data element is filled with the initial value of the sign bit of the element. (Figure 4-18 gives an example of shifting words in a 64-bit operand.)
10795///
10796///
10797/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRAW%3APSRAD%3APSRAQ.html).
10798///
10799/// Supported operand variants:
10800///
10801/// ```text
10802/// +----+---------------+
10803/// | # | Operands |
10804/// +----+---------------+
10805/// | 1 | Xmm, Mem, Imm |
10806/// | 2 | Xmm, Xmm, Imm |
10807/// | 3 | Xmm, Xmm, Mem |
10808/// | 4 | Xmm, Xmm, Xmm |
10809/// | 5 | Ymm, Mem, Imm |
10810/// | 6 | Ymm, Ymm, Imm |
10811/// | 7 | Ymm, Ymm, Mem |
10812/// | 8 | Ymm, Ymm, Xmm |
10813/// | 9 | Zmm, Mem, Imm |
10814/// | 10 | Zmm, Zmm, Imm |
10815/// | 11 | Zmm, Zmm, Mem |
10816/// | 12 | Zmm, Zmm, Xmm |
10817/// +----+---------------+
10818/// ```
10819pub trait VpsrawMaskzEmitter<A, B, C> {
10820 fn vpsraw_maskz(&mut self, op0: A, op1: B, op2: C);
10821}
10822
10823impl<'a> VpsrawMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10824 fn vpsraw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10825 self.emit(VPSRAW128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10826 }
10827}
10828
10829impl<'a> VpsrawMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10830 fn vpsraw_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10831 self.emit(VPSRAW128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10832 }
10833}
10834
10835impl<'a> VpsrawMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10836 fn vpsraw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10837 self.emit(VPSRAW256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10838 }
10839}
10840
10841impl<'a> VpsrawMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10842 fn vpsraw_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10843 self.emit(VPSRAW256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10844 }
10845}
10846
10847impl<'a> VpsrawMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10848 fn vpsraw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10849 self.emit(VPSRAW512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10850 }
10851}
10852
10853impl<'a> VpsrawMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10854 fn vpsraw_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10855 self.emit(VPSRAW512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10856 }
10857}
10858
10859impl<'a> VpsrawMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10860 fn vpsraw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10861 self.emit(VPSRAW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10862 }
10863}
10864
10865impl<'a> VpsrawMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10866 fn vpsraw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10867 self.emit(VPSRAW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10868 }
10869}
10870
10871impl<'a> VpsrawMaskzEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
10872 fn vpsraw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
10873 self.emit(VPSRAW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10874 }
10875}
10876
10877impl<'a> VpsrawMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10878 fn vpsraw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10879 self.emit(VPSRAW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10880 }
10881}
10882
10883impl<'a> VpsrawMaskzEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
10884 fn vpsraw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
10885 self.emit(VPSRAW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10886 }
10887}
10888
10889impl<'a> VpsrawMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
10890 fn vpsraw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
10891 self.emit(VPSRAW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10892 }
10893}
10894
10895/// `VPSRLDQ` (VPSRLDQ).
10896/// Shifts the destination operand (first operand) to the right by the number of bytes specified in the count operand (second operand). The empty high-order bytes are cleared (set to all 0s). If the value specified by the count operand is greater than 15, the destination operand is set to all 0s. The count operand is an 8-bit immediate.
10897///
10898///
10899/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLDQ.html).
10900///
10901/// Supported operand variants:
10902///
10903/// ```text
10904/// +---+---------------+
10905/// | # | Operands |
10906/// +---+---------------+
10907/// | 1 | Xmm, Mem, Imm |
10908/// | 2 | Xmm, Xmm, Imm |
10909/// | 3 | Ymm, Mem, Imm |
10910/// | 4 | Ymm, Ymm, Imm |
10911/// | 5 | Zmm, Mem, Imm |
10912/// | 6 | Zmm, Zmm, Imm |
10913/// +---+---------------+
10914/// ```
10915pub trait VpsrldqEmitter<A, B, C> {
10916 fn vpsrldq(&mut self, op0: A, op1: B, op2: C);
10917}
10918
10919impl<'a> VpsrldqEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
10920 fn vpsrldq(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
10921 self.emit(VPSRLDQ128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10922 }
10923}
10924
10925impl<'a> VpsrldqEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
10926 fn vpsrldq(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
10927 self.emit(VPSRLDQ256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10928 }
10929}
10930
10931impl<'a> VpsrldqEmitter<Xmm, Mem, Imm> for Assembler<'a> {
10932 fn vpsrldq(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
10933 self.emit(VPSRLDQ128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10934 }
10935}
10936
10937impl<'a> VpsrldqEmitter<Ymm, Mem, Imm> for Assembler<'a> {
10938 fn vpsrldq(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
10939 self.emit(VPSRLDQ256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10940 }
10941}
10942
10943impl<'a> VpsrldqEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
10944 fn vpsrldq(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
10945 self.emit(VPSRLDQ512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10946 }
10947}
10948
10949impl<'a> VpsrldqEmitter<Zmm, Mem, Imm> for Assembler<'a> {
10950 fn vpsrldq(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
10951 self.emit(VPSRLDQ512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10952 }
10953}
10954
10955/// `VPSRLVW` (VPSRLVW).
10956/// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the right by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0).
10957///
10958///
10959/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRLVW%3AVPSRLVD%3AVPSRLVQ.html).
10960///
10961/// Supported operand variants:
10962///
10963/// ```text
10964/// +---+---------------+
10965/// | # | Operands |
10966/// +---+---------------+
10967/// | 1 | Xmm, Xmm, Mem |
10968/// | 2 | Xmm, Xmm, Xmm |
10969/// | 3 | Ymm, Ymm, Mem |
10970/// | 4 | Ymm, Ymm, Ymm |
10971/// | 5 | Zmm, Zmm, Mem |
10972/// | 6 | Zmm, Zmm, Zmm |
10973/// +---+---------------+
10974/// ```
10975pub trait VpsrlvwEmitter<A, B, C> {
10976 fn vpsrlvw(&mut self, op0: A, op1: B, op2: C);
10977}
10978
10979impl<'a> VpsrlvwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
10980 fn vpsrlvw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
10981 self.emit(VPSRLVW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10982 }
10983}
10984
10985impl<'a> VpsrlvwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
10986 fn vpsrlvw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
10987 self.emit(VPSRLVW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10988 }
10989}
10990
10991impl<'a> VpsrlvwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
10992 fn vpsrlvw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
10993 self.emit(VPSRLVW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
10994 }
10995}
10996
10997impl<'a> VpsrlvwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
10998 fn vpsrlvw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
10999 self.emit(VPSRLVW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11000 }
11001}
11002
11003impl<'a> VpsrlvwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11004 fn vpsrlvw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11005 self.emit(VPSRLVW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11006 }
11007}
11008
11009impl<'a> VpsrlvwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11010 fn vpsrlvw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11011 self.emit(VPSRLVW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11012 }
11013}
11014
11015/// `VPSRLVW_MASK` (VPSRLVW).
11016/// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the right by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0).
11017///
11018///
11019/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRLVW%3AVPSRLVD%3AVPSRLVQ.html).
11020///
11021/// Supported operand variants:
11022///
11023/// ```text
11024/// +---+---------------+
11025/// | # | Operands |
11026/// +---+---------------+
11027/// | 1 | Xmm, Xmm, Mem |
11028/// | 2 | Xmm, Xmm, Xmm |
11029/// | 3 | Ymm, Ymm, Mem |
11030/// | 4 | Ymm, Ymm, Ymm |
11031/// | 5 | Zmm, Zmm, Mem |
11032/// | 6 | Zmm, Zmm, Zmm |
11033/// +---+---------------+
11034/// ```
11035pub trait VpsrlvwMaskEmitter<A, B, C> {
11036 fn vpsrlvw_mask(&mut self, op0: A, op1: B, op2: C);
11037}
11038
11039impl<'a> VpsrlvwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11040 fn vpsrlvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11041 self.emit(VPSRLVW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11042 }
11043}
11044
11045impl<'a> VpsrlvwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11046 fn vpsrlvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11047 self.emit(VPSRLVW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11048 }
11049}
11050
11051impl<'a> VpsrlvwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11052 fn vpsrlvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11053 self.emit(VPSRLVW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11054 }
11055}
11056
11057impl<'a> VpsrlvwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11058 fn vpsrlvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11059 self.emit(VPSRLVW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11060 }
11061}
11062
11063impl<'a> VpsrlvwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11064 fn vpsrlvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11065 self.emit(VPSRLVW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11066 }
11067}
11068
11069impl<'a> VpsrlvwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11070 fn vpsrlvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11071 self.emit(VPSRLVW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11072 }
11073}
11074
11075/// `VPSRLVW_MASKZ` (VPSRLVW).
11076/// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the right by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0).
11077///
11078///
11079/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRLVW%3AVPSRLVD%3AVPSRLVQ.html).
11080///
11081/// Supported operand variants:
11082///
11083/// ```text
11084/// +---+---------------+
11085/// | # | Operands |
11086/// +---+---------------+
11087/// | 1 | Xmm, Xmm, Mem |
11088/// | 2 | Xmm, Xmm, Xmm |
11089/// | 3 | Ymm, Ymm, Mem |
11090/// | 4 | Ymm, Ymm, Ymm |
11091/// | 5 | Zmm, Zmm, Mem |
11092/// | 6 | Zmm, Zmm, Zmm |
11093/// +---+---------------+
11094/// ```
11095pub trait VpsrlvwMaskzEmitter<A, B, C> {
11096 fn vpsrlvw_maskz(&mut self, op0: A, op1: B, op2: C);
11097}
11098
11099impl<'a> VpsrlvwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11100 fn vpsrlvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11101 self.emit(VPSRLVW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11102 }
11103}
11104
11105impl<'a> VpsrlvwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11106 fn vpsrlvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11107 self.emit(VPSRLVW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11108 }
11109}
11110
11111impl<'a> VpsrlvwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11112 fn vpsrlvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11113 self.emit(VPSRLVW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11114 }
11115}
11116
11117impl<'a> VpsrlvwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11118 fn vpsrlvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11119 self.emit(VPSRLVW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11120 }
11121}
11122
11123impl<'a> VpsrlvwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11124 fn vpsrlvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11125 self.emit(VPSRLVW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11126 }
11127}
11128
11129impl<'a> VpsrlvwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11130 fn vpsrlvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11131 self.emit(VPSRLVW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11132 }
11133}
11134
11135/// `VPSRLW` (VPSRLW).
11136/// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-19 gives an example of shifting words in a 64-bit operand.
11137///
11138///
11139/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLW%3APSRLD%3APSRLQ.html).
11140///
11141/// Supported operand variants:
11142///
11143/// ```text
11144/// +----+---------------+
11145/// | # | Operands |
11146/// +----+---------------+
11147/// | 1 | Xmm, Mem, Imm |
11148/// | 2 | Xmm, Xmm, Imm |
11149/// | 3 | Xmm, Xmm, Mem |
11150/// | 4 | Xmm, Xmm, Xmm |
11151/// | 5 | Ymm, Mem, Imm |
11152/// | 6 | Ymm, Ymm, Imm |
11153/// | 7 | Ymm, Ymm, Mem |
11154/// | 8 | Ymm, Ymm, Xmm |
11155/// | 9 | Zmm, Mem, Imm |
11156/// | 10 | Zmm, Zmm, Imm |
11157/// | 11 | Zmm, Zmm, Mem |
11158/// | 12 | Zmm, Zmm, Xmm |
11159/// +----+---------------+
11160/// ```
11161pub trait VpsrlwEmitter<A, B, C> {
11162 fn vpsrlw(&mut self, op0: A, op1: B, op2: C);
11163}
11164
11165impl<'a> VpsrlwEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
11166 fn vpsrlw(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
11167 self.emit(VPSRLW128RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11168 }
11169}
11170
11171impl<'a> VpsrlwEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
11172 fn vpsrlw(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
11173 self.emit(VPSRLW256RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11174 }
11175}
11176
11177impl<'a> VpsrlwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11178 fn vpsrlw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11179 self.emit(VPSRLW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11180 }
11181}
11182
11183impl<'a> VpsrlwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11184 fn vpsrlw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11185 self.emit(VPSRLW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11186 }
11187}
11188
11189impl<'a> VpsrlwEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
11190 fn vpsrlw(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
11191 self.emit(VPSRLW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11192 }
11193}
11194
11195impl<'a> VpsrlwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11196 fn vpsrlw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11197 self.emit(VPSRLW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11198 }
11199}
11200
11201impl<'a> VpsrlwEmitter<Xmm, Mem, Imm> for Assembler<'a> {
11202 fn vpsrlw(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
11203 self.emit(VPSRLW128RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11204 }
11205}
11206
11207impl<'a> VpsrlwEmitter<Ymm, Mem, Imm> for Assembler<'a> {
11208 fn vpsrlw(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
11209 self.emit(VPSRLW256RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11210 }
11211}
11212
11213impl<'a> VpsrlwEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
11214 fn vpsrlw(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
11215 self.emit(VPSRLW512RRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11216 }
11217}
11218
11219impl<'a> VpsrlwEmitter<Zmm, Mem, Imm> for Assembler<'a> {
11220 fn vpsrlw(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
11221 self.emit(VPSRLW512RMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11222 }
11223}
11224
11225impl<'a> VpsrlwEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
11226 fn vpsrlw(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
11227 self.emit(VPSRLW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11228 }
11229}
11230
11231impl<'a> VpsrlwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11232 fn vpsrlw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11233 self.emit(VPSRLW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11234 }
11235}
11236
11237/// `VPSRLW_MASK` (VPSRLW).
11238/// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-19 gives an example of shifting words in a 64-bit operand.
11239///
11240///
11241/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLW%3APSRLD%3APSRLQ.html).
11242///
11243/// Supported operand variants:
11244///
11245/// ```text
11246/// +----+---------------+
11247/// | # | Operands |
11248/// +----+---------------+
11249/// | 1 | Xmm, Mem, Imm |
11250/// | 2 | Xmm, Xmm, Imm |
11251/// | 3 | Xmm, Xmm, Mem |
11252/// | 4 | Xmm, Xmm, Xmm |
11253/// | 5 | Ymm, Mem, Imm |
11254/// | 6 | Ymm, Ymm, Imm |
11255/// | 7 | Ymm, Ymm, Mem |
11256/// | 8 | Ymm, Ymm, Xmm |
11257/// | 9 | Zmm, Mem, Imm |
11258/// | 10 | Zmm, Zmm, Imm |
11259/// | 11 | Zmm, Zmm, Mem |
11260/// | 12 | Zmm, Zmm, Xmm |
11261/// +----+---------------+
11262/// ```
11263pub trait VpsrlwMaskEmitter<A, B, C> {
11264 fn vpsrlw_mask(&mut self, op0: A, op1: B, op2: C);
11265}
11266
11267impl<'a> VpsrlwMaskEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
11268 fn vpsrlw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
11269 self.emit(VPSRLW128RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11270 }
11271}
11272
11273impl<'a> VpsrlwMaskEmitter<Xmm, Mem, Imm> for Assembler<'a> {
11274 fn vpsrlw_mask(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
11275 self.emit(VPSRLW128RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11276 }
11277}
11278
11279impl<'a> VpsrlwMaskEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
11280 fn vpsrlw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
11281 self.emit(VPSRLW256RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11282 }
11283}
11284
11285impl<'a> VpsrlwMaskEmitter<Ymm, Mem, Imm> for Assembler<'a> {
11286 fn vpsrlw_mask(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
11287 self.emit(VPSRLW256RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11288 }
11289}
11290
11291impl<'a> VpsrlwMaskEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
11292 fn vpsrlw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
11293 self.emit(VPSRLW512RRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11294 }
11295}
11296
11297impl<'a> VpsrlwMaskEmitter<Zmm, Mem, Imm> for Assembler<'a> {
11298 fn vpsrlw_mask(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
11299 self.emit(VPSRLW512RMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11300 }
11301}
11302
11303impl<'a> VpsrlwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11304 fn vpsrlw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11305 self.emit(VPSRLW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11306 }
11307}
11308
11309impl<'a> VpsrlwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11310 fn vpsrlw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11311 self.emit(VPSRLW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11312 }
11313}
11314
11315impl<'a> VpsrlwMaskEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
11316 fn vpsrlw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
11317 self.emit(VPSRLW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11318 }
11319}
11320
11321impl<'a> VpsrlwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11322 fn vpsrlw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11323 self.emit(VPSRLW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11324 }
11325}
11326
11327impl<'a> VpsrlwMaskEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
11328 fn vpsrlw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
11329 self.emit(VPSRLW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11330 }
11331}
11332
11333impl<'a> VpsrlwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11334 fn vpsrlw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11335 self.emit(VPSRLW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11336 }
11337}
11338
11339/// `VPSRLW_MASKZ` (VPSRLW).
11340/// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-19 gives an example of shifting words in a 64-bit operand.
11341///
11342///
11343/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLW%3APSRLD%3APSRLQ.html).
11344///
11345/// Supported operand variants:
11346///
11347/// ```text
11348/// +----+---------------+
11349/// | # | Operands |
11350/// +----+---------------+
11351/// | 1 | Xmm, Mem, Imm |
11352/// | 2 | Xmm, Xmm, Imm |
11353/// | 3 | Xmm, Xmm, Mem |
11354/// | 4 | Xmm, Xmm, Xmm |
11355/// | 5 | Ymm, Mem, Imm |
11356/// | 6 | Ymm, Ymm, Imm |
11357/// | 7 | Ymm, Ymm, Mem |
11358/// | 8 | Ymm, Ymm, Xmm |
11359/// | 9 | Zmm, Mem, Imm |
11360/// | 10 | Zmm, Zmm, Imm |
11361/// | 11 | Zmm, Zmm, Mem |
11362/// | 12 | Zmm, Zmm, Xmm |
11363/// +----+---------------+
11364/// ```
11365pub trait VpsrlwMaskzEmitter<A, B, C> {
11366 fn vpsrlw_maskz(&mut self, op0: A, op1: B, op2: C);
11367}
11368
11369impl<'a> VpsrlwMaskzEmitter<Xmm, Xmm, Imm> for Assembler<'a> {
11370 fn vpsrlw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Imm) {
11371 self.emit(VPSRLW128RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11372 }
11373}
11374
11375impl<'a> VpsrlwMaskzEmitter<Xmm, Mem, Imm> for Assembler<'a> {
11376 fn vpsrlw_maskz(&mut self, op0: Xmm, op1: Mem, op2: Imm) {
11377 self.emit(VPSRLW128RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11378 }
11379}
11380
11381impl<'a> VpsrlwMaskzEmitter<Ymm, Ymm, Imm> for Assembler<'a> {
11382 fn vpsrlw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Imm) {
11383 self.emit(VPSRLW256RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11384 }
11385}
11386
11387impl<'a> VpsrlwMaskzEmitter<Ymm, Mem, Imm> for Assembler<'a> {
11388 fn vpsrlw_maskz(&mut self, op0: Ymm, op1: Mem, op2: Imm) {
11389 self.emit(VPSRLW256RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11390 }
11391}
11392
11393impl<'a> VpsrlwMaskzEmitter<Zmm, Zmm, Imm> for Assembler<'a> {
11394 fn vpsrlw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Imm) {
11395 self.emit(VPSRLW512RRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11396 }
11397}
11398
11399impl<'a> VpsrlwMaskzEmitter<Zmm, Mem, Imm> for Assembler<'a> {
11400 fn vpsrlw_maskz(&mut self, op0: Zmm, op1: Mem, op2: Imm) {
11401 self.emit(VPSRLW512RMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11402 }
11403}
11404
11405impl<'a> VpsrlwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11406 fn vpsrlw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11407 self.emit(VPSRLW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11408 }
11409}
11410
11411impl<'a> VpsrlwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11412 fn vpsrlw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11413 self.emit(VPSRLW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11414 }
11415}
11416
11417impl<'a> VpsrlwMaskzEmitter<Ymm, Ymm, Xmm> for Assembler<'a> {
11418 fn vpsrlw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Xmm) {
11419 self.emit(VPSRLW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11420 }
11421}
11422
11423impl<'a> VpsrlwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11424 fn vpsrlw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11425 self.emit(VPSRLW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11426 }
11427}
11428
11429impl<'a> VpsrlwMaskzEmitter<Zmm, Zmm, Xmm> for Assembler<'a> {
11430 fn vpsrlw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Xmm) {
11431 self.emit(VPSRLW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11432 }
11433}
11434
11435impl<'a> VpsrlwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11436 fn vpsrlw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11437 self.emit(VPSRLW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11438 }
11439}
11440
11441/// `VPSUBB` (VPSUBB).
11442/// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
11443///
11444///
11445/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
11446///
11447/// Supported operand variants:
11448///
11449/// ```text
11450/// +---+---------------+
11451/// | # | Operands |
11452/// +---+---------------+
11453/// | 1 | Xmm, Xmm, Mem |
11454/// | 2 | Xmm, Xmm, Xmm |
11455/// | 3 | Ymm, Ymm, Mem |
11456/// | 4 | Ymm, Ymm, Ymm |
11457/// | 5 | Zmm, Zmm, Mem |
11458/// | 6 | Zmm, Zmm, Zmm |
11459/// +---+---------------+
11460/// ```
11461pub trait VpsubbEmitter<A, B, C> {
11462 fn vpsubb(&mut self, op0: A, op1: B, op2: C);
11463}
11464
11465impl<'a> VpsubbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11466 fn vpsubb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11467 self.emit(VPSUBB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11468 }
11469}
11470
11471impl<'a> VpsubbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11472 fn vpsubb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11473 self.emit(VPSUBB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11474 }
11475}
11476
11477impl<'a> VpsubbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11478 fn vpsubb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11479 self.emit(VPSUBB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11480 }
11481}
11482
11483impl<'a> VpsubbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11484 fn vpsubb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11485 self.emit(VPSUBB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11486 }
11487}
11488
11489impl<'a> VpsubbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11490 fn vpsubb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11491 self.emit(VPSUBB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11492 }
11493}
11494
11495impl<'a> VpsubbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11496 fn vpsubb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11497 self.emit(VPSUBB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11498 }
11499}
11500
11501/// `VPSUBB_MASK` (VPSUBB).
11502/// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
11503///
11504///
11505/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
11506///
11507/// Supported operand variants:
11508///
11509/// ```text
11510/// +---+---------------+
11511/// | # | Operands |
11512/// +---+---------------+
11513/// | 1 | Xmm, Xmm, Mem |
11514/// | 2 | Xmm, Xmm, Xmm |
11515/// | 3 | Ymm, Ymm, Mem |
11516/// | 4 | Ymm, Ymm, Ymm |
11517/// | 5 | Zmm, Zmm, Mem |
11518/// | 6 | Zmm, Zmm, Zmm |
11519/// +---+---------------+
11520/// ```
11521pub trait VpsubbMaskEmitter<A, B, C> {
11522 fn vpsubb_mask(&mut self, op0: A, op1: B, op2: C);
11523}
11524
11525impl<'a> VpsubbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11526 fn vpsubb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11527 self.emit(VPSUBB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11528 }
11529}
11530
11531impl<'a> VpsubbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11532 fn vpsubb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11533 self.emit(VPSUBB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11534 }
11535}
11536
11537impl<'a> VpsubbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11538 fn vpsubb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11539 self.emit(VPSUBB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11540 }
11541}
11542
11543impl<'a> VpsubbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11544 fn vpsubb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11545 self.emit(VPSUBB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11546 }
11547}
11548
11549impl<'a> VpsubbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11550 fn vpsubb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11551 self.emit(VPSUBB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11552 }
11553}
11554
11555impl<'a> VpsubbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11556 fn vpsubb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11557 self.emit(VPSUBB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11558 }
11559}
11560
11561/// `VPSUBB_MASKZ` (VPSUBB).
11562/// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
11563///
11564///
11565/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
11566///
11567/// Supported operand variants:
11568///
11569/// ```text
11570/// +---+---------------+
11571/// | # | Operands |
11572/// +---+---------------+
11573/// | 1 | Xmm, Xmm, Mem |
11574/// | 2 | Xmm, Xmm, Xmm |
11575/// | 3 | Ymm, Ymm, Mem |
11576/// | 4 | Ymm, Ymm, Ymm |
11577/// | 5 | Zmm, Zmm, Mem |
11578/// | 6 | Zmm, Zmm, Zmm |
11579/// +---+---------------+
11580/// ```
11581pub trait VpsubbMaskzEmitter<A, B, C> {
11582 fn vpsubb_maskz(&mut self, op0: A, op1: B, op2: C);
11583}
11584
11585impl<'a> VpsubbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11586 fn vpsubb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11587 self.emit(VPSUBB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11588 }
11589}
11590
11591impl<'a> VpsubbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11592 fn vpsubb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11593 self.emit(VPSUBB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11594 }
11595}
11596
11597impl<'a> VpsubbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11598 fn vpsubb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11599 self.emit(VPSUBB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11600 }
11601}
11602
11603impl<'a> VpsubbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11604 fn vpsubb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11605 self.emit(VPSUBB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11606 }
11607}
11608
11609impl<'a> VpsubbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11610 fn vpsubb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11611 self.emit(VPSUBB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11612 }
11613}
11614
11615impl<'a> VpsubbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11616 fn vpsubb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11617 self.emit(VPSUBB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11618 }
11619}
11620
11621/// `VPSUBSB` (VPSUBSB).
11622/// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
11623///
11624///
11625/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
11626///
11627/// Supported operand variants:
11628///
11629/// ```text
11630/// +---+---------------+
11631/// | # | Operands |
11632/// +---+---------------+
11633/// | 1 | Xmm, Xmm, Mem |
11634/// | 2 | Xmm, Xmm, Xmm |
11635/// | 3 | Ymm, Ymm, Mem |
11636/// | 4 | Ymm, Ymm, Ymm |
11637/// | 5 | Zmm, Zmm, Mem |
11638/// | 6 | Zmm, Zmm, Zmm |
11639/// +---+---------------+
11640/// ```
11641pub trait VpsubsbEmitter<A, B, C> {
11642 fn vpsubsb(&mut self, op0: A, op1: B, op2: C);
11643}
11644
11645impl<'a> VpsubsbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11646 fn vpsubsb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11647 self.emit(VPSUBSB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11648 }
11649}
11650
11651impl<'a> VpsubsbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11652 fn vpsubsb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11653 self.emit(VPSUBSB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11654 }
11655}
11656
11657impl<'a> VpsubsbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11658 fn vpsubsb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11659 self.emit(VPSUBSB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11660 }
11661}
11662
11663impl<'a> VpsubsbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11664 fn vpsubsb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11665 self.emit(VPSUBSB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11666 }
11667}
11668
11669impl<'a> VpsubsbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11670 fn vpsubsb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11671 self.emit(VPSUBSB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11672 }
11673}
11674
11675impl<'a> VpsubsbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11676 fn vpsubsb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11677 self.emit(VPSUBSB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11678 }
11679}
11680
11681/// `VPSUBSB_MASK` (VPSUBSB).
11682/// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
11683///
11684///
11685/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
11686///
11687/// Supported operand variants:
11688///
11689/// ```text
11690/// +---+---------------+
11691/// | # | Operands |
11692/// +---+---------------+
11693/// | 1 | Xmm, Xmm, Mem |
11694/// | 2 | Xmm, Xmm, Xmm |
11695/// | 3 | Ymm, Ymm, Mem |
11696/// | 4 | Ymm, Ymm, Ymm |
11697/// | 5 | Zmm, Zmm, Mem |
11698/// | 6 | Zmm, Zmm, Zmm |
11699/// +---+---------------+
11700/// ```
11701pub trait VpsubsbMaskEmitter<A, B, C> {
11702 fn vpsubsb_mask(&mut self, op0: A, op1: B, op2: C);
11703}
11704
11705impl<'a> VpsubsbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11706 fn vpsubsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11707 self.emit(VPSUBSB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11708 }
11709}
11710
11711impl<'a> VpsubsbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11712 fn vpsubsb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11713 self.emit(VPSUBSB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11714 }
11715}
11716
11717impl<'a> VpsubsbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11718 fn vpsubsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11719 self.emit(VPSUBSB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11720 }
11721}
11722
11723impl<'a> VpsubsbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11724 fn vpsubsb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11725 self.emit(VPSUBSB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11726 }
11727}
11728
11729impl<'a> VpsubsbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11730 fn vpsubsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11731 self.emit(VPSUBSB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11732 }
11733}
11734
11735impl<'a> VpsubsbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11736 fn vpsubsb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11737 self.emit(VPSUBSB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11738 }
11739}
11740
11741/// `VPSUBSB_MASKZ` (VPSUBSB).
11742/// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
11743///
11744///
11745/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
11746///
11747/// Supported operand variants:
11748///
11749/// ```text
11750/// +---+---------------+
11751/// | # | Operands |
11752/// +---+---------------+
11753/// | 1 | Xmm, Xmm, Mem |
11754/// | 2 | Xmm, Xmm, Xmm |
11755/// | 3 | Ymm, Ymm, Mem |
11756/// | 4 | Ymm, Ymm, Ymm |
11757/// | 5 | Zmm, Zmm, Mem |
11758/// | 6 | Zmm, Zmm, Zmm |
11759/// +---+---------------+
11760/// ```
11761pub trait VpsubsbMaskzEmitter<A, B, C> {
11762 fn vpsubsb_maskz(&mut self, op0: A, op1: B, op2: C);
11763}
11764
11765impl<'a> VpsubsbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11766 fn vpsubsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11767 self.emit(VPSUBSB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11768 }
11769}
11770
11771impl<'a> VpsubsbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11772 fn vpsubsb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11773 self.emit(VPSUBSB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11774 }
11775}
11776
11777impl<'a> VpsubsbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11778 fn vpsubsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11779 self.emit(VPSUBSB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11780 }
11781}
11782
11783impl<'a> VpsubsbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11784 fn vpsubsb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11785 self.emit(VPSUBSB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11786 }
11787}
11788
11789impl<'a> VpsubsbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11790 fn vpsubsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11791 self.emit(VPSUBSB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11792 }
11793}
11794
11795impl<'a> VpsubsbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11796 fn vpsubsb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11797 self.emit(VPSUBSB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11798 }
11799}
11800
11801/// `VPSUBSW` (VPSUBSW).
11802/// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
11803///
11804///
11805/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
11806///
11807/// Supported operand variants:
11808///
11809/// ```text
11810/// +---+---------------+
11811/// | # | Operands |
11812/// +---+---------------+
11813/// | 1 | Xmm, Xmm, Mem |
11814/// | 2 | Xmm, Xmm, Xmm |
11815/// | 3 | Ymm, Ymm, Mem |
11816/// | 4 | Ymm, Ymm, Ymm |
11817/// | 5 | Zmm, Zmm, Mem |
11818/// | 6 | Zmm, Zmm, Zmm |
11819/// +---+---------------+
11820/// ```
11821pub trait VpsubswEmitter<A, B, C> {
11822 fn vpsubsw(&mut self, op0: A, op1: B, op2: C);
11823}
11824
11825impl<'a> VpsubswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11826 fn vpsubsw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11827 self.emit(VPSUBSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11828 }
11829}
11830
11831impl<'a> VpsubswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11832 fn vpsubsw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11833 self.emit(VPSUBSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11834 }
11835}
11836
11837impl<'a> VpsubswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11838 fn vpsubsw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11839 self.emit(VPSUBSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11840 }
11841}
11842
11843impl<'a> VpsubswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11844 fn vpsubsw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11845 self.emit(VPSUBSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11846 }
11847}
11848
11849impl<'a> VpsubswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11850 fn vpsubsw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11851 self.emit(VPSUBSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11852 }
11853}
11854
11855impl<'a> VpsubswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11856 fn vpsubsw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11857 self.emit(VPSUBSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11858 }
11859}
11860
11861/// `VPSUBSW_MASK` (VPSUBSW).
11862/// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
11863///
11864///
11865/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
11866///
11867/// Supported operand variants:
11868///
11869/// ```text
11870/// +---+---------------+
11871/// | # | Operands |
11872/// +---+---------------+
11873/// | 1 | Xmm, Xmm, Mem |
11874/// | 2 | Xmm, Xmm, Xmm |
11875/// | 3 | Ymm, Ymm, Mem |
11876/// | 4 | Ymm, Ymm, Ymm |
11877/// | 5 | Zmm, Zmm, Mem |
11878/// | 6 | Zmm, Zmm, Zmm |
11879/// +---+---------------+
11880/// ```
11881pub trait VpsubswMaskEmitter<A, B, C> {
11882 fn vpsubsw_mask(&mut self, op0: A, op1: B, op2: C);
11883}
11884
11885impl<'a> VpsubswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11886 fn vpsubsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11887 self.emit(VPSUBSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11888 }
11889}
11890
11891impl<'a> VpsubswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11892 fn vpsubsw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11893 self.emit(VPSUBSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11894 }
11895}
11896
11897impl<'a> VpsubswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11898 fn vpsubsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11899 self.emit(VPSUBSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11900 }
11901}
11902
11903impl<'a> VpsubswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11904 fn vpsubsw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11905 self.emit(VPSUBSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11906 }
11907}
11908
11909impl<'a> VpsubswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11910 fn vpsubsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11911 self.emit(VPSUBSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11912 }
11913}
11914
11915impl<'a> VpsubswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11916 fn vpsubsw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11917 self.emit(VPSUBSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11918 }
11919}
11920
11921/// `VPSUBSW_MASKZ` (VPSUBSW).
11922/// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
11923///
11924///
11925/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
11926///
11927/// Supported operand variants:
11928///
11929/// ```text
11930/// +---+---------------+
11931/// | # | Operands |
11932/// +---+---------------+
11933/// | 1 | Xmm, Xmm, Mem |
11934/// | 2 | Xmm, Xmm, Xmm |
11935/// | 3 | Ymm, Ymm, Mem |
11936/// | 4 | Ymm, Ymm, Ymm |
11937/// | 5 | Zmm, Zmm, Mem |
11938/// | 6 | Zmm, Zmm, Zmm |
11939/// +---+---------------+
11940/// ```
11941pub trait VpsubswMaskzEmitter<A, B, C> {
11942 fn vpsubsw_maskz(&mut self, op0: A, op1: B, op2: C);
11943}
11944
11945impl<'a> VpsubswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
11946 fn vpsubsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
11947 self.emit(VPSUBSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11948 }
11949}
11950
11951impl<'a> VpsubswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
11952 fn vpsubsw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
11953 self.emit(VPSUBSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11954 }
11955}
11956
11957impl<'a> VpsubswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
11958 fn vpsubsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
11959 self.emit(VPSUBSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11960 }
11961}
11962
11963impl<'a> VpsubswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
11964 fn vpsubsw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
11965 self.emit(VPSUBSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11966 }
11967}
11968
11969impl<'a> VpsubswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
11970 fn vpsubsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
11971 self.emit(VPSUBSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11972 }
11973}
11974
11975impl<'a> VpsubswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
11976 fn vpsubsw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
11977 self.emit(VPSUBSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
11978 }
11979}
11980
11981/// `VPSUBUSB` (VPSUBUSB).
11982/// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
11983///
11984///
11985/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
11986///
11987/// Supported operand variants:
11988///
11989/// ```text
11990/// +---+---------------+
11991/// | # | Operands |
11992/// +---+---------------+
11993/// | 1 | Xmm, Xmm, Mem |
11994/// | 2 | Xmm, Xmm, Xmm |
11995/// | 3 | Ymm, Ymm, Mem |
11996/// | 4 | Ymm, Ymm, Ymm |
11997/// | 5 | Zmm, Zmm, Mem |
11998/// | 6 | Zmm, Zmm, Zmm |
11999/// +---+---------------+
12000/// ```
12001pub trait VpsubusbEmitter<A, B, C> {
12002 fn vpsubusb(&mut self, op0: A, op1: B, op2: C);
12003}
12004
12005impl<'a> VpsubusbEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12006 fn vpsubusb(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12007 self.emit(VPSUBUSB128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12008 }
12009}
12010
12011impl<'a> VpsubusbEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12012 fn vpsubusb(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12013 self.emit(VPSUBUSB128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12014 }
12015}
12016
12017impl<'a> VpsubusbEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12018 fn vpsubusb(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12019 self.emit(VPSUBUSB256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12020 }
12021}
12022
12023impl<'a> VpsubusbEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12024 fn vpsubusb(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12025 self.emit(VPSUBUSB256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12026 }
12027}
12028
12029impl<'a> VpsubusbEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12030 fn vpsubusb(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12031 self.emit(VPSUBUSB512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12032 }
12033}
12034
12035impl<'a> VpsubusbEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12036 fn vpsubusb(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12037 self.emit(VPSUBUSB512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12038 }
12039}
12040
12041/// `VPSUBUSB_MASK` (VPSUBUSB).
12042/// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
12043///
12044///
12045/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
12046///
12047/// Supported operand variants:
12048///
12049/// ```text
12050/// +---+---------------+
12051/// | # | Operands |
12052/// +---+---------------+
12053/// | 1 | Xmm, Xmm, Mem |
12054/// | 2 | Xmm, Xmm, Xmm |
12055/// | 3 | Ymm, Ymm, Mem |
12056/// | 4 | Ymm, Ymm, Ymm |
12057/// | 5 | Zmm, Zmm, Mem |
12058/// | 6 | Zmm, Zmm, Zmm |
12059/// +---+---------------+
12060/// ```
12061pub trait VpsubusbMaskEmitter<A, B, C> {
12062 fn vpsubusb_mask(&mut self, op0: A, op1: B, op2: C);
12063}
12064
12065impl<'a> VpsubusbMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12066 fn vpsubusb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12067 self.emit(VPSUBUSB128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12068 }
12069}
12070
12071impl<'a> VpsubusbMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12072 fn vpsubusb_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12073 self.emit(VPSUBUSB128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12074 }
12075}
12076
12077impl<'a> VpsubusbMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12078 fn vpsubusb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12079 self.emit(VPSUBUSB256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12080 }
12081}
12082
12083impl<'a> VpsubusbMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12084 fn vpsubusb_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12085 self.emit(VPSUBUSB256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12086 }
12087}
12088
12089impl<'a> VpsubusbMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12090 fn vpsubusb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12091 self.emit(VPSUBUSB512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12092 }
12093}
12094
12095impl<'a> VpsubusbMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12096 fn vpsubusb_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12097 self.emit(VPSUBUSB512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12098 }
12099}
12100
12101/// `VPSUBUSB_MASKZ` (VPSUBUSB).
12102/// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
12103///
12104///
12105/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
12106///
12107/// Supported operand variants:
12108///
12109/// ```text
12110/// +---+---------------+
12111/// | # | Operands |
12112/// +---+---------------+
12113/// | 1 | Xmm, Xmm, Mem |
12114/// | 2 | Xmm, Xmm, Xmm |
12115/// | 3 | Ymm, Ymm, Mem |
12116/// | 4 | Ymm, Ymm, Ymm |
12117/// | 5 | Zmm, Zmm, Mem |
12118/// | 6 | Zmm, Zmm, Zmm |
12119/// +---+---------------+
12120/// ```
12121pub trait VpsubusbMaskzEmitter<A, B, C> {
12122 fn vpsubusb_maskz(&mut self, op0: A, op1: B, op2: C);
12123}
12124
12125impl<'a> VpsubusbMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12126 fn vpsubusb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12127 self.emit(VPSUBUSB128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12128 }
12129}
12130
12131impl<'a> VpsubusbMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12132 fn vpsubusb_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12133 self.emit(VPSUBUSB128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12134 }
12135}
12136
12137impl<'a> VpsubusbMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12138 fn vpsubusb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12139 self.emit(VPSUBUSB256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12140 }
12141}
12142
12143impl<'a> VpsubusbMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12144 fn vpsubusb_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12145 self.emit(VPSUBUSB256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12146 }
12147}
12148
12149impl<'a> VpsubusbMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12150 fn vpsubusb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12151 self.emit(VPSUBUSB512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12152 }
12153}
12154
12155impl<'a> VpsubusbMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12156 fn vpsubusb_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12157 self.emit(VPSUBUSB512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12158 }
12159}
12160
12161/// `VPSUBUSW` (VPSUBUSW).
12162/// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
12163///
12164///
12165/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
12166///
12167/// Supported operand variants:
12168///
12169/// ```text
12170/// +---+---------------+
12171/// | # | Operands |
12172/// +---+---------------+
12173/// | 1 | Xmm, Xmm, Mem |
12174/// | 2 | Xmm, Xmm, Xmm |
12175/// | 3 | Ymm, Ymm, Mem |
12176/// | 4 | Ymm, Ymm, Ymm |
12177/// | 5 | Zmm, Zmm, Mem |
12178/// | 6 | Zmm, Zmm, Zmm |
12179/// +---+---------------+
12180/// ```
12181pub trait VpsubuswEmitter<A, B, C> {
12182 fn vpsubusw(&mut self, op0: A, op1: B, op2: C);
12183}
12184
12185impl<'a> VpsubuswEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12186 fn vpsubusw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12187 self.emit(VPSUBUSW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12188 }
12189}
12190
12191impl<'a> VpsubuswEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12192 fn vpsubusw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12193 self.emit(VPSUBUSW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12194 }
12195}
12196
12197impl<'a> VpsubuswEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12198 fn vpsubusw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12199 self.emit(VPSUBUSW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12200 }
12201}
12202
12203impl<'a> VpsubuswEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12204 fn vpsubusw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12205 self.emit(VPSUBUSW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12206 }
12207}
12208
12209impl<'a> VpsubuswEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12210 fn vpsubusw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12211 self.emit(VPSUBUSW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12212 }
12213}
12214
12215impl<'a> VpsubuswEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12216 fn vpsubusw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12217 self.emit(VPSUBUSW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12218 }
12219}
12220
12221/// `VPSUBUSW_MASK` (VPSUBUSW).
12222/// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
12223///
12224///
12225/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
12226///
12227/// Supported operand variants:
12228///
12229/// ```text
12230/// +---+---------------+
12231/// | # | Operands |
12232/// +---+---------------+
12233/// | 1 | Xmm, Xmm, Mem |
12234/// | 2 | Xmm, Xmm, Xmm |
12235/// | 3 | Ymm, Ymm, Mem |
12236/// | 4 | Ymm, Ymm, Ymm |
12237/// | 5 | Zmm, Zmm, Mem |
12238/// | 6 | Zmm, Zmm, Zmm |
12239/// +---+---------------+
12240/// ```
12241pub trait VpsubuswMaskEmitter<A, B, C> {
12242 fn vpsubusw_mask(&mut self, op0: A, op1: B, op2: C);
12243}
12244
12245impl<'a> VpsubuswMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12246 fn vpsubusw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12247 self.emit(VPSUBUSW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12248 }
12249}
12250
12251impl<'a> VpsubuswMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12252 fn vpsubusw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12253 self.emit(VPSUBUSW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12254 }
12255}
12256
12257impl<'a> VpsubuswMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12258 fn vpsubusw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12259 self.emit(VPSUBUSW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12260 }
12261}
12262
12263impl<'a> VpsubuswMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12264 fn vpsubusw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12265 self.emit(VPSUBUSW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12266 }
12267}
12268
12269impl<'a> VpsubuswMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12270 fn vpsubusw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12271 self.emit(VPSUBUSW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12272 }
12273}
12274
12275impl<'a> VpsubuswMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12276 fn vpsubusw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12277 self.emit(VPSUBUSW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12278 }
12279}
12280
12281/// `VPSUBUSW_MASKZ` (VPSUBUSW).
12282/// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
12283///
12284///
12285/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
12286///
12287/// Supported operand variants:
12288///
12289/// ```text
12290/// +---+---------------+
12291/// | # | Operands |
12292/// +---+---------------+
12293/// | 1 | Xmm, Xmm, Mem |
12294/// | 2 | Xmm, Xmm, Xmm |
12295/// | 3 | Ymm, Ymm, Mem |
12296/// | 4 | Ymm, Ymm, Ymm |
12297/// | 5 | Zmm, Zmm, Mem |
12298/// | 6 | Zmm, Zmm, Zmm |
12299/// +---+---------------+
12300/// ```
12301pub trait VpsubuswMaskzEmitter<A, B, C> {
12302 fn vpsubusw_maskz(&mut self, op0: A, op1: B, op2: C);
12303}
12304
12305impl<'a> VpsubuswMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12306 fn vpsubusw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12307 self.emit(VPSUBUSW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12308 }
12309}
12310
12311impl<'a> VpsubuswMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12312 fn vpsubusw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12313 self.emit(VPSUBUSW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12314 }
12315}
12316
12317impl<'a> VpsubuswMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12318 fn vpsubusw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12319 self.emit(VPSUBUSW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12320 }
12321}
12322
12323impl<'a> VpsubuswMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12324 fn vpsubusw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12325 self.emit(VPSUBUSW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12326 }
12327}
12328
12329impl<'a> VpsubuswMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12330 fn vpsubusw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12331 self.emit(VPSUBUSW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12332 }
12333}
12334
12335impl<'a> VpsubuswMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12336 fn vpsubusw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12337 self.emit(VPSUBUSW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12338 }
12339}
12340
12341/// `VPSUBW` (VPSUBW).
12342/// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
12343///
12344///
12345/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
12346///
12347/// Supported operand variants:
12348///
12349/// ```text
12350/// +---+---------------+
12351/// | # | Operands |
12352/// +---+---------------+
12353/// | 1 | Xmm, Xmm, Mem |
12354/// | 2 | Xmm, Xmm, Xmm |
12355/// | 3 | Ymm, Ymm, Mem |
12356/// | 4 | Ymm, Ymm, Ymm |
12357/// | 5 | Zmm, Zmm, Mem |
12358/// | 6 | Zmm, Zmm, Zmm |
12359/// +---+---------------+
12360/// ```
12361pub trait VpsubwEmitter<A, B, C> {
12362 fn vpsubw(&mut self, op0: A, op1: B, op2: C);
12363}
12364
12365impl<'a> VpsubwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12366 fn vpsubw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12367 self.emit(VPSUBW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12368 }
12369}
12370
12371impl<'a> VpsubwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12372 fn vpsubw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12373 self.emit(VPSUBW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12374 }
12375}
12376
12377impl<'a> VpsubwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12378 fn vpsubw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12379 self.emit(VPSUBW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12380 }
12381}
12382
12383impl<'a> VpsubwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12384 fn vpsubw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12385 self.emit(VPSUBW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12386 }
12387}
12388
12389impl<'a> VpsubwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12390 fn vpsubw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12391 self.emit(VPSUBW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12392 }
12393}
12394
12395impl<'a> VpsubwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12396 fn vpsubw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12397 self.emit(VPSUBW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12398 }
12399}
12400
12401/// `VPSUBW_MASK` (VPSUBW).
12402/// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
12403///
12404///
12405/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
12406///
12407/// Supported operand variants:
12408///
12409/// ```text
12410/// +---+---------------+
12411/// | # | Operands |
12412/// +---+---------------+
12413/// | 1 | Xmm, Xmm, Mem |
12414/// | 2 | Xmm, Xmm, Xmm |
12415/// | 3 | Ymm, Ymm, Mem |
12416/// | 4 | Ymm, Ymm, Ymm |
12417/// | 5 | Zmm, Zmm, Mem |
12418/// | 6 | Zmm, Zmm, Zmm |
12419/// +---+---------------+
12420/// ```
12421pub trait VpsubwMaskEmitter<A, B, C> {
12422 fn vpsubw_mask(&mut self, op0: A, op1: B, op2: C);
12423}
12424
12425impl<'a> VpsubwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12426 fn vpsubw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12427 self.emit(VPSUBW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12428 }
12429}
12430
12431impl<'a> VpsubwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12432 fn vpsubw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12433 self.emit(VPSUBW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12434 }
12435}
12436
12437impl<'a> VpsubwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12438 fn vpsubw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12439 self.emit(VPSUBW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12440 }
12441}
12442
12443impl<'a> VpsubwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12444 fn vpsubw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12445 self.emit(VPSUBW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12446 }
12447}
12448
12449impl<'a> VpsubwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12450 fn vpsubw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12451 self.emit(VPSUBW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12452 }
12453}
12454
12455impl<'a> VpsubwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12456 fn vpsubw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12457 self.emit(VPSUBW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12458 }
12459}
12460
12461/// `VPSUBW_MASKZ` (VPSUBW).
12462/// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
12463///
12464///
12465/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
12466///
12467/// Supported operand variants:
12468///
12469/// ```text
12470/// +---+---------------+
12471/// | # | Operands |
12472/// +---+---------------+
12473/// | 1 | Xmm, Xmm, Mem |
12474/// | 2 | Xmm, Xmm, Xmm |
12475/// | 3 | Ymm, Ymm, Mem |
12476/// | 4 | Ymm, Ymm, Ymm |
12477/// | 5 | Zmm, Zmm, Mem |
12478/// | 6 | Zmm, Zmm, Zmm |
12479/// +---+---------------+
12480/// ```
12481pub trait VpsubwMaskzEmitter<A, B, C> {
12482 fn vpsubw_maskz(&mut self, op0: A, op1: B, op2: C);
12483}
12484
12485impl<'a> VpsubwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
12486 fn vpsubw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
12487 self.emit(VPSUBW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12488 }
12489}
12490
12491impl<'a> VpsubwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
12492 fn vpsubw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
12493 self.emit(VPSUBW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12494 }
12495}
12496
12497impl<'a> VpsubwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
12498 fn vpsubw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
12499 self.emit(VPSUBW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12500 }
12501}
12502
12503impl<'a> VpsubwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
12504 fn vpsubw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
12505 self.emit(VPSUBW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12506 }
12507}
12508
12509impl<'a> VpsubwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
12510 fn vpsubw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
12511 self.emit(VPSUBW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12512 }
12513}
12514
12515impl<'a> VpsubwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
12516 fn vpsubw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
12517 self.emit(VPSUBW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12518 }
12519}
12520
12521/// `VPTESTMB` (VPTESTMB).
12522/// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
12523///
12524///
12525/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
12526///
12527/// Supported operand variants:
12528///
12529/// ```text
12530/// +---+----------------+
12531/// | # | Operands |
12532/// +---+----------------+
12533/// | 1 | KReg, Xmm, Mem |
12534/// | 2 | KReg, Xmm, Xmm |
12535/// | 3 | KReg, Ymm, Mem |
12536/// | 4 | KReg, Ymm, Ymm |
12537/// | 5 | KReg, Zmm, Mem |
12538/// | 6 | KReg, Zmm, Zmm |
12539/// +---+----------------+
12540/// ```
12541pub trait VptestmbEmitter<A, B, C> {
12542 fn vptestmb(&mut self, op0: A, op1: B, op2: C);
12543}
12544
12545impl<'a> VptestmbEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12546 fn vptestmb(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12547 self.emit(VPTESTMB128KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12548 }
12549}
12550
12551impl<'a> VptestmbEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12552 fn vptestmb(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12553 self.emit(VPTESTMB128KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12554 }
12555}
12556
12557impl<'a> VptestmbEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12558 fn vptestmb(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12559 self.emit(VPTESTMB256KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12560 }
12561}
12562
12563impl<'a> VptestmbEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12564 fn vptestmb(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12565 self.emit(VPTESTMB256KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12566 }
12567}
12568
12569impl<'a> VptestmbEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12570 fn vptestmb(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12571 self.emit(VPTESTMB512KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12572 }
12573}
12574
12575impl<'a> VptestmbEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12576 fn vptestmb(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12577 self.emit(VPTESTMB512KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12578 }
12579}
12580
12581/// `VPTESTMB_MASK` (VPTESTMB).
12582/// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
12583///
12584///
12585/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
12586///
12587/// Supported operand variants:
12588///
12589/// ```text
12590/// +---+----------------+
12591/// | # | Operands |
12592/// +---+----------------+
12593/// | 1 | KReg, Xmm, Mem |
12594/// | 2 | KReg, Xmm, Xmm |
12595/// | 3 | KReg, Ymm, Mem |
12596/// | 4 | KReg, Ymm, Ymm |
12597/// | 5 | KReg, Zmm, Mem |
12598/// | 6 | KReg, Zmm, Zmm |
12599/// +---+----------------+
12600/// ```
12601pub trait VptestmbMaskEmitter<A, B, C> {
12602 fn vptestmb_mask(&mut self, op0: A, op1: B, op2: C);
12603}
12604
12605impl<'a> VptestmbMaskEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12606 fn vptestmb_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12607 self.emit(VPTESTMB128KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12608 }
12609}
12610
12611impl<'a> VptestmbMaskEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12612 fn vptestmb_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12613 self.emit(VPTESTMB128KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12614 }
12615}
12616
12617impl<'a> VptestmbMaskEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12618 fn vptestmb_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12619 self.emit(VPTESTMB256KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12620 }
12621}
12622
12623impl<'a> VptestmbMaskEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12624 fn vptestmb_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12625 self.emit(VPTESTMB256KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12626 }
12627}
12628
12629impl<'a> VptestmbMaskEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12630 fn vptestmb_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12631 self.emit(VPTESTMB512KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12632 }
12633}
12634
12635impl<'a> VptestmbMaskEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12636 fn vptestmb_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12637 self.emit(VPTESTMB512KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12638 }
12639}
12640
12641/// `VPTESTMW` (VPTESTMW).
12642/// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
12643///
12644///
12645/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
12646///
12647/// Supported operand variants:
12648///
12649/// ```text
12650/// +---+----------------+
12651/// | # | Operands |
12652/// +---+----------------+
12653/// | 1 | KReg, Xmm, Mem |
12654/// | 2 | KReg, Xmm, Xmm |
12655/// | 3 | KReg, Ymm, Mem |
12656/// | 4 | KReg, Ymm, Ymm |
12657/// | 5 | KReg, Zmm, Mem |
12658/// | 6 | KReg, Zmm, Zmm |
12659/// +---+----------------+
12660/// ```
12661pub trait VptestmwEmitter<A, B, C> {
12662 fn vptestmw(&mut self, op0: A, op1: B, op2: C);
12663}
12664
12665impl<'a> VptestmwEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12666 fn vptestmw(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12667 self.emit(VPTESTMW128KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12668 }
12669}
12670
12671impl<'a> VptestmwEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12672 fn vptestmw(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12673 self.emit(VPTESTMW128KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12674 }
12675}
12676
12677impl<'a> VptestmwEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12678 fn vptestmw(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12679 self.emit(VPTESTMW256KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12680 }
12681}
12682
12683impl<'a> VptestmwEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12684 fn vptestmw(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12685 self.emit(VPTESTMW256KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12686 }
12687}
12688
12689impl<'a> VptestmwEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12690 fn vptestmw(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12691 self.emit(VPTESTMW512KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12692 }
12693}
12694
12695impl<'a> VptestmwEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12696 fn vptestmw(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12697 self.emit(VPTESTMW512KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12698 }
12699}
12700
12701/// `VPTESTMW_MASK` (VPTESTMW).
12702/// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
12703///
12704///
12705/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
12706///
12707/// Supported operand variants:
12708///
12709/// ```text
12710/// +---+----------------+
12711/// | # | Operands |
12712/// +---+----------------+
12713/// | 1 | KReg, Xmm, Mem |
12714/// | 2 | KReg, Xmm, Xmm |
12715/// | 3 | KReg, Ymm, Mem |
12716/// | 4 | KReg, Ymm, Ymm |
12717/// | 5 | KReg, Zmm, Mem |
12718/// | 6 | KReg, Zmm, Zmm |
12719/// +---+----------------+
12720/// ```
12721pub trait VptestmwMaskEmitter<A, B, C> {
12722 fn vptestmw_mask(&mut self, op0: A, op1: B, op2: C);
12723}
12724
12725impl<'a> VptestmwMaskEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12726 fn vptestmw_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12727 self.emit(VPTESTMW128KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12728 }
12729}
12730
12731impl<'a> VptestmwMaskEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12732 fn vptestmw_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12733 self.emit(VPTESTMW128KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12734 }
12735}
12736
12737impl<'a> VptestmwMaskEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12738 fn vptestmw_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12739 self.emit(VPTESTMW256KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12740 }
12741}
12742
12743impl<'a> VptestmwMaskEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12744 fn vptestmw_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12745 self.emit(VPTESTMW256KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12746 }
12747}
12748
12749impl<'a> VptestmwMaskEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12750 fn vptestmw_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12751 self.emit(VPTESTMW512KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12752 }
12753}
12754
12755impl<'a> VptestmwMaskEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12756 fn vptestmw_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12757 self.emit(VPTESTMW512KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12758 }
12759}
12760
12761/// `VPTESTNMB` (VPTESTNMB).
12762/// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
12763///
12764///
12765/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
12766///
12767/// Supported operand variants:
12768///
12769/// ```text
12770/// +---+----------------+
12771/// | # | Operands |
12772/// +---+----------------+
12773/// | 1 | KReg, Xmm, Mem |
12774/// | 2 | KReg, Xmm, Xmm |
12775/// | 3 | KReg, Ymm, Mem |
12776/// | 4 | KReg, Ymm, Ymm |
12777/// | 5 | KReg, Zmm, Mem |
12778/// | 6 | KReg, Zmm, Zmm |
12779/// +---+----------------+
12780/// ```
12781pub trait VptestnmbEmitter<A, B, C> {
12782 fn vptestnmb(&mut self, op0: A, op1: B, op2: C);
12783}
12784
12785impl<'a> VptestnmbEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12786 fn vptestnmb(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12787 self.emit(VPTESTNMB128KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12788 }
12789}
12790
12791impl<'a> VptestnmbEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12792 fn vptestnmb(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12793 self.emit(VPTESTNMB128KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12794 }
12795}
12796
12797impl<'a> VptestnmbEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12798 fn vptestnmb(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12799 self.emit(VPTESTNMB256KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12800 }
12801}
12802
12803impl<'a> VptestnmbEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12804 fn vptestnmb(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12805 self.emit(VPTESTNMB256KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12806 }
12807}
12808
12809impl<'a> VptestnmbEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12810 fn vptestnmb(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12811 self.emit(VPTESTNMB512KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12812 }
12813}
12814
12815impl<'a> VptestnmbEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12816 fn vptestnmb(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12817 self.emit(VPTESTNMB512KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12818 }
12819}
12820
12821/// `VPTESTNMB_MASK` (VPTESTNMB).
12822/// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
12823///
12824///
12825/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
12826///
12827/// Supported operand variants:
12828///
12829/// ```text
12830/// +---+----------------+
12831/// | # | Operands |
12832/// +---+----------------+
12833/// | 1 | KReg, Xmm, Mem |
12834/// | 2 | KReg, Xmm, Xmm |
12835/// | 3 | KReg, Ymm, Mem |
12836/// | 4 | KReg, Ymm, Ymm |
12837/// | 5 | KReg, Zmm, Mem |
12838/// | 6 | KReg, Zmm, Zmm |
12839/// +---+----------------+
12840/// ```
12841pub trait VptestnmbMaskEmitter<A, B, C> {
12842 fn vptestnmb_mask(&mut self, op0: A, op1: B, op2: C);
12843}
12844
12845impl<'a> VptestnmbMaskEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12846 fn vptestnmb_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12847 self.emit(VPTESTNMB128KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12848 }
12849}
12850
12851impl<'a> VptestnmbMaskEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12852 fn vptestnmb_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12853 self.emit(VPTESTNMB128KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12854 }
12855}
12856
12857impl<'a> VptestnmbMaskEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12858 fn vptestnmb_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12859 self.emit(VPTESTNMB256KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12860 }
12861}
12862
12863impl<'a> VptestnmbMaskEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12864 fn vptestnmb_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12865 self.emit(VPTESTNMB256KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12866 }
12867}
12868
12869impl<'a> VptestnmbMaskEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12870 fn vptestnmb_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12871 self.emit(VPTESTNMB512KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12872 }
12873}
12874
12875impl<'a> VptestnmbMaskEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12876 fn vptestnmb_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12877 self.emit(VPTESTNMB512KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12878 }
12879}
12880
12881/// `VPTESTNMW` (VPTESTNMW).
12882/// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
12883///
12884///
12885/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
12886///
12887/// Supported operand variants:
12888///
12889/// ```text
12890/// +---+----------------+
12891/// | # | Operands |
12892/// +---+----------------+
12893/// | 1 | KReg, Xmm, Mem |
12894/// | 2 | KReg, Xmm, Xmm |
12895/// | 3 | KReg, Ymm, Mem |
12896/// | 4 | KReg, Ymm, Ymm |
12897/// | 5 | KReg, Zmm, Mem |
12898/// | 6 | KReg, Zmm, Zmm |
12899/// +---+----------------+
12900/// ```
12901pub trait VptestnmwEmitter<A, B, C> {
12902 fn vptestnmw(&mut self, op0: A, op1: B, op2: C);
12903}
12904
12905impl<'a> VptestnmwEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12906 fn vptestnmw(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12907 self.emit(VPTESTNMW128KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12908 }
12909}
12910
12911impl<'a> VptestnmwEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12912 fn vptestnmw(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12913 self.emit(VPTESTNMW128KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12914 }
12915}
12916
12917impl<'a> VptestnmwEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12918 fn vptestnmw(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12919 self.emit(VPTESTNMW256KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12920 }
12921}
12922
12923impl<'a> VptestnmwEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12924 fn vptestnmw(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12925 self.emit(VPTESTNMW256KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12926 }
12927}
12928
12929impl<'a> VptestnmwEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12930 fn vptestnmw(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12931 self.emit(VPTESTNMW512KRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12932 }
12933}
12934
12935impl<'a> VptestnmwEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12936 fn vptestnmw(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12937 self.emit(VPTESTNMW512KRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12938 }
12939}
12940
12941/// `VPTESTNMW_MASK` (VPTESTNMW).
12942/// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
12943///
12944///
12945/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
12946///
12947/// Supported operand variants:
12948///
12949/// ```text
12950/// +---+----------------+
12951/// | # | Operands |
12952/// +---+----------------+
12953/// | 1 | KReg, Xmm, Mem |
12954/// | 2 | KReg, Xmm, Xmm |
12955/// | 3 | KReg, Ymm, Mem |
12956/// | 4 | KReg, Ymm, Ymm |
12957/// | 5 | KReg, Zmm, Mem |
12958/// | 6 | KReg, Zmm, Zmm |
12959/// +---+----------------+
12960/// ```
12961pub trait VptestnmwMaskEmitter<A, B, C> {
12962 fn vptestnmw_mask(&mut self, op0: A, op1: B, op2: C);
12963}
12964
12965impl<'a> VptestnmwMaskEmitter<KReg, Xmm, Xmm> for Assembler<'a> {
12966 fn vptestnmw_mask(&mut self, op0: KReg, op1: Xmm, op2: Xmm) {
12967 self.emit(VPTESTNMW128KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12968 }
12969}
12970
12971impl<'a> VptestnmwMaskEmitter<KReg, Xmm, Mem> for Assembler<'a> {
12972 fn vptestnmw_mask(&mut self, op0: KReg, op1: Xmm, op2: Mem) {
12973 self.emit(VPTESTNMW128KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12974 }
12975}
12976
12977impl<'a> VptestnmwMaskEmitter<KReg, Ymm, Ymm> for Assembler<'a> {
12978 fn vptestnmw_mask(&mut self, op0: KReg, op1: Ymm, op2: Ymm) {
12979 self.emit(VPTESTNMW256KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12980 }
12981}
12982
12983impl<'a> VptestnmwMaskEmitter<KReg, Ymm, Mem> for Assembler<'a> {
12984 fn vptestnmw_mask(&mut self, op0: KReg, op1: Ymm, op2: Mem) {
12985 self.emit(VPTESTNMW256KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12986 }
12987}
12988
12989impl<'a> VptestnmwMaskEmitter<KReg, Zmm, Zmm> for Assembler<'a> {
12990 fn vptestnmw_mask(&mut self, op0: KReg, op1: Zmm, op2: Zmm) {
12991 self.emit(VPTESTNMW512KRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12992 }
12993}
12994
12995impl<'a> VptestnmwMaskEmitter<KReg, Zmm, Mem> for Assembler<'a> {
12996 fn vptestnmw_mask(&mut self, op0: KReg, op1: Zmm, op2: Mem) {
12997 self.emit(VPTESTNMW512KRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
12998 }
12999}
13000
13001/// `VPUNPCKHBW` (VPUNPCKHBW).
13002/// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
13003///
13004///
13005/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
13006///
13007/// Supported operand variants:
13008///
13009/// ```text
13010/// +---+---------------+
13011/// | # | Operands |
13012/// +---+---------------+
13013/// | 1 | Xmm, Xmm, Mem |
13014/// | 2 | Xmm, Xmm, Xmm |
13015/// | 3 | Ymm, Ymm, Mem |
13016/// | 4 | Ymm, Ymm, Ymm |
13017/// | 5 | Zmm, Zmm, Mem |
13018/// | 6 | Zmm, Zmm, Zmm |
13019/// +---+---------------+
13020/// ```
13021pub trait VpunpckhbwEmitter<A, B, C> {
13022 fn vpunpckhbw(&mut self, op0: A, op1: B, op2: C);
13023}
13024
13025impl<'a> VpunpckhbwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13026 fn vpunpckhbw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13027 self.emit(VPUNPCKHBW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13028 }
13029}
13030
13031impl<'a> VpunpckhbwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13032 fn vpunpckhbw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13033 self.emit(VPUNPCKHBW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13034 }
13035}
13036
13037impl<'a> VpunpckhbwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13038 fn vpunpckhbw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13039 self.emit(VPUNPCKHBW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13040 }
13041}
13042
13043impl<'a> VpunpckhbwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13044 fn vpunpckhbw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13045 self.emit(VPUNPCKHBW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13046 }
13047}
13048
13049impl<'a> VpunpckhbwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13050 fn vpunpckhbw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13051 self.emit(VPUNPCKHBW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13052 }
13053}
13054
13055impl<'a> VpunpckhbwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13056 fn vpunpckhbw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13057 self.emit(VPUNPCKHBW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13058 }
13059}
13060
13061/// `VPUNPCKHBW_MASK` (VPUNPCKHBW).
13062/// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
13063///
13064///
13065/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
13066///
13067/// Supported operand variants:
13068///
13069/// ```text
13070/// +---+---------------+
13071/// | # | Operands |
13072/// +---+---------------+
13073/// | 1 | Xmm, Xmm, Mem |
13074/// | 2 | Xmm, Xmm, Xmm |
13075/// | 3 | Ymm, Ymm, Mem |
13076/// | 4 | Ymm, Ymm, Ymm |
13077/// | 5 | Zmm, Zmm, Mem |
13078/// | 6 | Zmm, Zmm, Zmm |
13079/// +---+---------------+
13080/// ```
13081pub trait VpunpckhbwMaskEmitter<A, B, C> {
13082 fn vpunpckhbw_mask(&mut self, op0: A, op1: B, op2: C);
13083}
13084
13085impl<'a> VpunpckhbwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13086 fn vpunpckhbw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13087 self.emit(VPUNPCKHBW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13088 }
13089}
13090
13091impl<'a> VpunpckhbwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13092 fn vpunpckhbw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13093 self.emit(VPUNPCKHBW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13094 }
13095}
13096
13097impl<'a> VpunpckhbwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13098 fn vpunpckhbw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13099 self.emit(VPUNPCKHBW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13100 }
13101}
13102
13103impl<'a> VpunpckhbwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13104 fn vpunpckhbw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13105 self.emit(VPUNPCKHBW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13106 }
13107}
13108
13109impl<'a> VpunpckhbwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13110 fn vpunpckhbw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13111 self.emit(VPUNPCKHBW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13112 }
13113}
13114
13115impl<'a> VpunpckhbwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13116 fn vpunpckhbw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13117 self.emit(VPUNPCKHBW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13118 }
13119}
13120
13121/// `VPUNPCKHBW_MASKZ` (VPUNPCKHBW).
13122/// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
13123///
13124///
13125/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
13126///
13127/// Supported operand variants:
13128///
13129/// ```text
13130/// +---+---------------+
13131/// | # | Operands |
13132/// +---+---------------+
13133/// | 1 | Xmm, Xmm, Mem |
13134/// | 2 | Xmm, Xmm, Xmm |
13135/// | 3 | Ymm, Ymm, Mem |
13136/// | 4 | Ymm, Ymm, Ymm |
13137/// | 5 | Zmm, Zmm, Mem |
13138/// | 6 | Zmm, Zmm, Zmm |
13139/// +---+---------------+
13140/// ```
13141pub trait VpunpckhbwMaskzEmitter<A, B, C> {
13142 fn vpunpckhbw_maskz(&mut self, op0: A, op1: B, op2: C);
13143}
13144
13145impl<'a> VpunpckhbwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13146 fn vpunpckhbw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13147 self.emit(VPUNPCKHBW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13148 }
13149}
13150
13151impl<'a> VpunpckhbwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13152 fn vpunpckhbw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13153 self.emit(VPUNPCKHBW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13154 }
13155}
13156
13157impl<'a> VpunpckhbwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13158 fn vpunpckhbw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13159 self.emit(VPUNPCKHBW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13160 }
13161}
13162
13163impl<'a> VpunpckhbwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13164 fn vpunpckhbw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13165 self.emit(VPUNPCKHBW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13166 }
13167}
13168
13169impl<'a> VpunpckhbwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13170 fn vpunpckhbw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13171 self.emit(VPUNPCKHBW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13172 }
13173}
13174
13175impl<'a> VpunpckhbwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13176 fn vpunpckhbw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13177 self.emit(VPUNPCKHBW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13178 }
13179}
13180
13181/// `VPUNPCKHWD` (VPUNPCKHWD).
13182/// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
13183///
13184///
13185/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
13186///
13187/// Supported operand variants:
13188///
13189/// ```text
13190/// +---+---------------+
13191/// | # | Operands |
13192/// +---+---------------+
13193/// | 1 | Xmm, Xmm, Mem |
13194/// | 2 | Xmm, Xmm, Xmm |
13195/// | 3 | Ymm, Ymm, Mem |
13196/// | 4 | Ymm, Ymm, Ymm |
13197/// | 5 | Zmm, Zmm, Mem |
13198/// | 6 | Zmm, Zmm, Zmm |
13199/// +---+---------------+
13200/// ```
13201pub trait VpunpckhwdEmitter<A, B, C> {
13202 fn vpunpckhwd(&mut self, op0: A, op1: B, op2: C);
13203}
13204
13205impl<'a> VpunpckhwdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13206 fn vpunpckhwd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13207 self.emit(VPUNPCKHWD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13208 }
13209}
13210
13211impl<'a> VpunpckhwdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13212 fn vpunpckhwd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13213 self.emit(VPUNPCKHWD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13214 }
13215}
13216
13217impl<'a> VpunpckhwdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13218 fn vpunpckhwd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13219 self.emit(VPUNPCKHWD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13220 }
13221}
13222
13223impl<'a> VpunpckhwdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13224 fn vpunpckhwd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13225 self.emit(VPUNPCKHWD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13226 }
13227}
13228
13229impl<'a> VpunpckhwdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13230 fn vpunpckhwd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13231 self.emit(VPUNPCKHWD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13232 }
13233}
13234
13235impl<'a> VpunpckhwdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13236 fn vpunpckhwd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13237 self.emit(VPUNPCKHWD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13238 }
13239}
13240
13241/// `VPUNPCKHWD_MASK` (VPUNPCKHWD).
13242/// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
13243///
13244///
13245/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
13246///
13247/// Supported operand variants:
13248///
13249/// ```text
13250/// +---+---------------+
13251/// | # | Operands |
13252/// +---+---------------+
13253/// | 1 | Xmm, Xmm, Mem |
13254/// | 2 | Xmm, Xmm, Xmm |
13255/// | 3 | Ymm, Ymm, Mem |
13256/// | 4 | Ymm, Ymm, Ymm |
13257/// | 5 | Zmm, Zmm, Mem |
13258/// | 6 | Zmm, Zmm, Zmm |
13259/// +---+---------------+
13260/// ```
13261pub trait VpunpckhwdMaskEmitter<A, B, C> {
13262 fn vpunpckhwd_mask(&mut self, op0: A, op1: B, op2: C);
13263}
13264
13265impl<'a> VpunpckhwdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13266 fn vpunpckhwd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13267 self.emit(VPUNPCKHWD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13268 }
13269}
13270
13271impl<'a> VpunpckhwdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13272 fn vpunpckhwd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13273 self.emit(VPUNPCKHWD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13274 }
13275}
13276
13277impl<'a> VpunpckhwdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13278 fn vpunpckhwd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13279 self.emit(VPUNPCKHWD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13280 }
13281}
13282
13283impl<'a> VpunpckhwdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13284 fn vpunpckhwd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13285 self.emit(VPUNPCKHWD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13286 }
13287}
13288
13289impl<'a> VpunpckhwdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13290 fn vpunpckhwd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13291 self.emit(VPUNPCKHWD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13292 }
13293}
13294
13295impl<'a> VpunpckhwdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13296 fn vpunpckhwd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13297 self.emit(VPUNPCKHWD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13298 }
13299}
13300
13301/// `VPUNPCKHWD_MASKZ` (VPUNPCKHWD).
13302/// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
13303///
13304///
13305/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
13306///
13307/// Supported operand variants:
13308///
13309/// ```text
13310/// +---+---------------+
13311/// | # | Operands |
13312/// +---+---------------+
13313/// | 1 | Xmm, Xmm, Mem |
13314/// | 2 | Xmm, Xmm, Xmm |
13315/// | 3 | Ymm, Ymm, Mem |
13316/// | 4 | Ymm, Ymm, Ymm |
13317/// | 5 | Zmm, Zmm, Mem |
13318/// | 6 | Zmm, Zmm, Zmm |
13319/// +---+---------------+
13320/// ```
13321pub trait VpunpckhwdMaskzEmitter<A, B, C> {
13322 fn vpunpckhwd_maskz(&mut self, op0: A, op1: B, op2: C);
13323}
13324
13325impl<'a> VpunpckhwdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13326 fn vpunpckhwd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13327 self.emit(VPUNPCKHWD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13328 }
13329}
13330
13331impl<'a> VpunpckhwdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13332 fn vpunpckhwd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13333 self.emit(VPUNPCKHWD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13334 }
13335}
13336
13337impl<'a> VpunpckhwdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13338 fn vpunpckhwd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13339 self.emit(VPUNPCKHWD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13340 }
13341}
13342
13343impl<'a> VpunpckhwdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13344 fn vpunpckhwd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13345 self.emit(VPUNPCKHWD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13346 }
13347}
13348
13349impl<'a> VpunpckhwdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13350 fn vpunpckhwd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13351 self.emit(VPUNPCKHWD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13352 }
13353}
13354
13355impl<'a> VpunpckhwdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13356 fn vpunpckhwd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13357 self.emit(VPUNPCKHWD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13358 }
13359}
13360
13361/// `VPUNPCKLBW` (VPUNPCKLBW).
13362/// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
13363///
13364///
13365/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
13366///
13367/// Supported operand variants:
13368///
13369/// ```text
13370/// +---+---------------+
13371/// | # | Operands |
13372/// +---+---------------+
13373/// | 1 | Xmm, Xmm, Mem |
13374/// | 2 | Xmm, Xmm, Xmm |
13375/// | 3 | Ymm, Ymm, Mem |
13376/// | 4 | Ymm, Ymm, Ymm |
13377/// | 5 | Zmm, Zmm, Mem |
13378/// | 6 | Zmm, Zmm, Zmm |
13379/// +---+---------------+
13380/// ```
13381pub trait VpunpcklbwEmitter<A, B, C> {
13382 fn vpunpcklbw(&mut self, op0: A, op1: B, op2: C);
13383}
13384
13385impl<'a> VpunpcklbwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13386 fn vpunpcklbw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13387 self.emit(VPUNPCKLBW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13388 }
13389}
13390
13391impl<'a> VpunpcklbwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13392 fn vpunpcklbw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13393 self.emit(VPUNPCKLBW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13394 }
13395}
13396
13397impl<'a> VpunpcklbwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13398 fn vpunpcklbw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13399 self.emit(VPUNPCKLBW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13400 }
13401}
13402
13403impl<'a> VpunpcklbwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13404 fn vpunpcklbw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13405 self.emit(VPUNPCKLBW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13406 }
13407}
13408
13409impl<'a> VpunpcklbwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13410 fn vpunpcklbw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13411 self.emit(VPUNPCKLBW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13412 }
13413}
13414
13415impl<'a> VpunpcklbwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13416 fn vpunpcklbw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13417 self.emit(VPUNPCKLBW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13418 }
13419}
13420
13421/// `VPUNPCKLBW_MASK` (VPUNPCKLBW).
13422/// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
13423///
13424///
13425/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
13426///
13427/// Supported operand variants:
13428///
13429/// ```text
13430/// +---+---------------+
13431/// | # | Operands |
13432/// +---+---------------+
13433/// | 1 | Xmm, Xmm, Mem |
13434/// | 2 | Xmm, Xmm, Xmm |
13435/// | 3 | Ymm, Ymm, Mem |
13436/// | 4 | Ymm, Ymm, Ymm |
13437/// | 5 | Zmm, Zmm, Mem |
13438/// | 6 | Zmm, Zmm, Zmm |
13439/// +---+---------------+
13440/// ```
13441pub trait VpunpcklbwMaskEmitter<A, B, C> {
13442 fn vpunpcklbw_mask(&mut self, op0: A, op1: B, op2: C);
13443}
13444
13445impl<'a> VpunpcklbwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13446 fn vpunpcklbw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13447 self.emit(VPUNPCKLBW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13448 }
13449}
13450
13451impl<'a> VpunpcklbwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13452 fn vpunpcklbw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13453 self.emit(VPUNPCKLBW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13454 }
13455}
13456
13457impl<'a> VpunpcklbwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13458 fn vpunpcklbw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13459 self.emit(VPUNPCKLBW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13460 }
13461}
13462
13463impl<'a> VpunpcklbwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13464 fn vpunpcklbw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13465 self.emit(VPUNPCKLBW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13466 }
13467}
13468
13469impl<'a> VpunpcklbwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13470 fn vpunpcklbw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13471 self.emit(VPUNPCKLBW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13472 }
13473}
13474
13475impl<'a> VpunpcklbwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13476 fn vpunpcklbw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13477 self.emit(VPUNPCKLBW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13478 }
13479}
13480
13481/// `VPUNPCKLBW_MASKZ` (VPUNPCKLBW).
13482/// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
13483///
13484///
13485/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
13486///
13487/// Supported operand variants:
13488///
13489/// ```text
13490/// +---+---------------+
13491/// | # | Operands |
13492/// +---+---------------+
13493/// | 1 | Xmm, Xmm, Mem |
13494/// | 2 | Xmm, Xmm, Xmm |
13495/// | 3 | Ymm, Ymm, Mem |
13496/// | 4 | Ymm, Ymm, Ymm |
13497/// | 5 | Zmm, Zmm, Mem |
13498/// | 6 | Zmm, Zmm, Zmm |
13499/// +---+---------------+
13500/// ```
13501pub trait VpunpcklbwMaskzEmitter<A, B, C> {
13502 fn vpunpcklbw_maskz(&mut self, op0: A, op1: B, op2: C);
13503}
13504
13505impl<'a> VpunpcklbwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13506 fn vpunpcklbw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13507 self.emit(VPUNPCKLBW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13508 }
13509}
13510
13511impl<'a> VpunpcklbwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13512 fn vpunpcklbw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13513 self.emit(VPUNPCKLBW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13514 }
13515}
13516
13517impl<'a> VpunpcklbwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13518 fn vpunpcklbw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13519 self.emit(VPUNPCKLBW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13520 }
13521}
13522
13523impl<'a> VpunpcklbwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13524 fn vpunpcklbw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13525 self.emit(VPUNPCKLBW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13526 }
13527}
13528
13529impl<'a> VpunpcklbwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13530 fn vpunpcklbw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13531 self.emit(VPUNPCKLBW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13532 }
13533}
13534
13535impl<'a> VpunpcklbwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13536 fn vpunpcklbw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13537 self.emit(VPUNPCKLBW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13538 }
13539}
13540
13541/// `VPUNPCKLWD` (VPUNPCKLWD).
13542/// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
13543///
13544///
13545/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
13546///
13547/// Supported operand variants:
13548///
13549/// ```text
13550/// +---+---------------+
13551/// | # | Operands |
13552/// +---+---------------+
13553/// | 1 | Xmm, Xmm, Mem |
13554/// | 2 | Xmm, Xmm, Xmm |
13555/// | 3 | Ymm, Ymm, Mem |
13556/// | 4 | Ymm, Ymm, Ymm |
13557/// | 5 | Zmm, Zmm, Mem |
13558/// | 6 | Zmm, Zmm, Zmm |
13559/// +---+---------------+
13560/// ```
13561pub trait VpunpcklwdEmitter<A, B, C> {
13562 fn vpunpcklwd(&mut self, op0: A, op1: B, op2: C);
13563}
13564
13565impl<'a> VpunpcklwdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13566 fn vpunpcklwd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13567 self.emit(VPUNPCKLWD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13568 }
13569}
13570
13571impl<'a> VpunpcklwdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13572 fn vpunpcklwd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13573 self.emit(VPUNPCKLWD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13574 }
13575}
13576
13577impl<'a> VpunpcklwdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13578 fn vpunpcklwd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13579 self.emit(VPUNPCKLWD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13580 }
13581}
13582
13583impl<'a> VpunpcklwdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13584 fn vpunpcklwd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13585 self.emit(VPUNPCKLWD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13586 }
13587}
13588
13589impl<'a> VpunpcklwdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13590 fn vpunpcklwd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13591 self.emit(VPUNPCKLWD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13592 }
13593}
13594
13595impl<'a> VpunpcklwdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13596 fn vpunpcklwd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13597 self.emit(VPUNPCKLWD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13598 }
13599}
13600
13601/// `VPUNPCKLWD_MASK` (VPUNPCKLWD).
13602/// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
13603///
13604///
13605/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
13606///
13607/// Supported operand variants:
13608///
13609/// ```text
13610/// +---+---------------+
13611/// | # | Operands |
13612/// +---+---------------+
13613/// | 1 | Xmm, Xmm, Mem |
13614/// | 2 | Xmm, Xmm, Xmm |
13615/// | 3 | Ymm, Ymm, Mem |
13616/// | 4 | Ymm, Ymm, Ymm |
13617/// | 5 | Zmm, Zmm, Mem |
13618/// | 6 | Zmm, Zmm, Zmm |
13619/// +---+---------------+
13620/// ```
13621pub trait VpunpcklwdMaskEmitter<A, B, C> {
13622 fn vpunpcklwd_mask(&mut self, op0: A, op1: B, op2: C);
13623}
13624
13625impl<'a> VpunpcklwdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13626 fn vpunpcklwd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13627 self.emit(VPUNPCKLWD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13628 }
13629}
13630
13631impl<'a> VpunpcklwdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13632 fn vpunpcklwd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13633 self.emit(VPUNPCKLWD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13634 }
13635}
13636
13637impl<'a> VpunpcklwdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13638 fn vpunpcklwd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13639 self.emit(VPUNPCKLWD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13640 }
13641}
13642
13643impl<'a> VpunpcklwdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13644 fn vpunpcklwd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13645 self.emit(VPUNPCKLWD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13646 }
13647}
13648
13649impl<'a> VpunpcklwdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13650 fn vpunpcklwd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13651 self.emit(VPUNPCKLWD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13652 }
13653}
13654
13655impl<'a> VpunpcklwdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13656 fn vpunpcklwd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13657 self.emit(VPUNPCKLWD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13658 }
13659}
13660
13661/// `VPUNPCKLWD_MASKZ` (VPUNPCKLWD).
13662/// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
13663///
13664///
13665/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
13666///
13667/// Supported operand variants:
13668///
13669/// ```text
13670/// +---+---------------+
13671/// | # | Operands |
13672/// +---+---------------+
13673/// | 1 | Xmm, Xmm, Mem |
13674/// | 2 | Xmm, Xmm, Xmm |
13675/// | 3 | Ymm, Ymm, Mem |
13676/// | 4 | Ymm, Ymm, Ymm |
13677/// | 5 | Zmm, Zmm, Mem |
13678/// | 6 | Zmm, Zmm, Zmm |
13679/// +---+---------------+
13680/// ```
13681pub trait VpunpcklwdMaskzEmitter<A, B, C> {
13682 fn vpunpcklwd_maskz(&mut self, op0: A, op1: B, op2: C);
13683}
13684
13685impl<'a> VpunpcklwdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
13686 fn vpunpcklwd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
13687 self.emit(VPUNPCKLWD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13688 }
13689}
13690
13691impl<'a> VpunpcklwdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
13692 fn vpunpcklwd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
13693 self.emit(VPUNPCKLWD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13694 }
13695}
13696
13697impl<'a> VpunpcklwdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
13698 fn vpunpcklwd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
13699 self.emit(VPUNPCKLWD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13700 }
13701}
13702
13703impl<'a> VpunpcklwdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
13704 fn vpunpcklwd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
13705 self.emit(VPUNPCKLWD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13706 }
13707}
13708
13709impl<'a> VpunpcklwdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
13710 fn vpunpcklwd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
13711 self.emit(VPUNPCKLWD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13712 }
13713}
13714
13715impl<'a> VpunpcklwdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
13716 fn vpunpcklwd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
13717 self.emit(VPUNPCKLWD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
13718 }
13719}
13720
13721
13722impl<'a> Assembler<'a> {
13723 /// `KADDD` (KADDD).
13724 /// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
13725 ///
13726 ///
13727 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
13728 ///
13729 /// Supported operand variants:
13730 ///
13731 /// ```text
13732 /// +---+------------------+
13733 /// | # | Operands |
13734 /// +---+------------------+
13735 /// | 1 | KReg, KReg, KReg |
13736 /// +---+------------------+
13737 /// ```
13738 #[inline]
13739 pub fn kaddd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13740 where Assembler<'a>: KadddEmitter<A, B, C> {
13741 <Self as KadddEmitter<A, B, C>>::kaddd(self, op0, op1, op2);
13742 }
13743 /// `KADDQ` (KADDQ).
13744 /// Adds the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
13745 ///
13746 ///
13747 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KADDW%3AKADDB%3AKADDQ%3AKADDD.html).
13748 ///
13749 /// Supported operand variants:
13750 ///
13751 /// ```text
13752 /// +---+------------------+
13753 /// | # | Operands |
13754 /// +---+------------------+
13755 /// | 1 | KReg, KReg, KReg |
13756 /// +---+------------------+
13757 /// ```
13758 #[inline]
13759 pub fn kaddq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13760 where Assembler<'a>: KaddqEmitter<A, B, C> {
13761 <Self as KaddqEmitter<A, B, C>>::kaddq(self, op0, op1, op2);
13762 }
13763 /// `KANDD` (KANDD).
13764 /// Performs a bitwise AND between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
13765 ///
13766 ///
13767 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDW%3AKANDB%3AKANDQ%3AKANDD.html).
13768 ///
13769 /// Supported operand variants:
13770 ///
13771 /// ```text
13772 /// +---+------------------+
13773 /// | # | Operands |
13774 /// +---+------------------+
13775 /// | 1 | KReg, KReg, KReg |
13776 /// +---+------------------+
13777 /// ```
13778 #[inline]
13779 pub fn kandd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13780 where Assembler<'a>: KanddEmitter<A, B, C> {
13781 <Self as KanddEmitter<A, B, C>>::kandd(self, op0, op1, op2);
13782 }
13783 /// `KANDND` (KANDND).
13784 /// Performs a bitwise AND NOT between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
13785 ///
13786 ///
13787 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDNW%3AKANDNB%3AKANDNQ%3AKANDND.html).
13788 ///
13789 /// Supported operand variants:
13790 ///
13791 /// ```text
13792 /// +---+------------------+
13793 /// | # | Operands |
13794 /// +---+------------------+
13795 /// | 1 | KReg, KReg, KReg |
13796 /// +---+------------------+
13797 /// ```
13798 #[inline]
13799 pub fn kandnd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13800 where Assembler<'a>: KandndEmitter<A, B, C> {
13801 <Self as KandndEmitter<A, B, C>>::kandnd(self, op0, op1, op2);
13802 }
13803 /// `KANDNQ` (KANDNQ).
13804 /// Performs a bitwise AND NOT between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
13805 ///
13806 ///
13807 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDNW%3AKANDNB%3AKANDNQ%3AKANDND.html).
13808 ///
13809 /// Supported operand variants:
13810 ///
13811 /// ```text
13812 /// +---+------------------+
13813 /// | # | Operands |
13814 /// +---+------------------+
13815 /// | 1 | KReg, KReg, KReg |
13816 /// +---+------------------+
13817 /// ```
13818 #[inline]
13819 pub fn kandnq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13820 where Assembler<'a>: KandnqEmitter<A, B, C> {
13821 <Self as KandnqEmitter<A, B, C>>::kandnq(self, op0, op1, op2);
13822 }
13823 /// `KANDQ` (KANDQ).
13824 /// Performs a bitwise AND between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1.
13825 ///
13826 ///
13827 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KANDW%3AKANDB%3AKANDQ%3AKANDD.html).
13828 ///
13829 /// Supported operand variants:
13830 ///
13831 /// ```text
13832 /// +---+------------------+
13833 /// | # | Operands |
13834 /// +---+------------------+
13835 /// | 1 | KReg, KReg, KReg |
13836 /// +---+------------------+
13837 /// ```
13838 #[inline]
13839 pub fn kandq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13840 where Assembler<'a>: KandqEmitter<A, B, C> {
13841 <Self as KandqEmitter<A, B, C>>::kandq(self, op0, op1, op2);
13842 }
13843 /// `KMOVD` (KMOVD).
13844 /// Copies values from the source operand (second operand) to the destination operand (first operand). The source and destination operands can be mask registers, memory location or general purpose. The instruction cannot be used to transfer data between general purpose registers and or memory locations.
13845 ///
13846 ///
13847 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KMOVW%3AKMOVB%3AKMOVQ%3AKMOVD.html).
13848 ///
13849 /// Supported operand variants:
13850 ///
13851 /// ```text
13852 /// +---+------------+
13853 /// | # | Operands |
13854 /// +---+------------+
13855 /// | 1 | Gpd, KReg |
13856 /// | 2 | KReg, Gpd |
13857 /// | 3 | KReg, KReg |
13858 /// | 4 | KReg, Mem |
13859 /// | 5 | Mem, KReg |
13860 /// +---+------------+
13861 /// ```
13862 #[inline]
13863 pub fn kmovd<A, B>(&mut self, op0: A, op1: B)
13864 where Assembler<'a>: KmovdEmitter<A, B> {
13865 <Self as KmovdEmitter<A, B>>::kmovd(self, op0, op1);
13866 }
13867 /// `KMOVQ` (KMOVQ).
13868 /// Copies values from the source operand (second operand) to the destination operand (first operand). The source and destination operands can be mask registers, memory location or general purpose. The instruction cannot be used to transfer data between general purpose registers and or memory locations.
13869 ///
13870 ///
13871 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KMOVW%3AKMOVB%3AKMOVQ%3AKMOVD.html).
13872 ///
13873 /// Supported operand variants:
13874 ///
13875 /// ```text
13876 /// +---+------------+
13877 /// | # | Operands |
13878 /// +---+------------+
13879 /// | 1 | Gpd, KReg |
13880 /// | 2 | KReg, Gpd |
13881 /// | 3 | KReg, KReg |
13882 /// | 4 | KReg, Mem |
13883 /// | 5 | Mem, KReg |
13884 /// +---+------------+
13885 /// ```
13886 #[inline]
13887 pub fn kmovq<A, B>(&mut self, op0: A, op1: B)
13888 where Assembler<'a>: KmovqEmitter<A, B> {
13889 <Self as KmovqEmitter<A, B>>::kmovq(self, op0, op1);
13890 }
13891 /// `KNOTD` (KNOTD).
13892 /// Performs a bitwise NOT of vector mask k2 and writes the result into vector mask k1.
13893 ///
13894 ///
13895 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KNOTW%3AKNOTB%3AKNOTQ%3AKNOTD.html).
13896 ///
13897 /// Supported operand variants:
13898 ///
13899 /// ```text
13900 /// +---+------------+
13901 /// | # | Operands |
13902 /// +---+------------+
13903 /// | 1 | KReg, KReg |
13904 /// +---+------------+
13905 /// ```
13906 #[inline]
13907 pub fn knotd<A, B>(&mut self, op0: A, op1: B)
13908 where Assembler<'a>: KnotdEmitter<A, B> {
13909 <Self as KnotdEmitter<A, B>>::knotd(self, op0, op1);
13910 }
13911 /// `KNOTQ` (KNOTQ).
13912 /// Performs a bitwise NOT of vector mask k2 and writes the result into vector mask k1.
13913 ///
13914 ///
13915 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KNOTW%3AKNOTB%3AKNOTQ%3AKNOTD.html).
13916 ///
13917 /// Supported operand variants:
13918 ///
13919 /// ```text
13920 /// +---+------------+
13921 /// | # | Operands |
13922 /// +---+------------+
13923 /// | 1 | KReg, KReg |
13924 /// +---+------------+
13925 /// ```
13926 #[inline]
13927 pub fn knotq<A, B>(&mut self, op0: A, op1: B)
13928 where Assembler<'a>: KnotqEmitter<A, B> {
13929 <Self as KnotqEmitter<A, B>>::knotq(self, op0, op1);
13930 }
13931 /// `KORD` (KORD).
13932 /// Performs a bitwise OR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
13933 ///
13934 ///
13935 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORW%3AKORB%3AKORQ%3AKORD.html).
13936 ///
13937 /// Supported operand variants:
13938 ///
13939 /// ```text
13940 /// +---+------------------+
13941 /// | # | Operands |
13942 /// +---+------------------+
13943 /// | 1 | KReg, KReg, KReg |
13944 /// +---+------------------+
13945 /// ```
13946 #[inline]
13947 pub fn kord<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13948 where Assembler<'a>: KordEmitter<A, B, C> {
13949 <Self as KordEmitter<A, B, C>>::kord(self, op0, op1, op2);
13950 }
13951 /// `KORQ` (KORQ).
13952 /// Performs a bitwise OR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
13953 ///
13954 ///
13955 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORW%3AKORB%3AKORQ%3AKORD.html).
13956 ///
13957 /// Supported operand variants:
13958 ///
13959 /// ```text
13960 /// +---+------------------+
13961 /// | # | Operands |
13962 /// +---+------------------+
13963 /// | 1 | KReg, KReg, KReg |
13964 /// +---+------------------+
13965 /// ```
13966 #[inline]
13967 pub fn korq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
13968 where Assembler<'a>: KorqEmitter<A, B, C> {
13969 <Self as KorqEmitter<A, B, C>>::korq(self, op0, op1, op2);
13970 }
13971 /// `KORTESTD` (KORTESTD).
13972 /// Performs a bitwise OR between the vector mask register k2, and the vector mask register k1, and sets CF and ZF based on the operation result.
13973 ///
13974 ///
13975 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORTESTW%3AKORTESTB%3AKORTESTQ%3AKORTESTD.html).
13976 ///
13977 /// Supported operand variants:
13978 ///
13979 /// ```text
13980 /// +---+------------+
13981 /// | # | Operands |
13982 /// +---+------------+
13983 /// | 1 | KReg, KReg |
13984 /// +---+------------+
13985 /// ```
13986 #[inline]
13987 pub fn kortestd<A, B>(&mut self, op0: A, op1: B)
13988 where Assembler<'a>: KortestdEmitter<A, B> {
13989 <Self as KortestdEmitter<A, B>>::kortestd(self, op0, op1);
13990 }
13991 /// `KORTESTQ` (KORTESTQ).
13992 /// Performs a bitwise OR between the vector mask register k2, and the vector mask register k1, and sets CF and ZF based on the operation result.
13993 ///
13994 ///
13995 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KORTESTW%3AKORTESTB%3AKORTESTQ%3AKORTESTD.html).
13996 ///
13997 /// Supported operand variants:
13998 ///
13999 /// ```text
14000 /// +---+------------+
14001 /// | # | Operands |
14002 /// +---+------------+
14003 /// | 1 | KReg, KReg |
14004 /// +---+------------+
14005 /// ```
14006 #[inline]
14007 pub fn kortestq<A, B>(&mut self, op0: A, op1: B)
14008 where Assembler<'a>: KortestqEmitter<A, B> {
14009 <Self as KortestqEmitter<A, B>>::kortestq(self, op0, op1);
14010 }
14011 /// `KSHIFTLD` (KSHIFTLD).
14012 /// Shifts 8/16/32/64 bits in the second operand (source operand) left by the count specified in immediate byte and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
14013 ///
14014 ///
14015 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTLW%3AKSHIFTLB%3AKSHIFTLQ%3AKSHIFTLD.html).
14016 ///
14017 /// Supported operand variants:
14018 ///
14019 /// ```text
14020 /// +---+-----------------+
14021 /// | # | Operands |
14022 /// +---+-----------------+
14023 /// | 1 | KReg, KReg, Imm |
14024 /// +---+-----------------+
14025 /// ```
14026 #[inline]
14027 pub fn kshiftld<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14028 where Assembler<'a>: KshiftldEmitter<A, B, C> {
14029 <Self as KshiftldEmitter<A, B, C>>::kshiftld(self, op0, op1, op2);
14030 }
14031 /// `KSHIFTLQ` (KSHIFTLQ).
14032 /// Shifts 8/16/32/64 bits in the second operand (source operand) left by the count specified in immediate byte and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
14033 ///
14034 ///
14035 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTLW%3AKSHIFTLB%3AKSHIFTLQ%3AKSHIFTLD.html).
14036 ///
14037 /// Supported operand variants:
14038 ///
14039 /// ```text
14040 /// +---+-----------------+
14041 /// | # | Operands |
14042 /// +---+-----------------+
14043 /// | 1 | KReg, KReg, Imm |
14044 /// +---+-----------------+
14045 /// ```
14046 #[inline]
14047 pub fn kshiftlq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14048 where Assembler<'a>: KshiftlqEmitter<A, B, C> {
14049 <Self as KshiftlqEmitter<A, B, C>>::kshiftlq(self, op0, op1, op2);
14050 }
14051 /// `KSHIFTRD` (KSHIFTRD).
14052 /// Shifts 8/16/32/64 bits in the second operand (source operand) right by the count specified in immediate and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
14053 ///
14054 ///
14055 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTRW%3AKSHIFTRB%3AKSHIFTRQ%3AKSHIFTRD.html).
14056 ///
14057 /// Supported operand variants:
14058 ///
14059 /// ```text
14060 /// +---+-----------------+
14061 /// | # | Operands |
14062 /// +---+-----------------+
14063 /// | 1 | KReg, KReg, Imm |
14064 /// +---+-----------------+
14065 /// ```
14066 #[inline]
14067 pub fn kshiftrd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14068 where Assembler<'a>: KshiftrdEmitter<A, B, C> {
14069 <Self as KshiftrdEmitter<A, B, C>>::kshiftrd(self, op0, op1, op2);
14070 }
14071 /// `KSHIFTRQ` (KSHIFTRQ).
14072 /// Shifts 8/16/32/64 bits in the second operand (source operand) right by the count specified in immediate and place the least significant 8/16/32/64 bits of the result in the destination operand. The higher bits of the destination are zero-extended. The destination is set to zero if the count value is greater than 7 (for byte shift), 15 (for word shift), 31 (for doubleword shift) or 63 (for quadword shift).
14073 ///
14074 ///
14075 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KSHIFTRW%3AKSHIFTRB%3AKSHIFTRQ%3AKSHIFTRD.html).
14076 ///
14077 /// Supported operand variants:
14078 ///
14079 /// ```text
14080 /// +---+-----------------+
14081 /// | # | Operands |
14082 /// +---+-----------------+
14083 /// | 1 | KReg, KReg, Imm |
14084 /// +---+-----------------+
14085 /// ```
14086 #[inline]
14087 pub fn kshiftrq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14088 where Assembler<'a>: KshiftrqEmitter<A, B, C> {
14089 <Self as KshiftrqEmitter<A, B, C>>::kshiftrq(self, op0, op1, op2);
14090 }
14091 /// `KTESTD` (KTESTD).
14092 /// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
14093 ///
14094 ///
14095 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
14096 ///
14097 /// Supported operand variants:
14098 ///
14099 /// ```text
14100 /// +---+------------+
14101 /// | # | Operands |
14102 /// +---+------------+
14103 /// | 1 | KReg, KReg |
14104 /// +---+------------+
14105 /// ```
14106 #[inline]
14107 pub fn ktestd<A, B>(&mut self, op0: A, op1: B)
14108 where Assembler<'a>: KtestdEmitter<A, B> {
14109 <Self as KtestdEmitter<A, B>>::ktestd(self, op0, op1);
14110 }
14111 /// `KTESTQ` (KTESTQ).
14112 /// Performs a bitwise comparison of the bits of the first source operand and corresponding bits in the second source operand. If the AND operation produces all zeros, the ZF is set else the ZF is clear. If the bitwise AND operation of the inverted first source operand with the second source operand produces all zeros the CF is set else the CF is clear. Only the EFLAGS register is updated.
14113 ///
14114 ///
14115 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KTESTW%3AKTESTB%3AKTESTQ%3AKTESTD.html).
14116 ///
14117 /// Supported operand variants:
14118 ///
14119 /// ```text
14120 /// +---+------------+
14121 /// | # | Operands |
14122 /// +---+------------+
14123 /// | 1 | KReg, KReg |
14124 /// +---+------------+
14125 /// ```
14126 #[inline]
14127 pub fn ktestq<A, B>(&mut self, op0: A, op1: B)
14128 where Assembler<'a>: KtestqEmitter<A, B> {
14129 <Self as KtestqEmitter<A, B>>::ktestq(self, op0, op1);
14130 }
14131 /// `KUNPCKDQ` (KUNPCKDQ).
14132 /// Unpacks the lower 8/16/32 bits of the second and third operands (source operands) into the low part of the first operand (destination operand), starting from the low bytes. The result is zero-extended in the destination.
14133 ///
14134 ///
14135 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KUNPCKBW%3AKUNPCKWD%3AKUNPCKDQ.html).
14136 ///
14137 /// Supported operand variants:
14138 ///
14139 /// ```text
14140 /// +---+------------------+
14141 /// | # | Operands |
14142 /// +---+------------------+
14143 /// | 1 | KReg, KReg, KReg |
14144 /// +---+------------------+
14145 /// ```
14146 #[inline]
14147 pub fn kunpckdq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14148 where Assembler<'a>: KunpckdqEmitter<A, B, C> {
14149 <Self as KunpckdqEmitter<A, B, C>>::kunpckdq(self, op0, op1, op2);
14150 }
14151 /// `KUNPCKWD` (KUNPCKWD).
14152 /// Unpacks the lower 8/16/32 bits of the second and third operands (source operands) into the low part of the first operand (destination operand), starting from the low bytes. The result is zero-extended in the destination.
14153 ///
14154 ///
14155 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KUNPCKBW%3AKUNPCKWD%3AKUNPCKDQ.html).
14156 ///
14157 /// Supported operand variants:
14158 ///
14159 /// ```text
14160 /// +---+------------------+
14161 /// | # | Operands |
14162 /// +---+------------------+
14163 /// | 1 | KReg, KReg, KReg |
14164 /// +---+------------------+
14165 /// ```
14166 #[inline]
14167 pub fn kunpckwd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14168 where Assembler<'a>: KunpckwdEmitter<A, B, C> {
14169 <Self as KunpckwdEmitter<A, B, C>>::kunpckwd(self, op0, op1, op2);
14170 }
14171 /// `KXNORD` (KXNORD).
14172 /// Performs a bitwise XNOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
14173 ///
14174 ///
14175 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXNORW%3AKXNORB%3AKXNORQ%3AKXNORD.html).
14176 ///
14177 /// Supported operand variants:
14178 ///
14179 /// ```text
14180 /// +---+------------------+
14181 /// | # | Operands |
14182 /// +---+------------------+
14183 /// | 1 | KReg, KReg, KReg |
14184 /// +---+------------------+
14185 /// ```
14186 #[inline]
14187 pub fn kxnord<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14188 where Assembler<'a>: KxnordEmitter<A, B, C> {
14189 <Self as KxnordEmitter<A, B, C>>::kxnord(self, op0, op1, op2);
14190 }
14191 /// `KXNORQ` (KXNORQ).
14192 /// Performs a bitwise XNOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
14193 ///
14194 ///
14195 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXNORW%3AKXNORB%3AKXNORQ%3AKXNORD.html).
14196 ///
14197 /// Supported operand variants:
14198 ///
14199 /// ```text
14200 /// +---+------------------+
14201 /// | # | Operands |
14202 /// +---+------------------+
14203 /// | 1 | KReg, KReg, KReg |
14204 /// +---+------------------+
14205 /// ```
14206 #[inline]
14207 pub fn kxnorq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14208 where Assembler<'a>: KxnorqEmitter<A, B, C> {
14209 <Self as KxnorqEmitter<A, B, C>>::kxnorq(self, op0, op1, op2);
14210 }
14211 /// `KXORD` (KXORD).
14212 /// Performs a bitwise XOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
14213 ///
14214 ///
14215 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXORW%3AKXORB%3AKXORQ%3AKXORD.html).
14216 ///
14217 /// Supported operand variants:
14218 ///
14219 /// ```text
14220 /// +---+------------------+
14221 /// | # | Operands |
14222 /// +---+------------------+
14223 /// | 1 | KReg, KReg, KReg |
14224 /// +---+------------------+
14225 /// ```
14226 #[inline]
14227 pub fn kxord<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14228 where Assembler<'a>: KxordEmitter<A, B, C> {
14229 <Self as KxordEmitter<A, B, C>>::kxord(self, op0, op1, op2);
14230 }
14231 /// `KXORQ` (KXORQ).
14232 /// Performs a bitwise XOR between the vector mask k2 and the vector mask k3, and writes the result into vector mask k1 (three-operand form).
14233 ///
14234 ///
14235 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/KXORW%3AKXORB%3AKXORQ%3AKXORD.html).
14236 ///
14237 /// Supported operand variants:
14238 ///
14239 /// ```text
14240 /// +---+------------------+
14241 /// | # | Operands |
14242 /// +---+------------------+
14243 /// | 1 | KReg, KReg, KReg |
14244 /// +---+------------------+
14245 /// ```
14246 #[inline]
14247 pub fn kxorq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14248 where Assembler<'a>: KxorqEmitter<A, B, C> {
14249 <Self as KxorqEmitter<A, B, C>>::kxorq(self, op0, op1, op2);
14250 }
14251 /// `VDBPSADBW` (VDBPSADBW).
14252 /// Compute packed SAD (sum of absolute differences) word results of unsigned bytes from two 32-bit dword elements. Packed SAD word results are calculated in multiples of qword superblocks, producing 4 SAD word results in each 64-bit superblock of the destination register.
14253 ///
14254 ///
14255 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VDBPSADBW.html).
14256 ///
14257 /// Supported operand variants:
14258 ///
14259 /// ```text
14260 /// +---+--------------------+
14261 /// | # | Operands |
14262 /// +---+--------------------+
14263 /// | 1 | Xmm, Xmm, Mem, Imm |
14264 /// | 2 | Xmm, Xmm, Xmm, Imm |
14265 /// | 3 | Ymm, Ymm, Mem, Imm |
14266 /// | 4 | Ymm, Ymm, Ymm, Imm |
14267 /// | 5 | Zmm, Zmm, Mem, Imm |
14268 /// | 6 | Zmm, Zmm, Zmm, Imm |
14269 /// +---+--------------------+
14270 /// ```
14271 #[inline]
14272 pub fn vdbpsadbw<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
14273 where Assembler<'a>: VdbpsadbwEmitter<A, B, C, D> {
14274 <Self as VdbpsadbwEmitter<A, B, C, D>>::vdbpsadbw(self, op0, op1, op2, op3);
14275 }
14276 /// `VDBPSADBW_MASK` (VDBPSADBW).
14277 /// Compute packed SAD (sum of absolute differences) word results of unsigned bytes from two 32-bit dword elements. Packed SAD word results are calculated in multiples of qword superblocks, producing 4 SAD word results in each 64-bit superblock of the destination register.
14278 ///
14279 ///
14280 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VDBPSADBW.html).
14281 ///
14282 /// Supported operand variants:
14283 ///
14284 /// ```text
14285 /// +---+--------------------+
14286 /// | # | Operands |
14287 /// +---+--------------------+
14288 /// | 1 | Xmm, Xmm, Mem, Imm |
14289 /// | 2 | Xmm, Xmm, Xmm, Imm |
14290 /// | 3 | Ymm, Ymm, Mem, Imm |
14291 /// | 4 | Ymm, Ymm, Ymm, Imm |
14292 /// | 5 | Zmm, Zmm, Mem, Imm |
14293 /// | 6 | Zmm, Zmm, Zmm, Imm |
14294 /// +---+--------------------+
14295 /// ```
14296 #[inline]
14297 pub fn vdbpsadbw_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
14298 where Assembler<'a>: VdbpsadbwMaskEmitter<A, B, C, D> {
14299 <Self as VdbpsadbwMaskEmitter<A, B, C, D>>::vdbpsadbw_mask(self, op0, op1, op2, op3);
14300 }
14301 /// `VDBPSADBW_MASKZ` (VDBPSADBW).
14302 /// Compute packed SAD (sum of absolute differences) word results of unsigned bytes from two 32-bit dword elements. Packed SAD word results are calculated in multiples of qword superblocks, producing 4 SAD word results in each 64-bit superblock of the destination register.
14303 ///
14304 ///
14305 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VDBPSADBW.html).
14306 ///
14307 /// Supported operand variants:
14308 ///
14309 /// ```text
14310 /// +---+--------------------+
14311 /// | # | Operands |
14312 /// +---+--------------------+
14313 /// | 1 | Xmm, Xmm, Mem, Imm |
14314 /// | 2 | Xmm, Xmm, Xmm, Imm |
14315 /// | 3 | Ymm, Ymm, Mem, Imm |
14316 /// | 4 | Ymm, Ymm, Ymm, Imm |
14317 /// | 5 | Zmm, Zmm, Mem, Imm |
14318 /// | 6 | Zmm, Zmm, Zmm, Imm |
14319 /// +---+--------------------+
14320 /// ```
14321 #[inline]
14322 pub fn vdbpsadbw_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
14323 where Assembler<'a>: VdbpsadbwMaskzEmitter<A, B, C, D> {
14324 <Self as VdbpsadbwMaskzEmitter<A, B, C, D>>::vdbpsadbw_maskz(self, op0, op1, op2, op3);
14325 }
14326 /// `VMOVDQU16` (VMOVDQU16).
14327 /// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
14328 ///
14329 ///
14330 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
14331 ///
14332 /// Supported operand variants:
14333 ///
14334 /// ```text
14335 /// +---+----------+
14336 /// | # | Operands |
14337 /// +---+----------+
14338 /// | 1 | Mem, Xmm |
14339 /// | 2 | Mem, Ymm |
14340 /// | 3 | Mem, Zmm |
14341 /// | 4 | Xmm, Mem |
14342 /// | 5 | Xmm, Xmm |
14343 /// | 6 | Ymm, Mem |
14344 /// | 7 | Ymm, Ymm |
14345 /// | 8 | Zmm, Mem |
14346 /// | 9 | Zmm, Zmm |
14347 /// +---+----------+
14348 /// ```
14349 #[inline]
14350 pub fn vmovdqu16<A, B>(&mut self, op0: A, op1: B)
14351 where Assembler<'a>: Vmovdqu16Emitter<A, B> {
14352 <Self as Vmovdqu16Emitter<A, B>>::vmovdqu16(self, op0, op1);
14353 }
14354 /// `VMOVDQU16_MASK` (VMOVDQU16).
14355 /// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
14356 ///
14357 ///
14358 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
14359 ///
14360 /// Supported operand variants:
14361 ///
14362 /// ```text
14363 /// +---+----------+
14364 /// | # | Operands |
14365 /// +---+----------+
14366 /// | 1 | Mem, Xmm |
14367 /// | 2 | Mem, Ymm |
14368 /// | 3 | Mem, Zmm |
14369 /// | 4 | Xmm, Mem |
14370 /// | 5 | Xmm, Xmm |
14371 /// | 6 | Ymm, Mem |
14372 /// | 7 | Ymm, Ymm |
14373 /// | 8 | Zmm, Mem |
14374 /// | 9 | Zmm, Zmm |
14375 /// +---+----------+
14376 /// ```
14377 #[inline]
14378 pub fn vmovdqu16_mask<A, B>(&mut self, op0: A, op1: B)
14379 where Assembler<'a>: Vmovdqu16MaskEmitter<A, B> {
14380 <Self as Vmovdqu16MaskEmitter<A, B>>::vmovdqu16_mask(self, op0, op1);
14381 }
14382 /// `VMOVDQU16_MASKZ` (VMOVDQU16).
14383 /// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
14384 ///
14385 ///
14386 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
14387 ///
14388 /// Supported operand variants:
14389 ///
14390 /// ```text
14391 /// +---+----------+
14392 /// | # | Operands |
14393 /// +---+----------+
14394 /// | 1 | Xmm, Mem |
14395 /// | 2 | Xmm, Xmm |
14396 /// | 3 | Ymm, Mem |
14397 /// | 4 | Ymm, Ymm |
14398 /// | 5 | Zmm, Mem |
14399 /// | 6 | Zmm, Zmm |
14400 /// +---+----------+
14401 /// ```
14402 #[inline]
14403 pub fn vmovdqu16_maskz<A, B>(&mut self, op0: A, op1: B)
14404 where Assembler<'a>: Vmovdqu16MaskzEmitter<A, B> {
14405 <Self as Vmovdqu16MaskzEmitter<A, B>>::vmovdqu16_maskz(self, op0, op1);
14406 }
14407 /// `VMOVDQU8` (VMOVDQU8).
14408 /// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
14409 ///
14410 ///
14411 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
14412 ///
14413 /// Supported operand variants:
14414 ///
14415 /// ```text
14416 /// +---+----------+
14417 /// | # | Operands |
14418 /// +---+----------+
14419 /// | 1 | Mem, Xmm |
14420 /// | 2 | Mem, Ymm |
14421 /// | 3 | Mem, Zmm |
14422 /// | 4 | Xmm, Mem |
14423 /// | 5 | Xmm, Xmm |
14424 /// | 6 | Ymm, Mem |
14425 /// | 7 | Ymm, Ymm |
14426 /// | 8 | Zmm, Mem |
14427 /// | 9 | Zmm, Zmm |
14428 /// +---+----------+
14429 /// ```
14430 #[inline]
14431 pub fn vmovdqu8<A, B>(&mut self, op0: A, op1: B)
14432 where Assembler<'a>: Vmovdqu8Emitter<A, B> {
14433 <Self as Vmovdqu8Emitter<A, B>>::vmovdqu8(self, op0, op1);
14434 }
14435 /// `VMOVDQU8_MASK` (VMOVDQU8).
14436 /// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
14437 ///
14438 ///
14439 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
14440 ///
14441 /// Supported operand variants:
14442 ///
14443 /// ```text
14444 /// +---+----------+
14445 /// | # | Operands |
14446 /// +---+----------+
14447 /// | 1 | Mem, Xmm |
14448 /// | 2 | Mem, Ymm |
14449 /// | 3 | Mem, Zmm |
14450 /// | 4 | Xmm, Mem |
14451 /// | 5 | Xmm, Xmm |
14452 /// | 6 | Ymm, Mem |
14453 /// | 7 | Ymm, Ymm |
14454 /// | 8 | Zmm, Mem |
14455 /// | 9 | Zmm, Zmm |
14456 /// +---+----------+
14457 /// ```
14458 #[inline]
14459 pub fn vmovdqu8_mask<A, B>(&mut self, op0: A, op1: B)
14460 where Assembler<'a>: Vmovdqu8MaskEmitter<A, B> {
14461 <Self as Vmovdqu8MaskEmitter<A, B>>::vmovdqu8_mask(self, op0, op1);
14462 }
14463 /// `VMOVDQU8_MASKZ` (VMOVDQU8).
14464 /// Note: VEX.vvvv and EVEX.vvvv are reserved and must be 1111b otherwise instructions will #UD.
14465 ///
14466 ///
14467 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/MOVDQU%3AVMOVDQU8%3AVMOVDQU16%3AVMOVDQU32%3AVMOVDQU64.html).
14468 ///
14469 /// Supported operand variants:
14470 ///
14471 /// ```text
14472 /// +---+----------+
14473 /// | # | Operands |
14474 /// +---+----------+
14475 /// | 1 | Xmm, Mem |
14476 /// | 2 | Xmm, Xmm |
14477 /// | 3 | Ymm, Mem |
14478 /// | 4 | Ymm, Ymm |
14479 /// | 5 | Zmm, Mem |
14480 /// | 6 | Zmm, Zmm |
14481 /// +---+----------+
14482 /// ```
14483 #[inline]
14484 pub fn vmovdqu8_maskz<A, B>(&mut self, op0: A, op1: B)
14485 where Assembler<'a>: Vmovdqu8MaskzEmitter<A, B> {
14486 <Self as Vmovdqu8MaskzEmitter<A, B>>::vmovdqu8_maskz(self, op0, op1);
14487 }
14488 /// `VPABSB` (VPABSB).
14489 /// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
14490 ///
14491 ///
14492 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
14493 ///
14494 /// Supported operand variants:
14495 ///
14496 /// ```text
14497 /// +---+----------+
14498 /// | # | Operands |
14499 /// +---+----------+
14500 /// | 1 | Xmm, Mem |
14501 /// | 2 | Xmm, Xmm |
14502 /// | 3 | Ymm, Mem |
14503 /// | 4 | Ymm, Ymm |
14504 /// | 5 | Zmm, Mem |
14505 /// | 6 | Zmm, Zmm |
14506 /// +---+----------+
14507 /// ```
14508 #[inline]
14509 pub fn vpabsb<A, B>(&mut self, op0: A, op1: B)
14510 where Assembler<'a>: VpabsbEmitter<A, B> {
14511 <Self as VpabsbEmitter<A, B>>::vpabsb(self, op0, op1);
14512 }
14513 /// `VPABSB_MASK` (VPABSB).
14514 /// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
14515 ///
14516 ///
14517 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
14518 ///
14519 /// Supported operand variants:
14520 ///
14521 /// ```text
14522 /// +---+----------+
14523 /// | # | Operands |
14524 /// +---+----------+
14525 /// | 1 | Xmm, Mem |
14526 /// | 2 | Xmm, Xmm |
14527 /// | 3 | Ymm, Mem |
14528 /// | 4 | Ymm, Ymm |
14529 /// | 5 | Zmm, Mem |
14530 /// | 6 | Zmm, Zmm |
14531 /// +---+----------+
14532 /// ```
14533 #[inline]
14534 pub fn vpabsb_mask<A, B>(&mut self, op0: A, op1: B)
14535 where Assembler<'a>: VpabsbMaskEmitter<A, B> {
14536 <Self as VpabsbMaskEmitter<A, B>>::vpabsb_mask(self, op0, op1);
14537 }
14538 /// `VPABSB_MASKZ` (VPABSB).
14539 /// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
14540 ///
14541 ///
14542 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
14543 ///
14544 /// Supported operand variants:
14545 ///
14546 /// ```text
14547 /// +---+----------+
14548 /// | # | Operands |
14549 /// +---+----------+
14550 /// | 1 | Xmm, Mem |
14551 /// | 2 | Xmm, Xmm |
14552 /// | 3 | Ymm, Mem |
14553 /// | 4 | Ymm, Ymm |
14554 /// | 5 | Zmm, Mem |
14555 /// | 6 | Zmm, Zmm |
14556 /// +---+----------+
14557 /// ```
14558 #[inline]
14559 pub fn vpabsb_maskz<A, B>(&mut self, op0: A, op1: B)
14560 where Assembler<'a>: VpabsbMaskzEmitter<A, B> {
14561 <Self as VpabsbMaskzEmitter<A, B>>::vpabsb_maskz(self, op0, op1);
14562 }
14563 /// `VPABSW` (VPABSW).
14564 /// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
14565 ///
14566 ///
14567 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
14568 ///
14569 /// Supported operand variants:
14570 ///
14571 /// ```text
14572 /// +---+----------+
14573 /// | # | Operands |
14574 /// +---+----------+
14575 /// | 1 | Xmm, Mem |
14576 /// | 2 | Xmm, Xmm |
14577 /// | 3 | Ymm, Mem |
14578 /// | 4 | Ymm, Ymm |
14579 /// | 5 | Zmm, Mem |
14580 /// | 6 | Zmm, Zmm |
14581 /// +---+----------+
14582 /// ```
14583 #[inline]
14584 pub fn vpabsw<A, B>(&mut self, op0: A, op1: B)
14585 where Assembler<'a>: VpabswEmitter<A, B> {
14586 <Self as VpabswEmitter<A, B>>::vpabsw(self, op0, op1);
14587 }
14588 /// `VPABSW_MASK` (VPABSW).
14589 /// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
14590 ///
14591 ///
14592 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
14593 ///
14594 /// Supported operand variants:
14595 ///
14596 /// ```text
14597 /// +---+----------+
14598 /// | # | Operands |
14599 /// +---+----------+
14600 /// | 1 | Xmm, Mem |
14601 /// | 2 | Xmm, Xmm |
14602 /// | 3 | Ymm, Mem |
14603 /// | 4 | Ymm, Ymm |
14604 /// | 5 | Zmm, Mem |
14605 /// | 6 | Zmm, Zmm |
14606 /// +---+----------+
14607 /// ```
14608 #[inline]
14609 pub fn vpabsw_mask<A, B>(&mut self, op0: A, op1: B)
14610 where Assembler<'a>: VpabswMaskEmitter<A, B> {
14611 <Self as VpabswMaskEmitter<A, B>>::vpabsw_mask(self, op0, op1);
14612 }
14613 /// `VPABSW_MASKZ` (VPABSW).
14614 /// PABSB/W/D computes the absolute value of each data element of the source operand (the second operand) and stores the UNSIGNED results in the destination operand (the first operand). PABSB operates on signed bytes, PABSW operates on signed 16-bit words, and PABSD operates on signed 32-bit integers.
14615 ///
14616 ///
14617 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PABSB%3APABSW%3APABSD%3APABSQ.html).
14618 ///
14619 /// Supported operand variants:
14620 ///
14621 /// ```text
14622 /// +---+----------+
14623 /// | # | Operands |
14624 /// +---+----------+
14625 /// | 1 | Xmm, Mem |
14626 /// | 2 | Xmm, Xmm |
14627 /// | 3 | Ymm, Mem |
14628 /// | 4 | Ymm, Ymm |
14629 /// | 5 | Zmm, Mem |
14630 /// | 6 | Zmm, Zmm |
14631 /// +---+----------+
14632 /// ```
14633 #[inline]
14634 pub fn vpabsw_maskz<A, B>(&mut self, op0: A, op1: B)
14635 where Assembler<'a>: VpabswMaskzEmitter<A, B> {
14636 <Self as VpabswMaskzEmitter<A, B>>::vpabsw_maskz(self, op0, op1);
14637 }
14638 /// `VPACKSSDW` (VPACKSSDW).
14639 /// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
14640 ///
14641 ///
14642 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
14643 ///
14644 /// Supported operand variants:
14645 ///
14646 /// ```text
14647 /// +---+---------------+
14648 /// | # | Operands |
14649 /// +---+---------------+
14650 /// | 1 | Xmm, Xmm, Mem |
14651 /// | 2 | Xmm, Xmm, Xmm |
14652 /// | 3 | Ymm, Ymm, Mem |
14653 /// | 4 | Ymm, Ymm, Ymm |
14654 /// | 5 | Zmm, Zmm, Mem |
14655 /// | 6 | Zmm, Zmm, Zmm |
14656 /// +---+---------------+
14657 /// ```
14658 #[inline]
14659 pub fn vpackssdw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14660 where Assembler<'a>: VpackssdwEmitter<A, B, C> {
14661 <Self as VpackssdwEmitter<A, B, C>>::vpackssdw(self, op0, op1, op2);
14662 }
14663 /// `VPACKSSDW_MASK` (VPACKSSDW).
14664 /// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
14665 ///
14666 ///
14667 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
14668 ///
14669 /// Supported operand variants:
14670 ///
14671 /// ```text
14672 /// +---+---------------+
14673 /// | # | Operands |
14674 /// +---+---------------+
14675 /// | 1 | Xmm, Xmm, Mem |
14676 /// | 2 | Xmm, Xmm, Xmm |
14677 /// | 3 | Ymm, Ymm, Mem |
14678 /// | 4 | Ymm, Ymm, Ymm |
14679 /// | 5 | Zmm, Zmm, Mem |
14680 /// | 6 | Zmm, Zmm, Zmm |
14681 /// +---+---------------+
14682 /// ```
14683 #[inline]
14684 pub fn vpackssdw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14685 where Assembler<'a>: VpackssdwMaskEmitter<A, B, C> {
14686 <Self as VpackssdwMaskEmitter<A, B, C>>::vpackssdw_mask(self, op0, op1, op2);
14687 }
14688 /// `VPACKSSDW_MASKZ` (VPACKSSDW).
14689 /// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
14690 ///
14691 ///
14692 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
14693 ///
14694 /// Supported operand variants:
14695 ///
14696 /// ```text
14697 /// +---+---------------+
14698 /// | # | Operands |
14699 /// +---+---------------+
14700 /// | 1 | Xmm, Xmm, Mem |
14701 /// | 2 | Xmm, Xmm, Xmm |
14702 /// | 3 | Ymm, Ymm, Mem |
14703 /// | 4 | Ymm, Ymm, Ymm |
14704 /// | 5 | Zmm, Zmm, Mem |
14705 /// | 6 | Zmm, Zmm, Zmm |
14706 /// +---+---------------+
14707 /// ```
14708 #[inline]
14709 pub fn vpackssdw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14710 where Assembler<'a>: VpackssdwMaskzEmitter<A, B, C> {
14711 <Self as VpackssdwMaskzEmitter<A, B, C>>::vpackssdw_maskz(self, op0, op1, op2);
14712 }
14713 /// `VPACKSSWB` (VPACKSSWB).
14714 /// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
14715 ///
14716 ///
14717 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
14718 ///
14719 /// Supported operand variants:
14720 ///
14721 /// ```text
14722 /// +---+---------------+
14723 /// | # | Operands |
14724 /// +---+---------------+
14725 /// | 1 | Xmm, Xmm, Mem |
14726 /// | 2 | Xmm, Xmm, Xmm |
14727 /// | 3 | Ymm, Ymm, Mem |
14728 /// | 4 | Ymm, Ymm, Ymm |
14729 /// | 5 | Zmm, Zmm, Mem |
14730 /// | 6 | Zmm, Zmm, Zmm |
14731 /// +---+---------------+
14732 /// ```
14733 #[inline]
14734 pub fn vpacksswb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14735 where Assembler<'a>: VpacksswbEmitter<A, B, C> {
14736 <Self as VpacksswbEmitter<A, B, C>>::vpacksswb(self, op0, op1, op2);
14737 }
14738 /// `VPACKSSWB_MASK` (VPACKSSWB).
14739 /// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
14740 ///
14741 ///
14742 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
14743 ///
14744 /// Supported operand variants:
14745 ///
14746 /// ```text
14747 /// +---+---------------+
14748 /// | # | Operands |
14749 /// +---+---------------+
14750 /// | 1 | Xmm, Xmm, Mem |
14751 /// | 2 | Xmm, Xmm, Xmm |
14752 /// | 3 | Ymm, Ymm, Mem |
14753 /// | 4 | Ymm, Ymm, Ymm |
14754 /// | 5 | Zmm, Zmm, Mem |
14755 /// | 6 | Zmm, Zmm, Zmm |
14756 /// +---+---------------+
14757 /// ```
14758 #[inline]
14759 pub fn vpacksswb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14760 where Assembler<'a>: VpacksswbMaskEmitter<A, B, C> {
14761 <Self as VpacksswbMaskEmitter<A, B, C>>::vpacksswb_mask(self, op0, op1, op2);
14762 }
14763 /// `VPACKSSWB_MASKZ` (VPACKSSWB).
14764 /// Converts packed signed word integers into packed signed byte integers (PACKSSWB) or converts packed signed doubleword integers into packed signed word integers (PACKSSDW), using saturation to handle overflow conditions. See Figure 4-6 for an example of the packing operation.
14765 ///
14766 ///
14767 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKSSWB%3APACKSSDW.html).
14768 ///
14769 /// Supported operand variants:
14770 ///
14771 /// ```text
14772 /// +---+---------------+
14773 /// | # | Operands |
14774 /// +---+---------------+
14775 /// | 1 | Xmm, Xmm, Mem |
14776 /// | 2 | Xmm, Xmm, Xmm |
14777 /// | 3 | Ymm, Ymm, Mem |
14778 /// | 4 | Ymm, Ymm, Ymm |
14779 /// | 5 | Zmm, Zmm, Mem |
14780 /// | 6 | Zmm, Zmm, Zmm |
14781 /// +---+---------------+
14782 /// ```
14783 #[inline]
14784 pub fn vpacksswb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14785 where Assembler<'a>: VpacksswbMaskzEmitter<A, B, C> {
14786 <Self as VpacksswbMaskzEmitter<A, B, C>>::vpacksswb_maskz(self, op0, op1, op2);
14787 }
14788 /// `VPACKUSDW` (VPACKUSDW).
14789 /// Converts packed signed doubleword integers in the first and second source operands into packed unsigned word integers using unsigned saturation to handle overflow conditions. If the signed doubleword value is beyond the range of an unsigned word (that is, greater than FFFFH or less than 0000H), the saturated unsigned word integer value of FFFFH or 0000H, respectively, is stored in the destination.
14790 ///
14791 ///
14792 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSDW.html).
14793 ///
14794 /// Supported operand variants:
14795 ///
14796 /// ```text
14797 /// +---+---------------+
14798 /// | # | Operands |
14799 /// +---+---------------+
14800 /// | 1 | Xmm, Xmm, Mem |
14801 /// | 2 | Xmm, Xmm, Xmm |
14802 /// | 3 | Ymm, Ymm, Mem |
14803 /// | 4 | Ymm, Ymm, Ymm |
14804 /// | 5 | Zmm, Zmm, Mem |
14805 /// | 6 | Zmm, Zmm, Zmm |
14806 /// +---+---------------+
14807 /// ```
14808 #[inline]
14809 pub fn vpackusdw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14810 where Assembler<'a>: VpackusdwEmitter<A, B, C> {
14811 <Self as VpackusdwEmitter<A, B, C>>::vpackusdw(self, op0, op1, op2);
14812 }
14813 /// `VPACKUSDW_MASK` (VPACKUSDW).
14814 /// Converts packed signed doubleword integers in the first and second source operands into packed unsigned word integers using unsigned saturation to handle overflow conditions. If the signed doubleword value is beyond the range of an unsigned word (that is, greater than FFFFH or less than 0000H), the saturated unsigned word integer value of FFFFH or 0000H, respectively, is stored in the destination.
14815 ///
14816 ///
14817 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSDW.html).
14818 ///
14819 /// Supported operand variants:
14820 ///
14821 /// ```text
14822 /// +---+---------------+
14823 /// | # | Operands |
14824 /// +---+---------------+
14825 /// | 1 | Xmm, Xmm, Mem |
14826 /// | 2 | Xmm, Xmm, Xmm |
14827 /// | 3 | Ymm, Ymm, Mem |
14828 /// | 4 | Ymm, Ymm, Ymm |
14829 /// | 5 | Zmm, Zmm, Mem |
14830 /// | 6 | Zmm, Zmm, Zmm |
14831 /// +---+---------------+
14832 /// ```
14833 #[inline]
14834 pub fn vpackusdw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14835 where Assembler<'a>: VpackusdwMaskEmitter<A, B, C> {
14836 <Self as VpackusdwMaskEmitter<A, B, C>>::vpackusdw_mask(self, op0, op1, op2);
14837 }
14838 /// `VPACKUSDW_MASKZ` (VPACKUSDW).
14839 /// Converts packed signed doubleword integers in the first and second source operands into packed unsigned word integers using unsigned saturation to handle overflow conditions. If the signed doubleword value is beyond the range of an unsigned word (that is, greater than FFFFH or less than 0000H), the saturated unsigned word integer value of FFFFH or 0000H, respectively, is stored in the destination.
14840 ///
14841 ///
14842 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSDW.html).
14843 ///
14844 /// Supported operand variants:
14845 ///
14846 /// ```text
14847 /// +---+---------------+
14848 /// | # | Operands |
14849 /// +---+---------------+
14850 /// | 1 | Xmm, Xmm, Mem |
14851 /// | 2 | Xmm, Xmm, Xmm |
14852 /// | 3 | Ymm, Ymm, Mem |
14853 /// | 4 | Ymm, Ymm, Ymm |
14854 /// | 5 | Zmm, Zmm, Mem |
14855 /// | 6 | Zmm, Zmm, Zmm |
14856 /// +---+---------------+
14857 /// ```
14858 #[inline]
14859 pub fn vpackusdw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14860 where Assembler<'a>: VpackusdwMaskzEmitter<A, B, C> {
14861 <Self as VpackusdwMaskzEmitter<A, B, C>>::vpackusdw_maskz(self, op0, op1, op2);
14862 }
14863 /// `VPACKUSWB` (VPACKUSWB).
14864 /// Converts 4, 8, 16, or 32 signed word integers from the destination operand (first operand) and 4, 8, 16, or 32 signed word integers from the source operand (second operand) into 8, 16, 32 or 64 unsigned byte integers and stores the result in the destination operand. (See Figure 4-6 for an example of the packing operation.) If a signed word integer value is beyond the range of an unsigned byte integer (that is, greater than FFH or less than 00H), the saturated unsigned byte integer value of FFH or 00H, respectively, is stored in the destination.
14865 ///
14866 ///
14867 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSWB.html).
14868 ///
14869 /// Supported operand variants:
14870 ///
14871 /// ```text
14872 /// +---+---------------+
14873 /// | # | Operands |
14874 /// +---+---------------+
14875 /// | 1 | Xmm, Xmm, Mem |
14876 /// | 2 | Xmm, Xmm, Xmm |
14877 /// | 3 | Ymm, Ymm, Mem |
14878 /// | 4 | Ymm, Ymm, Ymm |
14879 /// | 5 | Zmm, Zmm, Mem |
14880 /// | 6 | Zmm, Zmm, Zmm |
14881 /// +---+---------------+
14882 /// ```
14883 #[inline]
14884 pub fn vpackuswb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14885 where Assembler<'a>: VpackuswbEmitter<A, B, C> {
14886 <Self as VpackuswbEmitter<A, B, C>>::vpackuswb(self, op0, op1, op2);
14887 }
14888 /// `VPACKUSWB_MASK` (VPACKUSWB).
14889 /// Converts 4, 8, 16, or 32 signed word integers from the destination operand (first operand) and 4, 8, 16, or 32 signed word integers from the source operand (second operand) into 8, 16, 32 or 64 unsigned byte integers and stores the result in the destination operand. (See Figure 4-6 for an example of the packing operation.) If a signed word integer value is beyond the range of an unsigned byte integer (that is, greater than FFH or less than 00H), the saturated unsigned byte integer value of FFH or 00H, respectively, is stored in the destination.
14890 ///
14891 ///
14892 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSWB.html).
14893 ///
14894 /// Supported operand variants:
14895 ///
14896 /// ```text
14897 /// +---+---------------+
14898 /// | # | Operands |
14899 /// +---+---------------+
14900 /// | 1 | Xmm, Xmm, Mem |
14901 /// | 2 | Xmm, Xmm, Xmm |
14902 /// | 3 | Ymm, Ymm, Mem |
14903 /// | 4 | Ymm, Ymm, Ymm |
14904 /// | 5 | Zmm, Zmm, Mem |
14905 /// | 6 | Zmm, Zmm, Zmm |
14906 /// +---+---------------+
14907 /// ```
14908 #[inline]
14909 pub fn vpackuswb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14910 where Assembler<'a>: VpackuswbMaskEmitter<A, B, C> {
14911 <Self as VpackuswbMaskEmitter<A, B, C>>::vpackuswb_mask(self, op0, op1, op2);
14912 }
14913 /// `VPACKUSWB_MASKZ` (VPACKUSWB).
14914 /// Converts 4, 8, 16, or 32 signed word integers from the destination operand (first operand) and 4, 8, 16, or 32 signed word integers from the source operand (second operand) into 8, 16, 32 or 64 unsigned byte integers and stores the result in the destination operand. (See Figure 4-6 for an example of the packing operation.) If a signed word integer value is beyond the range of an unsigned byte integer (that is, greater than FFH or less than 00H), the saturated unsigned byte integer value of FFH or 00H, respectively, is stored in the destination.
14915 ///
14916 ///
14917 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PACKUSWB.html).
14918 ///
14919 /// Supported operand variants:
14920 ///
14921 /// ```text
14922 /// +---+---------------+
14923 /// | # | Operands |
14924 /// +---+---------------+
14925 /// | 1 | Xmm, Xmm, Mem |
14926 /// | 2 | Xmm, Xmm, Xmm |
14927 /// | 3 | Ymm, Ymm, Mem |
14928 /// | 4 | Ymm, Ymm, Ymm |
14929 /// | 5 | Zmm, Zmm, Mem |
14930 /// | 6 | Zmm, Zmm, Zmm |
14931 /// +---+---------------+
14932 /// ```
14933 #[inline]
14934 pub fn vpackuswb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14935 where Assembler<'a>: VpackuswbMaskzEmitter<A, B, C> {
14936 <Self as VpackuswbMaskzEmitter<A, B, C>>::vpackuswb_maskz(self, op0, op1, op2);
14937 }
14938 /// `VPADDB` (VPADDB).
14939 /// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
14940 ///
14941 ///
14942 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
14943 ///
14944 /// Supported operand variants:
14945 ///
14946 /// ```text
14947 /// +---+---------------+
14948 /// | # | Operands |
14949 /// +---+---------------+
14950 /// | 1 | Xmm, Xmm, Mem |
14951 /// | 2 | Xmm, Xmm, Xmm |
14952 /// | 3 | Ymm, Ymm, Mem |
14953 /// | 4 | Ymm, Ymm, Ymm |
14954 /// | 5 | Zmm, Zmm, Mem |
14955 /// | 6 | Zmm, Zmm, Zmm |
14956 /// +---+---------------+
14957 /// ```
14958 #[inline]
14959 pub fn vpaddb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14960 where Assembler<'a>: VpaddbEmitter<A, B, C> {
14961 <Self as VpaddbEmitter<A, B, C>>::vpaddb(self, op0, op1, op2);
14962 }
14963 /// `VPADDB_MASK` (VPADDB).
14964 /// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
14965 ///
14966 ///
14967 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
14968 ///
14969 /// Supported operand variants:
14970 ///
14971 /// ```text
14972 /// +---+---------------+
14973 /// | # | Operands |
14974 /// +---+---------------+
14975 /// | 1 | Xmm, Xmm, Mem |
14976 /// | 2 | Xmm, Xmm, Xmm |
14977 /// | 3 | Ymm, Ymm, Mem |
14978 /// | 4 | Ymm, Ymm, Ymm |
14979 /// | 5 | Zmm, Zmm, Mem |
14980 /// | 6 | Zmm, Zmm, Zmm |
14981 /// +---+---------------+
14982 /// ```
14983 #[inline]
14984 pub fn vpaddb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
14985 where Assembler<'a>: VpaddbMaskEmitter<A, B, C> {
14986 <Self as VpaddbMaskEmitter<A, B, C>>::vpaddb_mask(self, op0, op1, op2);
14987 }
14988 /// `VPADDB_MASKZ` (VPADDB).
14989 /// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
14990 ///
14991 ///
14992 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
14993 ///
14994 /// Supported operand variants:
14995 ///
14996 /// ```text
14997 /// +---+---------------+
14998 /// | # | Operands |
14999 /// +---+---------------+
15000 /// | 1 | Xmm, Xmm, Mem |
15001 /// | 2 | Xmm, Xmm, Xmm |
15002 /// | 3 | Ymm, Ymm, Mem |
15003 /// | 4 | Ymm, Ymm, Ymm |
15004 /// | 5 | Zmm, Zmm, Mem |
15005 /// | 6 | Zmm, Zmm, Zmm |
15006 /// +---+---------------+
15007 /// ```
15008 #[inline]
15009 pub fn vpaddb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15010 where Assembler<'a>: VpaddbMaskzEmitter<A, B, C> {
15011 <Self as VpaddbMaskzEmitter<A, B, C>>::vpaddb_maskz(self, op0, op1, op2);
15012 }
15013 /// `VPADDSB` (VPADDSB).
15014 /// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
15015 ///
15016 ///
15017 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
15018 ///
15019 /// Supported operand variants:
15020 ///
15021 /// ```text
15022 /// +---+---------------+
15023 /// | # | Operands |
15024 /// +---+---------------+
15025 /// | 1 | Xmm, Xmm, Mem |
15026 /// | 2 | Xmm, Xmm, Xmm |
15027 /// | 3 | Ymm, Ymm, Mem |
15028 /// | 4 | Ymm, Ymm, Ymm |
15029 /// | 5 | Zmm, Zmm, Mem |
15030 /// | 6 | Zmm, Zmm, Zmm |
15031 /// +---+---------------+
15032 /// ```
15033 #[inline]
15034 pub fn vpaddsb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15035 where Assembler<'a>: VpaddsbEmitter<A, B, C> {
15036 <Self as VpaddsbEmitter<A, B, C>>::vpaddsb(self, op0, op1, op2);
15037 }
15038 /// `VPADDSB_MASK` (VPADDSB).
15039 /// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
15040 ///
15041 ///
15042 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
15043 ///
15044 /// Supported operand variants:
15045 ///
15046 /// ```text
15047 /// +---+---------------+
15048 /// | # | Operands |
15049 /// +---+---------------+
15050 /// | 1 | Xmm, Xmm, Mem |
15051 /// | 2 | Xmm, Xmm, Xmm |
15052 /// | 3 | Ymm, Ymm, Mem |
15053 /// | 4 | Ymm, Ymm, Ymm |
15054 /// | 5 | Zmm, Zmm, Mem |
15055 /// | 6 | Zmm, Zmm, Zmm |
15056 /// +---+---------------+
15057 /// ```
15058 #[inline]
15059 pub fn vpaddsb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15060 where Assembler<'a>: VpaddsbMaskEmitter<A, B, C> {
15061 <Self as VpaddsbMaskEmitter<A, B, C>>::vpaddsb_mask(self, op0, op1, op2);
15062 }
15063 /// `VPADDSB_MASKZ` (VPADDSB).
15064 /// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
15065 ///
15066 ///
15067 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
15068 ///
15069 /// Supported operand variants:
15070 ///
15071 /// ```text
15072 /// +---+---------------+
15073 /// | # | Operands |
15074 /// +---+---------------+
15075 /// | 1 | Xmm, Xmm, Mem |
15076 /// | 2 | Xmm, Xmm, Xmm |
15077 /// | 3 | Ymm, Ymm, Mem |
15078 /// | 4 | Ymm, Ymm, Ymm |
15079 /// | 5 | Zmm, Zmm, Mem |
15080 /// | 6 | Zmm, Zmm, Zmm |
15081 /// +---+---------------+
15082 /// ```
15083 #[inline]
15084 pub fn vpaddsb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15085 where Assembler<'a>: VpaddsbMaskzEmitter<A, B, C> {
15086 <Self as VpaddsbMaskzEmitter<A, B, C>>::vpaddsb_maskz(self, op0, op1, op2);
15087 }
15088 /// `VPADDSW` (VPADDSW).
15089 /// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
15090 ///
15091 ///
15092 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
15093 ///
15094 /// Supported operand variants:
15095 ///
15096 /// ```text
15097 /// +---+---------------+
15098 /// | # | Operands |
15099 /// +---+---------------+
15100 /// | 1 | Xmm, Xmm, Mem |
15101 /// | 2 | Xmm, Xmm, Xmm |
15102 /// | 3 | Ymm, Ymm, Mem |
15103 /// | 4 | Ymm, Ymm, Ymm |
15104 /// | 5 | Zmm, Zmm, Mem |
15105 /// | 6 | Zmm, Zmm, Zmm |
15106 /// +---+---------------+
15107 /// ```
15108 #[inline]
15109 pub fn vpaddsw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15110 where Assembler<'a>: VpaddswEmitter<A, B, C> {
15111 <Self as VpaddswEmitter<A, B, C>>::vpaddsw(self, op0, op1, op2);
15112 }
15113 /// `VPADDSW_MASK` (VPADDSW).
15114 /// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
15115 ///
15116 ///
15117 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
15118 ///
15119 /// Supported operand variants:
15120 ///
15121 /// ```text
15122 /// +---+---------------+
15123 /// | # | Operands |
15124 /// +---+---------------+
15125 /// | 1 | Xmm, Xmm, Mem |
15126 /// | 2 | Xmm, Xmm, Xmm |
15127 /// | 3 | Ymm, Ymm, Mem |
15128 /// | 4 | Ymm, Ymm, Ymm |
15129 /// | 5 | Zmm, Zmm, Mem |
15130 /// | 6 | Zmm, Zmm, Zmm |
15131 /// +---+---------------+
15132 /// ```
15133 #[inline]
15134 pub fn vpaddsw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15135 where Assembler<'a>: VpaddswMaskEmitter<A, B, C> {
15136 <Self as VpaddswMaskEmitter<A, B, C>>::vpaddsw_mask(self, op0, op1, op2);
15137 }
15138 /// `VPADDSW_MASKZ` (VPADDSW).
15139 /// Performs a SIMD add of the packed signed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
15140 ///
15141 ///
15142 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDSB%3APADDSW.html).
15143 ///
15144 /// Supported operand variants:
15145 ///
15146 /// ```text
15147 /// +---+---------------+
15148 /// | # | Operands |
15149 /// +---+---------------+
15150 /// | 1 | Xmm, Xmm, Mem |
15151 /// | 2 | Xmm, Xmm, Xmm |
15152 /// | 3 | Ymm, Ymm, Mem |
15153 /// | 4 | Ymm, Ymm, Ymm |
15154 /// | 5 | Zmm, Zmm, Mem |
15155 /// | 6 | Zmm, Zmm, Zmm |
15156 /// +---+---------------+
15157 /// ```
15158 #[inline]
15159 pub fn vpaddsw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15160 where Assembler<'a>: VpaddswMaskzEmitter<A, B, C> {
15161 <Self as VpaddswMaskzEmitter<A, B, C>>::vpaddsw_maskz(self, op0, op1, op2);
15162 }
15163 /// `VPADDUSB` (VPADDUSB).
15164 /// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
15165 ///
15166 ///
15167 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
15168 ///
15169 /// Supported operand variants:
15170 ///
15171 /// ```text
15172 /// +---+---------------+
15173 /// | # | Operands |
15174 /// +---+---------------+
15175 /// | 1 | Xmm, Xmm, Mem |
15176 /// | 2 | Xmm, Xmm, Xmm |
15177 /// | 3 | Ymm, Ymm, Mem |
15178 /// | 4 | Ymm, Ymm, Ymm |
15179 /// | 5 | Zmm, Zmm, Mem |
15180 /// | 6 | Zmm, Zmm, Zmm |
15181 /// +---+---------------+
15182 /// ```
15183 #[inline]
15184 pub fn vpaddusb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15185 where Assembler<'a>: VpaddusbEmitter<A, B, C> {
15186 <Self as VpaddusbEmitter<A, B, C>>::vpaddusb(self, op0, op1, op2);
15187 }
15188 /// `VPADDUSB_MASK` (VPADDUSB).
15189 /// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
15190 ///
15191 ///
15192 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
15193 ///
15194 /// Supported operand variants:
15195 ///
15196 /// ```text
15197 /// +---+---------------+
15198 /// | # | Operands |
15199 /// +---+---------------+
15200 /// | 1 | Xmm, Xmm, Mem |
15201 /// | 2 | Xmm, Xmm, Xmm |
15202 /// | 3 | Ymm, Ymm, Mem |
15203 /// | 4 | Ymm, Ymm, Ymm |
15204 /// | 5 | Zmm, Zmm, Mem |
15205 /// | 6 | Zmm, Zmm, Zmm |
15206 /// +---+---------------+
15207 /// ```
15208 #[inline]
15209 pub fn vpaddusb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15210 where Assembler<'a>: VpaddusbMaskEmitter<A, B, C> {
15211 <Self as VpaddusbMaskEmitter<A, B, C>>::vpaddusb_mask(self, op0, op1, op2);
15212 }
15213 /// `VPADDUSB_MASKZ` (VPADDUSB).
15214 /// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
15215 ///
15216 ///
15217 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
15218 ///
15219 /// Supported operand variants:
15220 ///
15221 /// ```text
15222 /// +---+---------------+
15223 /// | # | Operands |
15224 /// +---+---------------+
15225 /// | 1 | Xmm, Xmm, Mem |
15226 /// | 2 | Xmm, Xmm, Xmm |
15227 /// | 3 | Ymm, Ymm, Mem |
15228 /// | 4 | Ymm, Ymm, Ymm |
15229 /// | 5 | Zmm, Zmm, Mem |
15230 /// | 6 | Zmm, Zmm, Zmm |
15231 /// +---+---------------+
15232 /// ```
15233 #[inline]
15234 pub fn vpaddusb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15235 where Assembler<'a>: VpaddusbMaskzEmitter<A, B, C> {
15236 <Self as VpaddusbMaskzEmitter<A, B, C>>::vpaddusb_maskz(self, op0, op1, op2);
15237 }
15238 /// `VPADDUSW` (VPADDUSW).
15239 /// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
15240 ///
15241 ///
15242 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
15243 ///
15244 /// Supported operand variants:
15245 ///
15246 /// ```text
15247 /// +---+---------------+
15248 /// | # | Operands |
15249 /// +---+---------------+
15250 /// | 1 | Xmm, Xmm, Mem |
15251 /// | 2 | Xmm, Xmm, Xmm |
15252 /// | 3 | Ymm, Ymm, Mem |
15253 /// | 4 | Ymm, Ymm, Ymm |
15254 /// | 5 | Zmm, Zmm, Mem |
15255 /// | 6 | Zmm, Zmm, Zmm |
15256 /// +---+---------------+
15257 /// ```
15258 #[inline]
15259 pub fn vpaddusw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15260 where Assembler<'a>: VpadduswEmitter<A, B, C> {
15261 <Self as VpadduswEmitter<A, B, C>>::vpaddusw(self, op0, op1, op2);
15262 }
15263 /// `VPADDUSW_MASK` (VPADDUSW).
15264 /// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
15265 ///
15266 ///
15267 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
15268 ///
15269 /// Supported operand variants:
15270 ///
15271 /// ```text
15272 /// +---+---------------+
15273 /// | # | Operands |
15274 /// +---+---------------+
15275 /// | 1 | Xmm, Xmm, Mem |
15276 /// | 2 | Xmm, Xmm, Xmm |
15277 /// | 3 | Ymm, Ymm, Mem |
15278 /// | 4 | Ymm, Ymm, Ymm |
15279 /// | 5 | Zmm, Zmm, Mem |
15280 /// | 6 | Zmm, Zmm, Zmm |
15281 /// +---+---------------+
15282 /// ```
15283 #[inline]
15284 pub fn vpaddusw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15285 where Assembler<'a>: VpadduswMaskEmitter<A, B, C> {
15286 <Self as VpadduswMaskEmitter<A, B, C>>::vpaddusw_mask(self, op0, op1, op2);
15287 }
15288 /// `VPADDUSW_MASKZ` (VPADDUSW).
15289 /// Performs a SIMD add of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
15290 ///
15291 ///
15292 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDUSB%3APADDUSW.html).
15293 ///
15294 /// Supported operand variants:
15295 ///
15296 /// ```text
15297 /// +---+---------------+
15298 /// | # | Operands |
15299 /// +---+---------------+
15300 /// | 1 | Xmm, Xmm, Mem |
15301 /// | 2 | Xmm, Xmm, Xmm |
15302 /// | 3 | Ymm, Ymm, Mem |
15303 /// | 4 | Ymm, Ymm, Ymm |
15304 /// | 5 | Zmm, Zmm, Mem |
15305 /// | 6 | Zmm, Zmm, Zmm |
15306 /// +---+---------------+
15307 /// ```
15308 #[inline]
15309 pub fn vpaddusw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15310 where Assembler<'a>: VpadduswMaskzEmitter<A, B, C> {
15311 <Self as VpadduswMaskzEmitter<A, B, C>>::vpaddusw_maskz(self, op0, op1, op2);
15312 }
15313 /// `VPADDW` (VPADDW).
15314 /// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
15315 ///
15316 ///
15317 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
15318 ///
15319 /// Supported operand variants:
15320 ///
15321 /// ```text
15322 /// +---+---------------+
15323 /// | # | Operands |
15324 /// +---+---------------+
15325 /// | 1 | Xmm, Xmm, Mem |
15326 /// | 2 | Xmm, Xmm, Xmm |
15327 /// | 3 | Ymm, Ymm, Mem |
15328 /// | 4 | Ymm, Ymm, Ymm |
15329 /// | 5 | Zmm, Zmm, Mem |
15330 /// | 6 | Zmm, Zmm, Zmm |
15331 /// +---+---------------+
15332 /// ```
15333 #[inline]
15334 pub fn vpaddw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15335 where Assembler<'a>: VpaddwEmitter<A, B, C> {
15336 <Self as VpaddwEmitter<A, B, C>>::vpaddw(self, op0, op1, op2);
15337 }
15338 /// `VPADDW_MASK` (VPADDW).
15339 /// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
15340 ///
15341 ///
15342 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
15343 ///
15344 /// Supported operand variants:
15345 ///
15346 /// ```text
15347 /// +---+---------------+
15348 /// | # | Operands |
15349 /// +---+---------------+
15350 /// | 1 | Xmm, Xmm, Mem |
15351 /// | 2 | Xmm, Xmm, Xmm |
15352 /// | 3 | Ymm, Ymm, Mem |
15353 /// | 4 | Ymm, Ymm, Ymm |
15354 /// | 5 | Zmm, Zmm, Mem |
15355 /// | 6 | Zmm, Zmm, Zmm |
15356 /// +---+---------------+
15357 /// ```
15358 #[inline]
15359 pub fn vpaddw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15360 where Assembler<'a>: VpaddwMaskEmitter<A, B, C> {
15361 <Self as VpaddwMaskEmitter<A, B, C>>::vpaddw_mask(self, op0, op1, op2);
15362 }
15363 /// `VPADDW_MASKZ` (VPADDW).
15364 /// Performs a SIMD add of the packed integers from the source operand (second operand) and the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
15365 ///
15366 ///
15367 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PADDB%3APADDW%3APADDD%3APADDQ.html).
15368 ///
15369 /// Supported operand variants:
15370 ///
15371 /// ```text
15372 /// +---+---------------+
15373 /// | # | Operands |
15374 /// +---+---------------+
15375 /// | 1 | Xmm, Xmm, Mem |
15376 /// | 2 | Xmm, Xmm, Xmm |
15377 /// | 3 | Ymm, Ymm, Mem |
15378 /// | 4 | Ymm, Ymm, Ymm |
15379 /// | 5 | Zmm, Zmm, Mem |
15380 /// | 6 | Zmm, Zmm, Zmm |
15381 /// +---+---------------+
15382 /// ```
15383 #[inline]
15384 pub fn vpaddw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15385 where Assembler<'a>: VpaddwMaskzEmitter<A, B, C> {
15386 <Self as VpaddwMaskzEmitter<A, B, C>>::vpaddw_maskz(self, op0, op1, op2);
15387 }
15388 /// `VPALIGNR` (VPALIGNR).
15389 /// (V)PALIGNR concatenates the destination operand (the first operand) and the source operand (the second operand) into an intermediate composite, shifts the composite at byte granularity to the right by a constant immediate, and extracts the right-aligned result into the destination. The first and the second operands can be an MMX, XMM or a YMM register. The immediate value is considered unsigned. Immediate shift counts larger than the 2L (i.e., 32 for 128-bit operands, or 16 for 64-bit operands) produce a zero result. Both operands can be MMX registers, XMM registers or YMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
15390 ///
15391 ///
15392 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PALIGNR.html).
15393 ///
15394 /// Supported operand variants:
15395 ///
15396 /// ```text
15397 /// +---+--------------------+
15398 /// | # | Operands |
15399 /// +---+--------------------+
15400 /// | 1 | Xmm, Xmm, Mem, Imm |
15401 /// | 2 | Xmm, Xmm, Xmm, Imm |
15402 /// | 3 | Ymm, Ymm, Mem, Imm |
15403 /// | 4 | Ymm, Ymm, Ymm, Imm |
15404 /// | 5 | Zmm, Zmm, Mem, Imm |
15405 /// | 6 | Zmm, Zmm, Zmm, Imm |
15406 /// +---+--------------------+
15407 /// ```
15408 #[inline]
15409 pub fn vpalignr<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
15410 where Assembler<'a>: VpalignrEmitter<A, B, C, D> {
15411 <Self as VpalignrEmitter<A, B, C, D>>::vpalignr(self, op0, op1, op2, op3);
15412 }
15413 /// `VPALIGNR_MASK` (VPALIGNR).
15414 /// (V)PALIGNR concatenates the destination operand (the first operand) and the source operand (the second operand) into an intermediate composite, shifts the composite at byte granularity to the right by a constant immediate, and extracts the right-aligned result into the destination. The first and the second operands can be an MMX, XMM or a YMM register. The immediate value is considered unsigned. Immediate shift counts larger than the 2L (i.e., 32 for 128-bit operands, or 16 for 64-bit operands) produce a zero result. Both operands can be MMX registers, XMM registers or YMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
15415 ///
15416 ///
15417 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PALIGNR.html).
15418 ///
15419 /// Supported operand variants:
15420 ///
15421 /// ```text
15422 /// +---+--------------------+
15423 /// | # | Operands |
15424 /// +---+--------------------+
15425 /// | 1 | Xmm, Xmm, Mem, Imm |
15426 /// | 2 | Xmm, Xmm, Xmm, Imm |
15427 /// | 3 | Ymm, Ymm, Mem, Imm |
15428 /// | 4 | Ymm, Ymm, Ymm, Imm |
15429 /// | 5 | Zmm, Zmm, Mem, Imm |
15430 /// | 6 | Zmm, Zmm, Zmm, Imm |
15431 /// +---+--------------------+
15432 /// ```
15433 #[inline]
15434 pub fn vpalignr_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
15435 where Assembler<'a>: VpalignrMaskEmitter<A, B, C, D> {
15436 <Self as VpalignrMaskEmitter<A, B, C, D>>::vpalignr_mask(self, op0, op1, op2, op3);
15437 }
15438 /// `VPALIGNR_MASKZ` (VPALIGNR).
15439 /// (V)PALIGNR concatenates the destination operand (the first operand) and the source operand (the second operand) into an intermediate composite, shifts the composite at byte granularity to the right by a constant immediate, and extracts the right-aligned result into the destination. The first and the second operands can be an MMX, XMM or a YMM register. The immediate value is considered unsigned. Immediate shift counts larger than the 2L (i.e., 32 for 128-bit operands, or 16 for 64-bit operands) produce a zero result. Both operands can be MMX registers, XMM registers or YMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
15440 ///
15441 ///
15442 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PALIGNR.html).
15443 ///
15444 /// Supported operand variants:
15445 ///
15446 /// ```text
15447 /// +---+--------------------+
15448 /// | # | Operands |
15449 /// +---+--------------------+
15450 /// | 1 | Xmm, Xmm, Mem, Imm |
15451 /// | 2 | Xmm, Xmm, Xmm, Imm |
15452 /// | 3 | Ymm, Ymm, Mem, Imm |
15453 /// | 4 | Ymm, Ymm, Ymm, Imm |
15454 /// | 5 | Zmm, Zmm, Mem, Imm |
15455 /// | 6 | Zmm, Zmm, Zmm, Imm |
15456 /// +---+--------------------+
15457 /// ```
15458 #[inline]
15459 pub fn vpalignr_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
15460 where Assembler<'a>: VpalignrMaskzEmitter<A, B, C, D> {
15461 <Self as VpalignrMaskzEmitter<A, B, C, D>>::vpalignr_maskz(self, op0, op1, op2, op3);
15462 }
15463 /// `VPAVGB` (VPAVGB).
15464 /// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
15465 ///
15466 ///
15467 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
15468 ///
15469 /// Supported operand variants:
15470 ///
15471 /// ```text
15472 /// +---+---------------+
15473 /// | # | Operands |
15474 /// +---+---------------+
15475 /// | 1 | Xmm, Xmm, Mem |
15476 /// | 2 | Xmm, Xmm, Xmm |
15477 /// | 3 | Ymm, Ymm, Mem |
15478 /// | 4 | Ymm, Ymm, Ymm |
15479 /// | 5 | Zmm, Zmm, Mem |
15480 /// | 6 | Zmm, Zmm, Zmm |
15481 /// +---+---------------+
15482 /// ```
15483 #[inline]
15484 pub fn vpavgb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15485 where Assembler<'a>: VpavgbEmitter<A, B, C> {
15486 <Self as VpavgbEmitter<A, B, C>>::vpavgb(self, op0, op1, op2);
15487 }
15488 /// `VPAVGB_MASK` (VPAVGB).
15489 /// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
15490 ///
15491 ///
15492 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
15493 ///
15494 /// Supported operand variants:
15495 ///
15496 /// ```text
15497 /// +---+---------------+
15498 /// | # | Operands |
15499 /// +---+---------------+
15500 /// | 1 | Xmm, Xmm, Mem |
15501 /// | 2 | Xmm, Xmm, Xmm |
15502 /// | 3 | Ymm, Ymm, Mem |
15503 /// | 4 | Ymm, Ymm, Ymm |
15504 /// | 5 | Zmm, Zmm, Mem |
15505 /// | 6 | Zmm, Zmm, Zmm |
15506 /// +---+---------------+
15507 /// ```
15508 #[inline]
15509 pub fn vpavgb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15510 where Assembler<'a>: VpavgbMaskEmitter<A, B, C> {
15511 <Self as VpavgbMaskEmitter<A, B, C>>::vpavgb_mask(self, op0, op1, op2);
15512 }
15513 /// `VPAVGB_MASKZ` (VPAVGB).
15514 /// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
15515 ///
15516 ///
15517 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
15518 ///
15519 /// Supported operand variants:
15520 ///
15521 /// ```text
15522 /// +---+---------------+
15523 /// | # | Operands |
15524 /// +---+---------------+
15525 /// | 1 | Xmm, Xmm, Mem |
15526 /// | 2 | Xmm, Xmm, Xmm |
15527 /// | 3 | Ymm, Ymm, Mem |
15528 /// | 4 | Ymm, Ymm, Ymm |
15529 /// | 5 | Zmm, Zmm, Mem |
15530 /// | 6 | Zmm, Zmm, Zmm |
15531 /// +---+---------------+
15532 /// ```
15533 #[inline]
15534 pub fn vpavgb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15535 where Assembler<'a>: VpavgbMaskzEmitter<A, B, C> {
15536 <Self as VpavgbMaskzEmitter<A, B, C>>::vpavgb_maskz(self, op0, op1, op2);
15537 }
15538 /// `VPAVGW` (VPAVGW).
15539 /// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
15540 ///
15541 ///
15542 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
15543 ///
15544 /// Supported operand variants:
15545 ///
15546 /// ```text
15547 /// +---+---------------+
15548 /// | # | Operands |
15549 /// +---+---------------+
15550 /// | 1 | Xmm, Xmm, Mem |
15551 /// | 2 | Xmm, Xmm, Xmm |
15552 /// | 3 | Ymm, Ymm, Mem |
15553 /// | 4 | Ymm, Ymm, Ymm |
15554 /// | 5 | Zmm, Zmm, Mem |
15555 /// | 6 | Zmm, Zmm, Zmm |
15556 /// +---+---------------+
15557 /// ```
15558 #[inline]
15559 pub fn vpavgw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15560 where Assembler<'a>: VpavgwEmitter<A, B, C> {
15561 <Self as VpavgwEmitter<A, B, C>>::vpavgw(self, op0, op1, op2);
15562 }
15563 /// `VPAVGW_MASK` (VPAVGW).
15564 /// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
15565 ///
15566 ///
15567 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
15568 ///
15569 /// Supported operand variants:
15570 ///
15571 /// ```text
15572 /// +---+---------------+
15573 /// | # | Operands |
15574 /// +---+---------------+
15575 /// | 1 | Xmm, Xmm, Mem |
15576 /// | 2 | Xmm, Xmm, Xmm |
15577 /// | 3 | Ymm, Ymm, Mem |
15578 /// | 4 | Ymm, Ymm, Ymm |
15579 /// | 5 | Zmm, Zmm, Mem |
15580 /// | 6 | Zmm, Zmm, Zmm |
15581 /// +---+---------------+
15582 /// ```
15583 #[inline]
15584 pub fn vpavgw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15585 where Assembler<'a>: VpavgwMaskEmitter<A, B, C> {
15586 <Self as VpavgwMaskEmitter<A, B, C>>::vpavgw_mask(self, op0, op1, op2);
15587 }
15588 /// `VPAVGW_MASKZ` (VPAVGW).
15589 /// Performs a SIMD average of the packed unsigned integers from the source operand (second operand) and the destination operand (first operand), and stores the results in the destination operand. For each corresponding pair of data elements in the first and second operands, the elements are added together, a 1 is added to the temporary sum, and that result is shifted right one bit position.
15590 ///
15591 ///
15592 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PAVGB%3APAVGW.html).
15593 ///
15594 /// Supported operand variants:
15595 ///
15596 /// ```text
15597 /// +---+---------------+
15598 /// | # | Operands |
15599 /// +---+---------------+
15600 /// | 1 | Xmm, Xmm, Mem |
15601 /// | 2 | Xmm, Xmm, Xmm |
15602 /// | 3 | Ymm, Ymm, Mem |
15603 /// | 4 | Ymm, Ymm, Ymm |
15604 /// | 5 | Zmm, Zmm, Mem |
15605 /// | 6 | Zmm, Zmm, Zmm |
15606 /// +---+---------------+
15607 /// ```
15608 #[inline]
15609 pub fn vpavgw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15610 where Assembler<'a>: VpavgwMaskzEmitter<A, B, C> {
15611 <Self as VpavgwMaskzEmitter<A, B, C>>::vpavgw_maskz(self, op0, op1, op2);
15612 }
15613 /// `VPBLENDMB` (VPBLENDMB).
15614 /// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
15615 ///
15616 ///
15617 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
15618 ///
15619 /// Supported operand variants:
15620 ///
15621 /// ```text
15622 /// +---+---------------+
15623 /// | # | Operands |
15624 /// +---+---------------+
15625 /// | 1 | Xmm, Xmm, Mem |
15626 /// | 2 | Xmm, Xmm, Xmm |
15627 /// | 3 | Ymm, Ymm, Mem |
15628 /// | 4 | Ymm, Ymm, Ymm |
15629 /// | 5 | Zmm, Zmm, Mem |
15630 /// | 6 | Zmm, Zmm, Zmm |
15631 /// +---+---------------+
15632 /// ```
15633 #[inline]
15634 pub fn vpblendmb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15635 where Assembler<'a>: VpblendmbEmitter<A, B, C> {
15636 <Self as VpblendmbEmitter<A, B, C>>::vpblendmb(self, op0, op1, op2);
15637 }
15638 /// `VPBLENDMB_MASK` (VPBLENDMB).
15639 /// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
15640 ///
15641 ///
15642 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
15643 ///
15644 /// Supported operand variants:
15645 ///
15646 /// ```text
15647 /// +---+---------------+
15648 /// | # | Operands |
15649 /// +---+---------------+
15650 /// | 1 | Xmm, Xmm, Mem |
15651 /// | 2 | Xmm, Xmm, Xmm |
15652 /// | 3 | Ymm, Ymm, Mem |
15653 /// | 4 | Ymm, Ymm, Ymm |
15654 /// | 5 | Zmm, Zmm, Mem |
15655 /// | 6 | Zmm, Zmm, Zmm |
15656 /// +---+---------------+
15657 /// ```
15658 #[inline]
15659 pub fn vpblendmb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15660 where Assembler<'a>: VpblendmbMaskEmitter<A, B, C> {
15661 <Self as VpblendmbMaskEmitter<A, B, C>>::vpblendmb_mask(self, op0, op1, op2);
15662 }
15663 /// `VPBLENDMB_MASKZ` (VPBLENDMB).
15664 /// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
15665 ///
15666 ///
15667 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
15668 ///
15669 /// Supported operand variants:
15670 ///
15671 /// ```text
15672 /// +---+---------------+
15673 /// | # | Operands |
15674 /// +---+---------------+
15675 /// | 1 | Xmm, Xmm, Mem |
15676 /// | 2 | Xmm, Xmm, Xmm |
15677 /// | 3 | Ymm, Ymm, Mem |
15678 /// | 4 | Ymm, Ymm, Ymm |
15679 /// | 5 | Zmm, Zmm, Mem |
15680 /// | 6 | Zmm, Zmm, Zmm |
15681 /// +---+---------------+
15682 /// ```
15683 #[inline]
15684 pub fn vpblendmb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15685 where Assembler<'a>: VpblendmbMaskzEmitter<A, B, C> {
15686 <Self as VpblendmbMaskzEmitter<A, B, C>>::vpblendmb_maskz(self, op0, op1, op2);
15687 }
15688 /// `VPBLENDMW` (VPBLENDMW).
15689 /// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
15690 ///
15691 ///
15692 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
15693 ///
15694 /// Supported operand variants:
15695 ///
15696 /// ```text
15697 /// +---+---------------+
15698 /// | # | Operands |
15699 /// +---+---------------+
15700 /// | 1 | Xmm, Xmm, Mem |
15701 /// | 2 | Xmm, Xmm, Xmm |
15702 /// | 3 | Ymm, Ymm, Mem |
15703 /// | 4 | Ymm, Ymm, Ymm |
15704 /// | 5 | Zmm, Zmm, Mem |
15705 /// | 6 | Zmm, Zmm, Zmm |
15706 /// +---+---------------+
15707 /// ```
15708 #[inline]
15709 pub fn vpblendmw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15710 where Assembler<'a>: VpblendmwEmitter<A, B, C> {
15711 <Self as VpblendmwEmitter<A, B, C>>::vpblendmw(self, op0, op1, op2);
15712 }
15713 /// `VPBLENDMW_MASK` (VPBLENDMW).
15714 /// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
15715 ///
15716 ///
15717 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
15718 ///
15719 /// Supported operand variants:
15720 ///
15721 /// ```text
15722 /// +---+---------------+
15723 /// | # | Operands |
15724 /// +---+---------------+
15725 /// | 1 | Xmm, Xmm, Mem |
15726 /// | 2 | Xmm, Xmm, Xmm |
15727 /// | 3 | Ymm, Ymm, Mem |
15728 /// | 4 | Ymm, Ymm, Ymm |
15729 /// | 5 | Zmm, Zmm, Mem |
15730 /// | 6 | Zmm, Zmm, Zmm |
15731 /// +---+---------------+
15732 /// ```
15733 #[inline]
15734 pub fn vpblendmw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15735 where Assembler<'a>: VpblendmwMaskEmitter<A, B, C> {
15736 <Self as VpblendmwMaskEmitter<A, B, C>>::vpblendmw_mask(self, op0, op1, op2);
15737 }
15738 /// `VPBLENDMW_MASKZ` (VPBLENDMW).
15739 /// Performs an element-by-element blending of byte/word elements between the first source operand byte vector register and the second source operand byte vector from memory or register, using the instruction mask as selector. The result is written into the destination byte vector register.
15740 ///
15741 ///
15742 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBLENDMB%3AVPBLENDMW.html).
15743 ///
15744 /// Supported operand variants:
15745 ///
15746 /// ```text
15747 /// +---+---------------+
15748 /// | # | Operands |
15749 /// +---+---------------+
15750 /// | 1 | Xmm, Xmm, Mem |
15751 /// | 2 | Xmm, Xmm, Xmm |
15752 /// | 3 | Ymm, Ymm, Mem |
15753 /// | 4 | Ymm, Ymm, Ymm |
15754 /// | 5 | Zmm, Zmm, Mem |
15755 /// | 6 | Zmm, Zmm, Zmm |
15756 /// +---+---------------+
15757 /// ```
15758 #[inline]
15759 pub fn vpblendmw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
15760 where Assembler<'a>: VpblendmwMaskzEmitter<A, B, C> {
15761 <Self as VpblendmwMaskzEmitter<A, B, C>>::vpblendmw_maskz(self, op0, op1, op2);
15762 }
15763 /// `VPBROADCASTB` (VPBROADCASTB).
15764 /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
15765 ///
15766 ///
15767 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
15768 ///
15769 /// Supported operand variants:
15770 ///
15771 /// ```text
15772 /// +---+----------+
15773 /// | # | Operands |
15774 /// +---+----------+
15775 /// | 1 | Xmm, Mem |
15776 /// | 2 | Xmm, Xmm |
15777 /// | 3 | Ymm, Mem |
15778 /// | 4 | Ymm, Xmm |
15779 /// | 5 | Zmm, Mem |
15780 /// | 6 | Zmm, Xmm |
15781 /// +---+----------+
15782 /// ```
15783 #[inline]
15784 pub fn vpbroadcastb<A, B>(&mut self, op0: A, op1: B)
15785 where Assembler<'a>: VpbroadcastbEmitter<A, B> {
15786 <Self as VpbroadcastbEmitter<A, B>>::vpbroadcastb(self, op0, op1);
15787 }
15788 /// `VPBROADCASTB_GP`.
15789 ///
15790 /// Supported operand variants:
15791 ///
15792 /// ```text
15793 /// +---+----------+
15794 /// | # | Operands |
15795 /// +---+----------+
15796 /// | 1 | Xmm, Gpd |
15797 /// | 2 | Ymm, Gpd |
15798 /// | 3 | Zmm, Gpd |
15799 /// +---+----------+
15800 /// ```
15801 #[inline]
15802 pub fn vpbroadcastb_gp<A, B>(&mut self, op0: A, op1: B)
15803 where Assembler<'a>: VpbroadcastbGpEmitter<A, B> {
15804 <Self as VpbroadcastbGpEmitter<A, B>>::vpbroadcastb_gp(self, op0, op1);
15805 }
15806 /// `VPBROADCASTB_GP_MASK`.
15807 ///
15808 /// Supported operand variants:
15809 ///
15810 /// ```text
15811 /// +---+----------+
15812 /// | # | Operands |
15813 /// +---+----------+
15814 /// | 1 | Xmm, Gpd |
15815 /// | 2 | Ymm, Gpd |
15816 /// | 3 | Zmm, Gpd |
15817 /// +---+----------+
15818 /// ```
15819 #[inline]
15820 pub fn vpbroadcastb_gp_mask<A, B>(&mut self, op0: A, op1: B)
15821 where Assembler<'a>: VpbroadcastbGpMaskEmitter<A, B> {
15822 <Self as VpbroadcastbGpMaskEmitter<A, B>>::vpbroadcastb_gp_mask(self, op0, op1);
15823 }
15824 /// `VPBROADCASTB_GP_MASKZ`.
15825 ///
15826 /// Supported operand variants:
15827 ///
15828 /// ```text
15829 /// +---+----------+
15830 /// | # | Operands |
15831 /// +---+----------+
15832 /// | 1 | Xmm, Gpd |
15833 /// | 2 | Ymm, Gpd |
15834 /// | 3 | Zmm, Gpd |
15835 /// +---+----------+
15836 /// ```
15837 #[inline]
15838 pub fn vpbroadcastb_gp_maskz<A, B>(&mut self, op0: A, op1: B)
15839 where Assembler<'a>: VpbroadcastbGpMaskzEmitter<A, B> {
15840 <Self as VpbroadcastbGpMaskzEmitter<A, B>>::vpbroadcastb_gp_maskz(self, op0, op1);
15841 }
15842 /// `VPBROADCASTB_MASK` (VPBROADCASTB).
15843 /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
15844 ///
15845 ///
15846 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
15847 ///
15848 /// Supported operand variants:
15849 ///
15850 /// ```text
15851 /// +---+----------+
15852 /// | # | Operands |
15853 /// +---+----------+
15854 /// | 1 | Xmm, Mem |
15855 /// | 2 | Xmm, Xmm |
15856 /// | 3 | Ymm, Mem |
15857 /// | 4 | Ymm, Xmm |
15858 /// | 5 | Zmm, Mem |
15859 /// | 6 | Zmm, Xmm |
15860 /// +---+----------+
15861 /// ```
15862 #[inline]
15863 pub fn vpbroadcastb_mask<A, B>(&mut self, op0: A, op1: B)
15864 where Assembler<'a>: VpbroadcastbMaskEmitter<A, B> {
15865 <Self as VpbroadcastbMaskEmitter<A, B>>::vpbroadcastb_mask(self, op0, op1);
15866 }
15867 /// `VPBROADCASTB_MASKZ` (VPBROADCASTB).
15868 /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
15869 ///
15870 ///
15871 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
15872 ///
15873 /// Supported operand variants:
15874 ///
15875 /// ```text
15876 /// +---+----------+
15877 /// | # | Operands |
15878 /// +---+----------+
15879 /// | 1 | Xmm, Mem |
15880 /// | 2 | Xmm, Xmm |
15881 /// | 3 | Ymm, Mem |
15882 /// | 4 | Ymm, Xmm |
15883 /// | 5 | Zmm, Mem |
15884 /// | 6 | Zmm, Xmm |
15885 /// +---+----------+
15886 /// ```
15887 #[inline]
15888 pub fn vpbroadcastb_maskz<A, B>(&mut self, op0: A, op1: B)
15889 where Assembler<'a>: VpbroadcastbMaskzEmitter<A, B> {
15890 <Self as VpbroadcastbMaskzEmitter<A, B>>::vpbroadcastb_maskz(self, op0, op1);
15891 }
15892 /// `VPBROADCASTW` (VPBROADCASTW).
15893 /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
15894 ///
15895 ///
15896 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
15897 ///
15898 /// Supported operand variants:
15899 ///
15900 /// ```text
15901 /// +---+----------+
15902 /// | # | Operands |
15903 /// +---+----------+
15904 /// | 1 | Xmm, Mem |
15905 /// | 2 | Xmm, Xmm |
15906 /// | 3 | Ymm, Mem |
15907 /// | 4 | Ymm, Xmm |
15908 /// | 5 | Zmm, Mem |
15909 /// | 6 | Zmm, Xmm |
15910 /// +---+----------+
15911 /// ```
15912 #[inline]
15913 pub fn vpbroadcastw<A, B>(&mut self, op0: A, op1: B)
15914 where Assembler<'a>: VpbroadcastwEmitter<A, B> {
15915 <Self as VpbroadcastwEmitter<A, B>>::vpbroadcastw(self, op0, op1);
15916 }
15917 /// `VPBROADCASTW_GP`.
15918 ///
15919 /// Supported operand variants:
15920 ///
15921 /// ```text
15922 /// +---+----------+
15923 /// | # | Operands |
15924 /// +---+----------+
15925 /// | 1 | Xmm, Gpd |
15926 /// | 2 | Ymm, Gpd |
15927 /// | 3 | Zmm, Gpd |
15928 /// +---+----------+
15929 /// ```
15930 #[inline]
15931 pub fn vpbroadcastw_gp<A, B>(&mut self, op0: A, op1: B)
15932 where Assembler<'a>: VpbroadcastwGpEmitter<A, B> {
15933 <Self as VpbroadcastwGpEmitter<A, B>>::vpbroadcastw_gp(self, op0, op1);
15934 }
15935 /// `VPBROADCASTW_GP_MASK`.
15936 ///
15937 /// Supported operand variants:
15938 ///
15939 /// ```text
15940 /// +---+----------+
15941 /// | # | Operands |
15942 /// +---+----------+
15943 /// | 1 | Xmm, Gpd |
15944 /// | 2 | Ymm, Gpd |
15945 /// | 3 | Zmm, Gpd |
15946 /// +---+----------+
15947 /// ```
15948 #[inline]
15949 pub fn vpbroadcastw_gp_mask<A, B>(&mut self, op0: A, op1: B)
15950 where Assembler<'a>: VpbroadcastwGpMaskEmitter<A, B> {
15951 <Self as VpbroadcastwGpMaskEmitter<A, B>>::vpbroadcastw_gp_mask(self, op0, op1);
15952 }
15953 /// `VPBROADCASTW_GP_MASKZ`.
15954 ///
15955 /// Supported operand variants:
15956 ///
15957 /// ```text
15958 /// +---+----------+
15959 /// | # | Operands |
15960 /// +---+----------+
15961 /// | 1 | Xmm, Gpd |
15962 /// | 2 | Ymm, Gpd |
15963 /// | 3 | Zmm, Gpd |
15964 /// +---+----------+
15965 /// ```
15966 #[inline]
15967 pub fn vpbroadcastw_gp_maskz<A, B>(&mut self, op0: A, op1: B)
15968 where Assembler<'a>: VpbroadcastwGpMaskzEmitter<A, B> {
15969 <Self as VpbroadcastwGpMaskzEmitter<A, B>>::vpbroadcastw_gp_maskz(self, op0, op1);
15970 }
15971 /// `VPBROADCASTW_MASK` (VPBROADCASTW).
15972 /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
15973 ///
15974 ///
15975 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
15976 ///
15977 /// Supported operand variants:
15978 ///
15979 /// ```text
15980 /// +---+----------+
15981 /// | # | Operands |
15982 /// +---+----------+
15983 /// | 1 | Xmm, Mem |
15984 /// | 2 | Xmm, Xmm |
15985 /// | 3 | Ymm, Mem |
15986 /// | 4 | Ymm, Xmm |
15987 /// | 5 | Zmm, Mem |
15988 /// | 6 | Zmm, Xmm |
15989 /// +---+----------+
15990 /// ```
15991 #[inline]
15992 pub fn vpbroadcastw_mask<A, B>(&mut self, op0: A, op1: B)
15993 where Assembler<'a>: VpbroadcastwMaskEmitter<A, B> {
15994 <Self as VpbroadcastwMaskEmitter<A, B>>::vpbroadcastw_mask(self, op0, op1);
15995 }
15996 /// `VPBROADCASTW_MASKZ` (VPBROADCASTW).
15997 /// Load integer data from the source operand (the second operand) and broadcast to all elements of the destination operand (the first operand).
15998 ///
15999 ///
16000 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPBROADCAST.html).
16001 ///
16002 /// Supported operand variants:
16003 ///
16004 /// ```text
16005 /// +---+----------+
16006 /// | # | Operands |
16007 /// +---+----------+
16008 /// | 1 | Xmm, Mem |
16009 /// | 2 | Xmm, Xmm |
16010 /// | 3 | Ymm, Mem |
16011 /// | 4 | Ymm, Xmm |
16012 /// | 5 | Zmm, Mem |
16013 /// | 6 | Zmm, Xmm |
16014 /// +---+----------+
16015 /// ```
16016 #[inline]
16017 pub fn vpbroadcastw_maskz<A, B>(&mut self, op0: A, op1: B)
16018 where Assembler<'a>: VpbroadcastwMaskzEmitter<A, B> {
16019 <Self as VpbroadcastwMaskzEmitter<A, B>>::vpbroadcastw_maskz(self, op0, op1);
16020 }
16021 /// `VPCMPB` (VPCMPB).
16022 /// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16023 ///
16024 ///
16025 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
16026 ///
16027 /// Supported operand variants:
16028 ///
16029 /// ```text
16030 /// +---+---------------------+
16031 /// | # | Operands |
16032 /// +---+---------------------+
16033 /// | 1 | KReg, Xmm, Mem, Imm |
16034 /// | 2 | KReg, Xmm, Xmm, Imm |
16035 /// | 3 | KReg, Ymm, Mem, Imm |
16036 /// | 4 | KReg, Ymm, Ymm, Imm |
16037 /// | 5 | KReg, Zmm, Mem, Imm |
16038 /// | 6 | KReg, Zmm, Zmm, Imm |
16039 /// +---+---------------------+
16040 /// ```
16041 #[inline]
16042 pub fn vpcmpb<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16043 where Assembler<'a>: VpcmpbEmitter<A, B, C, D> {
16044 <Self as VpcmpbEmitter<A, B, C, D>>::vpcmpb(self, op0, op1, op2, op3);
16045 }
16046 /// `VPCMPB_MASK` (VPCMPB).
16047 /// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16048 ///
16049 ///
16050 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
16051 ///
16052 /// Supported operand variants:
16053 ///
16054 /// ```text
16055 /// +---+---------------------+
16056 /// | # | Operands |
16057 /// +---+---------------------+
16058 /// | 1 | KReg, Xmm, Mem, Imm |
16059 /// | 2 | KReg, Xmm, Xmm, Imm |
16060 /// | 3 | KReg, Ymm, Mem, Imm |
16061 /// | 4 | KReg, Ymm, Ymm, Imm |
16062 /// | 5 | KReg, Zmm, Mem, Imm |
16063 /// | 6 | KReg, Zmm, Zmm, Imm |
16064 /// +---+---------------------+
16065 /// ```
16066 #[inline]
16067 pub fn vpcmpb_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16068 where Assembler<'a>: VpcmpbMaskEmitter<A, B, C, D> {
16069 <Self as VpcmpbMaskEmitter<A, B, C, D>>::vpcmpb_mask(self, op0, op1, op2, op3);
16070 }
16071 /// `VPCMPUB` (VPCMPUB).
16072 /// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16073 ///
16074 ///
16075 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
16076 ///
16077 /// Supported operand variants:
16078 ///
16079 /// ```text
16080 /// +---+---------------------+
16081 /// | # | Operands |
16082 /// +---+---------------------+
16083 /// | 1 | KReg, Xmm, Mem, Imm |
16084 /// | 2 | KReg, Xmm, Xmm, Imm |
16085 /// | 3 | KReg, Ymm, Mem, Imm |
16086 /// | 4 | KReg, Ymm, Ymm, Imm |
16087 /// | 5 | KReg, Zmm, Mem, Imm |
16088 /// | 6 | KReg, Zmm, Zmm, Imm |
16089 /// +---+---------------------+
16090 /// ```
16091 #[inline]
16092 pub fn vpcmpub<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16093 where Assembler<'a>: VpcmpubEmitter<A, B, C, D> {
16094 <Self as VpcmpubEmitter<A, B, C, D>>::vpcmpub(self, op0, op1, op2, op3);
16095 }
16096 /// `VPCMPUB_MASK` (VPCMPUB).
16097 /// Performs a SIMD compare of the packed byte values in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16098 ///
16099 ///
16100 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPB%3AVPCMPUB.html).
16101 ///
16102 /// Supported operand variants:
16103 ///
16104 /// ```text
16105 /// +---+---------------------+
16106 /// | # | Operands |
16107 /// +---+---------------------+
16108 /// | 1 | KReg, Xmm, Mem, Imm |
16109 /// | 2 | KReg, Xmm, Xmm, Imm |
16110 /// | 3 | KReg, Ymm, Mem, Imm |
16111 /// | 4 | KReg, Ymm, Ymm, Imm |
16112 /// | 5 | KReg, Zmm, Mem, Imm |
16113 /// | 6 | KReg, Zmm, Zmm, Imm |
16114 /// +---+---------------------+
16115 /// ```
16116 #[inline]
16117 pub fn vpcmpub_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16118 where Assembler<'a>: VpcmpubMaskEmitter<A, B, C, D> {
16119 <Self as VpcmpubMaskEmitter<A, B, C, D>>::vpcmpub_mask(self, op0, op1, op2, op3);
16120 }
16121 /// `VPCMPUW` (VPCMPUW).
16122 /// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16123 ///
16124 ///
16125 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
16126 ///
16127 /// Supported operand variants:
16128 ///
16129 /// ```text
16130 /// +---+---------------------+
16131 /// | # | Operands |
16132 /// +---+---------------------+
16133 /// | 1 | KReg, Xmm, Mem, Imm |
16134 /// | 2 | KReg, Xmm, Xmm, Imm |
16135 /// | 3 | KReg, Ymm, Mem, Imm |
16136 /// | 4 | KReg, Ymm, Ymm, Imm |
16137 /// | 5 | KReg, Zmm, Mem, Imm |
16138 /// | 6 | KReg, Zmm, Zmm, Imm |
16139 /// +---+---------------------+
16140 /// ```
16141 #[inline]
16142 pub fn vpcmpuw<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16143 where Assembler<'a>: VpcmpuwEmitter<A, B, C, D> {
16144 <Self as VpcmpuwEmitter<A, B, C, D>>::vpcmpuw(self, op0, op1, op2, op3);
16145 }
16146 /// `VPCMPUW_MASK` (VPCMPUW).
16147 /// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16148 ///
16149 ///
16150 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
16151 ///
16152 /// Supported operand variants:
16153 ///
16154 /// ```text
16155 /// +---+---------------------+
16156 /// | # | Operands |
16157 /// +---+---------------------+
16158 /// | 1 | KReg, Xmm, Mem, Imm |
16159 /// | 2 | KReg, Xmm, Xmm, Imm |
16160 /// | 3 | KReg, Ymm, Mem, Imm |
16161 /// | 4 | KReg, Ymm, Ymm, Imm |
16162 /// | 5 | KReg, Zmm, Mem, Imm |
16163 /// | 6 | KReg, Zmm, Zmm, Imm |
16164 /// +---+---------------------+
16165 /// ```
16166 #[inline]
16167 pub fn vpcmpuw_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16168 where Assembler<'a>: VpcmpuwMaskEmitter<A, B, C, D> {
16169 <Self as VpcmpuwMaskEmitter<A, B, C, D>>::vpcmpuw_mask(self, op0, op1, op2, op3);
16170 }
16171 /// `VPCMPW` (VPCMPW).
16172 /// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16173 ///
16174 ///
16175 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
16176 ///
16177 /// Supported operand variants:
16178 ///
16179 /// ```text
16180 /// +---+---------------------+
16181 /// | # | Operands |
16182 /// +---+---------------------+
16183 /// | 1 | KReg, Xmm, Mem, Imm |
16184 /// | 2 | KReg, Xmm, Xmm, Imm |
16185 /// | 3 | KReg, Ymm, Mem, Imm |
16186 /// | 4 | KReg, Ymm, Ymm, Imm |
16187 /// | 5 | KReg, Zmm, Mem, Imm |
16188 /// | 6 | KReg, Zmm, Zmm, Imm |
16189 /// +---+---------------------+
16190 /// ```
16191 #[inline]
16192 pub fn vpcmpw<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16193 where Assembler<'a>: VpcmpwEmitter<A, B, C, D> {
16194 <Self as VpcmpwEmitter<A, B, C, D>>::vpcmpw(self, op0, op1, op2, op3);
16195 }
16196 /// `VPCMPW_MASK` (VPCMPW).
16197 /// Performs a SIMD compare of the packed integer word in the second source operand and the first source operand and returns the results of the comparison to the mask destination operand. The comparison predicate operand (immediate byte) specifies the type of comparison performed on each pair of packed values in the two source operands. The result of each comparison is a single mask bit result of 1 (comparison true) or 0 (comparison false).
16198 ///
16199 ///
16200 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCMPW%3AVPCMPUW.html).
16201 ///
16202 /// Supported operand variants:
16203 ///
16204 /// ```text
16205 /// +---+---------------------+
16206 /// | # | Operands |
16207 /// +---+---------------------+
16208 /// | 1 | KReg, Xmm, Mem, Imm |
16209 /// | 2 | KReg, Xmm, Xmm, Imm |
16210 /// | 3 | KReg, Ymm, Mem, Imm |
16211 /// | 4 | KReg, Ymm, Ymm, Imm |
16212 /// | 5 | KReg, Zmm, Mem, Imm |
16213 /// | 6 | KReg, Zmm, Zmm, Imm |
16214 /// +---+---------------------+
16215 /// ```
16216 #[inline]
16217 pub fn vpcmpw_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16218 where Assembler<'a>: VpcmpwMaskEmitter<A, B, C, D> {
16219 <Self as VpcmpwMaskEmitter<A, B, C, D>>::vpcmpw_mask(self, op0, op1, op2, op3);
16220 }
16221 /// `VPERMI2W` (VPERMI2W).
16222 /// Permutes 16-bit/32-bit/64-bit values in the second operand (the first source operand) and the third operand (the second source operand) using indices in the first operand to select elements from the second and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
16223 ///
16224 ///
16225 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMI2W%3AVPERMI2D%3AVPERMI2Q%3AVPERMI2PS%3AVPERMI2PD.html).
16226 ///
16227 /// Supported operand variants:
16228 ///
16229 /// ```text
16230 /// +---+---------------+
16231 /// | # | Operands |
16232 /// +---+---------------+
16233 /// | 1 | Xmm, Xmm, Mem |
16234 /// | 2 | Xmm, Xmm, Xmm |
16235 /// | 3 | Ymm, Ymm, Mem |
16236 /// | 4 | Ymm, Ymm, Ymm |
16237 /// | 5 | Zmm, Zmm, Mem |
16238 /// | 6 | Zmm, Zmm, Zmm |
16239 /// +---+---------------+
16240 /// ```
16241 #[inline]
16242 pub fn vpermi2w<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16243 where Assembler<'a>: Vpermi2wEmitter<A, B, C> {
16244 <Self as Vpermi2wEmitter<A, B, C>>::vpermi2w(self, op0, op1, op2);
16245 }
16246 /// `VPERMI2W_MASK` (VPERMI2W).
16247 /// Permutes 16-bit/32-bit/64-bit values in the second operand (the first source operand) and the third operand (the second source operand) using indices in the first operand to select elements from the second and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
16248 ///
16249 ///
16250 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMI2W%3AVPERMI2D%3AVPERMI2Q%3AVPERMI2PS%3AVPERMI2PD.html).
16251 ///
16252 /// Supported operand variants:
16253 ///
16254 /// ```text
16255 /// +---+---------------+
16256 /// | # | Operands |
16257 /// +---+---------------+
16258 /// | 1 | Xmm, Xmm, Mem |
16259 /// | 2 | Xmm, Xmm, Xmm |
16260 /// | 3 | Ymm, Ymm, Mem |
16261 /// | 4 | Ymm, Ymm, Ymm |
16262 /// | 5 | Zmm, Zmm, Mem |
16263 /// | 6 | Zmm, Zmm, Zmm |
16264 /// +---+---------------+
16265 /// ```
16266 #[inline]
16267 pub fn vpermi2w_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16268 where Assembler<'a>: Vpermi2wMaskEmitter<A, B, C> {
16269 <Self as Vpermi2wMaskEmitter<A, B, C>>::vpermi2w_mask(self, op0, op1, op2);
16270 }
16271 /// `VPERMI2W_MASKZ` (VPERMI2W).
16272 /// Permutes 16-bit/32-bit/64-bit values in the second operand (the first source operand) and the third operand (the second source operand) using indices in the first operand to select elements from the second and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
16273 ///
16274 ///
16275 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMI2W%3AVPERMI2D%3AVPERMI2Q%3AVPERMI2PS%3AVPERMI2PD.html).
16276 ///
16277 /// Supported operand variants:
16278 ///
16279 /// ```text
16280 /// +---+---------------+
16281 /// | # | Operands |
16282 /// +---+---------------+
16283 /// | 1 | Xmm, Xmm, Mem |
16284 /// | 2 | Xmm, Xmm, Xmm |
16285 /// | 3 | Ymm, Ymm, Mem |
16286 /// | 4 | Ymm, Ymm, Ymm |
16287 /// | 5 | Zmm, Zmm, Mem |
16288 /// | 6 | Zmm, Zmm, Zmm |
16289 /// +---+---------------+
16290 /// ```
16291 #[inline]
16292 pub fn vpermi2w_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16293 where Assembler<'a>: Vpermi2wMaskzEmitter<A, B, C> {
16294 <Self as Vpermi2wMaskzEmitter<A, B, C>>::vpermi2w_maskz(self, op0, op1, op2);
16295 }
16296 /// `VPERMT2W` (VPERMT2W).
16297 /// Permutes 16-bit/32-bit/64-bit values in the first operand and the third operand (the second source operand) using indices in the second operand (the first source operand) to select elements from the first and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
16298 ///
16299 ///
16300 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMT2W%3AVPERMT2D%3AVPERMT2Q%3AVPERMT2PS%3AVPERMT2PD.html).
16301 ///
16302 /// Supported operand variants:
16303 ///
16304 /// ```text
16305 /// +---+---------------+
16306 /// | # | Operands |
16307 /// +---+---------------+
16308 /// | 1 | Xmm, Xmm, Mem |
16309 /// | 2 | Xmm, Xmm, Xmm |
16310 /// | 3 | Ymm, Ymm, Mem |
16311 /// | 4 | Ymm, Ymm, Ymm |
16312 /// | 5 | Zmm, Zmm, Mem |
16313 /// | 6 | Zmm, Zmm, Zmm |
16314 /// +---+---------------+
16315 /// ```
16316 #[inline]
16317 pub fn vpermt2w<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16318 where Assembler<'a>: Vpermt2wEmitter<A, B, C> {
16319 <Self as Vpermt2wEmitter<A, B, C>>::vpermt2w(self, op0, op1, op2);
16320 }
16321 /// `VPERMT2W_MASK` (VPERMT2W).
16322 /// Permutes 16-bit/32-bit/64-bit values in the first operand and the third operand (the second source operand) using indices in the second operand (the first source operand) to select elements from the first and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
16323 ///
16324 ///
16325 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMT2W%3AVPERMT2D%3AVPERMT2Q%3AVPERMT2PS%3AVPERMT2PD.html).
16326 ///
16327 /// Supported operand variants:
16328 ///
16329 /// ```text
16330 /// +---+---------------+
16331 /// | # | Operands |
16332 /// +---+---------------+
16333 /// | 1 | Xmm, Xmm, Mem |
16334 /// | 2 | Xmm, Xmm, Xmm |
16335 /// | 3 | Ymm, Ymm, Mem |
16336 /// | 4 | Ymm, Ymm, Ymm |
16337 /// | 5 | Zmm, Zmm, Mem |
16338 /// | 6 | Zmm, Zmm, Zmm |
16339 /// +---+---------------+
16340 /// ```
16341 #[inline]
16342 pub fn vpermt2w_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16343 where Assembler<'a>: Vpermt2wMaskEmitter<A, B, C> {
16344 <Self as Vpermt2wMaskEmitter<A, B, C>>::vpermt2w_mask(self, op0, op1, op2);
16345 }
16346 /// `VPERMT2W_MASKZ` (VPERMT2W).
16347 /// Permutes 16-bit/32-bit/64-bit values in the first operand and the third operand (the second source operand) using indices in the second operand (the first source operand) to select elements from the first and third operands. The selected elements are written to the destination operand (the first operand) according to the writemask k1.
16348 ///
16349 ///
16350 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMT2W%3AVPERMT2D%3AVPERMT2Q%3AVPERMT2PS%3AVPERMT2PD.html).
16351 ///
16352 /// Supported operand variants:
16353 ///
16354 /// ```text
16355 /// +---+---------------+
16356 /// | # | Operands |
16357 /// +---+---------------+
16358 /// | 1 | Xmm, Xmm, Mem |
16359 /// | 2 | Xmm, Xmm, Xmm |
16360 /// | 3 | Ymm, Ymm, Mem |
16361 /// | 4 | Ymm, Ymm, Ymm |
16362 /// | 5 | Zmm, Zmm, Mem |
16363 /// | 6 | Zmm, Zmm, Zmm |
16364 /// +---+---------------+
16365 /// ```
16366 #[inline]
16367 pub fn vpermt2w_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16368 where Assembler<'a>: Vpermt2wMaskzEmitter<A, B, C> {
16369 <Self as Vpermt2wMaskzEmitter<A, B, C>>::vpermt2w_maskz(self, op0, op1, op2);
16370 }
16371 /// `VPERMW` (VPERMW).
16372 /// Copies doublewords (or words) from the second source operand (the third operand) to the destination operand (the first operand) according to the indices in the first source operand (the second operand). Note that this instruction permits a doubleword (word) in the source operand to be copied to more than one location in the destination operand.
16373 ///
16374 ///
16375 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMD%3AVPERMW.html).
16376 ///
16377 /// Supported operand variants:
16378 ///
16379 /// ```text
16380 /// +---+---------------+
16381 /// | # | Operands |
16382 /// +---+---------------+
16383 /// | 1 | Xmm, Xmm, Mem |
16384 /// | 2 | Xmm, Xmm, Xmm |
16385 /// | 3 | Ymm, Ymm, Mem |
16386 /// | 4 | Ymm, Ymm, Ymm |
16387 /// | 5 | Zmm, Zmm, Mem |
16388 /// | 6 | Zmm, Zmm, Zmm |
16389 /// +---+---------------+
16390 /// ```
16391 #[inline]
16392 pub fn vpermw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16393 where Assembler<'a>: VpermwEmitter<A, B, C> {
16394 <Self as VpermwEmitter<A, B, C>>::vpermw(self, op0, op1, op2);
16395 }
16396 /// `VPERMW_MASK` (VPERMW).
16397 /// Copies doublewords (or words) from the second source operand (the third operand) to the destination operand (the first operand) according to the indices in the first source operand (the second operand). Note that this instruction permits a doubleword (word) in the source operand to be copied to more than one location in the destination operand.
16398 ///
16399 ///
16400 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMD%3AVPERMW.html).
16401 ///
16402 /// Supported operand variants:
16403 ///
16404 /// ```text
16405 /// +---+---------------+
16406 /// | # | Operands |
16407 /// +---+---------------+
16408 /// | 1 | Xmm, Xmm, Mem |
16409 /// | 2 | Xmm, Xmm, Xmm |
16410 /// | 3 | Ymm, Ymm, Mem |
16411 /// | 4 | Ymm, Ymm, Ymm |
16412 /// | 5 | Zmm, Zmm, Mem |
16413 /// | 6 | Zmm, Zmm, Zmm |
16414 /// +---+---------------+
16415 /// ```
16416 #[inline]
16417 pub fn vpermw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16418 where Assembler<'a>: VpermwMaskEmitter<A, B, C> {
16419 <Self as VpermwMaskEmitter<A, B, C>>::vpermw_mask(self, op0, op1, op2);
16420 }
16421 /// `VPERMW_MASKZ` (VPERMW).
16422 /// Copies doublewords (or words) from the second source operand (the third operand) to the destination operand (the first operand) according to the indices in the first source operand (the second operand). Note that this instruction permits a doubleword (word) in the source operand to be copied to more than one location in the destination operand.
16423 ///
16424 ///
16425 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPERMD%3AVPERMW.html).
16426 ///
16427 /// Supported operand variants:
16428 ///
16429 /// ```text
16430 /// +---+---------------+
16431 /// | # | Operands |
16432 /// +---+---------------+
16433 /// | 1 | Xmm, Xmm, Mem |
16434 /// | 2 | Xmm, Xmm, Xmm |
16435 /// | 3 | Ymm, Ymm, Mem |
16436 /// | 4 | Ymm, Ymm, Ymm |
16437 /// | 5 | Zmm, Zmm, Mem |
16438 /// | 6 | Zmm, Zmm, Zmm |
16439 /// +---+---------------+
16440 /// ```
16441 #[inline]
16442 pub fn vpermw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16443 where Assembler<'a>: VpermwMaskzEmitter<A, B, C> {
16444 <Self as VpermwMaskzEmitter<A, B, C>>::vpermw_maskz(self, op0, op1, op2);
16445 }
16446 /// `VPEXTRB` (VPEXTRB).
16447 /// Extract a byte/dword/qword integer value from the source XMM register at a byte/dword/qword offset determined from imm8[3:0]. The destination can be a register or byte/dword/qword memory location. If the destination is a register, the upper bits of the register are zero extended.
16448 ///
16449 ///
16450 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PEXTRB%3APEXTRD%3APEXTRQ.html).
16451 ///
16452 /// Supported operand variants:
16453 ///
16454 /// ```text
16455 /// +---+---------------+
16456 /// | # | Operands |
16457 /// +---+---------------+
16458 /// | 1 | Gpd, Xmm, Imm |
16459 /// | 2 | Mem, Xmm, Imm |
16460 /// +---+---------------+
16461 /// ```
16462 #[inline]
16463 pub fn vpextrb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16464 where Assembler<'a>: VpextrbEmitter<A, B, C> {
16465 <Self as VpextrbEmitter<A, B, C>>::vpextrb(self, op0, op1, op2);
16466 }
16467 /// `VPEXTRW` (VPEXTRW).
16468 /// Copies the word in the source operand (second operand) specified by the count operand (third operand) to the destination operand (first operand). The source operand can be an MMX technology register or an XMM register. The destination operand can be the low word of a general-purpose register or a 16-bit memory address. The count operand is an 8-bit immediate. When specifying a word location in an MMX technology register, the 2 least-significant bits of the count operand specify the location; for an XMM register, the 3 least-significant bits specify the location. The content of the destination register above bit 16 is cleared (set to all 0s).
16469 ///
16470 ///
16471 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PEXTRW.html).
16472 ///
16473 /// Supported operand variants:
16474 ///
16475 /// ```text
16476 /// +---+---------------+
16477 /// | # | Operands |
16478 /// +---+---------------+
16479 /// | 1 | Gpd, Xmm, Imm |
16480 /// | 2 | Mem, Xmm, Imm |
16481 /// +---+---------------+
16482 /// ```
16483 #[inline]
16484 pub fn vpextrw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16485 where Assembler<'a>: VpextrwEmitter<A, B, C> {
16486 <Self as VpextrwEmitter<A, B, C>>::vpextrw(self, op0, op1, op2);
16487 }
16488 /// `VPINSRB`.
16489 ///
16490 /// Supported operand variants:
16491 ///
16492 /// ```text
16493 /// +---+--------------------+
16494 /// | # | Operands |
16495 /// +---+--------------------+
16496 /// | 1 | Xmm, Xmm, Gpd, Imm |
16497 /// | 2 | Xmm, Xmm, Mem, Imm |
16498 /// +---+--------------------+
16499 /// ```
16500 #[inline]
16501 pub fn vpinsrb<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16502 where Assembler<'a>: VpinsrbEmitter<A, B, C, D> {
16503 <Self as VpinsrbEmitter<A, B, C, D>>::vpinsrb(self, op0, op1, op2, op3);
16504 }
16505 /// `VPINSRW`.
16506 ///
16507 /// Supported operand variants:
16508 ///
16509 /// ```text
16510 /// +---+--------------------+
16511 /// | # | Operands |
16512 /// +---+--------------------+
16513 /// | 1 | Xmm, Xmm, Gpd, Imm |
16514 /// | 2 | Xmm, Xmm, Mem, Imm |
16515 /// +---+--------------------+
16516 /// ```
16517 #[inline]
16518 pub fn vpinsrw<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
16519 where Assembler<'a>: VpinsrwEmitter<A, B, C, D> {
16520 <Self as VpinsrwEmitter<A, B, C, D>>::vpinsrw(self, op0, op1, op2, op3);
16521 }
16522 /// `VPMADDUBSW` (VPMADDUBSW).
16523 /// (V)PMADDUBSW multiplies vertically each unsigned byte of the destination operand (first operand) with the corresponding signed byte of the source operand (second operand), producing intermediate signed 16-bit integers. Each adjacent pair of signed words is added and the saturated result is packed to the destination operand. For example, the lowest-order bytes (bits 7-0) in the source and destination operands are multiplied and the intermediate signed word result is added with the corresponding intermediate result from the 2nd lowest-order bytes (bits 15-8) of the operands; the sign-saturated result is stored in the lowest word of the destination register (15-0). The same operation is performed on the other pairs of adjacent bytes. Both operands can be MMX register or XMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
16524 ///
16525 ///
16526 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDUBSW.html).
16527 ///
16528 /// Supported operand variants:
16529 ///
16530 /// ```text
16531 /// +---+---------------+
16532 /// | # | Operands |
16533 /// +---+---------------+
16534 /// | 1 | Xmm, Xmm, Mem |
16535 /// | 2 | Xmm, Xmm, Xmm |
16536 /// | 3 | Ymm, Ymm, Mem |
16537 /// | 4 | Ymm, Ymm, Ymm |
16538 /// | 5 | Zmm, Zmm, Mem |
16539 /// | 6 | Zmm, Zmm, Zmm |
16540 /// +---+---------------+
16541 /// ```
16542 #[inline]
16543 pub fn vpmaddubsw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16544 where Assembler<'a>: VpmaddubswEmitter<A, B, C> {
16545 <Self as VpmaddubswEmitter<A, B, C>>::vpmaddubsw(self, op0, op1, op2);
16546 }
16547 /// `VPMADDUBSW_MASK` (VPMADDUBSW).
16548 /// (V)PMADDUBSW multiplies vertically each unsigned byte of the destination operand (first operand) with the corresponding signed byte of the source operand (second operand), producing intermediate signed 16-bit integers. Each adjacent pair of signed words is added and the saturated result is packed to the destination operand. For example, the lowest-order bytes (bits 7-0) in the source and destination operands are multiplied and the intermediate signed word result is added with the corresponding intermediate result from the 2nd lowest-order bytes (bits 15-8) of the operands; the sign-saturated result is stored in the lowest word of the destination register (15-0). The same operation is performed on the other pairs of adjacent bytes. Both operands can be MMX register or XMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
16549 ///
16550 ///
16551 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDUBSW.html).
16552 ///
16553 /// Supported operand variants:
16554 ///
16555 /// ```text
16556 /// +---+---------------+
16557 /// | # | Operands |
16558 /// +---+---------------+
16559 /// | 1 | Xmm, Xmm, Mem |
16560 /// | 2 | Xmm, Xmm, Xmm |
16561 /// | 3 | Ymm, Ymm, Mem |
16562 /// | 4 | Ymm, Ymm, Ymm |
16563 /// | 5 | Zmm, Zmm, Mem |
16564 /// | 6 | Zmm, Zmm, Zmm |
16565 /// +---+---------------+
16566 /// ```
16567 #[inline]
16568 pub fn vpmaddubsw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16569 where Assembler<'a>: VpmaddubswMaskEmitter<A, B, C> {
16570 <Self as VpmaddubswMaskEmitter<A, B, C>>::vpmaddubsw_mask(self, op0, op1, op2);
16571 }
16572 /// `VPMADDUBSW_MASKZ` (VPMADDUBSW).
16573 /// (V)PMADDUBSW multiplies vertically each unsigned byte of the destination operand (first operand) with the corresponding signed byte of the source operand (second operand), producing intermediate signed 16-bit integers. Each adjacent pair of signed words is added and the saturated result is packed to the destination operand. For example, the lowest-order bytes (bits 7-0) in the source and destination operands are multiplied and the intermediate signed word result is added with the corresponding intermediate result from the 2nd lowest-order bytes (bits 15-8) of the operands; the sign-saturated result is stored in the lowest word of the destination register (15-0). The same operation is performed on the other pairs of adjacent bytes. Both operands can be MMX register or XMM registers. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
16574 ///
16575 ///
16576 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDUBSW.html).
16577 ///
16578 /// Supported operand variants:
16579 ///
16580 /// ```text
16581 /// +---+---------------+
16582 /// | # | Operands |
16583 /// +---+---------------+
16584 /// | 1 | Xmm, Xmm, Mem |
16585 /// | 2 | Xmm, Xmm, Xmm |
16586 /// | 3 | Ymm, Ymm, Mem |
16587 /// | 4 | Ymm, Ymm, Ymm |
16588 /// | 5 | Zmm, Zmm, Mem |
16589 /// | 6 | Zmm, Zmm, Zmm |
16590 /// +---+---------------+
16591 /// ```
16592 #[inline]
16593 pub fn vpmaddubsw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16594 where Assembler<'a>: VpmaddubswMaskzEmitter<A, B, C> {
16595 <Self as VpmaddubswMaskzEmitter<A, B, C>>::vpmaddubsw_maskz(self, op0, op1, op2);
16596 }
16597 /// `VPMADDWD` (VPMADDWD).
16598 /// Multiplies the individual signed words of the destination operand (first operand) by the corresponding signed words of the source operand (second operand), producing temporary signed, doubleword results. The adjacent double-word results are then summed and stored in the destination operand. For example, the corresponding low-order words (15-0) and (31-16) in the source and destination operands are multiplied by one another and the double-word results are added together and stored in the low doubleword of the destination register (31-0). The same operation is performed on the other pairs of adjacent words. (Figure 4-11 shows this operation when using 64-bit operands).
16599 ///
16600 ///
16601 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDWD.html).
16602 ///
16603 /// Supported operand variants:
16604 ///
16605 /// ```text
16606 /// +---+---------------+
16607 /// | # | Operands |
16608 /// +---+---------------+
16609 /// | 1 | Xmm, Xmm, Mem |
16610 /// | 2 | Xmm, Xmm, Xmm |
16611 /// | 3 | Ymm, Ymm, Mem |
16612 /// | 4 | Ymm, Ymm, Ymm |
16613 /// | 5 | Zmm, Zmm, Mem |
16614 /// | 6 | Zmm, Zmm, Zmm |
16615 /// +---+---------------+
16616 /// ```
16617 #[inline]
16618 pub fn vpmaddwd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16619 where Assembler<'a>: VpmaddwdEmitter<A, B, C> {
16620 <Self as VpmaddwdEmitter<A, B, C>>::vpmaddwd(self, op0, op1, op2);
16621 }
16622 /// `VPMADDWD_MASK` (VPMADDWD).
16623 /// Multiplies the individual signed words of the destination operand (first operand) by the corresponding signed words of the source operand (second operand), producing temporary signed, doubleword results. The adjacent double-word results are then summed and stored in the destination operand. For example, the corresponding low-order words (15-0) and (31-16) in the source and destination operands are multiplied by one another and the double-word results are added together and stored in the low doubleword of the destination register (31-0). The same operation is performed on the other pairs of adjacent words. (Figure 4-11 shows this operation when using 64-bit operands).
16624 ///
16625 ///
16626 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDWD.html).
16627 ///
16628 /// Supported operand variants:
16629 ///
16630 /// ```text
16631 /// +---+---------------+
16632 /// | # | Operands |
16633 /// +---+---------------+
16634 /// | 1 | Xmm, Xmm, Mem |
16635 /// | 2 | Xmm, Xmm, Xmm |
16636 /// | 3 | Ymm, Ymm, Mem |
16637 /// | 4 | Ymm, Ymm, Ymm |
16638 /// | 5 | Zmm, Zmm, Mem |
16639 /// | 6 | Zmm, Zmm, Zmm |
16640 /// +---+---------------+
16641 /// ```
16642 #[inline]
16643 pub fn vpmaddwd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16644 where Assembler<'a>: VpmaddwdMaskEmitter<A, B, C> {
16645 <Self as VpmaddwdMaskEmitter<A, B, C>>::vpmaddwd_mask(self, op0, op1, op2);
16646 }
16647 /// `VPMADDWD_MASKZ` (VPMADDWD).
16648 /// Multiplies the individual signed words of the destination operand (first operand) by the corresponding signed words of the source operand (second operand), producing temporary signed, doubleword results. The adjacent double-word results are then summed and stored in the destination operand. For example, the corresponding low-order words (15-0) and (31-16) in the source and destination operands are multiplied by one another and the double-word results are added together and stored in the low doubleword of the destination register (31-0). The same operation is performed on the other pairs of adjacent words. (Figure 4-11 shows this operation when using 64-bit operands).
16649 ///
16650 ///
16651 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMADDWD.html).
16652 ///
16653 /// Supported operand variants:
16654 ///
16655 /// ```text
16656 /// +---+---------------+
16657 /// | # | Operands |
16658 /// +---+---------------+
16659 /// | 1 | Xmm, Xmm, Mem |
16660 /// | 2 | Xmm, Xmm, Xmm |
16661 /// | 3 | Ymm, Ymm, Mem |
16662 /// | 4 | Ymm, Ymm, Ymm |
16663 /// | 5 | Zmm, Zmm, Mem |
16664 /// | 6 | Zmm, Zmm, Zmm |
16665 /// +---+---------------+
16666 /// ```
16667 #[inline]
16668 pub fn vpmaddwd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16669 where Assembler<'a>: VpmaddwdMaskzEmitter<A, B, C> {
16670 <Self as VpmaddwdMaskzEmitter<A, B, C>>::vpmaddwd_maskz(self, op0, op1, op2);
16671 }
16672 /// `VPMAXSB` (VPMAXSB).
16673 /// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16674 ///
16675 ///
16676 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
16677 ///
16678 /// Supported operand variants:
16679 ///
16680 /// ```text
16681 /// +---+---------------+
16682 /// | # | Operands |
16683 /// +---+---------------+
16684 /// | 1 | Xmm, Xmm, Mem |
16685 /// | 2 | Xmm, Xmm, Xmm |
16686 /// | 3 | Ymm, Ymm, Mem |
16687 /// | 4 | Ymm, Ymm, Ymm |
16688 /// | 5 | Zmm, Zmm, Mem |
16689 /// | 6 | Zmm, Zmm, Zmm |
16690 /// +---+---------------+
16691 /// ```
16692 #[inline]
16693 pub fn vpmaxsb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16694 where Assembler<'a>: VpmaxsbEmitter<A, B, C> {
16695 <Self as VpmaxsbEmitter<A, B, C>>::vpmaxsb(self, op0, op1, op2);
16696 }
16697 /// `VPMAXSB_MASK` (VPMAXSB).
16698 /// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16699 ///
16700 ///
16701 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
16702 ///
16703 /// Supported operand variants:
16704 ///
16705 /// ```text
16706 /// +---+---------------+
16707 /// | # | Operands |
16708 /// +---+---------------+
16709 /// | 1 | Xmm, Xmm, Mem |
16710 /// | 2 | Xmm, Xmm, Xmm |
16711 /// | 3 | Ymm, Ymm, Mem |
16712 /// | 4 | Ymm, Ymm, Ymm |
16713 /// | 5 | Zmm, Zmm, Mem |
16714 /// | 6 | Zmm, Zmm, Zmm |
16715 /// +---+---------------+
16716 /// ```
16717 #[inline]
16718 pub fn vpmaxsb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16719 where Assembler<'a>: VpmaxsbMaskEmitter<A, B, C> {
16720 <Self as VpmaxsbMaskEmitter<A, B, C>>::vpmaxsb_mask(self, op0, op1, op2);
16721 }
16722 /// `VPMAXSB_MASKZ` (VPMAXSB).
16723 /// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16724 ///
16725 ///
16726 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
16727 ///
16728 /// Supported operand variants:
16729 ///
16730 /// ```text
16731 /// +---+---------------+
16732 /// | # | Operands |
16733 /// +---+---------------+
16734 /// | 1 | Xmm, Xmm, Mem |
16735 /// | 2 | Xmm, Xmm, Xmm |
16736 /// | 3 | Ymm, Ymm, Mem |
16737 /// | 4 | Ymm, Ymm, Ymm |
16738 /// | 5 | Zmm, Zmm, Mem |
16739 /// | 6 | Zmm, Zmm, Zmm |
16740 /// +---+---------------+
16741 /// ```
16742 #[inline]
16743 pub fn vpmaxsb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16744 where Assembler<'a>: VpmaxsbMaskzEmitter<A, B, C> {
16745 <Self as VpmaxsbMaskzEmitter<A, B, C>>::vpmaxsb_maskz(self, op0, op1, op2);
16746 }
16747 /// `VPMAXSW` (VPMAXSW).
16748 /// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16749 ///
16750 ///
16751 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
16752 ///
16753 /// Supported operand variants:
16754 ///
16755 /// ```text
16756 /// +---+---------------+
16757 /// | # | Operands |
16758 /// +---+---------------+
16759 /// | 1 | Xmm, Xmm, Mem |
16760 /// | 2 | Xmm, Xmm, Xmm |
16761 /// | 3 | Ymm, Ymm, Mem |
16762 /// | 4 | Ymm, Ymm, Ymm |
16763 /// | 5 | Zmm, Zmm, Mem |
16764 /// | 6 | Zmm, Zmm, Zmm |
16765 /// +---+---------------+
16766 /// ```
16767 #[inline]
16768 pub fn vpmaxsw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16769 where Assembler<'a>: VpmaxswEmitter<A, B, C> {
16770 <Self as VpmaxswEmitter<A, B, C>>::vpmaxsw(self, op0, op1, op2);
16771 }
16772 /// `VPMAXSW_MASK` (VPMAXSW).
16773 /// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16774 ///
16775 ///
16776 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
16777 ///
16778 /// Supported operand variants:
16779 ///
16780 /// ```text
16781 /// +---+---------------+
16782 /// | # | Operands |
16783 /// +---+---------------+
16784 /// | 1 | Xmm, Xmm, Mem |
16785 /// | 2 | Xmm, Xmm, Xmm |
16786 /// | 3 | Ymm, Ymm, Mem |
16787 /// | 4 | Ymm, Ymm, Ymm |
16788 /// | 5 | Zmm, Zmm, Mem |
16789 /// | 6 | Zmm, Zmm, Zmm |
16790 /// +---+---------------+
16791 /// ```
16792 #[inline]
16793 pub fn vpmaxsw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16794 where Assembler<'a>: VpmaxswMaskEmitter<A, B, C> {
16795 <Self as VpmaxswMaskEmitter<A, B, C>>::vpmaxsw_mask(self, op0, op1, op2);
16796 }
16797 /// `VPMAXSW_MASKZ` (VPMAXSW).
16798 /// Performs a SIMD compare of the packed signed byte, word, dword or qword integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16799 ///
16800 ///
16801 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXSB%3APMAXSW%3APMAXSD%3APMAXSQ.html).
16802 ///
16803 /// Supported operand variants:
16804 ///
16805 /// ```text
16806 /// +---+---------------+
16807 /// | # | Operands |
16808 /// +---+---------------+
16809 /// | 1 | Xmm, Xmm, Mem |
16810 /// | 2 | Xmm, Xmm, Xmm |
16811 /// | 3 | Ymm, Ymm, Mem |
16812 /// | 4 | Ymm, Ymm, Ymm |
16813 /// | 5 | Zmm, Zmm, Mem |
16814 /// | 6 | Zmm, Zmm, Zmm |
16815 /// +---+---------------+
16816 /// ```
16817 #[inline]
16818 pub fn vpmaxsw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16819 where Assembler<'a>: VpmaxswMaskzEmitter<A, B, C> {
16820 <Self as VpmaxswMaskzEmitter<A, B, C>>::vpmaxsw_maskz(self, op0, op1, op2);
16821 }
16822 /// `VPMAXUB` (VPMAXUB).
16823 /// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16824 ///
16825 ///
16826 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
16827 ///
16828 /// Supported operand variants:
16829 ///
16830 /// ```text
16831 /// +---+---------------+
16832 /// | # | Operands |
16833 /// +---+---------------+
16834 /// | 1 | Xmm, Xmm, Mem |
16835 /// | 2 | Xmm, Xmm, Xmm |
16836 /// | 3 | Ymm, Ymm, Mem |
16837 /// | 4 | Ymm, Ymm, Ymm |
16838 /// | 5 | Zmm, Zmm, Mem |
16839 /// | 6 | Zmm, Zmm, Zmm |
16840 /// +---+---------------+
16841 /// ```
16842 #[inline]
16843 pub fn vpmaxub<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16844 where Assembler<'a>: VpmaxubEmitter<A, B, C> {
16845 <Self as VpmaxubEmitter<A, B, C>>::vpmaxub(self, op0, op1, op2);
16846 }
16847 /// `VPMAXUB_MASK` (VPMAXUB).
16848 /// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16849 ///
16850 ///
16851 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
16852 ///
16853 /// Supported operand variants:
16854 ///
16855 /// ```text
16856 /// +---+---------------+
16857 /// | # | Operands |
16858 /// +---+---------------+
16859 /// | 1 | Xmm, Xmm, Mem |
16860 /// | 2 | Xmm, Xmm, Xmm |
16861 /// | 3 | Ymm, Ymm, Mem |
16862 /// | 4 | Ymm, Ymm, Ymm |
16863 /// | 5 | Zmm, Zmm, Mem |
16864 /// | 6 | Zmm, Zmm, Zmm |
16865 /// +---+---------------+
16866 /// ```
16867 #[inline]
16868 pub fn vpmaxub_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16869 where Assembler<'a>: VpmaxubMaskEmitter<A, B, C> {
16870 <Self as VpmaxubMaskEmitter<A, B, C>>::vpmaxub_mask(self, op0, op1, op2);
16871 }
16872 /// `VPMAXUB_MASKZ` (VPMAXUB).
16873 /// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16874 ///
16875 ///
16876 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
16877 ///
16878 /// Supported operand variants:
16879 ///
16880 /// ```text
16881 /// +---+---------------+
16882 /// | # | Operands |
16883 /// +---+---------------+
16884 /// | 1 | Xmm, Xmm, Mem |
16885 /// | 2 | Xmm, Xmm, Xmm |
16886 /// | 3 | Ymm, Ymm, Mem |
16887 /// | 4 | Ymm, Ymm, Ymm |
16888 /// | 5 | Zmm, Zmm, Mem |
16889 /// | 6 | Zmm, Zmm, Zmm |
16890 /// +---+---------------+
16891 /// ```
16892 #[inline]
16893 pub fn vpmaxub_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16894 where Assembler<'a>: VpmaxubMaskzEmitter<A, B, C> {
16895 <Self as VpmaxubMaskzEmitter<A, B, C>>::vpmaxub_maskz(self, op0, op1, op2);
16896 }
16897 /// `VPMAXUW` (VPMAXUW).
16898 /// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16899 ///
16900 ///
16901 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
16902 ///
16903 /// Supported operand variants:
16904 ///
16905 /// ```text
16906 /// +---+---------------+
16907 /// | # | Operands |
16908 /// +---+---------------+
16909 /// | 1 | Xmm, Xmm, Mem |
16910 /// | 2 | Xmm, Xmm, Xmm |
16911 /// | 3 | Ymm, Ymm, Mem |
16912 /// | 4 | Ymm, Ymm, Ymm |
16913 /// | 5 | Zmm, Zmm, Mem |
16914 /// | 6 | Zmm, Zmm, Zmm |
16915 /// +---+---------------+
16916 /// ```
16917 #[inline]
16918 pub fn vpmaxuw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16919 where Assembler<'a>: VpmaxuwEmitter<A, B, C> {
16920 <Self as VpmaxuwEmitter<A, B, C>>::vpmaxuw(self, op0, op1, op2);
16921 }
16922 /// `VPMAXUW_MASK` (VPMAXUW).
16923 /// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16924 ///
16925 ///
16926 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
16927 ///
16928 /// Supported operand variants:
16929 ///
16930 /// ```text
16931 /// +---+---------------+
16932 /// | # | Operands |
16933 /// +---+---------------+
16934 /// | 1 | Xmm, Xmm, Mem |
16935 /// | 2 | Xmm, Xmm, Xmm |
16936 /// | 3 | Ymm, Ymm, Mem |
16937 /// | 4 | Ymm, Ymm, Ymm |
16938 /// | 5 | Zmm, Zmm, Mem |
16939 /// | 6 | Zmm, Zmm, Zmm |
16940 /// +---+---------------+
16941 /// ```
16942 #[inline]
16943 pub fn vpmaxuw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16944 where Assembler<'a>: VpmaxuwMaskEmitter<A, B, C> {
16945 <Self as VpmaxuwMaskEmitter<A, B, C>>::vpmaxuw_mask(self, op0, op1, op2);
16946 }
16947 /// `VPMAXUW_MASKZ` (VPMAXUW).
16948 /// Performs a SIMD compare of the packed unsigned byte, word integers in the second source operand and the first source operand and returns the maximum value for each pair of integers to the destination operand.
16949 ///
16950 ///
16951 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMAXUB%3APMAXUW.html).
16952 ///
16953 /// Supported operand variants:
16954 ///
16955 /// ```text
16956 /// +---+---------------+
16957 /// | # | Operands |
16958 /// +---+---------------+
16959 /// | 1 | Xmm, Xmm, Mem |
16960 /// | 2 | Xmm, Xmm, Xmm |
16961 /// | 3 | Ymm, Ymm, Mem |
16962 /// | 4 | Ymm, Ymm, Ymm |
16963 /// | 5 | Zmm, Zmm, Mem |
16964 /// | 6 | Zmm, Zmm, Zmm |
16965 /// +---+---------------+
16966 /// ```
16967 #[inline]
16968 pub fn vpmaxuw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16969 where Assembler<'a>: VpmaxuwMaskzEmitter<A, B, C> {
16970 <Self as VpmaxuwMaskzEmitter<A, B, C>>::vpmaxuw_maskz(self, op0, op1, op2);
16971 }
16972 /// `VPMINSB` (VPMINSB).
16973 /// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
16974 ///
16975 ///
16976 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
16977 ///
16978 /// Supported operand variants:
16979 ///
16980 /// ```text
16981 /// +---+---------------+
16982 /// | # | Operands |
16983 /// +---+---------------+
16984 /// | 1 | Xmm, Xmm, Mem |
16985 /// | 2 | Xmm, Xmm, Xmm |
16986 /// | 3 | Ymm, Ymm, Mem |
16987 /// | 4 | Ymm, Ymm, Ymm |
16988 /// | 5 | Zmm, Zmm, Mem |
16989 /// | 6 | Zmm, Zmm, Zmm |
16990 /// +---+---------------+
16991 /// ```
16992 #[inline]
16993 pub fn vpminsb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
16994 where Assembler<'a>: VpminsbEmitter<A, B, C> {
16995 <Self as VpminsbEmitter<A, B, C>>::vpminsb(self, op0, op1, op2);
16996 }
16997 /// `VPMINSB_MASK` (VPMINSB).
16998 /// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
16999 ///
17000 ///
17001 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
17002 ///
17003 /// Supported operand variants:
17004 ///
17005 /// ```text
17006 /// +---+---------------+
17007 /// | # | Operands |
17008 /// +---+---------------+
17009 /// | 1 | Xmm, Xmm, Mem |
17010 /// | 2 | Xmm, Xmm, Xmm |
17011 /// | 3 | Ymm, Ymm, Mem |
17012 /// | 4 | Ymm, Ymm, Ymm |
17013 /// | 5 | Zmm, Zmm, Mem |
17014 /// | 6 | Zmm, Zmm, Zmm |
17015 /// +---+---------------+
17016 /// ```
17017 #[inline]
17018 pub fn vpminsb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17019 where Assembler<'a>: VpminsbMaskEmitter<A, B, C> {
17020 <Self as VpminsbMaskEmitter<A, B, C>>::vpminsb_mask(self, op0, op1, op2);
17021 }
17022 /// `VPMINSB_MASKZ` (VPMINSB).
17023 /// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17024 ///
17025 ///
17026 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
17027 ///
17028 /// Supported operand variants:
17029 ///
17030 /// ```text
17031 /// +---+---------------+
17032 /// | # | Operands |
17033 /// +---+---------------+
17034 /// | 1 | Xmm, Xmm, Mem |
17035 /// | 2 | Xmm, Xmm, Xmm |
17036 /// | 3 | Ymm, Ymm, Mem |
17037 /// | 4 | Ymm, Ymm, Ymm |
17038 /// | 5 | Zmm, Zmm, Mem |
17039 /// | 6 | Zmm, Zmm, Zmm |
17040 /// +---+---------------+
17041 /// ```
17042 #[inline]
17043 pub fn vpminsb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17044 where Assembler<'a>: VpminsbMaskzEmitter<A, B, C> {
17045 <Self as VpminsbMaskzEmitter<A, B, C>>::vpminsb_maskz(self, op0, op1, op2);
17046 }
17047 /// `VPMINSW` (VPMINSW).
17048 /// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17049 ///
17050 ///
17051 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
17052 ///
17053 /// Supported operand variants:
17054 ///
17055 /// ```text
17056 /// +---+---------------+
17057 /// | # | Operands |
17058 /// +---+---------------+
17059 /// | 1 | Xmm, Xmm, Mem |
17060 /// | 2 | Xmm, Xmm, Xmm |
17061 /// | 3 | Ymm, Ymm, Mem |
17062 /// | 4 | Ymm, Ymm, Ymm |
17063 /// | 5 | Zmm, Zmm, Mem |
17064 /// | 6 | Zmm, Zmm, Zmm |
17065 /// +---+---------------+
17066 /// ```
17067 #[inline]
17068 pub fn vpminsw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17069 where Assembler<'a>: VpminswEmitter<A, B, C> {
17070 <Self as VpminswEmitter<A, B, C>>::vpminsw(self, op0, op1, op2);
17071 }
17072 /// `VPMINSW_MASK` (VPMINSW).
17073 /// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17074 ///
17075 ///
17076 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
17077 ///
17078 /// Supported operand variants:
17079 ///
17080 /// ```text
17081 /// +---+---------------+
17082 /// | # | Operands |
17083 /// +---+---------------+
17084 /// | 1 | Xmm, Xmm, Mem |
17085 /// | 2 | Xmm, Xmm, Xmm |
17086 /// | 3 | Ymm, Ymm, Mem |
17087 /// | 4 | Ymm, Ymm, Ymm |
17088 /// | 5 | Zmm, Zmm, Mem |
17089 /// | 6 | Zmm, Zmm, Zmm |
17090 /// +---+---------------+
17091 /// ```
17092 #[inline]
17093 pub fn vpminsw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17094 where Assembler<'a>: VpminswMaskEmitter<A, B, C> {
17095 <Self as VpminswMaskEmitter<A, B, C>>::vpminsw_mask(self, op0, op1, op2);
17096 }
17097 /// `VPMINSW_MASKZ` (VPMINSW).
17098 /// Performs a SIMD compare of the packed signed byte, word, or dword integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17099 ///
17100 ///
17101 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINSB%3APMINSW.html).
17102 ///
17103 /// Supported operand variants:
17104 ///
17105 /// ```text
17106 /// +---+---------------+
17107 /// | # | Operands |
17108 /// +---+---------------+
17109 /// | 1 | Xmm, Xmm, Mem |
17110 /// | 2 | Xmm, Xmm, Xmm |
17111 /// | 3 | Ymm, Ymm, Mem |
17112 /// | 4 | Ymm, Ymm, Ymm |
17113 /// | 5 | Zmm, Zmm, Mem |
17114 /// | 6 | Zmm, Zmm, Zmm |
17115 /// +---+---------------+
17116 /// ```
17117 #[inline]
17118 pub fn vpminsw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17119 where Assembler<'a>: VpminswMaskzEmitter<A, B, C> {
17120 <Self as VpminswMaskzEmitter<A, B, C>>::vpminsw_maskz(self, op0, op1, op2);
17121 }
17122 /// `VPMINUB` (VPMINUB).
17123 /// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17124 ///
17125 ///
17126 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
17127 ///
17128 /// Supported operand variants:
17129 ///
17130 /// ```text
17131 /// +---+---------------+
17132 /// | # | Operands |
17133 /// +---+---------------+
17134 /// | 1 | Xmm, Xmm, Mem |
17135 /// | 2 | Xmm, Xmm, Xmm |
17136 /// | 3 | Ymm, Ymm, Mem |
17137 /// | 4 | Ymm, Ymm, Ymm |
17138 /// | 5 | Zmm, Zmm, Mem |
17139 /// | 6 | Zmm, Zmm, Zmm |
17140 /// +---+---------------+
17141 /// ```
17142 #[inline]
17143 pub fn vpminub<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17144 where Assembler<'a>: VpminubEmitter<A, B, C> {
17145 <Self as VpminubEmitter<A, B, C>>::vpminub(self, op0, op1, op2);
17146 }
17147 /// `VPMINUB_MASK` (VPMINUB).
17148 /// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17149 ///
17150 ///
17151 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
17152 ///
17153 /// Supported operand variants:
17154 ///
17155 /// ```text
17156 /// +---+---------------+
17157 /// | # | Operands |
17158 /// +---+---------------+
17159 /// | 1 | Xmm, Xmm, Mem |
17160 /// | 2 | Xmm, Xmm, Xmm |
17161 /// | 3 | Ymm, Ymm, Mem |
17162 /// | 4 | Ymm, Ymm, Ymm |
17163 /// | 5 | Zmm, Zmm, Mem |
17164 /// | 6 | Zmm, Zmm, Zmm |
17165 /// +---+---------------+
17166 /// ```
17167 #[inline]
17168 pub fn vpminub_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17169 where Assembler<'a>: VpminubMaskEmitter<A, B, C> {
17170 <Self as VpminubMaskEmitter<A, B, C>>::vpminub_mask(self, op0, op1, op2);
17171 }
17172 /// `VPMINUB_MASKZ` (VPMINUB).
17173 /// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17174 ///
17175 ///
17176 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
17177 ///
17178 /// Supported operand variants:
17179 ///
17180 /// ```text
17181 /// +---+---------------+
17182 /// | # | Operands |
17183 /// +---+---------------+
17184 /// | 1 | Xmm, Xmm, Mem |
17185 /// | 2 | Xmm, Xmm, Xmm |
17186 /// | 3 | Ymm, Ymm, Mem |
17187 /// | 4 | Ymm, Ymm, Ymm |
17188 /// | 5 | Zmm, Zmm, Mem |
17189 /// | 6 | Zmm, Zmm, Zmm |
17190 /// +---+---------------+
17191 /// ```
17192 #[inline]
17193 pub fn vpminub_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17194 where Assembler<'a>: VpminubMaskzEmitter<A, B, C> {
17195 <Self as VpminubMaskzEmitter<A, B, C>>::vpminub_maskz(self, op0, op1, op2);
17196 }
17197 /// `VPMINUW` (VPMINUW).
17198 /// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17199 ///
17200 ///
17201 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
17202 ///
17203 /// Supported operand variants:
17204 ///
17205 /// ```text
17206 /// +---+---------------+
17207 /// | # | Operands |
17208 /// +---+---------------+
17209 /// | 1 | Xmm, Xmm, Mem |
17210 /// | 2 | Xmm, Xmm, Xmm |
17211 /// | 3 | Ymm, Ymm, Mem |
17212 /// | 4 | Ymm, Ymm, Ymm |
17213 /// | 5 | Zmm, Zmm, Mem |
17214 /// | 6 | Zmm, Zmm, Zmm |
17215 /// +---+---------------+
17216 /// ```
17217 #[inline]
17218 pub fn vpminuw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17219 where Assembler<'a>: VpminuwEmitter<A, B, C> {
17220 <Self as VpminuwEmitter<A, B, C>>::vpminuw(self, op0, op1, op2);
17221 }
17222 /// `VPMINUW_MASK` (VPMINUW).
17223 /// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17224 ///
17225 ///
17226 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
17227 ///
17228 /// Supported operand variants:
17229 ///
17230 /// ```text
17231 /// +---+---------------+
17232 /// | # | Operands |
17233 /// +---+---------------+
17234 /// | 1 | Xmm, Xmm, Mem |
17235 /// | 2 | Xmm, Xmm, Xmm |
17236 /// | 3 | Ymm, Ymm, Mem |
17237 /// | 4 | Ymm, Ymm, Ymm |
17238 /// | 5 | Zmm, Zmm, Mem |
17239 /// | 6 | Zmm, Zmm, Zmm |
17240 /// +---+---------------+
17241 /// ```
17242 #[inline]
17243 pub fn vpminuw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17244 where Assembler<'a>: VpminuwMaskEmitter<A, B, C> {
17245 <Self as VpminuwMaskEmitter<A, B, C>>::vpminuw_mask(self, op0, op1, op2);
17246 }
17247 /// `VPMINUW_MASKZ` (VPMINUW).
17248 /// Performs a SIMD compare of the packed unsigned byte or word integers in the second source operand and the first source operand and returns the minimum value for each pair of integers to the destination operand.
17249 ///
17250 ///
17251 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMINUB%3APMINUW.html).
17252 ///
17253 /// Supported operand variants:
17254 ///
17255 /// ```text
17256 /// +---+---------------+
17257 /// | # | Operands |
17258 /// +---+---------------+
17259 /// | 1 | Xmm, Xmm, Mem |
17260 /// | 2 | Xmm, Xmm, Xmm |
17261 /// | 3 | Ymm, Ymm, Mem |
17262 /// | 4 | Ymm, Ymm, Ymm |
17263 /// | 5 | Zmm, Zmm, Mem |
17264 /// | 6 | Zmm, Zmm, Zmm |
17265 /// +---+---------------+
17266 /// ```
17267 #[inline]
17268 pub fn vpminuw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17269 where Assembler<'a>: VpminuwMaskzEmitter<A, B, C> {
17270 <Self as VpminuwMaskzEmitter<A, B, C>>::vpminuw_maskz(self, op0, op1, op2);
17271 }
17272 /// `VPMOVB2M` (VPMOVB2M).
17273 /// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
17274 ///
17275 ///
17276 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
17277 ///
17278 /// Supported operand variants:
17279 ///
17280 /// ```text
17281 /// +---+-----------+
17282 /// | # | Operands |
17283 /// +---+-----------+
17284 /// | 1 | KReg, Xmm |
17285 /// | 2 | KReg, Ymm |
17286 /// | 3 | KReg, Zmm |
17287 /// +---+-----------+
17288 /// ```
17289 #[inline]
17290 pub fn vpmovb2m<A, B>(&mut self, op0: A, op1: B)
17291 where Assembler<'a>: Vpmovb2mEmitter<A, B> {
17292 <Self as Vpmovb2mEmitter<A, B>>::vpmovb2m(self, op0, op1);
17293 }
17294 /// `VPMOVM2B` (VPMOVM2B).
17295 /// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
17296 ///
17297 ///
17298 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
17299 ///
17300 /// Supported operand variants:
17301 ///
17302 /// ```text
17303 /// +---+-----------+
17304 /// | # | Operands |
17305 /// +---+-----------+
17306 /// | 1 | Xmm, KReg |
17307 /// | 2 | Ymm, KReg |
17308 /// | 3 | Zmm, KReg |
17309 /// +---+-----------+
17310 /// ```
17311 #[inline]
17312 pub fn vpmovm2b<A, B>(&mut self, op0: A, op1: B)
17313 where Assembler<'a>: Vpmovm2bEmitter<A, B> {
17314 <Self as Vpmovm2bEmitter<A, B>>::vpmovm2b(self, op0, op1);
17315 }
17316 /// `VPMOVM2W` (VPMOVM2W).
17317 /// Converts a mask register to a vector register. Each element in the destination register is set to all 1’s or all 0’s depending on the value of the corresponding bit in the source mask register.
17318 ///
17319 ///
17320 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVM2B%3AVPMOVM2W%3AVPMOVM2D%3AVPMOVM2Q.html).
17321 ///
17322 /// Supported operand variants:
17323 ///
17324 /// ```text
17325 /// +---+-----------+
17326 /// | # | Operands |
17327 /// +---+-----------+
17328 /// | 1 | Xmm, KReg |
17329 /// | 2 | Ymm, KReg |
17330 /// | 3 | Zmm, KReg |
17331 /// +---+-----------+
17332 /// ```
17333 #[inline]
17334 pub fn vpmovm2w<A, B>(&mut self, op0: A, op1: B)
17335 where Assembler<'a>: Vpmovm2wEmitter<A, B> {
17336 <Self as Vpmovm2wEmitter<A, B>>::vpmovm2w(self, op0, op1);
17337 }
17338 /// `VPMOVSWB` (VPMOVSWB).
17339 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17340 ///
17341 ///
17342 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17343 ///
17344 /// Supported operand variants:
17345 ///
17346 /// ```text
17347 /// +---+----------+
17348 /// | # | Operands |
17349 /// +---+----------+
17350 /// | 1 | Mem, Xmm |
17351 /// | 2 | Mem, Ymm |
17352 /// | 3 | Mem, Zmm |
17353 /// | 4 | Xmm, Xmm |
17354 /// | 5 | Xmm, Ymm |
17355 /// | 6 | Ymm, Zmm |
17356 /// +---+----------+
17357 /// ```
17358 #[inline]
17359 pub fn vpmovswb<A, B>(&mut self, op0: A, op1: B)
17360 where Assembler<'a>: VpmovswbEmitter<A, B> {
17361 <Self as VpmovswbEmitter<A, B>>::vpmovswb(self, op0, op1);
17362 }
17363 /// `VPMOVSWB_MASK` (VPMOVSWB).
17364 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17365 ///
17366 ///
17367 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17368 ///
17369 /// Supported operand variants:
17370 ///
17371 /// ```text
17372 /// +---+----------+
17373 /// | # | Operands |
17374 /// +---+----------+
17375 /// | 1 | Mem, Xmm |
17376 /// | 2 | Mem, Ymm |
17377 /// | 3 | Mem, Zmm |
17378 /// | 4 | Xmm, Xmm |
17379 /// | 5 | Xmm, Ymm |
17380 /// | 6 | Ymm, Zmm |
17381 /// +---+----------+
17382 /// ```
17383 #[inline]
17384 pub fn vpmovswb_mask<A, B>(&mut self, op0: A, op1: B)
17385 where Assembler<'a>: VpmovswbMaskEmitter<A, B> {
17386 <Self as VpmovswbMaskEmitter<A, B>>::vpmovswb_mask(self, op0, op1);
17387 }
17388 /// `VPMOVSWB_MASKZ` (VPMOVSWB).
17389 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17390 ///
17391 ///
17392 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17393 ///
17394 /// Supported operand variants:
17395 ///
17396 /// ```text
17397 /// +---+----------+
17398 /// | # | Operands |
17399 /// +---+----------+
17400 /// | 1 | Xmm, Xmm |
17401 /// | 2 | Xmm, Ymm |
17402 /// | 3 | Ymm, Zmm |
17403 /// +---+----------+
17404 /// ```
17405 #[inline]
17406 pub fn vpmovswb_maskz<A, B>(&mut self, op0: A, op1: B)
17407 where Assembler<'a>: VpmovswbMaskzEmitter<A, B> {
17408 <Self as VpmovswbMaskzEmitter<A, B>>::vpmovswb_maskz(self, op0, op1);
17409 }
17410 /// `VPMOVUSWB` (VPMOVUSWB).
17411 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17412 ///
17413 ///
17414 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17415 ///
17416 /// Supported operand variants:
17417 ///
17418 /// ```text
17419 /// +---+----------+
17420 /// | # | Operands |
17421 /// +---+----------+
17422 /// | 1 | Mem, Xmm |
17423 /// | 2 | Mem, Ymm |
17424 /// | 3 | Mem, Zmm |
17425 /// | 4 | Xmm, Xmm |
17426 /// | 5 | Xmm, Ymm |
17427 /// | 6 | Ymm, Zmm |
17428 /// +---+----------+
17429 /// ```
17430 #[inline]
17431 pub fn vpmovuswb<A, B>(&mut self, op0: A, op1: B)
17432 where Assembler<'a>: VpmovuswbEmitter<A, B> {
17433 <Self as VpmovuswbEmitter<A, B>>::vpmovuswb(self, op0, op1);
17434 }
17435 /// `VPMOVUSWB_MASK` (VPMOVUSWB).
17436 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17437 ///
17438 ///
17439 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17440 ///
17441 /// Supported operand variants:
17442 ///
17443 /// ```text
17444 /// +---+----------+
17445 /// | # | Operands |
17446 /// +---+----------+
17447 /// | 1 | Mem, Xmm |
17448 /// | 2 | Mem, Ymm |
17449 /// | 3 | Mem, Zmm |
17450 /// | 4 | Xmm, Xmm |
17451 /// | 5 | Xmm, Ymm |
17452 /// | 6 | Ymm, Zmm |
17453 /// +---+----------+
17454 /// ```
17455 #[inline]
17456 pub fn vpmovuswb_mask<A, B>(&mut self, op0: A, op1: B)
17457 where Assembler<'a>: VpmovuswbMaskEmitter<A, B> {
17458 <Self as VpmovuswbMaskEmitter<A, B>>::vpmovuswb_mask(self, op0, op1);
17459 }
17460 /// `VPMOVUSWB_MASKZ` (VPMOVUSWB).
17461 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17462 ///
17463 ///
17464 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17465 ///
17466 /// Supported operand variants:
17467 ///
17468 /// ```text
17469 /// +---+----------+
17470 /// | # | Operands |
17471 /// +---+----------+
17472 /// | 1 | Xmm, Xmm |
17473 /// | 2 | Xmm, Ymm |
17474 /// | 3 | Ymm, Zmm |
17475 /// +---+----------+
17476 /// ```
17477 #[inline]
17478 pub fn vpmovuswb_maskz<A, B>(&mut self, op0: A, op1: B)
17479 where Assembler<'a>: VpmovuswbMaskzEmitter<A, B> {
17480 <Self as VpmovuswbMaskzEmitter<A, B>>::vpmovuswb_maskz(self, op0, op1);
17481 }
17482 /// `VPMOVW2M` (VPMOVW2M).
17483 /// Converts a vector register to a mask register. Each element in the destination register is set to 1 or 0 depending on the value of most significant bit of the corresponding element in the source register.
17484 ///
17485 ///
17486 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVB2M%3AVPMOVW2M%3AVPMOVD2M%3AVPMOVQ2M.html).
17487 ///
17488 /// Supported operand variants:
17489 ///
17490 /// ```text
17491 /// +---+-----------+
17492 /// | # | Operands |
17493 /// +---+-----------+
17494 /// | 1 | KReg, Xmm |
17495 /// | 2 | KReg, Ymm |
17496 /// | 3 | KReg, Zmm |
17497 /// +---+-----------+
17498 /// ```
17499 #[inline]
17500 pub fn vpmovw2m<A, B>(&mut self, op0: A, op1: B)
17501 where Assembler<'a>: Vpmovw2mEmitter<A, B> {
17502 <Self as Vpmovw2mEmitter<A, B>>::vpmovw2m(self, op0, op1);
17503 }
17504 /// `VPMOVWB` (VPMOVWB).
17505 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17506 ///
17507 ///
17508 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17509 ///
17510 /// Supported operand variants:
17511 ///
17512 /// ```text
17513 /// +---+----------+
17514 /// | # | Operands |
17515 /// +---+----------+
17516 /// | 1 | Mem, Xmm |
17517 /// | 2 | Mem, Ymm |
17518 /// | 3 | Mem, Zmm |
17519 /// | 4 | Xmm, Xmm |
17520 /// | 5 | Xmm, Ymm |
17521 /// | 6 | Ymm, Zmm |
17522 /// +---+----------+
17523 /// ```
17524 #[inline]
17525 pub fn vpmovwb<A, B>(&mut self, op0: A, op1: B)
17526 where Assembler<'a>: VpmovwbEmitter<A, B> {
17527 <Self as VpmovwbEmitter<A, B>>::vpmovwb(self, op0, op1);
17528 }
17529 /// `VPMOVWB_MASK` (VPMOVWB).
17530 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17531 ///
17532 ///
17533 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17534 ///
17535 /// Supported operand variants:
17536 ///
17537 /// ```text
17538 /// +---+----------+
17539 /// | # | Operands |
17540 /// +---+----------+
17541 /// | 1 | Mem, Xmm |
17542 /// | 2 | Mem, Ymm |
17543 /// | 3 | Mem, Zmm |
17544 /// | 4 | Xmm, Xmm |
17545 /// | 5 | Xmm, Ymm |
17546 /// | 6 | Ymm, Zmm |
17547 /// +---+----------+
17548 /// ```
17549 #[inline]
17550 pub fn vpmovwb_mask<A, B>(&mut self, op0: A, op1: B)
17551 where Assembler<'a>: VpmovwbMaskEmitter<A, B> {
17552 <Self as VpmovwbMaskEmitter<A, B>>::vpmovwb_mask(self, op0, op1);
17553 }
17554 /// `VPMOVWB_MASKZ` (VPMOVWB).
17555 /// VPMOVWB down converts 16-bit integers into packed bytes using truncation. VPMOVSWB converts signed 16-bit integers into packed signed bytes using signed saturation. VPMOVUSWB convert unsigned word values into unsigned byte values using unsigned saturation.
17556 ///
17557 ///
17558 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPMOVWB%3AVPMOVSWB%3AVPMOVUSWB.html).
17559 ///
17560 /// Supported operand variants:
17561 ///
17562 /// ```text
17563 /// +---+----------+
17564 /// | # | Operands |
17565 /// +---+----------+
17566 /// | 1 | Xmm, Xmm |
17567 /// | 2 | Xmm, Ymm |
17568 /// | 3 | Ymm, Zmm |
17569 /// +---+----------+
17570 /// ```
17571 #[inline]
17572 pub fn vpmovwb_maskz<A, B>(&mut self, op0: A, op1: B)
17573 where Assembler<'a>: VpmovwbMaskzEmitter<A, B> {
17574 <Self as VpmovwbMaskzEmitter<A, B>>::vpmovwb_maskz(self, op0, op1);
17575 }
17576 /// `VPMULHRSW` (VPMULHRSW).
17577 /// PMULHRSW multiplies vertically each signed 16-bit integer from the destination operand (first operand) with the corresponding signed 16-bit integer of the source operand (second operand), producing intermediate, signed 32-bit integers. Each intermediate 32-bit integer is truncated to the 18 most significant bits. Rounding is always performed by adding 1 to the least significant bit of the 18-bit intermediate result. The final result is obtained by selecting the 16 bits immediately to the right of the most significant bit of each 18-bit intermediate result and packed to the destination operand.
17578 ///
17579 ///
17580 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHRSW.html).
17581 ///
17582 /// Supported operand variants:
17583 ///
17584 /// ```text
17585 /// +---+---------------+
17586 /// | # | Operands |
17587 /// +---+---------------+
17588 /// | 1 | Xmm, Xmm, Mem |
17589 /// | 2 | Xmm, Xmm, Xmm |
17590 /// | 3 | Ymm, Ymm, Mem |
17591 /// | 4 | Ymm, Ymm, Ymm |
17592 /// | 5 | Zmm, Zmm, Mem |
17593 /// | 6 | Zmm, Zmm, Zmm |
17594 /// +---+---------------+
17595 /// ```
17596 #[inline]
17597 pub fn vpmulhrsw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17598 where Assembler<'a>: VpmulhrswEmitter<A, B, C> {
17599 <Self as VpmulhrswEmitter<A, B, C>>::vpmulhrsw(self, op0, op1, op2);
17600 }
17601 /// `VPMULHRSW_MASK` (VPMULHRSW).
17602 /// PMULHRSW multiplies vertically each signed 16-bit integer from the destination operand (first operand) with the corresponding signed 16-bit integer of the source operand (second operand), producing intermediate, signed 32-bit integers. Each intermediate 32-bit integer is truncated to the 18 most significant bits. Rounding is always performed by adding 1 to the least significant bit of the 18-bit intermediate result. The final result is obtained by selecting the 16 bits immediately to the right of the most significant bit of each 18-bit intermediate result and packed to the destination operand.
17603 ///
17604 ///
17605 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHRSW.html).
17606 ///
17607 /// Supported operand variants:
17608 ///
17609 /// ```text
17610 /// +---+---------------+
17611 /// | # | Operands |
17612 /// +---+---------------+
17613 /// | 1 | Xmm, Xmm, Mem |
17614 /// | 2 | Xmm, Xmm, Xmm |
17615 /// | 3 | Ymm, Ymm, Mem |
17616 /// | 4 | Ymm, Ymm, Ymm |
17617 /// | 5 | Zmm, Zmm, Mem |
17618 /// | 6 | Zmm, Zmm, Zmm |
17619 /// +---+---------------+
17620 /// ```
17621 #[inline]
17622 pub fn vpmulhrsw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17623 where Assembler<'a>: VpmulhrswMaskEmitter<A, B, C> {
17624 <Self as VpmulhrswMaskEmitter<A, B, C>>::vpmulhrsw_mask(self, op0, op1, op2);
17625 }
17626 /// `VPMULHRSW_MASKZ` (VPMULHRSW).
17627 /// PMULHRSW multiplies vertically each signed 16-bit integer from the destination operand (first operand) with the corresponding signed 16-bit integer of the source operand (second operand), producing intermediate, signed 32-bit integers. Each intermediate 32-bit integer is truncated to the 18 most significant bits. Rounding is always performed by adding 1 to the least significant bit of the 18-bit intermediate result. The final result is obtained by selecting the 16 bits immediately to the right of the most significant bit of each 18-bit intermediate result and packed to the destination operand.
17628 ///
17629 ///
17630 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHRSW.html).
17631 ///
17632 /// Supported operand variants:
17633 ///
17634 /// ```text
17635 /// +---+---------------+
17636 /// | # | Operands |
17637 /// +---+---------------+
17638 /// | 1 | Xmm, Xmm, Mem |
17639 /// | 2 | Xmm, Xmm, Xmm |
17640 /// | 3 | Ymm, Ymm, Mem |
17641 /// | 4 | Ymm, Ymm, Ymm |
17642 /// | 5 | Zmm, Zmm, Mem |
17643 /// | 6 | Zmm, Zmm, Zmm |
17644 /// +---+---------------+
17645 /// ```
17646 #[inline]
17647 pub fn vpmulhrsw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17648 where Assembler<'a>: VpmulhrswMaskzEmitter<A, B, C> {
17649 <Self as VpmulhrswMaskzEmitter<A, B, C>>::vpmulhrsw_maskz(self, op0, op1, op2);
17650 }
17651 /// `VPMULHUW` (VPMULHUW).
17652 /// Performs a SIMD unsigned multiply of the packed unsigned word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each 32-bit intermediate results in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17653 ///
17654 ///
17655 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHUW.html).
17656 ///
17657 /// Supported operand variants:
17658 ///
17659 /// ```text
17660 /// +---+---------------+
17661 /// | # | Operands |
17662 /// +---+---------------+
17663 /// | 1 | Xmm, Xmm, Mem |
17664 /// | 2 | Xmm, Xmm, Xmm |
17665 /// | 3 | Ymm, Ymm, Mem |
17666 /// | 4 | Ymm, Ymm, Ymm |
17667 /// | 5 | Zmm, Zmm, Mem |
17668 /// | 6 | Zmm, Zmm, Zmm |
17669 /// +---+---------------+
17670 /// ```
17671 #[inline]
17672 pub fn vpmulhuw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17673 where Assembler<'a>: VpmulhuwEmitter<A, B, C> {
17674 <Self as VpmulhuwEmitter<A, B, C>>::vpmulhuw(self, op0, op1, op2);
17675 }
17676 /// `VPMULHUW_MASK` (VPMULHUW).
17677 /// Performs a SIMD unsigned multiply of the packed unsigned word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each 32-bit intermediate results in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17678 ///
17679 ///
17680 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHUW.html).
17681 ///
17682 /// Supported operand variants:
17683 ///
17684 /// ```text
17685 /// +---+---------------+
17686 /// | # | Operands |
17687 /// +---+---------------+
17688 /// | 1 | Xmm, Xmm, Mem |
17689 /// | 2 | Xmm, Xmm, Xmm |
17690 /// | 3 | Ymm, Ymm, Mem |
17691 /// | 4 | Ymm, Ymm, Ymm |
17692 /// | 5 | Zmm, Zmm, Mem |
17693 /// | 6 | Zmm, Zmm, Zmm |
17694 /// +---+---------------+
17695 /// ```
17696 #[inline]
17697 pub fn vpmulhuw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17698 where Assembler<'a>: VpmulhuwMaskEmitter<A, B, C> {
17699 <Self as VpmulhuwMaskEmitter<A, B, C>>::vpmulhuw_mask(self, op0, op1, op2);
17700 }
17701 /// `VPMULHUW_MASKZ` (VPMULHUW).
17702 /// Performs a SIMD unsigned multiply of the packed unsigned word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each 32-bit intermediate results in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17703 ///
17704 ///
17705 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHUW.html).
17706 ///
17707 /// Supported operand variants:
17708 ///
17709 /// ```text
17710 /// +---+---------------+
17711 /// | # | Operands |
17712 /// +---+---------------+
17713 /// | 1 | Xmm, Xmm, Mem |
17714 /// | 2 | Xmm, Xmm, Xmm |
17715 /// | 3 | Ymm, Ymm, Mem |
17716 /// | 4 | Ymm, Ymm, Ymm |
17717 /// | 5 | Zmm, Zmm, Mem |
17718 /// | 6 | Zmm, Zmm, Zmm |
17719 /// +---+---------------+
17720 /// ```
17721 #[inline]
17722 pub fn vpmulhuw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17723 where Assembler<'a>: VpmulhuwMaskzEmitter<A, B, C> {
17724 <Self as VpmulhuwMaskzEmitter<A, B, C>>::vpmulhuw_maskz(self, op0, op1, op2);
17725 }
17726 /// `VPMULHW` (VPMULHW).
17727 /// Performs a SIMD signed multiply of the packed signed word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each intermediate 32-bit result in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17728 ///
17729 ///
17730 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHW.html).
17731 ///
17732 /// Supported operand variants:
17733 ///
17734 /// ```text
17735 /// +---+---------------+
17736 /// | # | Operands |
17737 /// +---+---------------+
17738 /// | 1 | Xmm, Xmm, Mem |
17739 /// | 2 | Xmm, Xmm, Xmm |
17740 /// | 3 | Ymm, Ymm, Mem |
17741 /// | 4 | Ymm, Ymm, Ymm |
17742 /// | 5 | Zmm, Zmm, Mem |
17743 /// | 6 | Zmm, Zmm, Zmm |
17744 /// +---+---------------+
17745 /// ```
17746 #[inline]
17747 pub fn vpmulhw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17748 where Assembler<'a>: VpmulhwEmitter<A, B, C> {
17749 <Self as VpmulhwEmitter<A, B, C>>::vpmulhw(self, op0, op1, op2);
17750 }
17751 /// `VPMULHW_MASK` (VPMULHW).
17752 /// Performs a SIMD signed multiply of the packed signed word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each intermediate 32-bit result in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17753 ///
17754 ///
17755 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHW.html).
17756 ///
17757 /// Supported operand variants:
17758 ///
17759 /// ```text
17760 /// +---+---------------+
17761 /// | # | Operands |
17762 /// +---+---------------+
17763 /// | 1 | Xmm, Xmm, Mem |
17764 /// | 2 | Xmm, Xmm, Xmm |
17765 /// | 3 | Ymm, Ymm, Mem |
17766 /// | 4 | Ymm, Ymm, Ymm |
17767 /// | 5 | Zmm, Zmm, Mem |
17768 /// | 6 | Zmm, Zmm, Zmm |
17769 /// +---+---------------+
17770 /// ```
17771 #[inline]
17772 pub fn vpmulhw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17773 where Assembler<'a>: VpmulhwMaskEmitter<A, B, C> {
17774 <Self as VpmulhwMaskEmitter<A, B, C>>::vpmulhw_mask(self, op0, op1, op2);
17775 }
17776 /// `VPMULHW_MASKZ` (VPMULHW).
17777 /// Performs a SIMD signed multiply of the packed signed word integers in the destination operand (first operand) and the source operand (second operand), and stores the high 16 bits of each intermediate 32-bit result in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17778 ///
17779 ///
17780 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULHW.html).
17781 ///
17782 /// Supported operand variants:
17783 ///
17784 /// ```text
17785 /// +---+---------------+
17786 /// | # | Operands |
17787 /// +---+---------------+
17788 /// | 1 | Xmm, Xmm, Mem |
17789 /// | 2 | Xmm, Xmm, Xmm |
17790 /// | 3 | Ymm, Ymm, Mem |
17791 /// | 4 | Ymm, Ymm, Ymm |
17792 /// | 5 | Zmm, Zmm, Mem |
17793 /// | 6 | Zmm, Zmm, Zmm |
17794 /// +---+---------------+
17795 /// ```
17796 #[inline]
17797 pub fn vpmulhw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17798 where Assembler<'a>: VpmulhwMaskzEmitter<A, B, C> {
17799 <Self as VpmulhwMaskzEmitter<A, B, C>>::vpmulhw_maskz(self, op0, op1, op2);
17800 }
17801 /// `VPMULLW` (VPMULLW).
17802 /// Performs a SIMD signed multiply of the packed signed word integers in the destination operand (first operand) and the source operand (second operand), and stores the low 16 bits of each intermediate 32-bit result in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17803 ///
17804 ///
17805 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLW.html).
17806 ///
17807 /// Supported operand variants:
17808 ///
17809 /// ```text
17810 /// +---+---------------+
17811 /// | # | Operands |
17812 /// +---+---------------+
17813 /// | 1 | Xmm, Xmm, Mem |
17814 /// | 2 | Xmm, Xmm, Xmm |
17815 /// | 3 | Ymm, Ymm, Mem |
17816 /// | 4 | Ymm, Ymm, Ymm |
17817 /// | 5 | Zmm, Zmm, Mem |
17818 /// | 6 | Zmm, Zmm, Zmm |
17819 /// +---+---------------+
17820 /// ```
17821 #[inline]
17822 pub fn vpmullw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17823 where Assembler<'a>: VpmullwEmitter<A, B, C> {
17824 <Self as VpmullwEmitter<A, B, C>>::vpmullw(self, op0, op1, op2);
17825 }
17826 /// `VPMULLW_MASK` (VPMULLW).
17827 /// Performs a SIMD signed multiply of the packed signed word integers in the destination operand (first operand) and the source operand (second operand), and stores the low 16 bits of each intermediate 32-bit result in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17828 ///
17829 ///
17830 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLW.html).
17831 ///
17832 /// Supported operand variants:
17833 ///
17834 /// ```text
17835 /// +---+---------------+
17836 /// | # | Operands |
17837 /// +---+---------------+
17838 /// | 1 | Xmm, Xmm, Mem |
17839 /// | 2 | Xmm, Xmm, Xmm |
17840 /// | 3 | Ymm, Ymm, Mem |
17841 /// | 4 | Ymm, Ymm, Ymm |
17842 /// | 5 | Zmm, Zmm, Mem |
17843 /// | 6 | Zmm, Zmm, Zmm |
17844 /// +---+---------------+
17845 /// ```
17846 #[inline]
17847 pub fn vpmullw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17848 where Assembler<'a>: VpmullwMaskEmitter<A, B, C> {
17849 <Self as VpmullwMaskEmitter<A, B, C>>::vpmullw_mask(self, op0, op1, op2);
17850 }
17851 /// `VPMULLW_MASKZ` (VPMULLW).
17852 /// Performs a SIMD signed multiply of the packed signed word integers in the destination operand (first operand) and the source operand (second operand), and stores the low 16 bits of each intermediate 32-bit result in the destination operand. (Figure 4-12 shows this operation when using 64-bit operands.)
17853 ///
17854 ///
17855 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PMULLW.html).
17856 ///
17857 /// Supported operand variants:
17858 ///
17859 /// ```text
17860 /// +---+---------------+
17861 /// | # | Operands |
17862 /// +---+---------------+
17863 /// | 1 | Xmm, Xmm, Mem |
17864 /// | 2 | Xmm, Xmm, Xmm |
17865 /// | 3 | Ymm, Ymm, Mem |
17866 /// | 4 | Ymm, Ymm, Ymm |
17867 /// | 5 | Zmm, Zmm, Mem |
17868 /// | 6 | Zmm, Zmm, Zmm |
17869 /// +---+---------------+
17870 /// ```
17871 #[inline]
17872 pub fn vpmullw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17873 where Assembler<'a>: VpmullwMaskzEmitter<A, B, C> {
17874 <Self as VpmullwMaskzEmitter<A, B, C>>::vpmullw_maskz(self, op0, op1, op2);
17875 }
17876 /// `VPSADBW` (VPSADBW).
17877 /// Computes the absolute value of the difference of 8 unsigned byte integers from the source operand (second operand) and from the destination operand (first operand). These 8 differences are then summed to produce an unsigned word integer result that is stored in the destination operand. Figure 4-14 shows the operation of the PSADBW instruction when using 64-bit operands.
17878 ///
17879 ///
17880 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSADBW.html).
17881 ///
17882 /// Supported operand variants:
17883 ///
17884 /// ```text
17885 /// +---+---------------+
17886 /// | # | Operands |
17887 /// +---+---------------+
17888 /// | 1 | Xmm, Xmm, Mem |
17889 /// | 2 | Xmm, Xmm, Xmm |
17890 /// | 3 | Ymm, Ymm, Mem |
17891 /// | 4 | Ymm, Ymm, Ymm |
17892 /// | 5 | Zmm, Zmm, Mem |
17893 /// | 6 | Zmm, Zmm, Zmm |
17894 /// +---+---------------+
17895 /// ```
17896 #[inline]
17897 pub fn vpsadbw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17898 where Assembler<'a>: VpsadbwEmitter<A, B, C> {
17899 <Self as VpsadbwEmitter<A, B, C>>::vpsadbw(self, op0, op1, op2);
17900 }
17901 /// `VPSHUFB` (VPSHUFB).
17902 /// PSHUFB performs in-place shuffles of bytes in the destination operand (the first operand) according to the shuffle control mask in the source operand (the second operand). The instruction permutes the data in the destination operand, leaving the shuffle mask unaffected. If the most significant bit (bit[7]) of each byte of the shuffle control mask is set, then constant zero is written in the result byte. Each byte in the shuffle control mask forms an index to permute the corresponding byte in the destination operand. The value of each index is the least significant 4 bits (128-bit operation) or 3 bits (64-bit operation) of the shuffle control byte. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
17903 ///
17904 ///
17905 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFB.html).
17906 ///
17907 /// Supported operand variants:
17908 ///
17909 /// ```text
17910 /// +---+---------------+
17911 /// | # | Operands |
17912 /// +---+---------------+
17913 /// | 1 | Xmm, Xmm, Mem |
17914 /// | 2 | Xmm, Xmm, Xmm |
17915 /// | 3 | Ymm, Ymm, Mem |
17916 /// | 4 | Ymm, Ymm, Ymm |
17917 /// | 5 | Zmm, Zmm, Mem |
17918 /// | 6 | Zmm, Zmm, Zmm |
17919 /// +---+---------------+
17920 /// ```
17921 #[inline]
17922 pub fn vpshufb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17923 where Assembler<'a>: VpshufbEmitter<A, B, C> {
17924 <Self as VpshufbEmitter<A, B, C>>::vpshufb(self, op0, op1, op2);
17925 }
17926 /// `VPSHUFB_MASK` (VPSHUFB).
17927 /// PSHUFB performs in-place shuffles of bytes in the destination operand (the first operand) according to the shuffle control mask in the source operand (the second operand). The instruction permutes the data in the destination operand, leaving the shuffle mask unaffected. If the most significant bit (bit[7]) of each byte of the shuffle control mask is set, then constant zero is written in the result byte. Each byte in the shuffle control mask forms an index to permute the corresponding byte in the destination operand. The value of each index is the least significant 4 bits (128-bit operation) or 3 bits (64-bit operation) of the shuffle control byte. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
17928 ///
17929 ///
17930 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFB.html).
17931 ///
17932 /// Supported operand variants:
17933 ///
17934 /// ```text
17935 /// +---+---------------+
17936 /// | # | Operands |
17937 /// +---+---------------+
17938 /// | 1 | Xmm, Xmm, Mem |
17939 /// | 2 | Xmm, Xmm, Xmm |
17940 /// | 3 | Ymm, Ymm, Mem |
17941 /// | 4 | Ymm, Ymm, Ymm |
17942 /// | 5 | Zmm, Zmm, Mem |
17943 /// | 6 | Zmm, Zmm, Zmm |
17944 /// +---+---------------+
17945 /// ```
17946 #[inline]
17947 pub fn vpshufb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17948 where Assembler<'a>: VpshufbMaskEmitter<A, B, C> {
17949 <Self as VpshufbMaskEmitter<A, B, C>>::vpshufb_mask(self, op0, op1, op2);
17950 }
17951 /// `VPSHUFB_MASKZ` (VPSHUFB).
17952 /// PSHUFB performs in-place shuffles of bytes in the destination operand (the first operand) according to the shuffle control mask in the source operand (the second operand). The instruction permutes the data in the destination operand, leaving the shuffle mask unaffected. If the most significant bit (bit[7]) of each byte of the shuffle control mask is set, then constant zero is written in the result byte. Each byte in the shuffle control mask forms an index to permute the corresponding byte in the destination operand. The value of each index is the least significant 4 bits (128-bit operation) or 3 bits (64-bit operation) of the shuffle control byte. When the source operand is a 128-bit memory operand, the operand must be aligned on a 16-byte boundary or a general-protection exception (#GP) will be generated.
17953 ///
17954 ///
17955 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFB.html).
17956 ///
17957 /// Supported operand variants:
17958 ///
17959 /// ```text
17960 /// +---+---------------+
17961 /// | # | Operands |
17962 /// +---+---------------+
17963 /// | 1 | Xmm, Xmm, Mem |
17964 /// | 2 | Xmm, Xmm, Xmm |
17965 /// | 3 | Ymm, Ymm, Mem |
17966 /// | 4 | Ymm, Ymm, Ymm |
17967 /// | 5 | Zmm, Zmm, Mem |
17968 /// | 6 | Zmm, Zmm, Zmm |
17969 /// +---+---------------+
17970 /// ```
17971 #[inline]
17972 pub fn vpshufb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17973 where Assembler<'a>: VpshufbMaskzEmitter<A, B, C> {
17974 <Self as VpshufbMaskzEmitter<A, B, C>>::vpshufb_maskz(self, op0, op1, op2);
17975 }
17976 /// `VPSHUFHW` (VPSHUFHW).
17977 /// Copies words from the high quadword of a 128-bit lane of the source operand and inserts them in the high quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. This 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the high quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3, 4) from the high quadword of the source operand to be copied to the destination operand. The low quadword of the source operand is copied to the low quadword of the destination operand, for each 128-bit lane.
17978 ///
17979 ///
17980 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFHW.html).
17981 ///
17982 /// Supported operand variants:
17983 ///
17984 /// ```text
17985 /// +---+---------------+
17986 /// | # | Operands |
17987 /// +---+---------------+
17988 /// | 1 | Xmm, Mem, Imm |
17989 /// | 2 | Xmm, Xmm, Imm |
17990 /// | 3 | Ymm, Mem, Imm |
17991 /// | 4 | Ymm, Ymm, Imm |
17992 /// | 5 | Zmm, Mem, Imm |
17993 /// | 6 | Zmm, Zmm, Imm |
17994 /// +---+---------------+
17995 /// ```
17996 #[inline]
17997 pub fn vpshufhw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
17998 where Assembler<'a>: VpshufhwEmitter<A, B, C> {
17999 <Self as VpshufhwEmitter<A, B, C>>::vpshufhw(self, op0, op1, op2);
18000 }
18001 /// `VPSHUFHW_MASK` (VPSHUFHW).
18002 /// Copies words from the high quadword of a 128-bit lane of the source operand and inserts them in the high quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. This 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the high quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3, 4) from the high quadword of the source operand to be copied to the destination operand. The low quadword of the source operand is copied to the low quadword of the destination operand, for each 128-bit lane.
18003 ///
18004 ///
18005 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFHW.html).
18006 ///
18007 /// Supported operand variants:
18008 ///
18009 /// ```text
18010 /// +---+---------------+
18011 /// | # | Operands |
18012 /// +---+---------------+
18013 /// | 1 | Xmm, Mem, Imm |
18014 /// | 2 | Xmm, Xmm, Imm |
18015 /// | 3 | Ymm, Mem, Imm |
18016 /// | 4 | Ymm, Ymm, Imm |
18017 /// | 5 | Zmm, Mem, Imm |
18018 /// | 6 | Zmm, Zmm, Imm |
18019 /// +---+---------------+
18020 /// ```
18021 #[inline]
18022 pub fn vpshufhw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18023 where Assembler<'a>: VpshufhwMaskEmitter<A, B, C> {
18024 <Self as VpshufhwMaskEmitter<A, B, C>>::vpshufhw_mask(self, op0, op1, op2);
18025 }
18026 /// `VPSHUFHW_MASKZ` (VPSHUFHW).
18027 /// Copies words from the high quadword of a 128-bit lane of the source operand and inserts them in the high quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. This 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the high quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3, 4) from the high quadword of the source operand to be copied to the destination operand. The low quadword of the source operand is copied to the low quadword of the destination operand, for each 128-bit lane.
18028 ///
18029 ///
18030 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFHW.html).
18031 ///
18032 /// Supported operand variants:
18033 ///
18034 /// ```text
18035 /// +---+---------------+
18036 /// | # | Operands |
18037 /// +---+---------------+
18038 /// | 1 | Xmm, Mem, Imm |
18039 /// | 2 | Xmm, Xmm, Imm |
18040 /// | 3 | Ymm, Mem, Imm |
18041 /// | 4 | Ymm, Ymm, Imm |
18042 /// | 5 | Zmm, Mem, Imm |
18043 /// | 6 | Zmm, Zmm, Imm |
18044 /// +---+---------------+
18045 /// ```
18046 #[inline]
18047 pub fn vpshufhw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18048 where Assembler<'a>: VpshufhwMaskzEmitter<A, B, C> {
18049 <Self as VpshufhwMaskzEmitter<A, B, C>>::vpshufhw_maskz(self, op0, op1, op2);
18050 }
18051 /// `VPSHUFLW` (VPSHUFLW).
18052 /// Copies words from the low quadword of a 128-bit lane of the source operand and inserts them in the low quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. The 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the low quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3) from the low quadword of the source operand to be copied to the destination operand. The high quadword of the source operand is copied to the high quadword of the destination operand, for each 128-bit lane.
18053 ///
18054 ///
18055 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFLW.html).
18056 ///
18057 /// Supported operand variants:
18058 ///
18059 /// ```text
18060 /// +---+---------------+
18061 /// | # | Operands |
18062 /// +---+---------------+
18063 /// | 1 | Xmm, Mem, Imm |
18064 /// | 2 | Xmm, Xmm, Imm |
18065 /// | 3 | Ymm, Mem, Imm |
18066 /// | 4 | Ymm, Ymm, Imm |
18067 /// | 5 | Zmm, Mem, Imm |
18068 /// | 6 | Zmm, Zmm, Imm |
18069 /// +---+---------------+
18070 /// ```
18071 #[inline]
18072 pub fn vpshuflw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18073 where Assembler<'a>: VpshuflwEmitter<A, B, C> {
18074 <Self as VpshuflwEmitter<A, B, C>>::vpshuflw(self, op0, op1, op2);
18075 }
18076 /// `VPSHUFLW_MASK` (VPSHUFLW).
18077 /// Copies words from the low quadword of a 128-bit lane of the source operand and inserts them in the low quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. The 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the low quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3) from the low quadword of the source operand to be copied to the destination operand. The high quadword of the source operand is copied to the high quadword of the destination operand, for each 128-bit lane.
18078 ///
18079 ///
18080 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFLW.html).
18081 ///
18082 /// Supported operand variants:
18083 ///
18084 /// ```text
18085 /// +---+---------------+
18086 /// | # | Operands |
18087 /// +---+---------------+
18088 /// | 1 | Xmm, Mem, Imm |
18089 /// | 2 | Xmm, Xmm, Imm |
18090 /// | 3 | Ymm, Mem, Imm |
18091 /// | 4 | Ymm, Ymm, Imm |
18092 /// | 5 | Zmm, Mem, Imm |
18093 /// | 6 | Zmm, Zmm, Imm |
18094 /// +---+---------------+
18095 /// ```
18096 #[inline]
18097 pub fn vpshuflw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18098 where Assembler<'a>: VpshuflwMaskEmitter<A, B, C> {
18099 <Self as VpshuflwMaskEmitter<A, B, C>>::vpshuflw_mask(self, op0, op1, op2);
18100 }
18101 /// `VPSHUFLW_MASKZ` (VPSHUFLW).
18102 /// Copies words from the low quadword of a 128-bit lane of the source operand and inserts them in the low quadword of the destination operand at word locations (of the respective lane) selected with the immediate operand. The 256-bit operation is similar to the in-lane operation used by the 256-bit VPSHUFD instruction, which is illustrated in Figure 4-16. For 128-bit operation, only the low 128-bit lane is operative. Each 2-bit field in the immediate operand selects the contents of one word location in the low quadword of the destination operand. The binary encodings of the immediate operand fields select words (0, 1, 2 or 3) from the low quadword of the source operand to be copied to the destination operand. The high quadword of the source operand is copied to the high quadword of the destination operand, for each 128-bit lane.
18103 ///
18104 ///
18105 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSHUFLW.html).
18106 ///
18107 /// Supported operand variants:
18108 ///
18109 /// ```text
18110 /// +---+---------------+
18111 /// | # | Operands |
18112 /// +---+---------------+
18113 /// | 1 | Xmm, Mem, Imm |
18114 /// | 2 | Xmm, Xmm, Imm |
18115 /// | 3 | Ymm, Mem, Imm |
18116 /// | 4 | Ymm, Ymm, Imm |
18117 /// | 5 | Zmm, Mem, Imm |
18118 /// | 6 | Zmm, Zmm, Imm |
18119 /// +---+---------------+
18120 /// ```
18121 #[inline]
18122 pub fn vpshuflw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18123 where Assembler<'a>: VpshuflwMaskzEmitter<A, B, C> {
18124 <Self as VpshuflwMaskzEmitter<A, B, C>>::vpshuflw_maskz(self, op0, op1, op2);
18125 }
18126 /// `VPSLLDQ` (VPSLLDQ).
18127 /// Shifts the destination operand (first operand) to the left by the number of bytes specified in the count operand (second operand). The empty low-order bytes are cleared (set to all 0s). If the value specified by the count operand is greater than 15, the destination operand is set to all 0s. The count operand is an 8-bit immediate.
18128 ///
18129 ///
18130 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLDQ.html).
18131 ///
18132 /// Supported operand variants:
18133 ///
18134 /// ```text
18135 /// +---+---------------+
18136 /// | # | Operands |
18137 /// +---+---------------+
18138 /// | 1 | Xmm, Mem, Imm |
18139 /// | 2 | Xmm, Xmm, Imm |
18140 /// | 3 | Ymm, Mem, Imm |
18141 /// | 4 | Ymm, Ymm, Imm |
18142 /// | 5 | Zmm, Mem, Imm |
18143 /// | 6 | Zmm, Zmm, Imm |
18144 /// +---+---------------+
18145 /// ```
18146 #[inline]
18147 pub fn vpslldq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18148 where Assembler<'a>: VpslldqEmitter<A, B, C> {
18149 <Self as VpslldqEmitter<A, B, C>>::vpslldq(self, op0, op1, op2);
18150 }
18151 /// `VPSLLVW` (VPSLLVW).
18152 /// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the left by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0).
18153 ///
18154 ///
18155 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSLLVW%3AVPSLLVD%3AVPSLLVQ.html).
18156 ///
18157 /// Supported operand variants:
18158 ///
18159 /// ```text
18160 /// +---+---------------+
18161 /// | # | Operands |
18162 /// +---+---------------+
18163 /// | 1 | Xmm, Xmm, Mem |
18164 /// | 2 | Xmm, Xmm, Xmm |
18165 /// | 3 | Ymm, Ymm, Mem |
18166 /// | 4 | Ymm, Ymm, Ymm |
18167 /// | 5 | Zmm, Zmm, Mem |
18168 /// | 6 | Zmm, Zmm, Zmm |
18169 /// +---+---------------+
18170 /// ```
18171 #[inline]
18172 pub fn vpsllvw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18173 where Assembler<'a>: VpsllvwEmitter<A, B, C> {
18174 <Self as VpsllvwEmitter<A, B, C>>::vpsllvw(self, op0, op1, op2);
18175 }
18176 /// `VPSLLVW_MASK` (VPSLLVW).
18177 /// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the left by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0).
18178 ///
18179 ///
18180 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSLLVW%3AVPSLLVD%3AVPSLLVQ.html).
18181 ///
18182 /// Supported operand variants:
18183 ///
18184 /// ```text
18185 /// +---+---------------+
18186 /// | # | Operands |
18187 /// +---+---------------+
18188 /// | 1 | Xmm, Xmm, Mem |
18189 /// | 2 | Xmm, Xmm, Xmm |
18190 /// | 3 | Ymm, Ymm, Mem |
18191 /// | 4 | Ymm, Ymm, Ymm |
18192 /// | 5 | Zmm, Zmm, Mem |
18193 /// | 6 | Zmm, Zmm, Zmm |
18194 /// +---+---------------+
18195 /// ```
18196 #[inline]
18197 pub fn vpsllvw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18198 where Assembler<'a>: VpsllvwMaskEmitter<A, B, C> {
18199 <Self as VpsllvwMaskEmitter<A, B, C>>::vpsllvw_mask(self, op0, op1, op2);
18200 }
18201 /// `VPSLLVW_MASKZ` (VPSLLVW).
18202 /// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the left by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0).
18203 ///
18204 ///
18205 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSLLVW%3AVPSLLVD%3AVPSLLVQ.html).
18206 ///
18207 /// Supported operand variants:
18208 ///
18209 /// ```text
18210 /// +---+---------------+
18211 /// | # | Operands |
18212 /// +---+---------------+
18213 /// | 1 | Xmm, Xmm, Mem |
18214 /// | 2 | Xmm, Xmm, Xmm |
18215 /// | 3 | Ymm, Ymm, Mem |
18216 /// | 4 | Ymm, Ymm, Ymm |
18217 /// | 5 | Zmm, Zmm, Mem |
18218 /// | 6 | Zmm, Zmm, Zmm |
18219 /// +---+---------------+
18220 /// ```
18221 #[inline]
18222 pub fn vpsllvw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18223 where Assembler<'a>: VpsllvwMaskzEmitter<A, B, C> {
18224 <Self as VpsllvwMaskzEmitter<A, B, C>>::vpsllvw_maskz(self, op0, op1, op2);
18225 }
18226 /// `VPSLLW` (VPSLLW).
18227 /// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the left by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-17 gives an example of shifting words in a 64-bit operand.
18228 ///
18229 ///
18230 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLW%3APSLLD%3APSLLQ.html).
18231 ///
18232 /// Supported operand variants:
18233 ///
18234 /// ```text
18235 /// +----+---------------+
18236 /// | # | Operands |
18237 /// +----+---------------+
18238 /// | 1 | Xmm, Mem, Imm |
18239 /// | 2 | Xmm, Xmm, Imm |
18240 /// | 3 | Xmm, Xmm, Mem |
18241 /// | 4 | Xmm, Xmm, Xmm |
18242 /// | 5 | Ymm, Mem, Imm |
18243 /// | 6 | Ymm, Ymm, Imm |
18244 /// | 7 | Ymm, Ymm, Mem |
18245 /// | 8 | Ymm, Ymm, Xmm |
18246 /// | 9 | Zmm, Mem, Imm |
18247 /// | 10 | Zmm, Zmm, Imm |
18248 /// | 11 | Zmm, Zmm, Mem |
18249 /// | 12 | Zmm, Zmm, Xmm |
18250 /// +----+---------------+
18251 /// ```
18252 #[inline]
18253 pub fn vpsllw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18254 where Assembler<'a>: VpsllwEmitter<A, B, C> {
18255 <Self as VpsllwEmitter<A, B, C>>::vpsllw(self, op0, op1, op2);
18256 }
18257 /// `VPSLLW_MASK` (VPSLLW).
18258 /// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the left by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-17 gives an example of shifting words in a 64-bit operand.
18259 ///
18260 ///
18261 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLW%3APSLLD%3APSLLQ.html).
18262 ///
18263 /// Supported operand variants:
18264 ///
18265 /// ```text
18266 /// +----+---------------+
18267 /// | # | Operands |
18268 /// +----+---------------+
18269 /// | 1 | Xmm, Mem, Imm |
18270 /// | 2 | Xmm, Xmm, Imm |
18271 /// | 3 | Xmm, Xmm, Mem |
18272 /// | 4 | Xmm, Xmm, Xmm |
18273 /// | 5 | Ymm, Mem, Imm |
18274 /// | 6 | Ymm, Ymm, Imm |
18275 /// | 7 | Ymm, Ymm, Mem |
18276 /// | 8 | Ymm, Ymm, Xmm |
18277 /// | 9 | Zmm, Mem, Imm |
18278 /// | 10 | Zmm, Zmm, Imm |
18279 /// | 11 | Zmm, Zmm, Mem |
18280 /// | 12 | Zmm, Zmm, Xmm |
18281 /// +----+---------------+
18282 /// ```
18283 #[inline]
18284 pub fn vpsllw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18285 where Assembler<'a>: VpsllwMaskEmitter<A, B, C> {
18286 <Self as VpsllwMaskEmitter<A, B, C>>::vpsllw_mask(self, op0, op1, op2);
18287 }
18288 /// `VPSLLW_MASKZ` (VPSLLW).
18289 /// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the left by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted left, the empty low-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-17 gives an example of shifting words in a 64-bit operand.
18290 ///
18291 ///
18292 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSLLW%3APSLLD%3APSLLQ.html).
18293 ///
18294 /// Supported operand variants:
18295 ///
18296 /// ```text
18297 /// +----+---------------+
18298 /// | # | Operands |
18299 /// +----+---------------+
18300 /// | 1 | Xmm, Mem, Imm |
18301 /// | 2 | Xmm, Xmm, Imm |
18302 /// | 3 | Xmm, Xmm, Mem |
18303 /// | 4 | Xmm, Xmm, Xmm |
18304 /// | 5 | Ymm, Mem, Imm |
18305 /// | 6 | Ymm, Ymm, Imm |
18306 /// | 7 | Ymm, Ymm, Mem |
18307 /// | 8 | Ymm, Ymm, Xmm |
18308 /// | 9 | Zmm, Mem, Imm |
18309 /// | 10 | Zmm, Zmm, Imm |
18310 /// | 11 | Zmm, Zmm, Mem |
18311 /// | 12 | Zmm, Zmm, Xmm |
18312 /// +----+---------------+
18313 /// ```
18314 #[inline]
18315 pub fn vpsllw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18316 where Assembler<'a>: VpsllwMaskzEmitter<A, B, C> {
18317 <Self as VpsllwMaskzEmitter<A, B, C>>::vpsllw_maskz(self, op0, op1, op2);
18318 }
18319 /// `VPSRAVW` (VPSRAVW).
18320 /// Shifts the bits in the individual data elements (word/doublewords/quadword) in the first source operand (the second operand) to the right by the number of bits specified in the count value of respective data elements in the second source operand (the third operand). As the bits in the data elements are shifted right, the empty high-order bits are set to the MSB (sign extension).
18321 ///
18322 ///
18323 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRAVW%3AVPSRAVD%3AVPSRAVQ.html).
18324 ///
18325 /// Supported operand variants:
18326 ///
18327 /// ```text
18328 /// +---+---------------+
18329 /// | # | Operands |
18330 /// +---+---------------+
18331 /// | 1 | Xmm, Xmm, Mem |
18332 /// | 2 | Xmm, Xmm, Xmm |
18333 /// | 3 | Ymm, Ymm, Mem |
18334 /// | 4 | Ymm, Ymm, Ymm |
18335 /// | 5 | Zmm, Zmm, Mem |
18336 /// | 6 | Zmm, Zmm, Zmm |
18337 /// +---+---------------+
18338 /// ```
18339 #[inline]
18340 pub fn vpsravw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18341 where Assembler<'a>: VpsravwEmitter<A, B, C> {
18342 <Self as VpsravwEmitter<A, B, C>>::vpsravw(self, op0, op1, op2);
18343 }
18344 /// `VPSRAVW_MASK` (VPSRAVW).
18345 /// Shifts the bits in the individual data elements (word/doublewords/quadword) in the first source operand (the second operand) to the right by the number of bits specified in the count value of respective data elements in the second source operand (the third operand). As the bits in the data elements are shifted right, the empty high-order bits are set to the MSB (sign extension).
18346 ///
18347 ///
18348 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRAVW%3AVPSRAVD%3AVPSRAVQ.html).
18349 ///
18350 /// Supported operand variants:
18351 ///
18352 /// ```text
18353 /// +---+---------------+
18354 /// | # | Operands |
18355 /// +---+---------------+
18356 /// | 1 | Xmm, Xmm, Mem |
18357 /// | 2 | Xmm, Xmm, Xmm |
18358 /// | 3 | Ymm, Ymm, Mem |
18359 /// | 4 | Ymm, Ymm, Ymm |
18360 /// | 5 | Zmm, Zmm, Mem |
18361 /// | 6 | Zmm, Zmm, Zmm |
18362 /// +---+---------------+
18363 /// ```
18364 #[inline]
18365 pub fn vpsravw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18366 where Assembler<'a>: VpsravwMaskEmitter<A, B, C> {
18367 <Self as VpsravwMaskEmitter<A, B, C>>::vpsravw_mask(self, op0, op1, op2);
18368 }
18369 /// `VPSRAVW_MASKZ` (VPSRAVW).
18370 /// Shifts the bits in the individual data elements (word/doublewords/quadword) in the first source operand (the second operand) to the right by the number of bits specified in the count value of respective data elements in the second source operand (the third operand). As the bits in the data elements are shifted right, the empty high-order bits are set to the MSB (sign extension).
18371 ///
18372 ///
18373 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRAVW%3AVPSRAVD%3AVPSRAVQ.html).
18374 ///
18375 /// Supported operand variants:
18376 ///
18377 /// ```text
18378 /// +---+---------------+
18379 /// | # | Operands |
18380 /// +---+---------------+
18381 /// | 1 | Xmm, Xmm, Mem |
18382 /// | 2 | Xmm, Xmm, Xmm |
18383 /// | 3 | Ymm, Ymm, Mem |
18384 /// | 4 | Ymm, Ymm, Ymm |
18385 /// | 5 | Zmm, Zmm, Mem |
18386 /// | 6 | Zmm, Zmm, Zmm |
18387 /// +---+---------------+
18388 /// ```
18389 #[inline]
18390 pub fn vpsravw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18391 where Assembler<'a>: VpsravwMaskzEmitter<A, B, C> {
18392 <Self as VpsravwMaskzEmitter<A, B, C>>::vpsravw_maskz(self, op0, op1, op2);
18393 }
18394 /// `VPSRAW` (VPSRAW).
18395 /// Shifts the bits in the individual data elements (words, doublewords or quadwords) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are filled with the initial value of the sign bit of the data element. If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for quadwords), each destination data element is filled with the initial value of the sign bit of the element. (Figure 4-18 gives an example of shifting words in a 64-bit operand.)
18396 ///
18397 ///
18398 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRAW%3APSRAD%3APSRAQ.html).
18399 ///
18400 /// Supported operand variants:
18401 ///
18402 /// ```text
18403 /// +----+---------------+
18404 /// | # | Operands |
18405 /// +----+---------------+
18406 /// | 1 | Xmm, Mem, Imm |
18407 /// | 2 | Xmm, Xmm, Imm |
18408 /// | 3 | Xmm, Xmm, Mem |
18409 /// | 4 | Xmm, Xmm, Xmm |
18410 /// | 5 | Ymm, Mem, Imm |
18411 /// | 6 | Ymm, Ymm, Imm |
18412 /// | 7 | Ymm, Ymm, Mem |
18413 /// | 8 | Ymm, Ymm, Xmm |
18414 /// | 9 | Zmm, Mem, Imm |
18415 /// | 10 | Zmm, Zmm, Imm |
18416 /// | 11 | Zmm, Zmm, Mem |
18417 /// | 12 | Zmm, Zmm, Xmm |
18418 /// +----+---------------+
18419 /// ```
18420 #[inline]
18421 pub fn vpsraw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18422 where Assembler<'a>: VpsrawEmitter<A, B, C> {
18423 <Self as VpsrawEmitter<A, B, C>>::vpsraw(self, op0, op1, op2);
18424 }
18425 /// `VPSRAW_MASK` (VPSRAW).
18426 /// Shifts the bits in the individual data elements (words, doublewords or quadwords) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are filled with the initial value of the sign bit of the data element. If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for quadwords), each destination data element is filled with the initial value of the sign bit of the element. (Figure 4-18 gives an example of shifting words in a 64-bit operand.)
18427 ///
18428 ///
18429 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRAW%3APSRAD%3APSRAQ.html).
18430 ///
18431 /// Supported operand variants:
18432 ///
18433 /// ```text
18434 /// +----+---------------+
18435 /// | # | Operands |
18436 /// +----+---------------+
18437 /// | 1 | Xmm, Mem, Imm |
18438 /// | 2 | Xmm, Xmm, Imm |
18439 /// | 3 | Xmm, Xmm, Mem |
18440 /// | 4 | Xmm, Xmm, Xmm |
18441 /// | 5 | Ymm, Mem, Imm |
18442 /// | 6 | Ymm, Ymm, Imm |
18443 /// | 7 | Ymm, Ymm, Mem |
18444 /// | 8 | Ymm, Ymm, Xmm |
18445 /// | 9 | Zmm, Mem, Imm |
18446 /// | 10 | Zmm, Zmm, Imm |
18447 /// | 11 | Zmm, Zmm, Mem |
18448 /// | 12 | Zmm, Zmm, Xmm |
18449 /// +----+---------------+
18450 /// ```
18451 #[inline]
18452 pub fn vpsraw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18453 where Assembler<'a>: VpsrawMaskEmitter<A, B, C> {
18454 <Self as VpsrawMaskEmitter<A, B, C>>::vpsraw_mask(self, op0, op1, op2);
18455 }
18456 /// `VPSRAW_MASKZ` (VPSRAW).
18457 /// Shifts the bits in the individual data elements (words, doublewords or quadwords) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are filled with the initial value of the sign bit of the data element. If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for quadwords), each destination data element is filled with the initial value of the sign bit of the element. (Figure 4-18 gives an example of shifting words in a 64-bit operand.)
18458 ///
18459 ///
18460 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRAW%3APSRAD%3APSRAQ.html).
18461 ///
18462 /// Supported operand variants:
18463 ///
18464 /// ```text
18465 /// +----+---------------+
18466 /// | # | Operands |
18467 /// +----+---------------+
18468 /// | 1 | Xmm, Mem, Imm |
18469 /// | 2 | Xmm, Xmm, Imm |
18470 /// | 3 | Xmm, Xmm, Mem |
18471 /// | 4 | Xmm, Xmm, Xmm |
18472 /// | 5 | Ymm, Mem, Imm |
18473 /// | 6 | Ymm, Ymm, Imm |
18474 /// | 7 | Ymm, Ymm, Mem |
18475 /// | 8 | Ymm, Ymm, Xmm |
18476 /// | 9 | Zmm, Mem, Imm |
18477 /// | 10 | Zmm, Zmm, Imm |
18478 /// | 11 | Zmm, Zmm, Mem |
18479 /// | 12 | Zmm, Zmm, Xmm |
18480 /// +----+---------------+
18481 /// ```
18482 #[inline]
18483 pub fn vpsraw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18484 where Assembler<'a>: VpsrawMaskzEmitter<A, B, C> {
18485 <Self as VpsrawMaskzEmitter<A, B, C>>::vpsraw_maskz(self, op0, op1, op2);
18486 }
18487 /// `VPSRLDQ` (VPSRLDQ).
18488 /// Shifts the destination operand (first operand) to the right by the number of bytes specified in the count operand (second operand). The empty high-order bytes are cleared (set to all 0s). If the value specified by the count operand is greater than 15, the destination operand is set to all 0s. The count operand is an 8-bit immediate.
18489 ///
18490 ///
18491 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLDQ.html).
18492 ///
18493 /// Supported operand variants:
18494 ///
18495 /// ```text
18496 /// +---+---------------+
18497 /// | # | Operands |
18498 /// +---+---------------+
18499 /// | 1 | Xmm, Mem, Imm |
18500 /// | 2 | Xmm, Xmm, Imm |
18501 /// | 3 | Ymm, Mem, Imm |
18502 /// | 4 | Ymm, Ymm, Imm |
18503 /// | 5 | Zmm, Mem, Imm |
18504 /// | 6 | Zmm, Zmm, Imm |
18505 /// +---+---------------+
18506 /// ```
18507 #[inline]
18508 pub fn vpsrldq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18509 where Assembler<'a>: VpsrldqEmitter<A, B, C> {
18510 <Self as VpsrldqEmitter<A, B, C>>::vpsrldq(self, op0, op1, op2);
18511 }
18512 /// `VPSRLVW` (VPSRLVW).
18513 /// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the right by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0).
18514 ///
18515 ///
18516 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRLVW%3AVPSRLVD%3AVPSRLVQ.html).
18517 ///
18518 /// Supported operand variants:
18519 ///
18520 /// ```text
18521 /// +---+---------------+
18522 /// | # | Operands |
18523 /// +---+---------------+
18524 /// | 1 | Xmm, Xmm, Mem |
18525 /// | 2 | Xmm, Xmm, Xmm |
18526 /// | 3 | Ymm, Ymm, Mem |
18527 /// | 4 | Ymm, Ymm, Ymm |
18528 /// | 5 | Zmm, Zmm, Mem |
18529 /// | 6 | Zmm, Zmm, Zmm |
18530 /// +---+---------------+
18531 /// ```
18532 #[inline]
18533 pub fn vpsrlvw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18534 where Assembler<'a>: VpsrlvwEmitter<A, B, C> {
18535 <Self as VpsrlvwEmitter<A, B, C>>::vpsrlvw(self, op0, op1, op2);
18536 }
18537 /// `VPSRLVW_MASK` (VPSRLVW).
18538 /// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the right by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0).
18539 ///
18540 ///
18541 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRLVW%3AVPSRLVD%3AVPSRLVQ.html).
18542 ///
18543 /// Supported operand variants:
18544 ///
18545 /// ```text
18546 /// +---+---------------+
18547 /// | # | Operands |
18548 /// +---+---------------+
18549 /// | 1 | Xmm, Xmm, Mem |
18550 /// | 2 | Xmm, Xmm, Xmm |
18551 /// | 3 | Ymm, Ymm, Mem |
18552 /// | 4 | Ymm, Ymm, Ymm |
18553 /// | 5 | Zmm, Zmm, Mem |
18554 /// | 6 | Zmm, Zmm, Zmm |
18555 /// +---+---------------+
18556 /// ```
18557 #[inline]
18558 pub fn vpsrlvw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18559 where Assembler<'a>: VpsrlvwMaskEmitter<A, B, C> {
18560 <Self as VpsrlvwMaskEmitter<A, B, C>>::vpsrlvw_mask(self, op0, op1, op2);
18561 }
18562 /// `VPSRLVW_MASKZ` (VPSRLVW).
18563 /// Shifts the bits in the individual data elements (words, doublewords or quadword) in the first source operand to the right by the count value of respective data elements in the second source operand. As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0).
18564 ///
18565 ///
18566 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSRLVW%3AVPSRLVD%3AVPSRLVQ.html).
18567 ///
18568 /// Supported operand variants:
18569 ///
18570 /// ```text
18571 /// +---+---------------+
18572 /// | # | Operands |
18573 /// +---+---------------+
18574 /// | 1 | Xmm, Xmm, Mem |
18575 /// | 2 | Xmm, Xmm, Xmm |
18576 /// | 3 | Ymm, Ymm, Mem |
18577 /// | 4 | Ymm, Ymm, Ymm |
18578 /// | 5 | Zmm, Zmm, Mem |
18579 /// | 6 | Zmm, Zmm, Zmm |
18580 /// +---+---------------+
18581 /// ```
18582 #[inline]
18583 pub fn vpsrlvw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18584 where Assembler<'a>: VpsrlvwMaskzEmitter<A, B, C> {
18585 <Self as VpsrlvwMaskzEmitter<A, B, C>>::vpsrlvw_maskz(self, op0, op1, op2);
18586 }
18587 /// `VPSRLW` (VPSRLW).
18588 /// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-19 gives an example of shifting words in a 64-bit operand.
18589 ///
18590 ///
18591 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLW%3APSRLD%3APSRLQ.html).
18592 ///
18593 /// Supported operand variants:
18594 ///
18595 /// ```text
18596 /// +----+---------------+
18597 /// | # | Operands |
18598 /// +----+---------------+
18599 /// | 1 | Xmm, Mem, Imm |
18600 /// | 2 | Xmm, Xmm, Imm |
18601 /// | 3 | Xmm, Xmm, Mem |
18602 /// | 4 | Xmm, Xmm, Xmm |
18603 /// | 5 | Ymm, Mem, Imm |
18604 /// | 6 | Ymm, Ymm, Imm |
18605 /// | 7 | Ymm, Ymm, Mem |
18606 /// | 8 | Ymm, Ymm, Xmm |
18607 /// | 9 | Zmm, Mem, Imm |
18608 /// | 10 | Zmm, Zmm, Imm |
18609 /// | 11 | Zmm, Zmm, Mem |
18610 /// | 12 | Zmm, Zmm, Xmm |
18611 /// +----+---------------+
18612 /// ```
18613 #[inline]
18614 pub fn vpsrlw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18615 where Assembler<'a>: VpsrlwEmitter<A, B, C> {
18616 <Self as VpsrlwEmitter<A, B, C>>::vpsrlw(self, op0, op1, op2);
18617 }
18618 /// `VPSRLW_MASK` (VPSRLW).
18619 /// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-19 gives an example of shifting words in a 64-bit operand.
18620 ///
18621 ///
18622 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLW%3APSRLD%3APSRLQ.html).
18623 ///
18624 /// Supported operand variants:
18625 ///
18626 /// ```text
18627 /// +----+---------------+
18628 /// | # | Operands |
18629 /// +----+---------------+
18630 /// | 1 | Xmm, Mem, Imm |
18631 /// | 2 | Xmm, Xmm, Imm |
18632 /// | 3 | Xmm, Xmm, Mem |
18633 /// | 4 | Xmm, Xmm, Xmm |
18634 /// | 5 | Ymm, Mem, Imm |
18635 /// | 6 | Ymm, Ymm, Imm |
18636 /// | 7 | Ymm, Ymm, Mem |
18637 /// | 8 | Ymm, Ymm, Xmm |
18638 /// | 9 | Zmm, Mem, Imm |
18639 /// | 10 | Zmm, Zmm, Imm |
18640 /// | 11 | Zmm, Zmm, Mem |
18641 /// | 12 | Zmm, Zmm, Xmm |
18642 /// +----+---------------+
18643 /// ```
18644 #[inline]
18645 pub fn vpsrlw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18646 where Assembler<'a>: VpsrlwMaskEmitter<A, B, C> {
18647 <Self as VpsrlwMaskEmitter<A, B, C>>::vpsrlw_mask(self, op0, op1, op2);
18648 }
18649 /// `VPSRLW_MASKZ` (VPSRLW).
18650 /// Shifts the bits in the individual data elements (words, doublewords, or quadword) in the destination operand (first operand) to the right by the number of bits specified in the count operand (second operand). As the bits in the data elements are shifted right, the empty high-order bits are cleared (set to 0). If the value specified by the count operand is greater than 15 (for words), 31 (for doublewords), or 63 (for a quadword), then the destination operand is set to all 0s. Figure 4-19 gives an example of shifting words in a 64-bit operand.
18651 ///
18652 ///
18653 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSRLW%3APSRLD%3APSRLQ.html).
18654 ///
18655 /// Supported operand variants:
18656 ///
18657 /// ```text
18658 /// +----+---------------+
18659 /// | # | Operands |
18660 /// +----+---------------+
18661 /// | 1 | Xmm, Mem, Imm |
18662 /// | 2 | Xmm, Xmm, Imm |
18663 /// | 3 | Xmm, Xmm, Mem |
18664 /// | 4 | Xmm, Xmm, Xmm |
18665 /// | 5 | Ymm, Mem, Imm |
18666 /// | 6 | Ymm, Ymm, Imm |
18667 /// | 7 | Ymm, Ymm, Mem |
18668 /// | 8 | Ymm, Ymm, Xmm |
18669 /// | 9 | Zmm, Mem, Imm |
18670 /// | 10 | Zmm, Zmm, Imm |
18671 /// | 11 | Zmm, Zmm, Mem |
18672 /// | 12 | Zmm, Zmm, Xmm |
18673 /// +----+---------------+
18674 /// ```
18675 #[inline]
18676 pub fn vpsrlw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18677 where Assembler<'a>: VpsrlwMaskzEmitter<A, B, C> {
18678 <Self as VpsrlwMaskzEmitter<A, B, C>>::vpsrlw_maskz(self, op0, op1, op2);
18679 }
18680 /// `VPSUBB` (VPSUBB).
18681 /// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
18682 ///
18683 ///
18684 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
18685 ///
18686 /// Supported operand variants:
18687 ///
18688 /// ```text
18689 /// +---+---------------+
18690 /// | # | Operands |
18691 /// +---+---------------+
18692 /// | 1 | Xmm, Xmm, Mem |
18693 /// | 2 | Xmm, Xmm, Xmm |
18694 /// | 3 | Ymm, Ymm, Mem |
18695 /// | 4 | Ymm, Ymm, Ymm |
18696 /// | 5 | Zmm, Zmm, Mem |
18697 /// | 6 | Zmm, Zmm, Zmm |
18698 /// +---+---------------+
18699 /// ```
18700 #[inline]
18701 pub fn vpsubb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18702 where Assembler<'a>: VpsubbEmitter<A, B, C> {
18703 <Self as VpsubbEmitter<A, B, C>>::vpsubb(self, op0, op1, op2);
18704 }
18705 /// `VPSUBB_MASK` (VPSUBB).
18706 /// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
18707 ///
18708 ///
18709 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
18710 ///
18711 /// Supported operand variants:
18712 ///
18713 /// ```text
18714 /// +---+---------------+
18715 /// | # | Operands |
18716 /// +---+---------------+
18717 /// | 1 | Xmm, Xmm, Mem |
18718 /// | 2 | Xmm, Xmm, Xmm |
18719 /// | 3 | Ymm, Ymm, Mem |
18720 /// | 4 | Ymm, Ymm, Ymm |
18721 /// | 5 | Zmm, Zmm, Mem |
18722 /// | 6 | Zmm, Zmm, Zmm |
18723 /// +---+---------------+
18724 /// ```
18725 #[inline]
18726 pub fn vpsubb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18727 where Assembler<'a>: VpsubbMaskEmitter<A, B, C> {
18728 <Self as VpsubbMaskEmitter<A, B, C>>::vpsubb_mask(self, op0, op1, op2);
18729 }
18730 /// `VPSUBB_MASKZ` (VPSUBB).
18731 /// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
18732 ///
18733 ///
18734 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
18735 ///
18736 /// Supported operand variants:
18737 ///
18738 /// ```text
18739 /// +---+---------------+
18740 /// | # | Operands |
18741 /// +---+---------------+
18742 /// | 1 | Xmm, Xmm, Mem |
18743 /// | 2 | Xmm, Xmm, Xmm |
18744 /// | 3 | Ymm, Ymm, Mem |
18745 /// | 4 | Ymm, Ymm, Ymm |
18746 /// | 5 | Zmm, Zmm, Mem |
18747 /// | 6 | Zmm, Zmm, Zmm |
18748 /// +---+---------------+
18749 /// ```
18750 #[inline]
18751 pub fn vpsubb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18752 where Assembler<'a>: VpsubbMaskzEmitter<A, B, C> {
18753 <Self as VpsubbMaskzEmitter<A, B, C>>::vpsubb_maskz(self, op0, op1, op2);
18754 }
18755 /// `VPSUBSB` (VPSUBSB).
18756 /// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
18757 ///
18758 ///
18759 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
18760 ///
18761 /// Supported operand variants:
18762 ///
18763 /// ```text
18764 /// +---+---------------+
18765 /// | # | Operands |
18766 /// +---+---------------+
18767 /// | 1 | Xmm, Xmm, Mem |
18768 /// | 2 | Xmm, Xmm, Xmm |
18769 /// | 3 | Ymm, Ymm, Mem |
18770 /// | 4 | Ymm, Ymm, Ymm |
18771 /// | 5 | Zmm, Zmm, Mem |
18772 /// | 6 | Zmm, Zmm, Zmm |
18773 /// +---+---------------+
18774 /// ```
18775 #[inline]
18776 pub fn vpsubsb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18777 where Assembler<'a>: VpsubsbEmitter<A, B, C> {
18778 <Self as VpsubsbEmitter<A, B, C>>::vpsubsb(self, op0, op1, op2);
18779 }
18780 /// `VPSUBSB_MASK` (VPSUBSB).
18781 /// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
18782 ///
18783 ///
18784 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
18785 ///
18786 /// Supported operand variants:
18787 ///
18788 /// ```text
18789 /// +---+---------------+
18790 /// | # | Operands |
18791 /// +---+---------------+
18792 /// | 1 | Xmm, Xmm, Mem |
18793 /// | 2 | Xmm, Xmm, Xmm |
18794 /// | 3 | Ymm, Ymm, Mem |
18795 /// | 4 | Ymm, Ymm, Ymm |
18796 /// | 5 | Zmm, Zmm, Mem |
18797 /// | 6 | Zmm, Zmm, Zmm |
18798 /// +---+---------------+
18799 /// ```
18800 #[inline]
18801 pub fn vpsubsb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18802 where Assembler<'a>: VpsubsbMaskEmitter<A, B, C> {
18803 <Self as VpsubsbMaskEmitter<A, B, C>>::vpsubsb_mask(self, op0, op1, op2);
18804 }
18805 /// `VPSUBSB_MASKZ` (VPSUBSB).
18806 /// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
18807 ///
18808 ///
18809 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
18810 ///
18811 /// Supported operand variants:
18812 ///
18813 /// ```text
18814 /// +---+---------------+
18815 /// | # | Operands |
18816 /// +---+---------------+
18817 /// | 1 | Xmm, Xmm, Mem |
18818 /// | 2 | Xmm, Xmm, Xmm |
18819 /// | 3 | Ymm, Ymm, Mem |
18820 /// | 4 | Ymm, Ymm, Ymm |
18821 /// | 5 | Zmm, Zmm, Mem |
18822 /// | 6 | Zmm, Zmm, Zmm |
18823 /// +---+---------------+
18824 /// ```
18825 #[inline]
18826 pub fn vpsubsb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18827 where Assembler<'a>: VpsubsbMaskzEmitter<A, B, C> {
18828 <Self as VpsubsbMaskzEmitter<A, B, C>>::vpsubsb_maskz(self, op0, op1, op2);
18829 }
18830 /// `VPSUBSW` (VPSUBSW).
18831 /// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
18832 ///
18833 ///
18834 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
18835 ///
18836 /// Supported operand variants:
18837 ///
18838 /// ```text
18839 /// +---+---------------+
18840 /// | # | Operands |
18841 /// +---+---------------+
18842 /// | 1 | Xmm, Xmm, Mem |
18843 /// | 2 | Xmm, Xmm, Xmm |
18844 /// | 3 | Ymm, Ymm, Mem |
18845 /// | 4 | Ymm, Ymm, Ymm |
18846 /// | 5 | Zmm, Zmm, Mem |
18847 /// | 6 | Zmm, Zmm, Zmm |
18848 /// +---+---------------+
18849 /// ```
18850 #[inline]
18851 pub fn vpsubsw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18852 where Assembler<'a>: VpsubswEmitter<A, B, C> {
18853 <Self as VpsubswEmitter<A, B, C>>::vpsubsw(self, op0, op1, op2);
18854 }
18855 /// `VPSUBSW_MASK` (VPSUBSW).
18856 /// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
18857 ///
18858 ///
18859 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
18860 ///
18861 /// Supported operand variants:
18862 ///
18863 /// ```text
18864 /// +---+---------------+
18865 /// | # | Operands |
18866 /// +---+---------------+
18867 /// | 1 | Xmm, Xmm, Mem |
18868 /// | 2 | Xmm, Xmm, Xmm |
18869 /// | 3 | Ymm, Ymm, Mem |
18870 /// | 4 | Ymm, Ymm, Ymm |
18871 /// | 5 | Zmm, Zmm, Mem |
18872 /// | 6 | Zmm, Zmm, Zmm |
18873 /// +---+---------------+
18874 /// ```
18875 #[inline]
18876 pub fn vpsubsw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18877 where Assembler<'a>: VpsubswMaskEmitter<A, B, C> {
18878 <Self as VpsubswMaskEmitter<A, B, C>>::vpsubsw_mask(self, op0, op1, op2);
18879 }
18880 /// `VPSUBSW_MASKZ` (VPSUBSW).
18881 /// Performs a SIMD subtract of the packed signed integers of the source operand (second operand) from the packed signed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with signed saturation, as described in the following paragraphs.
18882 ///
18883 ///
18884 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBSB%3APSUBSW.html).
18885 ///
18886 /// Supported operand variants:
18887 ///
18888 /// ```text
18889 /// +---+---------------+
18890 /// | # | Operands |
18891 /// +---+---------------+
18892 /// | 1 | Xmm, Xmm, Mem |
18893 /// | 2 | Xmm, Xmm, Xmm |
18894 /// | 3 | Ymm, Ymm, Mem |
18895 /// | 4 | Ymm, Ymm, Ymm |
18896 /// | 5 | Zmm, Zmm, Mem |
18897 /// | 6 | Zmm, Zmm, Zmm |
18898 /// +---+---------------+
18899 /// ```
18900 #[inline]
18901 pub fn vpsubsw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18902 where Assembler<'a>: VpsubswMaskzEmitter<A, B, C> {
18903 <Self as VpsubswMaskzEmitter<A, B, C>>::vpsubsw_maskz(self, op0, op1, op2);
18904 }
18905 /// `VPSUBUSB` (VPSUBUSB).
18906 /// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
18907 ///
18908 ///
18909 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
18910 ///
18911 /// Supported operand variants:
18912 ///
18913 /// ```text
18914 /// +---+---------------+
18915 /// | # | Operands |
18916 /// +---+---------------+
18917 /// | 1 | Xmm, Xmm, Mem |
18918 /// | 2 | Xmm, Xmm, Xmm |
18919 /// | 3 | Ymm, Ymm, Mem |
18920 /// | 4 | Ymm, Ymm, Ymm |
18921 /// | 5 | Zmm, Zmm, Mem |
18922 /// | 6 | Zmm, Zmm, Zmm |
18923 /// +---+---------------+
18924 /// ```
18925 #[inline]
18926 pub fn vpsubusb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18927 where Assembler<'a>: VpsubusbEmitter<A, B, C> {
18928 <Self as VpsubusbEmitter<A, B, C>>::vpsubusb(self, op0, op1, op2);
18929 }
18930 /// `VPSUBUSB_MASK` (VPSUBUSB).
18931 /// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
18932 ///
18933 ///
18934 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
18935 ///
18936 /// Supported operand variants:
18937 ///
18938 /// ```text
18939 /// +---+---------------+
18940 /// | # | Operands |
18941 /// +---+---------------+
18942 /// | 1 | Xmm, Xmm, Mem |
18943 /// | 2 | Xmm, Xmm, Xmm |
18944 /// | 3 | Ymm, Ymm, Mem |
18945 /// | 4 | Ymm, Ymm, Ymm |
18946 /// | 5 | Zmm, Zmm, Mem |
18947 /// | 6 | Zmm, Zmm, Zmm |
18948 /// +---+---------------+
18949 /// ```
18950 #[inline]
18951 pub fn vpsubusb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18952 where Assembler<'a>: VpsubusbMaskEmitter<A, B, C> {
18953 <Self as VpsubusbMaskEmitter<A, B, C>>::vpsubusb_mask(self, op0, op1, op2);
18954 }
18955 /// `VPSUBUSB_MASKZ` (VPSUBUSB).
18956 /// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
18957 ///
18958 ///
18959 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
18960 ///
18961 /// Supported operand variants:
18962 ///
18963 /// ```text
18964 /// +---+---------------+
18965 /// | # | Operands |
18966 /// +---+---------------+
18967 /// | 1 | Xmm, Xmm, Mem |
18968 /// | 2 | Xmm, Xmm, Xmm |
18969 /// | 3 | Ymm, Ymm, Mem |
18970 /// | 4 | Ymm, Ymm, Ymm |
18971 /// | 5 | Zmm, Zmm, Mem |
18972 /// | 6 | Zmm, Zmm, Zmm |
18973 /// +---+---------------+
18974 /// ```
18975 #[inline]
18976 pub fn vpsubusb_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
18977 where Assembler<'a>: VpsubusbMaskzEmitter<A, B, C> {
18978 <Self as VpsubusbMaskzEmitter<A, B, C>>::vpsubusb_maskz(self, op0, op1, op2);
18979 }
18980 /// `VPSUBUSW` (VPSUBUSW).
18981 /// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
18982 ///
18983 ///
18984 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
18985 ///
18986 /// Supported operand variants:
18987 ///
18988 /// ```text
18989 /// +---+---------------+
18990 /// | # | Operands |
18991 /// +---+---------------+
18992 /// | 1 | Xmm, Xmm, Mem |
18993 /// | 2 | Xmm, Xmm, Xmm |
18994 /// | 3 | Ymm, Ymm, Mem |
18995 /// | 4 | Ymm, Ymm, Ymm |
18996 /// | 5 | Zmm, Zmm, Mem |
18997 /// | 6 | Zmm, Zmm, Zmm |
18998 /// +---+---------------+
18999 /// ```
19000 #[inline]
19001 pub fn vpsubusw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19002 where Assembler<'a>: VpsubuswEmitter<A, B, C> {
19003 <Self as VpsubuswEmitter<A, B, C>>::vpsubusw(self, op0, op1, op2);
19004 }
19005 /// `VPSUBUSW_MASK` (VPSUBUSW).
19006 /// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
19007 ///
19008 ///
19009 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
19010 ///
19011 /// Supported operand variants:
19012 ///
19013 /// ```text
19014 /// +---+---------------+
19015 /// | # | Operands |
19016 /// +---+---------------+
19017 /// | 1 | Xmm, Xmm, Mem |
19018 /// | 2 | Xmm, Xmm, Xmm |
19019 /// | 3 | Ymm, Ymm, Mem |
19020 /// | 4 | Ymm, Ymm, Ymm |
19021 /// | 5 | Zmm, Zmm, Mem |
19022 /// | 6 | Zmm, Zmm, Zmm |
19023 /// +---+---------------+
19024 /// ```
19025 #[inline]
19026 pub fn vpsubusw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19027 where Assembler<'a>: VpsubuswMaskEmitter<A, B, C> {
19028 <Self as VpsubuswMaskEmitter<A, B, C>>::vpsubusw_mask(self, op0, op1, op2);
19029 }
19030 /// `VPSUBUSW_MASKZ` (VPSUBUSW).
19031 /// Performs a SIMD subtract of the packed unsigned integers of the source operand (second operand) from the packed unsigned integers of the destination operand (first operand), and stores the packed unsigned integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with unsigned saturation, as described in the following paragraphs.
19032 ///
19033 ///
19034 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBUSB%3APSUBUSW.html).
19035 ///
19036 /// Supported operand variants:
19037 ///
19038 /// ```text
19039 /// +---+---------------+
19040 /// | # | Operands |
19041 /// +---+---------------+
19042 /// | 1 | Xmm, Xmm, Mem |
19043 /// | 2 | Xmm, Xmm, Xmm |
19044 /// | 3 | Ymm, Ymm, Mem |
19045 /// | 4 | Ymm, Ymm, Ymm |
19046 /// | 5 | Zmm, Zmm, Mem |
19047 /// | 6 | Zmm, Zmm, Zmm |
19048 /// +---+---------------+
19049 /// ```
19050 #[inline]
19051 pub fn vpsubusw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19052 where Assembler<'a>: VpsubuswMaskzEmitter<A, B, C> {
19053 <Self as VpsubuswMaskzEmitter<A, B, C>>::vpsubusw_maskz(self, op0, op1, op2);
19054 }
19055 /// `VPSUBW` (VPSUBW).
19056 /// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
19057 ///
19058 ///
19059 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
19060 ///
19061 /// Supported operand variants:
19062 ///
19063 /// ```text
19064 /// +---+---------------+
19065 /// | # | Operands |
19066 /// +---+---------------+
19067 /// | 1 | Xmm, Xmm, Mem |
19068 /// | 2 | Xmm, Xmm, Xmm |
19069 /// | 3 | Ymm, Ymm, Mem |
19070 /// | 4 | Ymm, Ymm, Ymm |
19071 /// | 5 | Zmm, Zmm, Mem |
19072 /// | 6 | Zmm, Zmm, Zmm |
19073 /// +---+---------------+
19074 /// ```
19075 #[inline]
19076 pub fn vpsubw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19077 where Assembler<'a>: VpsubwEmitter<A, B, C> {
19078 <Self as VpsubwEmitter<A, B, C>>::vpsubw(self, op0, op1, op2);
19079 }
19080 /// `VPSUBW_MASK` (VPSUBW).
19081 /// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
19082 ///
19083 ///
19084 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
19085 ///
19086 /// Supported operand variants:
19087 ///
19088 /// ```text
19089 /// +---+---------------+
19090 /// | # | Operands |
19091 /// +---+---------------+
19092 /// | 1 | Xmm, Xmm, Mem |
19093 /// | 2 | Xmm, Xmm, Xmm |
19094 /// | 3 | Ymm, Ymm, Mem |
19095 /// | 4 | Ymm, Ymm, Ymm |
19096 /// | 5 | Zmm, Zmm, Mem |
19097 /// | 6 | Zmm, Zmm, Zmm |
19098 /// +---+---------------+
19099 /// ```
19100 #[inline]
19101 pub fn vpsubw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19102 where Assembler<'a>: VpsubwMaskEmitter<A, B, C> {
19103 <Self as VpsubwMaskEmitter<A, B, C>>::vpsubw_mask(self, op0, op1, op2);
19104 }
19105 /// `VPSUBW_MASKZ` (VPSUBW).
19106 /// Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed integers of the destination operand (first operand), and stores the packed integer results in the destination operand. See Figure 9-4 in the Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1, for an illustration of a SIMD operation. Overflow is handled with wraparound, as described in the following paragraphs.
19107 ///
19108 ///
19109 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PSUBB%3APSUBW%3APSUBD.html).
19110 ///
19111 /// Supported operand variants:
19112 ///
19113 /// ```text
19114 /// +---+---------------+
19115 /// | # | Operands |
19116 /// +---+---------------+
19117 /// | 1 | Xmm, Xmm, Mem |
19118 /// | 2 | Xmm, Xmm, Xmm |
19119 /// | 3 | Ymm, Ymm, Mem |
19120 /// | 4 | Ymm, Ymm, Ymm |
19121 /// | 5 | Zmm, Zmm, Mem |
19122 /// | 6 | Zmm, Zmm, Zmm |
19123 /// +---+---------------+
19124 /// ```
19125 #[inline]
19126 pub fn vpsubw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19127 where Assembler<'a>: VpsubwMaskzEmitter<A, B, C> {
19128 <Self as VpsubwMaskzEmitter<A, B, C>>::vpsubw_maskz(self, op0, op1, op2);
19129 }
19130 /// `VPTESTMB` (VPTESTMB).
19131 /// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
19132 ///
19133 ///
19134 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
19135 ///
19136 /// Supported operand variants:
19137 ///
19138 /// ```text
19139 /// +---+----------------+
19140 /// | # | Operands |
19141 /// +---+----------------+
19142 /// | 1 | KReg, Xmm, Mem |
19143 /// | 2 | KReg, Xmm, Xmm |
19144 /// | 3 | KReg, Ymm, Mem |
19145 /// | 4 | KReg, Ymm, Ymm |
19146 /// | 5 | KReg, Zmm, Mem |
19147 /// | 6 | KReg, Zmm, Zmm |
19148 /// +---+----------------+
19149 /// ```
19150 #[inline]
19151 pub fn vptestmb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19152 where Assembler<'a>: VptestmbEmitter<A, B, C> {
19153 <Self as VptestmbEmitter<A, B, C>>::vptestmb(self, op0, op1, op2);
19154 }
19155 /// `VPTESTMB_MASK` (VPTESTMB).
19156 /// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
19157 ///
19158 ///
19159 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
19160 ///
19161 /// Supported operand variants:
19162 ///
19163 /// ```text
19164 /// +---+----------------+
19165 /// | # | Operands |
19166 /// +---+----------------+
19167 /// | 1 | KReg, Xmm, Mem |
19168 /// | 2 | KReg, Xmm, Xmm |
19169 /// | 3 | KReg, Ymm, Mem |
19170 /// | 4 | KReg, Ymm, Ymm |
19171 /// | 5 | KReg, Zmm, Mem |
19172 /// | 6 | KReg, Zmm, Zmm |
19173 /// +---+----------------+
19174 /// ```
19175 #[inline]
19176 pub fn vptestmb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19177 where Assembler<'a>: VptestmbMaskEmitter<A, B, C> {
19178 <Self as VptestmbMaskEmitter<A, B, C>>::vptestmb_mask(self, op0, op1, op2);
19179 }
19180 /// `VPTESTMW` (VPTESTMW).
19181 /// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
19182 ///
19183 ///
19184 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
19185 ///
19186 /// Supported operand variants:
19187 ///
19188 /// ```text
19189 /// +---+----------------+
19190 /// | # | Operands |
19191 /// +---+----------------+
19192 /// | 1 | KReg, Xmm, Mem |
19193 /// | 2 | KReg, Xmm, Xmm |
19194 /// | 3 | KReg, Ymm, Mem |
19195 /// | 4 | KReg, Ymm, Ymm |
19196 /// | 5 | KReg, Zmm, Mem |
19197 /// | 6 | KReg, Zmm, Zmm |
19198 /// +---+----------------+
19199 /// ```
19200 #[inline]
19201 pub fn vptestmw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19202 where Assembler<'a>: VptestmwEmitter<A, B, C> {
19203 <Self as VptestmwEmitter<A, B, C>>::vptestmw(self, op0, op1, op2);
19204 }
19205 /// `VPTESTMW_MASK` (VPTESTMW).
19206 /// Performs a bitwise logical AND operation on the first source operand (the second operand) and second source operand (the third operand) and stores the result in the destination operand (the first operand) under the write-mask. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is non-zero; otherwise it is set to 0.
19207 ///
19208 ///
19209 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTMB%3AVPTESTMW%3AVPTESTMD%3AVPTESTMQ.html).
19210 ///
19211 /// Supported operand variants:
19212 ///
19213 /// ```text
19214 /// +---+----------------+
19215 /// | # | Operands |
19216 /// +---+----------------+
19217 /// | 1 | KReg, Xmm, Mem |
19218 /// | 2 | KReg, Xmm, Xmm |
19219 /// | 3 | KReg, Ymm, Mem |
19220 /// | 4 | KReg, Ymm, Ymm |
19221 /// | 5 | KReg, Zmm, Mem |
19222 /// | 6 | KReg, Zmm, Zmm |
19223 /// +---+----------------+
19224 /// ```
19225 #[inline]
19226 pub fn vptestmw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19227 where Assembler<'a>: VptestmwMaskEmitter<A, B, C> {
19228 <Self as VptestmwMaskEmitter<A, B, C>>::vptestmw_mask(self, op0, op1, op2);
19229 }
19230 /// `VPTESTNMB` (VPTESTNMB).
19231 /// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
19232 ///
19233 ///
19234 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
19235 ///
19236 /// Supported operand variants:
19237 ///
19238 /// ```text
19239 /// +---+----------------+
19240 /// | # | Operands |
19241 /// +---+----------------+
19242 /// | 1 | KReg, Xmm, Mem |
19243 /// | 2 | KReg, Xmm, Xmm |
19244 /// | 3 | KReg, Ymm, Mem |
19245 /// | 4 | KReg, Ymm, Ymm |
19246 /// | 5 | KReg, Zmm, Mem |
19247 /// | 6 | KReg, Zmm, Zmm |
19248 /// +---+----------------+
19249 /// ```
19250 #[inline]
19251 pub fn vptestnmb<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19252 where Assembler<'a>: VptestnmbEmitter<A, B, C> {
19253 <Self as VptestnmbEmitter<A, B, C>>::vptestnmb(self, op0, op1, op2);
19254 }
19255 /// `VPTESTNMB_MASK` (VPTESTNMB).
19256 /// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
19257 ///
19258 ///
19259 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
19260 ///
19261 /// Supported operand variants:
19262 ///
19263 /// ```text
19264 /// +---+----------------+
19265 /// | # | Operands |
19266 /// +---+----------------+
19267 /// | 1 | KReg, Xmm, Mem |
19268 /// | 2 | KReg, Xmm, Xmm |
19269 /// | 3 | KReg, Ymm, Mem |
19270 /// | 4 | KReg, Ymm, Ymm |
19271 /// | 5 | KReg, Zmm, Mem |
19272 /// | 6 | KReg, Zmm, Zmm |
19273 /// +---+----------------+
19274 /// ```
19275 #[inline]
19276 pub fn vptestnmb_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19277 where Assembler<'a>: VptestnmbMaskEmitter<A, B, C> {
19278 <Self as VptestnmbMaskEmitter<A, B, C>>::vptestnmb_mask(self, op0, op1, op2);
19279 }
19280 /// `VPTESTNMW` (VPTESTNMW).
19281 /// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
19282 ///
19283 ///
19284 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
19285 ///
19286 /// Supported operand variants:
19287 ///
19288 /// ```text
19289 /// +---+----------------+
19290 /// | # | Operands |
19291 /// +---+----------------+
19292 /// | 1 | KReg, Xmm, Mem |
19293 /// | 2 | KReg, Xmm, Xmm |
19294 /// | 3 | KReg, Ymm, Mem |
19295 /// | 4 | KReg, Ymm, Ymm |
19296 /// | 5 | KReg, Zmm, Mem |
19297 /// | 6 | KReg, Zmm, Zmm |
19298 /// +---+----------------+
19299 /// ```
19300 #[inline]
19301 pub fn vptestnmw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19302 where Assembler<'a>: VptestnmwEmitter<A, B, C> {
19303 <Self as VptestnmwEmitter<A, B, C>>::vptestnmw(self, op0, op1, op2);
19304 }
19305 /// `VPTESTNMW_MASK` (VPTESTNMW).
19306 /// Performs a bitwise logical NAND operation on the byte/word/doubleword/quadword element of the first source operand (the second operand) with the corresponding element of the second source operand (the third operand) and stores the logical comparison result into each bit of the destination operand (the first operand) according to the writemask k1. Each bit of the result is set to 1 if the bitwise AND of the corresponding elements of the first and second src operands is zero; otherwise it is set to 0.
19307 ///
19308 ///
19309 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPTESTNMB%3AVPTESTNMW%3AVPTESTNMD%3AVPTESTNMQ.html).
19310 ///
19311 /// Supported operand variants:
19312 ///
19313 /// ```text
19314 /// +---+----------------+
19315 /// | # | Operands |
19316 /// +---+----------------+
19317 /// | 1 | KReg, Xmm, Mem |
19318 /// | 2 | KReg, Xmm, Xmm |
19319 /// | 3 | KReg, Ymm, Mem |
19320 /// | 4 | KReg, Ymm, Ymm |
19321 /// | 5 | KReg, Zmm, Mem |
19322 /// | 6 | KReg, Zmm, Zmm |
19323 /// +---+----------------+
19324 /// ```
19325 #[inline]
19326 pub fn vptestnmw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19327 where Assembler<'a>: VptestnmwMaskEmitter<A, B, C> {
19328 <Self as VptestnmwMaskEmitter<A, B, C>>::vptestnmw_mask(self, op0, op1, op2);
19329 }
19330 /// `VPUNPCKHBW` (VPUNPCKHBW).
19331 /// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
19332 ///
19333 ///
19334 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
19335 ///
19336 /// Supported operand variants:
19337 ///
19338 /// ```text
19339 /// +---+---------------+
19340 /// | # | Operands |
19341 /// +---+---------------+
19342 /// | 1 | Xmm, Xmm, Mem |
19343 /// | 2 | Xmm, Xmm, Xmm |
19344 /// | 3 | Ymm, Ymm, Mem |
19345 /// | 4 | Ymm, Ymm, Ymm |
19346 /// | 5 | Zmm, Zmm, Mem |
19347 /// | 6 | Zmm, Zmm, Zmm |
19348 /// +---+---------------+
19349 /// ```
19350 #[inline]
19351 pub fn vpunpckhbw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19352 where Assembler<'a>: VpunpckhbwEmitter<A, B, C> {
19353 <Self as VpunpckhbwEmitter<A, B, C>>::vpunpckhbw(self, op0, op1, op2);
19354 }
19355 /// `VPUNPCKHBW_MASK` (VPUNPCKHBW).
19356 /// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
19357 ///
19358 ///
19359 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
19360 ///
19361 /// Supported operand variants:
19362 ///
19363 /// ```text
19364 /// +---+---------------+
19365 /// | # | Operands |
19366 /// +---+---------------+
19367 /// | 1 | Xmm, Xmm, Mem |
19368 /// | 2 | Xmm, Xmm, Xmm |
19369 /// | 3 | Ymm, Ymm, Mem |
19370 /// | 4 | Ymm, Ymm, Ymm |
19371 /// | 5 | Zmm, Zmm, Mem |
19372 /// | 6 | Zmm, Zmm, Zmm |
19373 /// +---+---------------+
19374 /// ```
19375 #[inline]
19376 pub fn vpunpckhbw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19377 where Assembler<'a>: VpunpckhbwMaskEmitter<A, B, C> {
19378 <Self as VpunpckhbwMaskEmitter<A, B, C>>::vpunpckhbw_mask(self, op0, op1, op2);
19379 }
19380 /// `VPUNPCKHBW_MASKZ` (VPUNPCKHBW).
19381 /// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
19382 ///
19383 ///
19384 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
19385 ///
19386 /// Supported operand variants:
19387 ///
19388 /// ```text
19389 /// +---+---------------+
19390 /// | # | Operands |
19391 /// +---+---------------+
19392 /// | 1 | Xmm, Xmm, Mem |
19393 /// | 2 | Xmm, Xmm, Xmm |
19394 /// | 3 | Ymm, Ymm, Mem |
19395 /// | 4 | Ymm, Ymm, Ymm |
19396 /// | 5 | Zmm, Zmm, Mem |
19397 /// | 6 | Zmm, Zmm, Zmm |
19398 /// +---+---------------+
19399 /// ```
19400 #[inline]
19401 pub fn vpunpckhbw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19402 where Assembler<'a>: VpunpckhbwMaskzEmitter<A, B, C> {
19403 <Self as VpunpckhbwMaskzEmitter<A, B, C>>::vpunpckhbw_maskz(self, op0, op1, op2);
19404 }
19405 /// `VPUNPCKHWD` (VPUNPCKHWD).
19406 /// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
19407 ///
19408 ///
19409 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
19410 ///
19411 /// Supported operand variants:
19412 ///
19413 /// ```text
19414 /// +---+---------------+
19415 /// | # | Operands |
19416 /// +---+---------------+
19417 /// | 1 | Xmm, Xmm, Mem |
19418 /// | 2 | Xmm, Xmm, Xmm |
19419 /// | 3 | Ymm, Ymm, Mem |
19420 /// | 4 | Ymm, Ymm, Ymm |
19421 /// | 5 | Zmm, Zmm, Mem |
19422 /// | 6 | Zmm, Zmm, Zmm |
19423 /// +---+---------------+
19424 /// ```
19425 #[inline]
19426 pub fn vpunpckhwd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19427 where Assembler<'a>: VpunpckhwdEmitter<A, B, C> {
19428 <Self as VpunpckhwdEmitter<A, B, C>>::vpunpckhwd(self, op0, op1, op2);
19429 }
19430 /// `VPUNPCKHWD_MASK` (VPUNPCKHWD).
19431 /// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
19432 ///
19433 ///
19434 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
19435 ///
19436 /// Supported operand variants:
19437 ///
19438 /// ```text
19439 /// +---+---------------+
19440 /// | # | Operands |
19441 /// +---+---------------+
19442 /// | 1 | Xmm, Xmm, Mem |
19443 /// | 2 | Xmm, Xmm, Xmm |
19444 /// | 3 | Ymm, Ymm, Mem |
19445 /// | 4 | Ymm, Ymm, Ymm |
19446 /// | 5 | Zmm, Zmm, Mem |
19447 /// | 6 | Zmm, Zmm, Zmm |
19448 /// +---+---------------+
19449 /// ```
19450 #[inline]
19451 pub fn vpunpckhwd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19452 where Assembler<'a>: VpunpckhwdMaskEmitter<A, B, C> {
19453 <Self as VpunpckhwdMaskEmitter<A, B, C>>::vpunpckhwd_mask(self, op0, op1, op2);
19454 }
19455 /// `VPUNPCKHWD_MASKZ` (VPUNPCKHWD).
19456 /// Unpacks and interleaves the high-order data elements (bytes, words, doublewords, or quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. Figure 4-20 shows the unpack operation for bytes in 64-bit operands. The low-order data elements are ignored.
19457 ///
19458 ///
19459 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKHBW%3APUNPCKHWD%3APUNPCKHDQ%3APUNPCKHQDQ.html).
19460 ///
19461 /// Supported operand variants:
19462 ///
19463 /// ```text
19464 /// +---+---------------+
19465 /// | # | Operands |
19466 /// +---+---------------+
19467 /// | 1 | Xmm, Xmm, Mem |
19468 /// | 2 | Xmm, Xmm, Xmm |
19469 /// | 3 | Ymm, Ymm, Mem |
19470 /// | 4 | Ymm, Ymm, Ymm |
19471 /// | 5 | Zmm, Zmm, Mem |
19472 /// | 6 | Zmm, Zmm, Zmm |
19473 /// +---+---------------+
19474 /// ```
19475 #[inline]
19476 pub fn vpunpckhwd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19477 where Assembler<'a>: VpunpckhwdMaskzEmitter<A, B, C> {
19478 <Self as VpunpckhwdMaskzEmitter<A, B, C>>::vpunpckhwd_maskz(self, op0, op1, op2);
19479 }
19480 /// `VPUNPCKLBW` (VPUNPCKLBW).
19481 /// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
19482 ///
19483 ///
19484 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
19485 ///
19486 /// Supported operand variants:
19487 ///
19488 /// ```text
19489 /// +---+---------------+
19490 /// | # | Operands |
19491 /// +---+---------------+
19492 /// | 1 | Xmm, Xmm, Mem |
19493 /// | 2 | Xmm, Xmm, Xmm |
19494 /// | 3 | Ymm, Ymm, Mem |
19495 /// | 4 | Ymm, Ymm, Ymm |
19496 /// | 5 | Zmm, Zmm, Mem |
19497 /// | 6 | Zmm, Zmm, Zmm |
19498 /// +---+---------------+
19499 /// ```
19500 #[inline]
19501 pub fn vpunpcklbw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19502 where Assembler<'a>: VpunpcklbwEmitter<A, B, C> {
19503 <Self as VpunpcklbwEmitter<A, B, C>>::vpunpcklbw(self, op0, op1, op2);
19504 }
19505 /// `VPUNPCKLBW_MASK` (VPUNPCKLBW).
19506 /// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
19507 ///
19508 ///
19509 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
19510 ///
19511 /// Supported operand variants:
19512 ///
19513 /// ```text
19514 /// +---+---------------+
19515 /// | # | Operands |
19516 /// +---+---------------+
19517 /// | 1 | Xmm, Xmm, Mem |
19518 /// | 2 | Xmm, Xmm, Xmm |
19519 /// | 3 | Ymm, Ymm, Mem |
19520 /// | 4 | Ymm, Ymm, Ymm |
19521 /// | 5 | Zmm, Zmm, Mem |
19522 /// | 6 | Zmm, Zmm, Zmm |
19523 /// +---+---------------+
19524 /// ```
19525 #[inline]
19526 pub fn vpunpcklbw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19527 where Assembler<'a>: VpunpcklbwMaskEmitter<A, B, C> {
19528 <Self as VpunpcklbwMaskEmitter<A, B, C>>::vpunpcklbw_mask(self, op0, op1, op2);
19529 }
19530 /// `VPUNPCKLBW_MASKZ` (VPUNPCKLBW).
19531 /// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
19532 ///
19533 ///
19534 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
19535 ///
19536 /// Supported operand variants:
19537 ///
19538 /// ```text
19539 /// +---+---------------+
19540 /// | # | Operands |
19541 /// +---+---------------+
19542 /// | 1 | Xmm, Xmm, Mem |
19543 /// | 2 | Xmm, Xmm, Xmm |
19544 /// | 3 | Ymm, Ymm, Mem |
19545 /// | 4 | Ymm, Ymm, Ymm |
19546 /// | 5 | Zmm, Zmm, Mem |
19547 /// | 6 | Zmm, Zmm, Zmm |
19548 /// +---+---------------+
19549 /// ```
19550 #[inline]
19551 pub fn vpunpcklbw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19552 where Assembler<'a>: VpunpcklbwMaskzEmitter<A, B, C> {
19553 <Self as VpunpcklbwMaskzEmitter<A, B, C>>::vpunpcklbw_maskz(self, op0, op1, op2);
19554 }
19555 /// `VPUNPCKLWD` (VPUNPCKLWD).
19556 /// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
19557 ///
19558 ///
19559 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
19560 ///
19561 /// Supported operand variants:
19562 ///
19563 /// ```text
19564 /// +---+---------------+
19565 /// | # | Operands |
19566 /// +---+---------------+
19567 /// | 1 | Xmm, Xmm, Mem |
19568 /// | 2 | Xmm, Xmm, Xmm |
19569 /// | 3 | Ymm, Ymm, Mem |
19570 /// | 4 | Ymm, Ymm, Ymm |
19571 /// | 5 | Zmm, Zmm, Mem |
19572 /// | 6 | Zmm, Zmm, Zmm |
19573 /// +---+---------------+
19574 /// ```
19575 #[inline]
19576 pub fn vpunpcklwd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19577 where Assembler<'a>: VpunpcklwdEmitter<A, B, C> {
19578 <Self as VpunpcklwdEmitter<A, B, C>>::vpunpcklwd(self, op0, op1, op2);
19579 }
19580 /// `VPUNPCKLWD_MASK` (VPUNPCKLWD).
19581 /// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
19582 ///
19583 ///
19584 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
19585 ///
19586 /// Supported operand variants:
19587 ///
19588 /// ```text
19589 /// +---+---------------+
19590 /// | # | Operands |
19591 /// +---+---------------+
19592 /// | 1 | Xmm, Xmm, Mem |
19593 /// | 2 | Xmm, Xmm, Xmm |
19594 /// | 3 | Ymm, Ymm, Mem |
19595 /// | 4 | Ymm, Ymm, Ymm |
19596 /// | 5 | Zmm, Zmm, Mem |
19597 /// | 6 | Zmm, Zmm, Zmm |
19598 /// +---+---------------+
19599 /// ```
19600 #[inline]
19601 pub fn vpunpcklwd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19602 where Assembler<'a>: VpunpcklwdMaskEmitter<A, B, C> {
19603 <Self as VpunpcklwdMaskEmitter<A, B, C>>::vpunpcklwd_mask(self, op0, op1, op2);
19604 }
19605 /// `VPUNPCKLWD_MASKZ` (VPUNPCKLWD).
19606 /// Unpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords) of the destination operand (first operand) and source operand (second operand) into the destination operand. (Figure 4-22 shows the unpack operation for bytes in 64-bit operands.). The high-order data elements are ignored.
19607 ///
19608 ///
19609 /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/PUNPCKLBW%3APUNPCKLWD%3APUNPCKLDQ%3APUNPCKLQDQ.html).
19610 ///
19611 /// Supported operand variants:
19612 ///
19613 /// ```text
19614 /// +---+---------------+
19615 /// | # | Operands |
19616 /// +---+---------------+
19617 /// | 1 | Xmm, Xmm, Mem |
19618 /// | 2 | Xmm, Xmm, Xmm |
19619 /// | 3 | Ymm, Ymm, Mem |
19620 /// | 4 | Ymm, Ymm, Ymm |
19621 /// | 5 | Zmm, Zmm, Mem |
19622 /// | 6 | Zmm, Zmm, Zmm |
19623 /// +---+---------------+
19624 /// ```
19625 #[inline]
19626 pub fn vpunpcklwd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
19627 where Assembler<'a>: VpunpcklwdMaskzEmitter<A, B, C> {
19628 <Self as VpunpcklwdMaskzEmitter<A, B, C>>::vpunpcklwd_maskz(self, op0, op1, op2);
19629 }
19630}