//! Emitters for the AVX512_VBMI2 x86 instruction-set extension
//! (VPCOMPRESSB/W, VPEXPANDB/W, VPSHLDD/Q, …).
//!
//! File: `asmkit/x86/features/AVX512_VBMI2.rs` (machine-generated style).

use crate::core::emitter::*;
use crate::core::operand::*;
use crate::x86::assembler::*;
use crate::x86::operands::*;

use super::super::opcodes::*;
6
7/// A dummy operand that represents no register. Here just for simplicity.
8const NOREG: Operand = Operand::new();
9
10/// `VPCOMPRESSB` (VPCOMPRESSB). 
11/// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
12///
13///
14/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
15///
16/// Supported operand variants:
17///
18/// ```text
19/// +---+----------+
20/// | # | Operands |
21/// +---+----------+
22/// | 1 | Mem, Xmm |
23/// | 2 | Mem, Ymm |
24/// | 3 | Mem, Zmm |
25/// | 4 | Xmm, Xmm |
26/// | 5 | Ymm, Ymm |
27/// | 6 | Zmm, Zmm |
28/// +---+----------+
29/// ```
30pub trait VpcompressbEmitter<A, B> {
31    fn vpcompressb(&mut self, op0: A, op1: B);
32}
33
34impl<'a> VpcompressbEmitter<Mem, Xmm> for Assembler<'a> {
35    fn vpcompressb(&mut self, op0: Mem, op1: Xmm) {
36        self.emit(VPCOMPRESSB128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
37    }
38}
39
40impl<'a> VpcompressbEmitter<Mem, Ymm> for Assembler<'a> {
41    fn vpcompressb(&mut self, op0: Mem, op1: Ymm) {
42        self.emit(VPCOMPRESSB256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
43    }
44}
45
46impl<'a> VpcompressbEmitter<Mem, Zmm> for Assembler<'a> {
47    fn vpcompressb(&mut self, op0: Mem, op1: Zmm) {
48        self.emit(VPCOMPRESSB512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
49    }
50}
51
52impl<'a> VpcompressbEmitter<Xmm, Xmm> for Assembler<'a> {
53    fn vpcompressb(&mut self, op0: Xmm, op1: Xmm) {
54        self.emit(VPCOMPRESSB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
55    }
56}
57
58impl<'a> VpcompressbEmitter<Ymm, Ymm> for Assembler<'a> {
59    fn vpcompressb(&mut self, op0: Ymm, op1: Ymm) {
60        self.emit(VPCOMPRESSB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
61    }
62}
63
64impl<'a> VpcompressbEmitter<Zmm, Zmm> for Assembler<'a> {
65    fn vpcompressb(&mut self, op0: Zmm, op1: Zmm) {
66        self.emit(VPCOMPRESSB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
67    }
68}
69
70/// `VPCOMPRESSB_MASK` (VPCOMPRESSB). 
71/// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
72///
73///
74/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
75///
76/// Supported operand variants:
77///
78/// ```text
79/// +---+----------+
80/// | # | Operands |
81/// +---+----------+
82/// | 1 | Mem, Xmm |
83/// | 2 | Mem, Ymm |
84/// | 3 | Mem, Zmm |
85/// | 4 | Xmm, Xmm |
86/// | 5 | Ymm, Ymm |
87/// | 6 | Zmm, Zmm |
88/// +---+----------+
89/// ```
90pub trait VpcompressbMaskEmitter<A, B> {
91    fn vpcompressb_mask(&mut self, op0: A, op1: B);
92}
93
94impl<'a> VpcompressbMaskEmitter<Mem, Xmm> for Assembler<'a> {
95    fn vpcompressb_mask(&mut self, op0: Mem, op1: Xmm) {
96        self.emit(VPCOMPRESSB128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
97    }
98}
99
100impl<'a> VpcompressbMaskEmitter<Mem, Ymm> for Assembler<'a> {
101    fn vpcompressb_mask(&mut self, op0: Mem, op1: Ymm) {
102        self.emit(VPCOMPRESSB256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
103    }
104}
105
106impl<'a> VpcompressbMaskEmitter<Mem, Zmm> for Assembler<'a> {
107    fn vpcompressb_mask(&mut self, op0: Mem, op1: Zmm) {
108        self.emit(VPCOMPRESSB512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
109    }
110}
111
112impl<'a> VpcompressbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
113    fn vpcompressb_mask(&mut self, op0: Xmm, op1: Xmm) {
114        self.emit(VPCOMPRESSB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
115    }
116}
117
118impl<'a> VpcompressbMaskEmitter<Ymm, Ymm> for Assembler<'a> {
119    fn vpcompressb_mask(&mut self, op0: Ymm, op1: Ymm) {
120        self.emit(VPCOMPRESSB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
121    }
122}
123
124impl<'a> VpcompressbMaskEmitter<Zmm, Zmm> for Assembler<'a> {
125    fn vpcompressb_mask(&mut self, op0: Zmm, op1: Zmm) {
126        self.emit(VPCOMPRESSB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
127    }
128}
129
130/// `VPCOMPRESSB_MASKZ` (VPCOMPRESSB). 
131/// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
132///
133///
134/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
135///
136/// Supported operand variants:
137///
138/// ```text
139/// +---+----------+
140/// | # | Operands |
141/// +---+----------+
142/// | 1 | Xmm, Xmm |
143/// | 2 | Ymm, Ymm |
144/// | 3 | Zmm, Zmm |
145/// +---+----------+
146/// ```
147pub trait VpcompressbMaskzEmitter<A, B> {
148    fn vpcompressb_maskz(&mut self, op0: A, op1: B);
149}
150
151impl<'a> VpcompressbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
152    fn vpcompressb_maskz(&mut self, op0: Xmm, op1: Xmm) {
153        self.emit(VPCOMPRESSB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
154    }
155}
156
157impl<'a> VpcompressbMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
158    fn vpcompressb_maskz(&mut self, op0: Ymm, op1: Ymm) {
159        self.emit(VPCOMPRESSB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
160    }
161}
162
163impl<'a> VpcompressbMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
164    fn vpcompressb_maskz(&mut self, op0: Zmm, op1: Zmm) {
165        self.emit(VPCOMPRESSB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
166    }
167}
168
169/// `VPCOMPRESSW` (VPCOMPRESSW). 
170/// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
171///
172///
173/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
174///
175/// Supported operand variants:
176///
177/// ```text
178/// +---+----------+
179/// | # | Operands |
180/// +---+----------+
181/// | 1 | Mem, Xmm |
182/// | 2 | Mem, Ymm |
183/// | 3 | Mem, Zmm |
184/// | 4 | Xmm, Xmm |
185/// | 5 | Ymm, Ymm |
186/// | 6 | Zmm, Zmm |
187/// +---+----------+
188/// ```
189pub trait VpcompresswEmitter<A, B> {
190    fn vpcompressw(&mut self, op0: A, op1: B);
191}
192
193impl<'a> VpcompresswEmitter<Mem, Xmm> for Assembler<'a> {
194    fn vpcompressw(&mut self, op0: Mem, op1: Xmm) {
195        self.emit(VPCOMPRESSW128MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
196    }
197}
198
199impl<'a> VpcompresswEmitter<Mem, Ymm> for Assembler<'a> {
200    fn vpcompressw(&mut self, op0: Mem, op1: Ymm) {
201        self.emit(VPCOMPRESSW256MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
202    }
203}
204
205impl<'a> VpcompresswEmitter<Mem, Zmm> for Assembler<'a> {
206    fn vpcompressw(&mut self, op0: Mem, op1: Zmm) {
207        self.emit(VPCOMPRESSW512MR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
208    }
209}
210
211impl<'a> VpcompresswEmitter<Xmm, Xmm> for Assembler<'a> {
212    fn vpcompressw(&mut self, op0: Xmm, op1: Xmm) {
213        self.emit(VPCOMPRESSW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
214    }
215}
216
217impl<'a> VpcompresswEmitter<Ymm, Ymm> for Assembler<'a> {
218    fn vpcompressw(&mut self, op0: Ymm, op1: Ymm) {
219        self.emit(VPCOMPRESSW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
220    }
221}
222
223impl<'a> VpcompresswEmitter<Zmm, Zmm> for Assembler<'a> {
224    fn vpcompressw(&mut self, op0: Zmm, op1: Zmm) {
225        self.emit(VPCOMPRESSW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
226    }
227}
228
229/// `VPCOMPRESSW_MASK` (VPCOMPRESSW). 
230/// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
231///
232///
233/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
234///
235/// Supported operand variants:
236///
237/// ```text
238/// +---+----------+
239/// | # | Operands |
240/// +---+----------+
241/// | 1 | Mem, Xmm |
242/// | 2 | Mem, Ymm |
243/// | 3 | Mem, Zmm |
244/// | 4 | Xmm, Xmm |
245/// | 5 | Ymm, Ymm |
246/// | 6 | Zmm, Zmm |
247/// +---+----------+
248/// ```
249pub trait VpcompresswMaskEmitter<A, B> {
250    fn vpcompressw_mask(&mut self, op0: A, op1: B);
251}
252
253impl<'a> VpcompresswMaskEmitter<Mem, Xmm> for Assembler<'a> {
254    fn vpcompressw_mask(&mut self, op0: Mem, op1: Xmm) {
255        self.emit(VPCOMPRESSW128MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
256    }
257}
258
259impl<'a> VpcompresswMaskEmitter<Mem, Ymm> for Assembler<'a> {
260    fn vpcompressw_mask(&mut self, op0: Mem, op1: Ymm) {
261        self.emit(VPCOMPRESSW256MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
262    }
263}
264
265impl<'a> VpcompresswMaskEmitter<Mem, Zmm> for Assembler<'a> {
266    fn vpcompressw_mask(&mut self, op0: Mem, op1: Zmm) {
267        self.emit(VPCOMPRESSW512MR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
268    }
269}
270
271impl<'a> VpcompresswMaskEmitter<Xmm, Xmm> for Assembler<'a> {
272    fn vpcompressw_mask(&mut self, op0: Xmm, op1: Xmm) {
273        self.emit(VPCOMPRESSW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
274    }
275}
276
277impl<'a> VpcompresswMaskEmitter<Ymm, Ymm> for Assembler<'a> {
278    fn vpcompressw_mask(&mut self, op0: Ymm, op1: Ymm) {
279        self.emit(VPCOMPRESSW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
280    }
281}
282
283impl<'a> VpcompresswMaskEmitter<Zmm, Zmm> for Assembler<'a> {
284    fn vpcompressw_mask(&mut self, op0: Zmm, op1: Zmm) {
285        self.emit(VPCOMPRESSW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
286    }
287}
288
289/// `VPCOMPRESSW_MASKZ` (VPCOMPRESSW). 
290/// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
291///
292///
293/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
294///
295/// Supported operand variants:
296///
297/// ```text
298/// +---+----------+
299/// | # | Operands |
300/// +---+----------+
301/// | 1 | Xmm, Xmm |
302/// | 2 | Ymm, Ymm |
303/// | 3 | Zmm, Zmm |
304/// +---+----------+
305/// ```
306pub trait VpcompresswMaskzEmitter<A, B> {
307    fn vpcompressw_maskz(&mut self, op0: A, op1: B);
308}
309
310impl<'a> VpcompresswMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
311    fn vpcompressw_maskz(&mut self, op0: Xmm, op1: Xmm) {
312        self.emit(VPCOMPRESSW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
313    }
314}
315
316impl<'a> VpcompresswMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
317    fn vpcompressw_maskz(&mut self, op0: Ymm, op1: Ymm) {
318        self.emit(VPCOMPRESSW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
319    }
320}
321
322impl<'a> VpcompresswMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
323    fn vpcompressw_maskz(&mut self, op0: Zmm, op1: Zmm) {
324        self.emit(VPCOMPRESSW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
325    }
326}
327
328/// `VPEXPANDB` (VPEXPANDB). 
329/// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
330///
331///
332/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
333///
334/// Supported operand variants:
335///
336/// ```text
337/// +---+----------+
338/// | # | Operands |
339/// +---+----------+
340/// | 1 | Xmm, Mem |
341/// | 2 | Xmm, Xmm |
342/// | 3 | Ymm, Mem |
343/// | 4 | Ymm, Ymm |
344/// | 5 | Zmm, Mem |
345/// | 6 | Zmm, Zmm |
346/// +---+----------+
347/// ```
348pub trait VpexpandbEmitter<A, B> {
349    fn vpexpandb(&mut self, op0: A, op1: B);
350}
351
352impl<'a> VpexpandbEmitter<Xmm, Mem> for Assembler<'a> {
353    fn vpexpandb(&mut self, op0: Xmm, op1: Mem) {
354        self.emit(VPEXPANDB128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
355    }
356}
357
358impl<'a> VpexpandbEmitter<Ymm, Mem> for Assembler<'a> {
359    fn vpexpandb(&mut self, op0: Ymm, op1: Mem) {
360        self.emit(VPEXPANDB256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
361    }
362}
363
364impl<'a> VpexpandbEmitter<Zmm, Mem> for Assembler<'a> {
365    fn vpexpandb(&mut self, op0: Zmm, op1: Mem) {
366        self.emit(VPEXPANDB512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
367    }
368}
369
370impl<'a> VpexpandbEmitter<Xmm, Xmm> for Assembler<'a> {
371    fn vpexpandb(&mut self, op0: Xmm, op1: Xmm) {
372        self.emit(VPEXPANDB128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
373    }
374}
375
376impl<'a> VpexpandbEmitter<Ymm, Ymm> for Assembler<'a> {
377    fn vpexpandb(&mut self, op0: Ymm, op1: Ymm) {
378        self.emit(VPEXPANDB256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
379    }
380}
381
382impl<'a> VpexpandbEmitter<Zmm, Zmm> for Assembler<'a> {
383    fn vpexpandb(&mut self, op0: Zmm, op1: Zmm) {
384        self.emit(VPEXPANDB512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
385    }
386}
387
388/// `VPEXPANDB_MASK` (VPEXPANDB). 
389/// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
390///
391///
392/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
393///
394/// Supported operand variants:
395///
396/// ```text
397/// +---+----------+
398/// | # | Operands |
399/// +---+----------+
400/// | 1 | Xmm, Mem |
401/// | 2 | Xmm, Xmm |
402/// | 3 | Ymm, Mem |
403/// | 4 | Ymm, Ymm |
404/// | 5 | Zmm, Mem |
405/// | 6 | Zmm, Zmm |
406/// +---+----------+
407/// ```
408pub trait VpexpandbMaskEmitter<A, B> {
409    fn vpexpandb_mask(&mut self, op0: A, op1: B);
410}
411
412impl<'a> VpexpandbMaskEmitter<Xmm, Mem> for Assembler<'a> {
413    fn vpexpandb_mask(&mut self, op0: Xmm, op1: Mem) {
414        self.emit(VPEXPANDB128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
415    }
416}
417
418impl<'a> VpexpandbMaskEmitter<Ymm, Mem> for Assembler<'a> {
419    fn vpexpandb_mask(&mut self, op0: Ymm, op1: Mem) {
420        self.emit(VPEXPANDB256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
421    }
422}
423
424impl<'a> VpexpandbMaskEmitter<Zmm, Mem> for Assembler<'a> {
425    fn vpexpandb_mask(&mut self, op0: Zmm, op1: Mem) {
426        self.emit(VPEXPANDB512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
427    }
428}
429
430impl<'a> VpexpandbMaskEmitter<Xmm, Xmm> for Assembler<'a> {
431    fn vpexpandb_mask(&mut self, op0: Xmm, op1: Xmm) {
432        self.emit(VPEXPANDB128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
433    }
434}
435
436impl<'a> VpexpandbMaskEmitter<Ymm, Ymm> for Assembler<'a> {
437    fn vpexpandb_mask(&mut self, op0: Ymm, op1: Ymm) {
438        self.emit(VPEXPANDB256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
439    }
440}
441
442impl<'a> VpexpandbMaskEmitter<Zmm, Zmm> for Assembler<'a> {
443    fn vpexpandb_mask(&mut self, op0: Zmm, op1: Zmm) {
444        self.emit(VPEXPANDB512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
445    }
446}
447
448/// `VPEXPANDB_MASKZ` (VPEXPANDB). 
449/// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
450///
451///
452/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
453///
454/// Supported operand variants:
455///
456/// ```text
457/// +---+----------+
458/// | # | Operands |
459/// +---+----------+
460/// | 1 | Xmm, Mem |
461/// | 2 | Xmm, Xmm |
462/// | 3 | Ymm, Mem |
463/// | 4 | Ymm, Ymm |
464/// | 5 | Zmm, Mem |
465/// | 6 | Zmm, Zmm |
466/// +---+----------+
467/// ```
468pub trait VpexpandbMaskzEmitter<A, B> {
469    fn vpexpandb_maskz(&mut self, op0: A, op1: B);
470}
471
472impl<'a> VpexpandbMaskzEmitter<Xmm, Mem> for Assembler<'a> {
473    fn vpexpandb_maskz(&mut self, op0: Xmm, op1: Mem) {
474        self.emit(VPEXPANDB128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
475    }
476}
477
478impl<'a> VpexpandbMaskzEmitter<Ymm, Mem> for Assembler<'a> {
479    fn vpexpandb_maskz(&mut self, op0: Ymm, op1: Mem) {
480        self.emit(VPEXPANDB256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
481    }
482}
483
484impl<'a> VpexpandbMaskzEmitter<Zmm, Mem> for Assembler<'a> {
485    fn vpexpandb_maskz(&mut self, op0: Zmm, op1: Mem) {
486        self.emit(VPEXPANDB512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
487    }
488}
489
490impl<'a> VpexpandbMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
491    fn vpexpandb_maskz(&mut self, op0: Xmm, op1: Xmm) {
492        self.emit(VPEXPANDB128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
493    }
494}
495
496impl<'a> VpexpandbMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
497    fn vpexpandb_maskz(&mut self, op0: Ymm, op1: Ymm) {
498        self.emit(VPEXPANDB256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
499    }
500}
501
502impl<'a> VpexpandbMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
503    fn vpexpandb_maskz(&mut self, op0: Zmm, op1: Zmm) {
504        self.emit(VPEXPANDB512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
505    }
506}
507
508/// `VPEXPANDW` (VPEXPANDW). 
509/// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
510///
511///
512/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
513///
514/// Supported operand variants:
515///
516/// ```text
517/// +---+----------+
518/// | # | Operands |
519/// +---+----------+
520/// | 1 | Xmm, Mem |
521/// | 2 | Xmm, Xmm |
522/// | 3 | Ymm, Mem |
523/// | 4 | Ymm, Ymm |
524/// | 5 | Zmm, Mem |
525/// | 6 | Zmm, Zmm |
526/// +---+----------+
527/// ```
528pub trait VpexpandwEmitter<A, B> {
529    fn vpexpandw(&mut self, op0: A, op1: B);
530}
531
532impl<'a> VpexpandwEmitter<Xmm, Mem> for Assembler<'a> {
533    fn vpexpandw(&mut self, op0: Xmm, op1: Mem) {
534        self.emit(VPEXPANDW128RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
535    }
536}
537
538impl<'a> VpexpandwEmitter<Ymm, Mem> for Assembler<'a> {
539    fn vpexpandw(&mut self, op0: Ymm, op1: Mem) {
540        self.emit(VPEXPANDW256RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
541    }
542}
543
544impl<'a> VpexpandwEmitter<Zmm, Mem> for Assembler<'a> {
545    fn vpexpandw(&mut self, op0: Zmm, op1: Mem) {
546        self.emit(VPEXPANDW512RM, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
547    }
548}
549
550impl<'a> VpexpandwEmitter<Xmm, Xmm> for Assembler<'a> {
551    fn vpexpandw(&mut self, op0: Xmm, op1: Xmm) {
552        self.emit(VPEXPANDW128RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
553    }
554}
555
556impl<'a> VpexpandwEmitter<Ymm, Ymm> for Assembler<'a> {
557    fn vpexpandw(&mut self, op0: Ymm, op1: Ymm) {
558        self.emit(VPEXPANDW256RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
559    }
560}
561
562impl<'a> VpexpandwEmitter<Zmm, Zmm> for Assembler<'a> {
563    fn vpexpandw(&mut self, op0: Zmm, op1: Zmm) {
564        self.emit(VPEXPANDW512RR, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
565    }
566}
567
568/// `VPEXPANDW_MASK` (VPEXPANDW). 
569/// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
570///
571///
572/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
573///
574/// Supported operand variants:
575///
576/// ```text
577/// +---+----------+
578/// | # | Operands |
579/// +---+----------+
580/// | 1 | Xmm, Mem |
581/// | 2 | Xmm, Xmm |
582/// | 3 | Ymm, Mem |
583/// | 4 | Ymm, Ymm |
584/// | 5 | Zmm, Mem |
585/// | 6 | Zmm, Zmm |
586/// +---+----------+
587/// ```
588pub trait VpexpandwMaskEmitter<A, B> {
589    fn vpexpandw_mask(&mut self, op0: A, op1: B);
590}
591
592impl<'a> VpexpandwMaskEmitter<Xmm, Mem> for Assembler<'a> {
593    fn vpexpandw_mask(&mut self, op0: Xmm, op1: Mem) {
594        self.emit(VPEXPANDW128RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
595    }
596}
597
598impl<'a> VpexpandwMaskEmitter<Ymm, Mem> for Assembler<'a> {
599    fn vpexpandw_mask(&mut self, op0: Ymm, op1: Mem) {
600        self.emit(VPEXPANDW256RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
601    }
602}
603
604impl<'a> VpexpandwMaskEmitter<Zmm, Mem> for Assembler<'a> {
605    fn vpexpandw_mask(&mut self, op0: Zmm, op1: Mem) {
606        self.emit(VPEXPANDW512RM_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
607    }
608}
609
610impl<'a> VpexpandwMaskEmitter<Xmm, Xmm> for Assembler<'a> {
611    fn vpexpandw_mask(&mut self, op0: Xmm, op1: Xmm) {
612        self.emit(VPEXPANDW128RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
613    }
614}
615
616impl<'a> VpexpandwMaskEmitter<Ymm, Ymm> for Assembler<'a> {
617    fn vpexpandw_mask(&mut self, op0: Ymm, op1: Ymm) {
618        self.emit(VPEXPANDW256RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
619    }
620}
621
622impl<'a> VpexpandwMaskEmitter<Zmm, Zmm> for Assembler<'a> {
623    fn vpexpandw_mask(&mut self, op0: Zmm, op1: Zmm) {
624        self.emit(VPEXPANDW512RR_MASK, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
625    }
626}
627
628/// `VPEXPANDW_MASKZ` (VPEXPANDW). 
629/// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
630///
631///
632/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
633///
634/// Supported operand variants:
635///
636/// ```text
637/// +---+----------+
638/// | # | Operands |
639/// +---+----------+
640/// | 1 | Xmm, Mem |
641/// | 2 | Xmm, Xmm |
642/// | 3 | Ymm, Mem |
643/// | 4 | Ymm, Ymm |
644/// | 5 | Zmm, Mem |
645/// | 6 | Zmm, Zmm |
646/// +---+----------+
647/// ```
648pub trait VpexpandwMaskzEmitter<A, B> {
649    fn vpexpandw_maskz(&mut self, op0: A, op1: B);
650}
651
652impl<'a> VpexpandwMaskzEmitter<Xmm, Mem> for Assembler<'a> {
653    fn vpexpandw_maskz(&mut self, op0: Xmm, op1: Mem) {
654        self.emit(VPEXPANDW128RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
655    }
656}
657
658impl<'a> VpexpandwMaskzEmitter<Ymm, Mem> for Assembler<'a> {
659    fn vpexpandw_maskz(&mut self, op0: Ymm, op1: Mem) {
660        self.emit(VPEXPANDW256RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
661    }
662}
663
664impl<'a> VpexpandwMaskzEmitter<Zmm, Mem> for Assembler<'a> {
665    fn vpexpandw_maskz(&mut self, op0: Zmm, op1: Mem) {
666        self.emit(VPEXPANDW512RM_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
667    }
668}
669
670impl<'a> VpexpandwMaskzEmitter<Xmm, Xmm> for Assembler<'a> {
671    fn vpexpandw_maskz(&mut self, op0: Xmm, op1: Xmm) {
672        self.emit(VPEXPANDW128RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
673    }
674}
675
676impl<'a> VpexpandwMaskzEmitter<Ymm, Ymm> for Assembler<'a> {
677    fn vpexpandw_maskz(&mut self, op0: Ymm, op1: Ymm) {
678        self.emit(VPEXPANDW256RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
679    }
680}
681
682impl<'a> VpexpandwMaskzEmitter<Zmm, Zmm> for Assembler<'a> {
683    fn vpexpandw_maskz(&mut self, op0: Zmm, op1: Zmm) {
684        self.emit(VPEXPANDW512RR_MASKZ, op0.as_operand(), op1.as_operand(), &NOREG, &NOREG);
685    }
686}
687
688/// `VPSHLDD` (VPSHLDD). 
689/// Concatenate packed data, extract result shifted to the left by constant value.
690///
691///
692/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
693///
694/// Supported operand variants:
695///
696/// ```text
697/// +---+--------------------+
698/// | # | Operands           |
699/// +---+--------------------+
700/// | 1 | Xmm, Xmm, Mem, Imm |
701/// | 2 | Xmm, Xmm, Xmm, Imm |
702/// | 3 | Ymm, Ymm, Mem, Imm |
703/// | 4 | Ymm, Ymm, Ymm, Imm |
704/// | 5 | Zmm, Zmm, Mem, Imm |
705/// | 6 | Zmm, Zmm, Zmm, Imm |
706/// +---+--------------------+
707/// ```
708pub trait VpshlddEmitter<A, B, C, D> {
709    fn vpshldd(&mut self, op0: A, op1: B, op2: C, op3: D);
710}
711
712impl<'a> VpshlddEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
713    fn vpshldd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
714        self.emit(VPSHLDD128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
715    }
716}
717
718impl<'a> VpshlddEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
719    fn vpshldd(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
720        self.emit(VPSHLDD128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
721    }
722}
723
724impl<'a> VpshlddEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
725    fn vpshldd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
726        self.emit(VPSHLDD256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
727    }
728}
729
730impl<'a> VpshlddEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
731    fn vpshldd(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
732        self.emit(VPSHLDD256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
733    }
734}
735
736impl<'a> VpshlddEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
737    fn vpshldd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
738        self.emit(VPSHLDD512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
739    }
740}
741
742impl<'a> VpshlddEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
743    fn vpshldd(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
744        self.emit(VPSHLDD512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
745    }
746}
747
748/// `VPSHLDD_MASK` (VPSHLDD). 
749/// Concatenate packed data, extract result shifted to the left by constant value.
750///
751///
752/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
753///
754/// Supported operand variants:
755///
756/// ```text
757/// +---+--------------------+
758/// | # | Operands           |
759/// +---+--------------------+
760/// | 1 | Xmm, Xmm, Mem, Imm |
761/// | 2 | Xmm, Xmm, Xmm, Imm |
762/// | 3 | Ymm, Ymm, Mem, Imm |
763/// | 4 | Ymm, Ymm, Ymm, Imm |
764/// | 5 | Zmm, Zmm, Mem, Imm |
765/// | 6 | Zmm, Zmm, Zmm, Imm |
766/// +---+--------------------+
767/// ```
768pub trait VpshlddMaskEmitter<A, B, C, D> {
769    fn vpshldd_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
770}
771
772impl<'a> VpshlddMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
773    fn vpshldd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
774        self.emit(VPSHLDD128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
775    }
776}
777
778impl<'a> VpshlddMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
779    fn vpshldd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
780        self.emit(VPSHLDD128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
781    }
782}
783
784impl<'a> VpshlddMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
785    fn vpshldd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
786        self.emit(VPSHLDD256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
787    }
788}
789
790impl<'a> VpshlddMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
791    fn vpshldd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
792        self.emit(VPSHLDD256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
793    }
794}
795
796impl<'a> VpshlddMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
797    fn vpshldd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
798        self.emit(VPSHLDD512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
799    }
800}
801
802impl<'a> VpshlddMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
803    fn vpshldd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
804        self.emit(VPSHLDD512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
805    }
806}
807
808/// `VPSHLDD_MASKZ` (VPSHLDD). 
809/// Concatenate packed data, extract result shifted to the left by constant value.
810///
811///
812/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
813///
814/// Supported operand variants:
815///
816/// ```text
817/// +---+--------------------+
818/// | # | Operands           |
819/// +---+--------------------+
820/// | 1 | Xmm, Xmm, Mem, Imm |
821/// | 2 | Xmm, Xmm, Xmm, Imm |
822/// | 3 | Ymm, Ymm, Mem, Imm |
823/// | 4 | Ymm, Ymm, Ymm, Imm |
824/// | 5 | Zmm, Zmm, Mem, Imm |
825/// | 6 | Zmm, Zmm, Zmm, Imm |
826/// +---+--------------------+
827/// ```
828pub trait VpshlddMaskzEmitter<A, B, C, D> {
829    fn vpshldd_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
830}
831
832impl<'a> VpshlddMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
833    fn vpshldd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
834        self.emit(VPSHLDD128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
835    }
836}
837
838impl<'a> VpshlddMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
839    fn vpshldd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
840        self.emit(VPSHLDD128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
841    }
842}
843
844impl<'a> VpshlddMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
845    fn vpshldd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
846        self.emit(VPSHLDD256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
847    }
848}
849
850impl<'a> VpshlddMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
851    fn vpshldd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
852        self.emit(VPSHLDD256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
853    }
854}
855
856impl<'a> VpshlddMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
857    fn vpshldd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
858        self.emit(VPSHLDD512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
859    }
860}
861
862impl<'a> VpshlddMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
863    fn vpshldd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
864        self.emit(VPSHLDD512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
865    }
866}
867
868/// `VPSHLDQ` (VPSHLDQ). 
869/// Concatenate packed data, extract result shifted to the left by constant value.
870///
871///
872/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
873///
874/// Supported operand variants:
875///
876/// ```text
877/// +---+--------------------+
878/// | # | Operands           |
879/// +---+--------------------+
880/// | 1 | Xmm, Xmm, Mem, Imm |
881/// | 2 | Xmm, Xmm, Xmm, Imm |
882/// | 3 | Ymm, Ymm, Mem, Imm |
883/// | 4 | Ymm, Ymm, Ymm, Imm |
884/// | 5 | Zmm, Zmm, Mem, Imm |
885/// | 6 | Zmm, Zmm, Zmm, Imm |
886/// +---+--------------------+
887/// ```
888pub trait VpshldqEmitter<A, B, C, D> {
889    fn vpshldq(&mut self, op0: A, op1: B, op2: C, op3: D);
890}
891
892impl<'a> VpshldqEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
893    fn vpshldq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
894        self.emit(VPSHLDQ128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
895    }
896}
897
898impl<'a> VpshldqEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
899    fn vpshldq(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
900        self.emit(VPSHLDQ128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
901    }
902}
903
904impl<'a> VpshldqEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
905    fn vpshldq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
906        self.emit(VPSHLDQ256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
907    }
908}
909
910impl<'a> VpshldqEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
911    fn vpshldq(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
912        self.emit(VPSHLDQ256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
913    }
914}
915
916impl<'a> VpshldqEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
917    fn vpshldq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
918        self.emit(VPSHLDQ512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
919    }
920}
921
922impl<'a> VpshldqEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
923    fn vpshldq(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
924        self.emit(VPSHLDQ512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
925    }
926}
927
928/// `VPSHLDQ_MASK` (VPSHLDQ). 
929/// Concatenate packed data, extract result shifted to the left by constant value.
930///
931///
932/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
933///
934/// Supported operand variants:
935///
936/// ```text
937/// +---+--------------------+
938/// | # | Operands           |
939/// +---+--------------------+
940/// | 1 | Xmm, Xmm, Mem, Imm |
941/// | 2 | Xmm, Xmm, Xmm, Imm |
942/// | 3 | Ymm, Ymm, Mem, Imm |
943/// | 4 | Ymm, Ymm, Ymm, Imm |
944/// | 5 | Zmm, Zmm, Mem, Imm |
945/// | 6 | Zmm, Zmm, Zmm, Imm |
946/// +---+--------------------+
947/// ```
948pub trait VpshldqMaskEmitter<A, B, C, D> {
949    fn vpshldq_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
950}
951
952impl<'a> VpshldqMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
953    fn vpshldq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
954        self.emit(VPSHLDQ128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
955    }
956}
957
958impl<'a> VpshldqMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
959    fn vpshldq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
960        self.emit(VPSHLDQ128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
961    }
962}
963
964impl<'a> VpshldqMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
965    fn vpshldq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
966        self.emit(VPSHLDQ256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
967    }
968}
969
970impl<'a> VpshldqMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
971    fn vpshldq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
972        self.emit(VPSHLDQ256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
973    }
974}
975
976impl<'a> VpshldqMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
977    fn vpshldq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
978        self.emit(VPSHLDQ512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
979    }
980}
981
982impl<'a> VpshldqMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
983    fn vpshldq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
984        self.emit(VPSHLDQ512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
985    }
986}
987
988/// `VPSHLDQ_MASKZ` (VPSHLDQ). 
989/// Concatenate packed data, extract result shifted to the left by constant value.
990///
991///
992/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
993///
994/// Supported operand variants:
995///
996/// ```text
997/// +---+--------------------+
998/// | # | Operands           |
999/// +---+--------------------+
1000/// | 1 | Xmm, Xmm, Mem, Imm |
1001/// | 2 | Xmm, Xmm, Xmm, Imm |
1002/// | 3 | Ymm, Ymm, Mem, Imm |
1003/// | 4 | Ymm, Ymm, Ymm, Imm |
1004/// | 5 | Zmm, Zmm, Mem, Imm |
1005/// | 6 | Zmm, Zmm, Zmm, Imm |
1006/// +---+--------------------+
1007/// ```
1008pub trait VpshldqMaskzEmitter<A, B, C, D> {
1009    fn vpshldq_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
1010}
1011
1012impl<'a> VpshldqMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1013    fn vpshldq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1014        self.emit(VPSHLDQ128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1015    }
1016}
1017
1018impl<'a> VpshldqMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1019    fn vpshldq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1020        self.emit(VPSHLDQ128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1021    }
1022}
1023
1024impl<'a> VpshldqMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1025    fn vpshldq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1026        self.emit(VPSHLDQ256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1027    }
1028}
1029
1030impl<'a> VpshldqMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1031    fn vpshldq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1032        self.emit(VPSHLDQ256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1033    }
1034}
1035
1036impl<'a> VpshldqMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1037    fn vpshldq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1038        self.emit(VPSHLDQ512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1039    }
1040}
1041
1042impl<'a> VpshldqMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1043    fn vpshldq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1044        self.emit(VPSHLDQ512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1045    }
1046}
1047
1048/// `VPSHLDVD` (VPSHLDVD). 
1049/// Concatenate packed data, extract result shifted to the left by variable value.
1050///
1051///
1052/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1053///
1054/// Supported operand variants:
1055///
1056/// ```text
1057/// +---+---------------+
1058/// | # | Operands      |
1059/// +---+---------------+
1060/// | 1 | Xmm, Xmm, Mem |
1061/// | 2 | Xmm, Xmm, Xmm |
1062/// | 3 | Ymm, Ymm, Mem |
1063/// | 4 | Ymm, Ymm, Ymm |
1064/// | 5 | Zmm, Zmm, Mem |
1065/// | 6 | Zmm, Zmm, Zmm |
1066/// +---+---------------+
1067/// ```
1068pub trait VpshldvdEmitter<A, B, C> {
1069    fn vpshldvd(&mut self, op0: A, op1: B, op2: C);
1070}
1071
1072impl<'a> VpshldvdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1073    fn vpshldvd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1074        self.emit(VPSHLDVD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1075    }
1076}
1077
1078impl<'a> VpshldvdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1079    fn vpshldvd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1080        self.emit(VPSHLDVD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1081    }
1082}
1083
1084impl<'a> VpshldvdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1085    fn vpshldvd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1086        self.emit(VPSHLDVD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1087    }
1088}
1089
1090impl<'a> VpshldvdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1091    fn vpshldvd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1092        self.emit(VPSHLDVD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1093    }
1094}
1095
1096impl<'a> VpshldvdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1097    fn vpshldvd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1098        self.emit(VPSHLDVD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1099    }
1100}
1101
1102impl<'a> VpshldvdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1103    fn vpshldvd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1104        self.emit(VPSHLDVD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1105    }
1106}
1107
1108/// `VPSHLDVD_MASK` (VPSHLDVD). 
1109/// Concatenate packed data, extract result shifted to the left by variable value.
1110///
1111///
1112/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1113///
1114/// Supported operand variants:
1115///
1116/// ```text
1117/// +---+---------------+
1118/// | # | Operands      |
1119/// +---+---------------+
1120/// | 1 | Xmm, Xmm, Mem |
1121/// | 2 | Xmm, Xmm, Xmm |
1122/// | 3 | Ymm, Ymm, Mem |
1123/// | 4 | Ymm, Ymm, Ymm |
1124/// | 5 | Zmm, Zmm, Mem |
1125/// | 6 | Zmm, Zmm, Zmm |
1126/// +---+---------------+
1127/// ```
1128pub trait VpshldvdMaskEmitter<A, B, C> {
1129    fn vpshldvd_mask(&mut self, op0: A, op1: B, op2: C);
1130}
1131
1132impl<'a> VpshldvdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1133    fn vpshldvd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1134        self.emit(VPSHLDVD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1135    }
1136}
1137
1138impl<'a> VpshldvdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1139    fn vpshldvd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1140        self.emit(VPSHLDVD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1141    }
1142}
1143
1144impl<'a> VpshldvdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1145    fn vpshldvd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1146        self.emit(VPSHLDVD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1147    }
1148}
1149
1150impl<'a> VpshldvdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1151    fn vpshldvd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1152        self.emit(VPSHLDVD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1153    }
1154}
1155
1156impl<'a> VpshldvdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1157    fn vpshldvd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1158        self.emit(VPSHLDVD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1159    }
1160}
1161
1162impl<'a> VpshldvdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1163    fn vpshldvd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1164        self.emit(VPSHLDVD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1165    }
1166}
1167
1168/// `VPSHLDVD_MASKZ` (VPSHLDVD). 
1169/// Concatenate packed data, extract result shifted to the left by variable value.
1170///
1171///
1172/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1173///
1174/// Supported operand variants:
1175///
1176/// ```text
1177/// +---+---------------+
1178/// | # | Operands      |
1179/// +---+---------------+
1180/// | 1 | Xmm, Xmm, Mem |
1181/// | 2 | Xmm, Xmm, Xmm |
1182/// | 3 | Ymm, Ymm, Mem |
1183/// | 4 | Ymm, Ymm, Ymm |
1184/// | 5 | Zmm, Zmm, Mem |
1185/// | 6 | Zmm, Zmm, Zmm |
1186/// +---+---------------+
1187/// ```
1188pub trait VpshldvdMaskzEmitter<A, B, C> {
1189    fn vpshldvd_maskz(&mut self, op0: A, op1: B, op2: C);
1190}
1191
1192impl<'a> VpshldvdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1193    fn vpshldvd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1194        self.emit(VPSHLDVD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1195    }
1196}
1197
1198impl<'a> VpshldvdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1199    fn vpshldvd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1200        self.emit(VPSHLDVD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1201    }
1202}
1203
1204impl<'a> VpshldvdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1205    fn vpshldvd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1206        self.emit(VPSHLDVD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1207    }
1208}
1209
1210impl<'a> VpshldvdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1211    fn vpshldvd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1212        self.emit(VPSHLDVD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1213    }
1214}
1215
1216impl<'a> VpshldvdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1217    fn vpshldvd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1218        self.emit(VPSHLDVD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1219    }
1220}
1221
1222impl<'a> VpshldvdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1223    fn vpshldvd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1224        self.emit(VPSHLDVD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1225    }
1226}
1227
1228/// `VPSHLDVQ` (VPSHLDVQ). 
1229/// Concatenate packed data, extract result shifted to the left by variable value.
1230///
1231///
1232/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1233///
1234/// Supported operand variants:
1235///
1236/// ```text
1237/// +---+---------------+
1238/// | # | Operands      |
1239/// +---+---------------+
1240/// | 1 | Xmm, Xmm, Mem |
1241/// | 2 | Xmm, Xmm, Xmm |
1242/// | 3 | Ymm, Ymm, Mem |
1243/// | 4 | Ymm, Ymm, Ymm |
1244/// | 5 | Zmm, Zmm, Mem |
1245/// | 6 | Zmm, Zmm, Zmm |
1246/// +---+---------------+
1247/// ```
1248pub trait VpshldvqEmitter<A, B, C> {
1249    fn vpshldvq(&mut self, op0: A, op1: B, op2: C);
1250}
1251
1252impl<'a> VpshldvqEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1253    fn vpshldvq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1254        self.emit(VPSHLDVQ128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1255    }
1256}
1257
1258impl<'a> VpshldvqEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1259    fn vpshldvq(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1260        self.emit(VPSHLDVQ128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1261    }
1262}
1263
1264impl<'a> VpshldvqEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1265    fn vpshldvq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1266        self.emit(VPSHLDVQ256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1267    }
1268}
1269
1270impl<'a> VpshldvqEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1271    fn vpshldvq(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1272        self.emit(VPSHLDVQ256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1273    }
1274}
1275
1276impl<'a> VpshldvqEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1277    fn vpshldvq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1278        self.emit(VPSHLDVQ512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1279    }
1280}
1281
1282impl<'a> VpshldvqEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1283    fn vpshldvq(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1284        self.emit(VPSHLDVQ512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1285    }
1286}
1287
1288/// `VPSHLDVQ_MASK` (VPSHLDVQ). 
1289/// Concatenate packed data, extract result shifted to the left by variable value.
1290///
1291///
1292/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1293///
1294/// Supported operand variants:
1295///
1296/// ```text
1297/// +---+---------------+
1298/// | # | Operands      |
1299/// +---+---------------+
1300/// | 1 | Xmm, Xmm, Mem |
1301/// | 2 | Xmm, Xmm, Xmm |
1302/// | 3 | Ymm, Ymm, Mem |
1303/// | 4 | Ymm, Ymm, Ymm |
1304/// | 5 | Zmm, Zmm, Mem |
1305/// | 6 | Zmm, Zmm, Zmm |
1306/// +---+---------------+
1307/// ```
1308pub trait VpshldvqMaskEmitter<A, B, C> {
1309    fn vpshldvq_mask(&mut self, op0: A, op1: B, op2: C);
1310}
1311
1312impl<'a> VpshldvqMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1313    fn vpshldvq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1314        self.emit(VPSHLDVQ128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1315    }
1316}
1317
1318impl<'a> VpshldvqMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1319    fn vpshldvq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1320        self.emit(VPSHLDVQ128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1321    }
1322}
1323
1324impl<'a> VpshldvqMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1325    fn vpshldvq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1326        self.emit(VPSHLDVQ256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1327    }
1328}
1329
1330impl<'a> VpshldvqMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1331    fn vpshldvq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1332        self.emit(VPSHLDVQ256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1333    }
1334}
1335
1336impl<'a> VpshldvqMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1337    fn vpshldvq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1338        self.emit(VPSHLDVQ512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1339    }
1340}
1341
1342impl<'a> VpshldvqMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1343    fn vpshldvq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1344        self.emit(VPSHLDVQ512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1345    }
1346}
1347
1348/// `VPSHLDVQ_MASKZ` (VPSHLDVQ). 
1349/// Concatenate packed data, extract result shifted to the left by variable value.
1350///
1351///
1352/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1353///
1354/// Supported operand variants:
1355///
1356/// ```text
1357/// +---+---------------+
1358/// | # | Operands      |
1359/// +---+---------------+
1360/// | 1 | Xmm, Xmm, Mem |
1361/// | 2 | Xmm, Xmm, Xmm |
1362/// | 3 | Ymm, Ymm, Mem |
1363/// | 4 | Ymm, Ymm, Ymm |
1364/// | 5 | Zmm, Zmm, Mem |
1365/// | 6 | Zmm, Zmm, Zmm |
1366/// +---+---------------+
1367/// ```
1368pub trait VpshldvqMaskzEmitter<A, B, C> {
1369    fn vpshldvq_maskz(&mut self, op0: A, op1: B, op2: C);
1370}
1371
1372impl<'a> VpshldvqMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1373    fn vpshldvq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1374        self.emit(VPSHLDVQ128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1375    }
1376}
1377
1378impl<'a> VpshldvqMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1379    fn vpshldvq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1380        self.emit(VPSHLDVQ128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1381    }
1382}
1383
1384impl<'a> VpshldvqMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1385    fn vpshldvq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1386        self.emit(VPSHLDVQ256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1387    }
1388}
1389
1390impl<'a> VpshldvqMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1391    fn vpshldvq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1392        self.emit(VPSHLDVQ256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1393    }
1394}
1395
1396impl<'a> VpshldvqMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1397    fn vpshldvq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1398        self.emit(VPSHLDVQ512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1399    }
1400}
1401
1402impl<'a> VpshldvqMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1403    fn vpshldvq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1404        self.emit(VPSHLDVQ512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1405    }
1406}
1407
1408/// `VPSHLDVW` (VPSHLDVW). 
1409/// Concatenate packed data, extract result shifted to the left by variable value.
1410///
1411///
1412/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1413///
1414/// Supported operand variants:
1415///
1416/// ```text
1417/// +---+---------------+
1418/// | # | Operands      |
1419/// +---+---------------+
1420/// | 1 | Xmm, Xmm, Mem |
1421/// | 2 | Xmm, Xmm, Xmm |
1422/// | 3 | Ymm, Ymm, Mem |
1423/// | 4 | Ymm, Ymm, Ymm |
1424/// | 5 | Zmm, Zmm, Mem |
1425/// | 6 | Zmm, Zmm, Zmm |
1426/// +---+---------------+
1427/// ```
1428pub trait VpshldvwEmitter<A, B, C> {
1429    fn vpshldvw(&mut self, op0: A, op1: B, op2: C);
1430}
1431
1432impl<'a> VpshldvwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1433    fn vpshldvw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1434        self.emit(VPSHLDVW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1435    }
1436}
1437
1438impl<'a> VpshldvwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1439    fn vpshldvw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1440        self.emit(VPSHLDVW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1441    }
1442}
1443
1444impl<'a> VpshldvwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1445    fn vpshldvw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1446        self.emit(VPSHLDVW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1447    }
1448}
1449
1450impl<'a> VpshldvwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1451    fn vpshldvw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1452        self.emit(VPSHLDVW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1453    }
1454}
1455
1456impl<'a> VpshldvwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1457    fn vpshldvw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1458        self.emit(VPSHLDVW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1459    }
1460}
1461
1462impl<'a> VpshldvwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1463    fn vpshldvw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1464        self.emit(VPSHLDVW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1465    }
1466}
1467
1468/// `VPSHLDVW_MASK` (VPSHLDVW). 
1469/// Concatenate packed data, extract result shifted to the left by variable value.
1470///
1471///
1472/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1473///
1474/// Supported operand variants:
1475///
1476/// ```text
1477/// +---+---------------+
1478/// | # | Operands      |
1479/// +---+---------------+
1480/// | 1 | Xmm, Xmm, Mem |
1481/// | 2 | Xmm, Xmm, Xmm |
1482/// | 3 | Ymm, Ymm, Mem |
1483/// | 4 | Ymm, Ymm, Ymm |
1484/// | 5 | Zmm, Zmm, Mem |
1485/// | 6 | Zmm, Zmm, Zmm |
1486/// +---+---------------+
1487/// ```
1488pub trait VpshldvwMaskEmitter<A, B, C> {
1489    fn vpshldvw_mask(&mut self, op0: A, op1: B, op2: C);
1490}
1491
1492impl<'a> VpshldvwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1493    fn vpshldvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1494        self.emit(VPSHLDVW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1495    }
1496}
1497
1498impl<'a> VpshldvwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1499    fn vpshldvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1500        self.emit(VPSHLDVW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1501    }
1502}
1503
1504impl<'a> VpshldvwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1505    fn vpshldvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1506        self.emit(VPSHLDVW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1507    }
1508}
1509
1510impl<'a> VpshldvwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1511    fn vpshldvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1512        self.emit(VPSHLDVW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1513    }
1514}
1515
1516impl<'a> VpshldvwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1517    fn vpshldvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1518        self.emit(VPSHLDVW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1519    }
1520}
1521
1522impl<'a> VpshldvwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1523    fn vpshldvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1524        self.emit(VPSHLDVW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1525    }
1526}
1527
1528/// `VPSHLDVW_MASKZ` (VPSHLDVW). 
1529/// Concatenate packed data, extract result shifted to the left by variable value.
1530///
1531///
1532/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
1533///
1534/// Supported operand variants:
1535///
1536/// ```text
1537/// +---+---------------+
1538/// | # | Operands      |
1539/// +---+---------------+
1540/// | 1 | Xmm, Xmm, Mem |
1541/// | 2 | Xmm, Xmm, Xmm |
1542/// | 3 | Ymm, Ymm, Mem |
1543/// | 4 | Ymm, Ymm, Ymm |
1544/// | 5 | Zmm, Zmm, Mem |
1545/// | 6 | Zmm, Zmm, Zmm |
1546/// +---+---------------+
1547/// ```
1548pub trait VpshldvwMaskzEmitter<A, B, C> {
1549    fn vpshldvw_maskz(&mut self, op0: A, op1: B, op2: C);
1550}
1551
1552impl<'a> VpshldvwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
1553    fn vpshldvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
1554        self.emit(VPSHLDVW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1555    }
1556}
1557
1558impl<'a> VpshldvwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
1559    fn vpshldvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
1560        self.emit(VPSHLDVW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1561    }
1562}
1563
1564impl<'a> VpshldvwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
1565    fn vpshldvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
1566        self.emit(VPSHLDVW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1567    }
1568}
1569
1570impl<'a> VpshldvwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
1571    fn vpshldvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
1572        self.emit(VPSHLDVW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1573    }
1574}
1575
1576impl<'a> VpshldvwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
1577    fn vpshldvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
1578        self.emit(VPSHLDVW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1579    }
1580}
1581
1582impl<'a> VpshldvwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
1583    fn vpshldvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
1584        self.emit(VPSHLDVW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
1585    }
1586}
1587
1588/// `VPSHLDW` (VPSHLDW). 
1589/// Concatenate packed data, extract result shifted to the left by constant value.
1590///
1591///
1592/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
1593///
1594/// Supported operand variants:
1595///
1596/// ```text
1597/// +---+--------------------+
1598/// | # | Operands           |
1599/// +---+--------------------+
1600/// | 1 | Xmm, Xmm, Mem, Imm |
1601/// | 2 | Xmm, Xmm, Xmm, Imm |
1602/// | 3 | Ymm, Ymm, Mem, Imm |
1603/// | 4 | Ymm, Ymm, Ymm, Imm |
1604/// | 5 | Zmm, Zmm, Mem, Imm |
1605/// | 6 | Zmm, Zmm, Zmm, Imm |
1606/// +---+--------------------+
1607/// ```
1608pub trait VpshldwEmitter<A, B, C, D> {
1609    fn vpshldw(&mut self, op0: A, op1: B, op2: C, op3: D);
1610}
1611
1612impl<'a> VpshldwEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1613    fn vpshldw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1614        self.emit(VPSHLDW128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1615    }
1616}
1617
1618impl<'a> VpshldwEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1619    fn vpshldw(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1620        self.emit(VPSHLDW128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1621    }
1622}
1623
1624impl<'a> VpshldwEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1625    fn vpshldw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1626        self.emit(VPSHLDW256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1627    }
1628}
1629
1630impl<'a> VpshldwEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1631    fn vpshldw(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1632        self.emit(VPSHLDW256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1633    }
1634}
1635
1636impl<'a> VpshldwEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1637    fn vpshldw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1638        self.emit(VPSHLDW512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1639    }
1640}
1641
1642impl<'a> VpshldwEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1643    fn vpshldw(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1644        self.emit(VPSHLDW512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1645    }
1646}
1647
1648/// `VPSHLDW_MASK` (VPSHLDW). 
1649/// Concatenate packed data, extract result shifted to the left by constant value.
1650///
1651///
1652/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
1653///
1654/// Supported operand variants:
1655///
1656/// ```text
1657/// +---+--------------------+
1658/// | # | Operands           |
1659/// +---+--------------------+
1660/// | 1 | Xmm, Xmm, Mem, Imm |
1661/// | 2 | Xmm, Xmm, Xmm, Imm |
1662/// | 3 | Ymm, Ymm, Mem, Imm |
1663/// | 4 | Ymm, Ymm, Ymm, Imm |
1664/// | 5 | Zmm, Zmm, Mem, Imm |
1665/// | 6 | Zmm, Zmm, Zmm, Imm |
1666/// +---+--------------------+
1667/// ```
1668pub trait VpshldwMaskEmitter<A, B, C, D> {
1669    fn vpshldw_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
1670}
1671
1672impl<'a> VpshldwMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1673    fn vpshldw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1674        self.emit(VPSHLDW128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1675    }
1676}
1677
1678impl<'a> VpshldwMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1679    fn vpshldw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1680        self.emit(VPSHLDW128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1681    }
1682}
1683
1684impl<'a> VpshldwMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1685    fn vpshldw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1686        self.emit(VPSHLDW256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1687    }
1688}
1689
1690impl<'a> VpshldwMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1691    fn vpshldw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1692        self.emit(VPSHLDW256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1693    }
1694}
1695
1696impl<'a> VpshldwMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1697    fn vpshldw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1698        self.emit(VPSHLDW512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1699    }
1700}
1701
1702impl<'a> VpshldwMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1703    fn vpshldw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1704        self.emit(VPSHLDW512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1705    }
1706}
1707
1708/// `VPSHLDW_MASKZ` (VPSHLDW). 
1709/// Concatenate packed data, extract result shifted to the left by constant value.
1710///
1711///
1712/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
1713///
1714/// Supported operand variants:
1715///
1716/// ```text
1717/// +---+--------------------+
1718/// | # | Operands           |
1719/// +---+--------------------+
1720/// | 1 | Xmm, Xmm, Mem, Imm |
1721/// | 2 | Xmm, Xmm, Xmm, Imm |
1722/// | 3 | Ymm, Ymm, Mem, Imm |
1723/// | 4 | Ymm, Ymm, Ymm, Imm |
1724/// | 5 | Zmm, Zmm, Mem, Imm |
1725/// | 6 | Zmm, Zmm, Zmm, Imm |
1726/// +---+--------------------+
1727/// ```
1728pub trait VpshldwMaskzEmitter<A, B, C, D> {
1729    fn vpshldw_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
1730}
1731
1732impl<'a> VpshldwMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1733    fn vpshldw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1734        self.emit(VPSHLDW128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1735    }
1736}
1737
1738impl<'a> VpshldwMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1739    fn vpshldw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1740        self.emit(VPSHLDW128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1741    }
1742}
1743
1744impl<'a> VpshldwMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1745    fn vpshldw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1746        self.emit(VPSHLDW256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1747    }
1748}
1749
1750impl<'a> VpshldwMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1751    fn vpshldw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1752        self.emit(VPSHLDW256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1753    }
1754}
1755
1756impl<'a> VpshldwMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1757    fn vpshldw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1758        self.emit(VPSHLDW512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1759    }
1760}
1761
1762impl<'a> VpshldwMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1763    fn vpshldw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1764        self.emit(VPSHLDW512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1765    }
1766}
1767
1768/// `VPSHRDD` (VPSHRDD). 
1769/// Concatenate packed data, extract result shifted to the right by constant value.
1770///
1771///
1772/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
1773///
1774/// Supported operand variants:
1775///
1776/// ```text
1777/// +---+--------------------+
1778/// | # | Operands           |
1779/// +---+--------------------+
1780/// | 1 | Xmm, Xmm, Mem, Imm |
1781/// | 2 | Xmm, Xmm, Xmm, Imm |
1782/// | 3 | Ymm, Ymm, Mem, Imm |
1783/// | 4 | Ymm, Ymm, Ymm, Imm |
1784/// | 5 | Zmm, Zmm, Mem, Imm |
1785/// | 6 | Zmm, Zmm, Zmm, Imm |
1786/// +---+--------------------+
1787/// ```
1788pub trait VpshrddEmitter<A, B, C, D> {
1789    fn vpshrdd(&mut self, op0: A, op1: B, op2: C, op3: D);
1790}
1791
1792impl<'a> VpshrddEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1793    fn vpshrdd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1794        self.emit(VPSHRDD128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1795    }
1796}
1797
1798impl<'a> VpshrddEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1799    fn vpshrdd(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1800        self.emit(VPSHRDD128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1801    }
1802}
1803
1804impl<'a> VpshrddEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1805    fn vpshrdd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1806        self.emit(VPSHRDD256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1807    }
1808}
1809
1810impl<'a> VpshrddEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1811    fn vpshrdd(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1812        self.emit(VPSHRDD256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1813    }
1814}
1815
1816impl<'a> VpshrddEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1817    fn vpshrdd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1818        self.emit(VPSHRDD512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1819    }
1820}
1821
1822impl<'a> VpshrddEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1823    fn vpshrdd(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1824        self.emit(VPSHRDD512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1825    }
1826}
1827
1828/// `VPSHRDD_MASK` (VPSHRDD). 
1829/// Concatenate packed data, extract result shifted to the right by constant value.
1830///
1831///
1832/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
1833///
1834/// Supported operand variants:
1835///
1836/// ```text
1837/// +---+--------------------+
1838/// | # | Operands           |
1839/// +---+--------------------+
1840/// | 1 | Xmm, Xmm, Mem, Imm |
1841/// | 2 | Xmm, Xmm, Xmm, Imm |
1842/// | 3 | Ymm, Ymm, Mem, Imm |
1843/// | 4 | Ymm, Ymm, Ymm, Imm |
1844/// | 5 | Zmm, Zmm, Mem, Imm |
1845/// | 6 | Zmm, Zmm, Zmm, Imm |
1846/// +---+--------------------+
1847/// ```
1848pub trait VpshrddMaskEmitter<A, B, C, D> {
1849    fn vpshrdd_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
1850}
1851
1852impl<'a> VpshrddMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1853    fn vpshrdd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1854        self.emit(VPSHRDD128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1855    }
1856}
1857
1858impl<'a> VpshrddMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1859    fn vpshrdd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1860        self.emit(VPSHRDD128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1861    }
1862}
1863
1864impl<'a> VpshrddMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1865    fn vpshrdd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1866        self.emit(VPSHRDD256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1867    }
1868}
1869
1870impl<'a> VpshrddMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1871    fn vpshrdd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1872        self.emit(VPSHRDD256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1873    }
1874}
1875
1876impl<'a> VpshrddMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1877    fn vpshrdd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1878        self.emit(VPSHRDD512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1879    }
1880}
1881
1882impl<'a> VpshrddMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1883    fn vpshrdd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1884        self.emit(VPSHRDD512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1885    }
1886}
1887
1888/// `VPSHRDD_MASKZ` (VPSHRDD). 
1889/// Concatenate packed data, extract result shifted to the right by constant value.
1890///
1891///
1892/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
1893///
1894/// Supported operand variants:
1895///
1896/// ```text
1897/// +---+--------------------+
1898/// | # | Operands           |
1899/// +---+--------------------+
1900/// | 1 | Xmm, Xmm, Mem, Imm |
1901/// | 2 | Xmm, Xmm, Xmm, Imm |
1902/// | 3 | Ymm, Ymm, Mem, Imm |
1903/// | 4 | Ymm, Ymm, Ymm, Imm |
1904/// | 5 | Zmm, Zmm, Mem, Imm |
1905/// | 6 | Zmm, Zmm, Zmm, Imm |
1906/// +---+--------------------+
1907/// ```
1908pub trait VpshrddMaskzEmitter<A, B, C, D> {
1909    fn vpshrdd_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
1910}
1911
1912impl<'a> VpshrddMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1913    fn vpshrdd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1914        self.emit(VPSHRDD128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1915    }
1916}
1917
1918impl<'a> VpshrddMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1919    fn vpshrdd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1920        self.emit(VPSHRDD128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1921    }
1922}
1923
1924impl<'a> VpshrddMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1925    fn vpshrdd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1926        self.emit(VPSHRDD256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1927    }
1928}
1929
1930impl<'a> VpshrddMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1931    fn vpshrdd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1932        self.emit(VPSHRDD256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1933    }
1934}
1935
1936impl<'a> VpshrddMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1937    fn vpshrdd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1938        self.emit(VPSHRDD512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1939    }
1940}
1941
1942impl<'a> VpshrddMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
1943    fn vpshrdd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
1944        self.emit(VPSHRDD512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1945    }
1946}
1947
1948/// `VPSHRDQ` (VPSHRDQ). 
1949/// Concatenate packed data, extract result shifted to the right by constant value.
1950///
1951///
1952/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
1953///
1954/// Supported operand variants:
1955///
1956/// ```text
1957/// +---+--------------------+
1958/// | # | Operands           |
1959/// +---+--------------------+
1960/// | 1 | Xmm, Xmm, Mem, Imm |
1961/// | 2 | Xmm, Xmm, Xmm, Imm |
1962/// | 3 | Ymm, Ymm, Mem, Imm |
1963/// | 4 | Ymm, Ymm, Ymm, Imm |
1964/// | 5 | Zmm, Zmm, Mem, Imm |
1965/// | 6 | Zmm, Zmm, Zmm, Imm |
1966/// +---+--------------------+
1967/// ```
1968pub trait VpshrdqEmitter<A, B, C, D> {
1969    fn vpshrdq(&mut self, op0: A, op1: B, op2: C, op3: D);
1970}
1971
1972impl<'a> VpshrdqEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
1973    fn vpshrdq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
1974        self.emit(VPSHRDQ128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1975    }
1976}
1977
1978impl<'a> VpshrdqEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
1979    fn vpshrdq(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
1980        self.emit(VPSHRDQ128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1981    }
1982}
1983
1984impl<'a> VpshrdqEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
1985    fn vpshrdq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
1986        self.emit(VPSHRDQ256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1987    }
1988}
1989
1990impl<'a> VpshrdqEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
1991    fn vpshrdq(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
1992        self.emit(VPSHRDQ256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1993    }
1994}
1995
1996impl<'a> VpshrdqEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
1997    fn vpshrdq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
1998        self.emit(VPSHRDQ512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
1999    }
2000}
2001
2002impl<'a> VpshrdqEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
2003    fn vpshrdq(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
2004        self.emit(VPSHRDQ512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2005    }
2006}
2007
2008/// `VPSHRDQ_MASK` (VPSHRDQ). 
2009/// Concatenate packed data, extract result shifted to the right by constant value.
2010///
2011///
2012/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
2013///
2014/// Supported operand variants:
2015///
2016/// ```text
2017/// +---+--------------------+
2018/// | # | Operands           |
2019/// +---+--------------------+
2020/// | 1 | Xmm, Xmm, Mem, Imm |
2021/// | 2 | Xmm, Xmm, Xmm, Imm |
2022/// | 3 | Ymm, Ymm, Mem, Imm |
2023/// | 4 | Ymm, Ymm, Ymm, Imm |
2024/// | 5 | Zmm, Zmm, Mem, Imm |
2025/// | 6 | Zmm, Zmm, Zmm, Imm |
2026/// +---+--------------------+
2027/// ```
2028pub trait VpshrdqMaskEmitter<A, B, C, D> {
2029    fn vpshrdq_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
2030}
2031
2032impl<'a> VpshrdqMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
2033    fn vpshrdq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
2034        self.emit(VPSHRDQ128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2035    }
2036}
2037
2038impl<'a> VpshrdqMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
2039    fn vpshrdq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
2040        self.emit(VPSHRDQ128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2041    }
2042}
2043
2044impl<'a> VpshrdqMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
2045    fn vpshrdq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
2046        self.emit(VPSHRDQ256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2047    }
2048}
2049
2050impl<'a> VpshrdqMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
2051    fn vpshrdq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
2052        self.emit(VPSHRDQ256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2053    }
2054}
2055
2056impl<'a> VpshrdqMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
2057    fn vpshrdq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
2058        self.emit(VPSHRDQ512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2059    }
2060}
2061
2062impl<'a> VpshrdqMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
2063    fn vpshrdq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
2064        self.emit(VPSHRDQ512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2065    }
2066}
2067
2068/// `VPSHRDQ_MASKZ` (VPSHRDQ). 
2069/// Concatenate packed data, extract result shifted to the right by constant value.
2070///
2071///
2072/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
2073///
2074/// Supported operand variants:
2075///
2076/// ```text
2077/// +---+--------------------+
2078/// | # | Operands           |
2079/// +---+--------------------+
2080/// | 1 | Xmm, Xmm, Mem, Imm |
2081/// | 2 | Xmm, Xmm, Xmm, Imm |
2082/// | 3 | Ymm, Ymm, Mem, Imm |
2083/// | 4 | Ymm, Ymm, Ymm, Imm |
2084/// | 5 | Zmm, Zmm, Mem, Imm |
2085/// | 6 | Zmm, Zmm, Zmm, Imm |
2086/// +---+--------------------+
2087/// ```
2088pub trait VpshrdqMaskzEmitter<A, B, C, D> {
2089    fn vpshrdq_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
2090}
2091
2092impl<'a> VpshrdqMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
2093    fn vpshrdq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
2094        self.emit(VPSHRDQ128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2095    }
2096}
2097
2098impl<'a> VpshrdqMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
2099    fn vpshrdq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
2100        self.emit(VPSHRDQ128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2101    }
2102}
2103
2104impl<'a> VpshrdqMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
2105    fn vpshrdq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
2106        self.emit(VPSHRDQ256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2107    }
2108}
2109
2110impl<'a> VpshrdqMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
2111    fn vpshrdq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
2112        self.emit(VPSHRDQ256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2113    }
2114}
2115
2116impl<'a> VpshrdqMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
2117    fn vpshrdq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
2118        self.emit(VPSHRDQ512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2119    }
2120}
2121
2122impl<'a> VpshrdqMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
2123    fn vpshrdq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
2124        self.emit(VPSHRDQ512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2125    }
2126}
2127
2128/// `VPSHRDVD` (VPSHRDVD). 
2129/// Concatenate packed data, extract result shifted to the right by variable value.
2130///
2131///
2132/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2133///
2134/// Supported operand variants:
2135///
2136/// ```text
2137/// +---+---------------+
2138/// | # | Operands      |
2139/// +---+---------------+
2140/// | 1 | Xmm, Xmm, Mem |
2141/// | 2 | Xmm, Xmm, Xmm |
2142/// | 3 | Ymm, Ymm, Mem |
2143/// | 4 | Ymm, Ymm, Ymm |
2144/// | 5 | Zmm, Zmm, Mem |
2145/// | 6 | Zmm, Zmm, Zmm |
2146/// +---+---------------+
2147/// ```
2148pub trait VpshrdvdEmitter<A, B, C> {
2149    fn vpshrdvd(&mut self, op0: A, op1: B, op2: C);
2150}
2151
2152impl<'a> VpshrdvdEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2153    fn vpshrdvd(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2154        self.emit(VPSHRDVD128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2155    }
2156}
2157
2158impl<'a> VpshrdvdEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2159    fn vpshrdvd(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2160        self.emit(VPSHRDVD128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2161    }
2162}
2163
2164impl<'a> VpshrdvdEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2165    fn vpshrdvd(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2166        self.emit(VPSHRDVD256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2167    }
2168}
2169
2170impl<'a> VpshrdvdEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2171    fn vpshrdvd(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2172        self.emit(VPSHRDVD256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2173    }
2174}
2175
2176impl<'a> VpshrdvdEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2177    fn vpshrdvd(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2178        self.emit(VPSHRDVD512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2179    }
2180}
2181
2182impl<'a> VpshrdvdEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2183    fn vpshrdvd(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2184        self.emit(VPSHRDVD512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2185    }
2186}
2187
2188/// `VPSHRDVD_MASK` (VPSHRDVD). 
2189/// Concatenate packed data, extract result shifted to the right by variable value.
2190///
2191///
2192/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2193///
2194/// Supported operand variants:
2195///
2196/// ```text
2197/// +---+---------------+
2198/// | # | Operands      |
2199/// +---+---------------+
2200/// | 1 | Xmm, Xmm, Mem |
2201/// | 2 | Xmm, Xmm, Xmm |
2202/// | 3 | Ymm, Ymm, Mem |
2203/// | 4 | Ymm, Ymm, Ymm |
2204/// | 5 | Zmm, Zmm, Mem |
2205/// | 6 | Zmm, Zmm, Zmm |
2206/// +---+---------------+
2207/// ```
2208pub trait VpshrdvdMaskEmitter<A, B, C> {
2209    fn vpshrdvd_mask(&mut self, op0: A, op1: B, op2: C);
2210}
2211
2212impl<'a> VpshrdvdMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2213    fn vpshrdvd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2214        self.emit(VPSHRDVD128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2215    }
2216}
2217
2218impl<'a> VpshrdvdMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2219    fn vpshrdvd_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2220        self.emit(VPSHRDVD128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2221    }
2222}
2223
2224impl<'a> VpshrdvdMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2225    fn vpshrdvd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2226        self.emit(VPSHRDVD256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2227    }
2228}
2229
2230impl<'a> VpshrdvdMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2231    fn vpshrdvd_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2232        self.emit(VPSHRDVD256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2233    }
2234}
2235
2236impl<'a> VpshrdvdMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2237    fn vpshrdvd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2238        self.emit(VPSHRDVD512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2239    }
2240}
2241
2242impl<'a> VpshrdvdMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2243    fn vpshrdvd_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2244        self.emit(VPSHRDVD512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2245    }
2246}
2247
2248/// `VPSHRDVD_MASKZ` (VPSHRDVD). 
2249/// Concatenate packed data, extract result shifted to the right by variable value.
2250///
2251///
2252/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2253///
2254/// Supported operand variants:
2255///
2256/// ```text
2257/// +---+---------------+
2258/// | # | Operands      |
2259/// +---+---------------+
2260/// | 1 | Xmm, Xmm, Mem |
2261/// | 2 | Xmm, Xmm, Xmm |
2262/// | 3 | Ymm, Ymm, Mem |
2263/// | 4 | Ymm, Ymm, Ymm |
2264/// | 5 | Zmm, Zmm, Mem |
2265/// | 6 | Zmm, Zmm, Zmm |
2266/// +---+---------------+
2267/// ```
2268pub trait VpshrdvdMaskzEmitter<A, B, C> {
2269    fn vpshrdvd_maskz(&mut self, op0: A, op1: B, op2: C);
2270}
2271
2272impl<'a> VpshrdvdMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2273    fn vpshrdvd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2274        self.emit(VPSHRDVD128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2275    }
2276}
2277
2278impl<'a> VpshrdvdMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2279    fn vpshrdvd_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2280        self.emit(VPSHRDVD128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2281    }
2282}
2283
2284impl<'a> VpshrdvdMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2285    fn vpshrdvd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2286        self.emit(VPSHRDVD256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2287    }
2288}
2289
2290impl<'a> VpshrdvdMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2291    fn vpshrdvd_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2292        self.emit(VPSHRDVD256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2293    }
2294}
2295
2296impl<'a> VpshrdvdMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2297    fn vpshrdvd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2298        self.emit(VPSHRDVD512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2299    }
2300}
2301
2302impl<'a> VpshrdvdMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2303    fn vpshrdvd_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2304        self.emit(VPSHRDVD512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2305    }
2306}
2307
2308/// `VPSHRDVQ` (VPSHRDVQ). 
2309/// Concatenate packed data, extract result shifted to the right by variable value.
2310///
2311///
2312/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2313///
2314/// Supported operand variants:
2315///
2316/// ```text
2317/// +---+---------------+
2318/// | # | Operands      |
2319/// +---+---------------+
2320/// | 1 | Xmm, Xmm, Mem |
2321/// | 2 | Xmm, Xmm, Xmm |
2322/// | 3 | Ymm, Ymm, Mem |
2323/// | 4 | Ymm, Ymm, Ymm |
2324/// | 5 | Zmm, Zmm, Mem |
2325/// | 6 | Zmm, Zmm, Zmm |
2326/// +---+---------------+
2327/// ```
2328pub trait VpshrdvqEmitter<A, B, C> {
2329    fn vpshrdvq(&mut self, op0: A, op1: B, op2: C);
2330}
2331
2332impl<'a> VpshrdvqEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2333    fn vpshrdvq(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2334        self.emit(VPSHRDVQ128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2335    }
2336}
2337
2338impl<'a> VpshrdvqEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2339    fn vpshrdvq(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2340        self.emit(VPSHRDVQ128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2341    }
2342}
2343
2344impl<'a> VpshrdvqEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2345    fn vpshrdvq(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2346        self.emit(VPSHRDVQ256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2347    }
2348}
2349
2350impl<'a> VpshrdvqEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2351    fn vpshrdvq(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2352        self.emit(VPSHRDVQ256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2353    }
2354}
2355
2356impl<'a> VpshrdvqEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2357    fn vpshrdvq(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2358        self.emit(VPSHRDVQ512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2359    }
2360}
2361
2362impl<'a> VpshrdvqEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2363    fn vpshrdvq(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2364        self.emit(VPSHRDVQ512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2365    }
2366}
2367
2368/// `VPSHRDVQ_MASK` (VPSHRDVQ). 
2369/// Concatenate packed data, extract result shifted to the right by variable value.
2370///
2371///
2372/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2373///
2374/// Supported operand variants:
2375///
2376/// ```text
2377/// +---+---------------+
2378/// | # | Operands      |
2379/// +---+---------------+
2380/// | 1 | Xmm, Xmm, Mem |
2381/// | 2 | Xmm, Xmm, Xmm |
2382/// | 3 | Ymm, Ymm, Mem |
2383/// | 4 | Ymm, Ymm, Ymm |
2384/// | 5 | Zmm, Zmm, Mem |
2385/// | 6 | Zmm, Zmm, Zmm |
2386/// +---+---------------+
2387/// ```
2388pub trait VpshrdvqMaskEmitter<A, B, C> {
2389    fn vpshrdvq_mask(&mut self, op0: A, op1: B, op2: C);
2390}
2391
2392impl<'a> VpshrdvqMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2393    fn vpshrdvq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2394        self.emit(VPSHRDVQ128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2395    }
2396}
2397
2398impl<'a> VpshrdvqMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2399    fn vpshrdvq_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2400        self.emit(VPSHRDVQ128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2401    }
2402}
2403
2404impl<'a> VpshrdvqMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2405    fn vpshrdvq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2406        self.emit(VPSHRDVQ256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2407    }
2408}
2409
2410impl<'a> VpshrdvqMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2411    fn vpshrdvq_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2412        self.emit(VPSHRDVQ256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2413    }
2414}
2415
2416impl<'a> VpshrdvqMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2417    fn vpshrdvq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2418        self.emit(VPSHRDVQ512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2419    }
2420}
2421
2422impl<'a> VpshrdvqMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2423    fn vpshrdvq_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2424        self.emit(VPSHRDVQ512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2425    }
2426}
2427
2428/// `VPSHRDVQ_MASKZ` (VPSHRDVQ). 
2429/// Concatenate packed data, extract result shifted to the right by variable value.
2430///
2431///
2432/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2433///
2434/// Supported operand variants:
2435///
2436/// ```text
2437/// +---+---------------+
2438/// | # | Operands      |
2439/// +---+---------------+
2440/// | 1 | Xmm, Xmm, Mem |
2441/// | 2 | Xmm, Xmm, Xmm |
2442/// | 3 | Ymm, Ymm, Mem |
2443/// | 4 | Ymm, Ymm, Ymm |
2444/// | 5 | Zmm, Zmm, Mem |
2445/// | 6 | Zmm, Zmm, Zmm |
2446/// +---+---------------+
2447/// ```
2448pub trait VpshrdvqMaskzEmitter<A, B, C> {
2449    fn vpshrdvq_maskz(&mut self, op0: A, op1: B, op2: C);
2450}
2451
2452impl<'a> VpshrdvqMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2453    fn vpshrdvq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2454        self.emit(VPSHRDVQ128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2455    }
2456}
2457
2458impl<'a> VpshrdvqMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2459    fn vpshrdvq_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2460        self.emit(VPSHRDVQ128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2461    }
2462}
2463
2464impl<'a> VpshrdvqMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2465    fn vpshrdvq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2466        self.emit(VPSHRDVQ256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2467    }
2468}
2469
2470impl<'a> VpshrdvqMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2471    fn vpshrdvq_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2472        self.emit(VPSHRDVQ256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2473    }
2474}
2475
2476impl<'a> VpshrdvqMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2477    fn vpshrdvq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2478        self.emit(VPSHRDVQ512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2479    }
2480}
2481
2482impl<'a> VpshrdvqMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2483    fn vpshrdvq_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2484        self.emit(VPSHRDVQ512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2485    }
2486}
2487
2488/// `VPSHRDVW` (VPSHRDVW). 
2489/// Concatenate packed data, extract result shifted to the right by variable value.
2490///
2491///
2492/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2493///
2494/// Supported operand variants:
2495///
2496/// ```text
2497/// +---+---------------+
2498/// | # | Operands      |
2499/// +---+---------------+
2500/// | 1 | Xmm, Xmm, Mem |
2501/// | 2 | Xmm, Xmm, Xmm |
2502/// | 3 | Ymm, Ymm, Mem |
2503/// | 4 | Ymm, Ymm, Ymm |
2504/// | 5 | Zmm, Zmm, Mem |
2505/// | 6 | Zmm, Zmm, Zmm |
2506/// +---+---------------+
2507/// ```
2508pub trait VpshrdvwEmitter<A, B, C> {
2509    fn vpshrdvw(&mut self, op0: A, op1: B, op2: C);
2510}
2511
2512impl<'a> VpshrdvwEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2513    fn vpshrdvw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2514        self.emit(VPSHRDVW128RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2515    }
2516}
2517
2518impl<'a> VpshrdvwEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2519    fn vpshrdvw(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2520        self.emit(VPSHRDVW128RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2521    }
2522}
2523
2524impl<'a> VpshrdvwEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2525    fn vpshrdvw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2526        self.emit(VPSHRDVW256RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2527    }
2528}
2529
2530impl<'a> VpshrdvwEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2531    fn vpshrdvw(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2532        self.emit(VPSHRDVW256RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2533    }
2534}
2535
2536impl<'a> VpshrdvwEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2537    fn vpshrdvw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2538        self.emit(VPSHRDVW512RRR, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2539    }
2540}
2541
2542impl<'a> VpshrdvwEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2543    fn vpshrdvw(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2544        self.emit(VPSHRDVW512RRM, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2545    }
2546}
2547
2548/// `VPSHRDVW_MASK` (VPSHRDVW). 
2549/// Concatenate packed data, extract result shifted to the right by variable value.
2550///
2551///
2552/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2553///
2554/// Supported operand variants:
2555///
2556/// ```text
2557/// +---+---------------+
2558/// | # | Operands      |
2559/// +---+---------------+
2560/// | 1 | Xmm, Xmm, Mem |
2561/// | 2 | Xmm, Xmm, Xmm |
2562/// | 3 | Ymm, Ymm, Mem |
2563/// | 4 | Ymm, Ymm, Ymm |
2564/// | 5 | Zmm, Zmm, Mem |
2565/// | 6 | Zmm, Zmm, Zmm |
2566/// +---+---------------+
2567/// ```
2568pub trait VpshrdvwMaskEmitter<A, B, C> {
2569    fn vpshrdvw_mask(&mut self, op0: A, op1: B, op2: C);
2570}
2571
2572impl<'a> VpshrdvwMaskEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2573    fn vpshrdvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2574        self.emit(VPSHRDVW128RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2575    }
2576}
2577
2578impl<'a> VpshrdvwMaskEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2579    fn vpshrdvw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2580        self.emit(VPSHRDVW128RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2581    }
2582}
2583
2584impl<'a> VpshrdvwMaskEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2585    fn vpshrdvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2586        self.emit(VPSHRDVW256RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2587    }
2588}
2589
2590impl<'a> VpshrdvwMaskEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2591    fn vpshrdvw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2592        self.emit(VPSHRDVW256RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2593    }
2594}
2595
2596impl<'a> VpshrdvwMaskEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2597    fn vpshrdvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2598        self.emit(VPSHRDVW512RRR_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2599    }
2600}
2601
2602impl<'a> VpshrdvwMaskEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2603    fn vpshrdvw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2604        self.emit(VPSHRDVW512RRM_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2605    }
2606}
2607
2608/// `VPSHRDVW_MASKZ` (VPSHRDVW). 
2609/// Concatenate packed data, extract result shifted to the right by variable value.
2610///
2611///
2612/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
2613///
2614/// Supported operand variants:
2615///
2616/// ```text
2617/// +---+---------------+
2618/// | # | Operands      |
2619/// +---+---------------+
2620/// | 1 | Xmm, Xmm, Mem |
2621/// | 2 | Xmm, Xmm, Xmm |
2622/// | 3 | Ymm, Ymm, Mem |
2623/// | 4 | Ymm, Ymm, Ymm |
2624/// | 5 | Zmm, Zmm, Mem |
2625/// | 6 | Zmm, Zmm, Zmm |
2626/// +---+---------------+
2627/// ```
2628pub trait VpshrdvwMaskzEmitter<A, B, C> {
2629    fn vpshrdvw_maskz(&mut self, op0: A, op1: B, op2: C);
2630}
2631
2632impl<'a> VpshrdvwMaskzEmitter<Xmm, Xmm, Xmm> for Assembler<'a> {
2633    fn vpshrdvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm) {
2634        self.emit(VPSHRDVW128RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2635    }
2636}
2637
2638impl<'a> VpshrdvwMaskzEmitter<Xmm, Xmm, Mem> for Assembler<'a> {
2639    fn vpshrdvw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem) {
2640        self.emit(VPSHRDVW128RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2641    }
2642}
2643
2644impl<'a> VpshrdvwMaskzEmitter<Ymm, Ymm, Ymm> for Assembler<'a> {
2645    fn vpshrdvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm) {
2646        self.emit(VPSHRDVW256RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2647    }
2648}
2649
2650impl<'a> VpshrdvwMaskzEmitter<Ymm, Ymm, Mem> for Assembler<'a> {
2651    fn vpshrdvw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem) {
2652        self.emit(VPSHRDVW256RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2653    }
2654}
2655
2656impl<'a> VpshrdvwMaskzEmitter<Zmm, Zmm, Zmm> for Assembler<'a> {
2657    fn vpshrdvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm) {
2658        self.emit(VPSHRDVW512RRR_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2659    }
2660}
2661
2662impl<'a> VpshrdvwMaskzEmitter<Zmm, Zmm, Mem> for Assembler<'a> {
2663    fn vpshrdvw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem) {
2664        self.emit(VPSHRDVW512RRM_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), &NOREG);
2665    }
2666}
2667
2668/// `VPSHRDW` (VPSHRDW). 
2669/// Concatenate packed data, extract result shifted to the right by constant value.
2670///
2671///
2672/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
2673///
2674/// Supported operand variants:
2675///
2676/// ```text
2677/// +---+--------------------+
2678/// | # | Operands           |
2679/// +---+--------------------+
2680/// | 1 | Xmm, Xmm, Mem, Imm |
2681/// | 2 | Xmm, Xmm, Xmm, Imm |
2682/// | 3 | Ymm, Ymm, Mem, Imm |
2683/// | 4 | Ymm, Ymm, Ymm, Imm |
2684/// | 5 | Zmm, Zmm, Mem, Imm |
2685/// | 6 | Zmm, Zmm, Zmm, Imm |
2686/// +---+--------------------+
2687/// ```
2688pub trait VpshrdwEmitter<A, B, C, D> {
2689    fn vpshrdw(&mut self, op0: A, op1: B, op2: C, op3: D);
2690}
2691
2692impl<'a> VpshrdwEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
2693    fn vpshrdw(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
2694        self.emit(VPSHRDW128RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2695    }
2696}
2697
2698impl<'a> VpshrdwEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
2699    fn vpshrdw(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
2700        self.emit(VPSHRDW128RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2701    }
2702}
2703
2704impl<'a> VpshrdwEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
2705    fn vpshrdw(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
2706        self.emit(VPSHRDW256RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2707    }
2708}
2709
2710impl<'a> VpshrdwEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
2711    fn vpshrdw(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
2712        self.emit(VPSHRDW256RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2713    }
2714}
2715
2716impl<'a> VpshrdwEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
2717    fn vpshrdw(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
2718        self.emit(VPSHRDW512RRRI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2719    }
2720}
2721
2722impl<'a> VpshrdwEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
2723    fn vpshrdw(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
2724        self.emit(VPSHRDW512RRMI, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2725    }
2726}
2727
2728/// `VPSHRDW_MASK` (VPSHRDW). 
2729/// Concatenate packed data, extract result shifted to the right by constant value.
2730///
2731///
2732/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
2733///
2734/// Supported operand variants:
2735///
2736/// ```text
2737/// +---+--------------------+
2738/// | # | Operands           |
2739/// +---+--------------------+
2740/// | 1 | Xmm, Xmm, Mem, Imm |
2741/// | 2 | Xmm, Xmm, Xmm, Imm |
2742/// | 3 | Ymm, Ymm, Mem, Imm |
2743/// | 4 | Ymm, Ymm, Ymm, Imm |
2744/// | 5 | Zmm, Zmm, Mem, Imm |
2745/// | 6 | Zmm, Zmm, Zmm, Imm |
2746/// +---+--------------------+
2747/// ```
2748pub trait VpshrdwMaskEmitter<A, B, C, D> {
2749    fn vpshrdw_mask(&mut self, op0: A, op1: B, op2: C, op3: D);
2750}
2751
2752impl<'a> VpshrdwMaskEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
2753    fn vpshrdw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
2754        self.emit(VPSHRDW128RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2755    }
2756}
2757
2758impl<'a> VpshrdwMaskEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
2759    fn vpshrdw_mask(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
2760        self.emit(VPSHRDW128RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2761    }
2762}
2763
2764impl<'a> VpshrdwMaskEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
2765    fn vpshrdw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
2766        self.emit(VPSHRDW256RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2767    }
2768}
2769
2770impl<'a> VpshrdwMaskEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
2771    fn vpshrdw_mask(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
2772        self.emit(VPSHRDW256RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2773    }
2774}
2775
2776impl<'a> VpshrdwMaskEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
2777    fn vpshrdw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
2778        self.emit(VPSHRDW512RRRI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2779    }
2780}
2781
2782impl<'a> VpshrdwMaskEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
2783    fn vpshrdw_mask(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
2784        self.emit(VPSHRDW512RRMI_MASK, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2785    }
2786}
2787
2788/// `VPSHRDW_MASKZ` (VPSHRDW). 
2789/// Concatenate packed data, extract result shifted to the right by constant value.
2790///
2791///
2792/// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
2793///
2794/// Supported operand variants:
2795///
2796/// ```text
2797/// +---+--------------------+
2798/// | # | Operands           |
2799/// +---+--------------------+
2800/// | 1 | Xmm, Xmm, Mem, Imm |
2801/// | 2 | Xmm, Xmm, Xmm, Imm |
2802/// | 3 | Ymm, Ymm, Mem, Imm |
2803/// | 4 | Ymm, Ymm, Ymm, Imm |
2804/// | 5 | Zmm, Zmm, Mem, Imm |
2805/// | 6 | Zmm, Zmm, Zmm, Imm |
2806/// +---+--------------------+
2807/// ```
2808pub trait VpshrdwMaskzEmitter<A, B, C, D> {
2809    fn vpshrdw_maskz(&mut self, op0: A, op1: B, op2: C, op3: D);
2810}
2811
2812impl<'a> VpshrdwMaskzEmitter<Xmm, Xmm, Xmm, Imm> for Assembler<'a> {
2813    fn vpshrdw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Xmm, op3: Imm) {
2814        self.emit(VPSHRDW128RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2815    }
2816}
2817
2818impl<'a> VpshrdwMaskzEmitter<Xmm, Xmm, Mem, Imm> for Assembler<'a> {
2819    fn vpshrdw_maskz(&mut self, op0: Xmm, op1: Xmm, op2: Mem, op3: Imm) {
2820        self.emit(VPSHRDW128RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2821    }
2822}
2823
2824impl<'a> VpshrdwMaskzEmitter<Ymm, Ymm, Ymm, Imm> for Assembler<'a> {
2825    fn vpshrdw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Ymm, op3: Imm) {
2826        self.emit(VPSHRDW256RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2827    }
2828}
2829
2830impl<'a> VpshrdwMaskzEmitter<Ymm, Ymm, Mem, Imm> for Assembler<'a> {
2831    fn vpshrdw_maskz(&mut self, op0: Ymm, op1: Ymm, op2: Mem, op3: Imm) {
2832        self.emit(VPSHRDW256RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2833    }
2834}
2835
2836impl<'a> VpshrdwMaskzEmitter<Zmm, Zmm, Zmm, Imm> for Assembler<'a> {
2837    fn vpshrdw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Zmm, op3: Imm) {
2838        self.emit(VPSHRDW512RRRI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2839    }
2840}
2841
2842impl<'a> VpshrdwMaskzEmitter<Zmm, Zmm, Mem, Imm> for Assembler<'a> {
2843    fn vpshrdw_maskz(&mut self, op0: Zmm, op1: Zmm, op2: Mem, op3: Imm) {
2844        self.emit(VPSHRDW512RRMI_MASKZ, op0.as_operand(), op1.as_operand(), op2.as_operand(), op3.as_operand());
2845    }
2846}
2847
2848
2849impl<'a> Assembler<'a> {
2850    /// `VPCOMPRESSB` (VPCOMPRESSB). 
2851    /// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
2852    ///
2853    ///
2854    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
2855    ///
2856    /// Supported operand variants:
2857    ///
2858    /// ```text
2859    /// +---+----------+
2860    /// | # | Operands |
2861    /// +---+----------+
2862    /// | 1 | Mem, Xmm |
2863    /// | 2 | Mem, Ymm |
2864    /// | 3 | Mem, Zmm |
2865    /// | 4 | Xmm, Xmm |
2866    /// | 5 | Ymm, Ymm |
2867    /// | 6 | Zmm, Zmm |
2868    /// +---+----------+
2869    /// ```
2870    #[inline]
2871    pub fn vpcompressb<A, B>(&mut self, op0: A, op1: B)
2872    where Assembler<'a>: VpcompressbEmitter<A, B> {
2873        <Self as VpcompressbEmitter<A, B>>::vpcompressb(self, op0, op1);
2874    }
2875    /// `VPCOMPRESSB_MASK` (VPCOMPRESSB). 
2876    /// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
2877    ///
2878    ///
2879    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
2880    ///
2881    /// Supported operand variants:
2882    ///
2883    /// ```text
2884    /// +---+----------+
2885    /// | # | Operands |
2886    /// +---+----------+
2887    /// | 1 | Mem, Xmm |
2888    /// | 2 | Mem, Ymm |
2889    /// | 3 | Mem, Zmm |
2890    /// | 4 | Xmm, Xmm |
2891    /// | 5 | Ymm, Ymm |
2892    /// | 6 | Zmm, Zmm |
2893    /// +---+----------+
2894    /// ```
2895    #[inline]
2896    pub fn vpcompressb_mask<A, B>(&mut self, op0: A, op1: B)
2897    where Assembler<'a>: VpcompressbMaskEmitter<A, B> {
2898        <Self as VpcompressbMaskEmitter<A, B>>::vpcompressb_mask(self, op0, op1);
2899    }
2900    /// `VPCOMPRESSB_MASKZ` (VPCOMPRESSB). 
2901    /// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
2902    ///
2903    ///
2904    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
2905    ///
2906    /// Supported operand variants:
2907    ///
2908    /// ```text
2909    /// +---+----------+
2910    /// | # | Operands |
2911    /// +---+----------+
2912    /// | 1 | Xmm, Xmm |
2913    /// | 2 | Ymm, Ymm |
2914    /// | 3 | Zmm, Zmm |
2915    /// +---+----------+
2916    /// ```
2917    #[inline]
2918    pub fn vpcompressb_maskz<A, B>(&mut self, op0: A, op1: B)
2919    where Assembler<'a>: VpcompressbMaskzEmitter<A, B> {
2920        <Self as VpcompressbMaskzEmitter<A, B>>::vpcompressb_maskz(self, op0, op1);
2921    }
2922    /// `VPCOMPRESSW` (VPCOMPRESSW). 
2923    /// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
2924    ///
2925    ///
2926    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
2927    ///
2928    /// Supported operand variants:
2929    ///
2930    /// ```text
2931    /// +---+----------+
2932    /// | # | Operands |
2933    /// +---+----------+
2934    /// | 1 | Mem, Xmm |
2935    /// | 2 | Mem, Ymm |
2936    /// | 3 | Mem, Zmm |
2937    /// | 4 | Xmm, Xmm |
2938    /// | 5 | Ymm, Ymm |
2939    /// | 6 | Zmm, Zmm |
2940    /// +---+----------+
2941    /// ```
2942    #[inline]
2943    pub fn vpcompressw<A, B>(&mut self, op0: A, op1: B)
2944    where Assembler<'a>: VpcompresswEmitter<A, B> {
2945        <Self as VpcompresswEmitter<A, B>>::vpcompressw(self, op0, op1);
2946    }
2947    /// `VPCOMPRESSW_MASK` (VPCOMPRESSW). 
2948    /// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
2949    ///
2950    ///
2951    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
2952    ///
2953    /// Supported operand variants:
2954    ///
2955    /// ```text
2956    /// +---+----------+
2957    /// | # | Operands |
2958    /// +---+----------+
2959    /// | 1 | Mem, Xmm |
2960    /// | 2 | Mem, Ymm |
2961    /// | 3 | Mem, Zmm |
2962    /// | 4 | Xmm, Xmm |
2963    /// | 5 | Ymm, Ymm |
2964    /// | 6 | Zmm, Zmm |
2965    /// +---+----------+
2966    /// ```
2967    #[inline]
2968    pub fn vpcompressw_mask<A, B>(&mut self, op0: A, op1: B)
2969    where Assembler<'a>: VpcompresswMaskEmitter<A, B> {
2970        <Self as VpcompresswMaskEmitter<A, B>>::vpcompressw_mask(self, op0, op1);
2971    }
2972    /// `VPCOMPRESSW_MASKZ` (VPCOMPRESSW). 
2973    /// Compress (stores) up to 64 byte values or 32 word values from the source operand (second operand) to the destination operand (first operand), based on the active elements determined by the writemask operand. Note: EVEX.vvvv is reserved and must be 1111b otherwise instructions will #UD.
2974    ///
2975    ///
2976    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPCOMPRESSB%3AVCOMPRESSW.html).
2977    ///
2978    /// Supported operand variants:
2979    ///
2980    /// ```text
2981    /// +---+----------+
2982    /// | # | Operands |
2983    /// +---+----------+
2984    /// | 1 | Xmm, Xmm |
2985    /// | 2 | Ymm, Ymm |
2986    /// | 3 | Zmm, Zmm |
2987    /// +---+----------+
2988    /// ```
2989    #[inline]
2990    pub fn vpcompressw_maskz<A, B>(&mut self, op0: A, op1: B)
2991    where Assembler<'a>: VpcompresswMaskzEmitter<A, B> {
2992        <Self as VpcompresswMaskzEmitter<A, B>>::vpcompressw_maskz(self, op0, op1);
2993    }
2994    /// `VPEXPANDB` (VPEXPANDB). 
2995    /// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
2996    ///
2997    ///
2998    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
2999    ///
3000    /// Supported operand variants:
3001    ///
3002    /// ```text
3003    /// +---+----------+
3004    /// | # | Operands |
3005    /// +---+----------+
3006    /// | 1 | Xmm, Mem |
3007    /// | 2 | Xmm, Xmm |
3008    /// | 3 | Ymm, Mem |
3009    /// | 4 | Ymm, Ymm |
3010    /// | 5 | Zmm, Mem |
3011    /// | 6 | Zmm, Zmm |
3012    /// +---+----------+
3013    /// ```
3014    #[inline]
3015    pub fn vpexpandb<A, B>(&mut self, op0: A, op1: B)
3016    where Assembler<'a>: VpexpandbEmitter<A, B> {
3017        <Self as VpexpandbEmitter<A, B>>::vpexpandb(self, op0, op1);
3018    }
3019    /// `VPEXPANDB_MASK` (VPEXPANDB). 
3020    /// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
3021    ///
3022    ///
3023    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
3024    ///
3025    /// Supported operand variants:
3026    ///
3027    /// ```text
3028    /// +---+----------+
3029    /// | # | Operands |
3030    /// +---+----------+
3031    /// | 1 | Xmm, Mem |
3032    /// | 2 | Xmm, Xmm |
3033    /// | 3 | Ymm, Mem |
3034    /// | 4 | Ymm, Ymm |
3035    /// | 5 | Zmm, Mem |
3036    /// | 6 | Zmm, Zmm |
3037    /// +---+----------+
3038    /// ```
3039    #[inline]
3040    pub fn vpexpandb_mask<A, B>(&mut self, op0: A, op1: B)
3041    where Assembler<'a>: VpexpandbMaskEmitter<A, B> {
3042        <Self as VpexpandbMaskEmitter<A, B>>::vpexpandb_mask(self, op0, op1);
3043    }
3044    /// `VPEXPANDB_MASKZ` (VPEXPANDB). 
3045    /// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
3046    ///
3047    ///
3048    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
3049    ///
3050    /// Supported operand variants:
3051    ///
3052    /// ```text
3053    /// +---+----------+
3054    /// | # | Operands |
3055    /// +---+----------+
3056    /// | 1 | Xmm, Mem |
3057    /// | 2 | Xmm, Xmm |
3058    /// | 3 | Ymm, Mem |
3059    /// | 4 | Ymm, Ymm |
3060    /// | 5 | Zmm, Mem |
3061    /// | 6 | Zmm, Zmm |
3062    /// +---+----------+
3063    /// ```
3064    #[inline]
3065    pub fn vpexpandb_maskz<A, B>(&mut self, op0: A, op1: B)
3066    where Assembler<'a>: VpexpandbMaskzEmitter<A, B> {
3067        <Self as VpexpandbMaskzEmitter<A, B>>::vpexpandb_maskz(self, op0, op1);
3068    }
3069    /// `VPEXPANDW` (VPEXPANDW). 
3070    /// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
3071    ///
3072    ///
3073    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
3074    ///
3075    /// Supported operand variants:
3076    ///
3077    /// ```text
3078    /// +---+----------+
3079    /// | # | Operands |
3080    /// +---+----------+
3081    /// | 1 | Xmm, Mem |
3082    /// | 2 | Xmm, Xmm |
3083    /// | 3 | Ymm, Mem |
3084    /// | 4 | Ymm, Ymm |
3085    /// | 5 | Zmm, Mem |
3086    /// | 6 | Zmm, Zmm |
3087    /// +---+----------+
3088    /// ```
3089    #[inline]
3090    pub fn vpexpandw<A, B>(&mut self, op0: A, op1: B)
3091    where Assembler<'a>: VpexpandwEmitter<A, B> {
3092        <Self as VpexpandwEmitter<A, B>>::vpexpandw(self, op0, op1);
3093    }
3094    /// `VPEXPANDW_MASK` (VPEXPANDW). 
3095    /// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
3096    ///
3097    ///
3098    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
3099    ///
3100    /// Supported operand variants:
3101    ///
3102    /// ```text
3103    /// +---+----------+
3104    /// | # | Operands |
3105    /// +---+----------+
3106    /// | 1 | Xmm, Mem |
3107    /// | 2 | Xmm, Xmm |
3108    /// | 3 | Ymm, Mem |
3109    /// | 4 | Ymm, Ymm |
3110    /// | 5 | Zmm, Mem |
3111    /// | 6 | Zmm, Zmm |
3112    /// +---+----------+
3113    /// ```
3114    #[inline]
3115    pub fn vpexpandw_mask<A, B>(&mut self, op0: A, op1: B)
3116    where Assembler<'a>: VpexpandwMaskEmitter<A, B> {
3117        <Self as VpexpandwMaskEmitter<A, B>>::vpexpandw_mask(self, op0, op1);
3118    }
3119    /// `VPEXPANDW_MASKZ` (VPEXPANDW). 
3120    /// Expands (loads) up to 64 byte integer values or 32 word integer values from the source operand (memory operand) to the destination operand (register operand), based on the active elements determined by the writemask operand.
3121    ///
3122    ///
3123    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPEXPANDB%3AVPEXPANDW.html).
3124    ///
3125    /// Supported operand variants:
3126    ///
3127    /// ```text
3128    /// +---+----------+
3129    /// | # | Operands |
3130    /// +---+----------+
3131    /// | 1 | Xmm, Mem |
3132    /// | 2 | Xmm, Xmm |
3133    /// | 3 | Ymm, Mem |
3134    /// | 4 | Ymm, Ymm |
3135    /// | 5 | Zmm, Mem |
3136    /// | 6 | Zmm, Zmm |
3137    /// +---+----------+
3138    /// ```
3139    #[inline]
3140    pub fn vpexpandw_maskz<A, B>(&mut self, op0: A, op1: B)
3141    where Assembler<'a>: VpexpandwMaskzEmitter<A, B> {
3142        <Self as VpexpandwMaskzEmitter<A, B>>::vpexpandw_maskz(self, op0, op1);
3143    }
3144    /// `VPSHLDD` (VPSHLDD). 
3145    /// Concatenate packed data, extract result shifted to the left by constant value.
3146    ///
3147    ///
3148    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3149    ///
3150    /// Supported operand variants:
3151    ///
3152    /// ```text
3153    /// +---+--------------------+
3154    /// | # | Operands           |
3155    /// +---+--------------------+
3156    /// | 1 | Xmm, Xmm, Mem, Imm |
3157    /// | 2 | Xmm, Xmm, Xmm, Imm |
3158    /// | 3 | Ymm, Ymm, Mem, Imm |
3159    /// | 4 | Ymm, Ymm, Ymm, Imm |
3160    /// | 5 | Zmm, Zmm, Mem, Imm |
3161    /// | 6 | Zmm, Zmm, Zmm, Imm |
3162    /// +---+--------------------+
3163    /// ```
3164    #[inline]
3165    pub fn vpshldd<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3166    where Assembler<'a>: VpshlddEmitter<A, B, C, D> {
3167        <Self as VpshlddEmitter<A, B, C, D>>::vpshldd(self, op0, op1, op2, op3);
3168    }
3169    /// `VPSHLDD_MASK` (VPSHLDD). 
3170    /// Concatenate packed data, extract result shifted to the left by constant value.
3171    ///
3172    ///
3173    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3174    ///
3175    /// Supported operand variants:
3176    ///
3177    /// ```text
3178    /// +---+--------------------+
3179    /// | # | Operands           |
3180    /// +---+--------------------+
3181    /// | 1 | Xmm, Xmm, Mem, Imm |
3182    /// | 2 | Xmm, Xmm, Xmm, Imm |
3183    /// | 3 | Ymm, Ymm, Mem, Imm |
3184    /// | 4 | Ymm, Ymm, Ymm, Imm |
3185    /// | 5 | Zmm, Zmm, Mem, Imm |
3186    /// | 6 | Zmm, Zmm, Zmm, Imm |
3187    /// +---+--------------------+
3188    /// ```
3189    #[inline]
3190    pub fn vpshldd_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3191    where Assembler<'a>: VpshlddMaskEmitter<A, B, C, D> {
3192        <Self as VpshlddMaskEmitter<A, B, C, D>>::vpshldd_mask(self, op0, op1, op2, op3);
3193    }
3194    /// `VPSHLDD_MASKZ` (VPSHLDD). 
3195    /// Concatenate packed data, extract result shifted to the left by constant value.
3196    ///
3197    ///
3198    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3199    ///
3200    /// Supported operand variants:
3201    ///
3202    /// ```text
3203    /// +---+--------------------+
3204    /// | # | Operands           |
3205    /// +---+--------------------+
3206    /// | 1 | Xmm, Xmm, Mem, Imm |
3207    /// | 2 | Xmm, Xmm, Xmm, Imm |
3208    /// | 3 | Ymm, Ymm, Mem, Imm |
3209    /// | 4 | Ymm, Ymm, Ymm, Imm |
3210    /// | 5 | Zmm, Zmm, Mem, Imm |
3211    /// | 6 | Zmm, Zmm, Zmm, Imm |
3212    /// +---+--------------------+
3213    /// ```
3214    #[inline]
3215    pub fn vpshldd_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3216    where Assembler<'a>: VpshlddMaskzEmitter<A, B, C, D> {
3217        <Self as VpshlddMaskzEmitter<A, B, C, D>>::vpshldd_maskz(self, op0, op1, op2, op3);
3218    }
3219    /// `VPSHLDQ` (VPSHLDQ). 
3220    /// Concatenate packed data, extract result shifted to the left by constant value.
3221    ///
3222    ///
3223    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3224    ///
3225    /// Supported operand variants:
3226    ///
3227    /// ```text
3228    /// +---+--------------------+
3229    /// | # | Operands           |
3230    /// +---+--------------------+
3231    /// | 1 | Xmm, Xmm, Mem, Imm |
3232    /// | 2 | Xmm, Xmm, Xmm, Imm |
3233    /// | 3 | Ymm, Ymm, Mem, Imm |
3234    /// | 4 | Ymm, Ymm, Ymm, Imm |
3235    /// | 5 | Zmm, Zmm, Mem, Imm |
3236    /// | 6 | Zmm, Zmm, Zmm, Imm |
3237    /// +---+--------------------+
3238    /// ```
3239    #[inline]
3240    pub fn vpshldq<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3241    where Assembler<'a>: VpshldqEmitter<A, B, C, D> {
3242        <Self as VpshldqEmitter<A, B, C, D>>::vpshldq(self, op0, op1, op2, op3);
3243    }
3244    /// `VPSHLDQ_MASK` (VPSHLDQ). 
3245    /// Concatenate packed data, extract result shifted to the left by constant value.
3246    ///
3247    ///
3248    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3249    ///
3250    /// Supported operand variants:
3251    ///
3252    /// ```text
3253    /// +---+--------------------+
3254    /// | # | Operands           |
3255    /// +---+--------------------+
3256    /// | 1 | Xmm, Xmm, Mem, Imm |
3257    /// | 2 | Xmm, Xmm, Xmm, Imm |
3258    /// | 3 | Ymm, Ymm, Mem, Imm |
3259    /// | 4 | Ymm, Ymm, Ymm, Imm |
3260    /// | 5 | Zmm, Zmm, Mem, Imm |
3261    /// | 6 | Zmm, Zmm, Zmm, Imm |
3262    /// +---+--------------------+
3263    /// ```
3264    #[inline]
3265    pub fn vpshldq_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3266    where Assembler<'a>: VpshldqMaskEmitter<A, B, C, D> {
3267        <Self as VpshldqMaskEmitter<A, B, C, D>>::vpshldq_mask(self, op0, op1, op2, op3);
3268    }
3269    /// `VPSHLDQ_MASKZ` (VPSHLDQ). 
3270    /// Concatenate packed data, extract result shifted to the left by constant value.
3271    ///
3272    ///
3273    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3274    ///
3275    /// Supported operand variants:
3276    ///
3277    /// ```text
3278    /// +---+--------------------+
3279    /// | # | Operands           |
3280    /// +---+--------------------+
3281    /// | 1 | Xmm, Xmm, Mem, Imm |
3282    /// | 2 | Xmm, Xmm, Xmm, Imm |
3283    /// | 3 | Ymm, Ymm, Mem, Imm |
3284    /// | 4 | Ymm, Ymm, Ymm, Imm |
3285    /// | 5 | Zmm, Zmm, Mem, Imm |
3286    /// | 6 | Zmm, Zmm, Zmm, Imm |
3287    /// +---+--------------------+
3288    /// ```
3289    #[inline]
3290    pub fn vpshldq_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3291    where Assembler<'a>: VpshldqMaskzEmitter<A, B, C, D> {
3292        <Self as VpshldqMaskzEmitter<A, B, C, D>>::vpshldq_maskz(self, op0, op1, op2, op3);
3293    }
3294    /// `VPSHLDVD` (VPSHLDVD). 
3295    /// Concatenate packed data, extract result shifted to the left by variable value.
3296    ///
3297    ///
3298    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3299    ///
3300    /// Supported operand variants:
3301    ///
3302    /// ```text
3303    /// +---+---------------+
3304    /// | # | Operands      |
3305    /// +---+---------------+
3306    /// | 1 | Xmm, Xmm, Mem |
3307    /// | 2 | Xmm, Xmm, Xmm |
3308    /// | 3 | Ymm, Ymm, Mem |
3309    /// | 4 | Ymm, Ymm, Ymm |
3310    /// | 5 | Zmm, Zmm, Mem |
3311    /// | 6 | Zmm, Zmm, Zmm |
3312    /// +---+---------------+
3313    /// ```
3314    #[inline]
3315    pub fn vpshldvd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3316    where Assembler<'a>: VpshldvdEmitter<A, B, C> {
3317        <Self as VpshldvdEmitter<A, B, C>>::vpshldvd(self, op0, op1, op2);
3318    }
3319    /// `VPSHLDVD_MASK` (VPSHLDVD). 
3320    /// Concatenate packed data, extract result shifted to the left by variable value.
3321    ///
3322    ///
3323    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3324    ///
3325    /// Supported operand variants:
3326    ///
3327    /// ```text
3328    /// +---+---------------+
3329    /// | # | Operands      |
3330    /// +---+---------------+
3331    /// | 1 | Xmm, Xmm, Mem |
3332    /// | 2 | Xmm, Xmm, Xmm |
3333    /// | 3 | Ymm, Ymm, Mem |
3334    /// | 4 | Ymm, Ymm, Ymm |
3335    /// | 5 | Zmm, Zmm, Mem |
3336    /// | 6 | Zmm, Zmm, Zmm |
3337    /// +---+---------------+
3338    /// ```
3339    #[inline]
3340    pub fn vpshldvd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3341    where Assembler<'a>: VpshldvdMaskEmitter<A, B, C> {
3342        <Self as VpshldvdMaskEmitter<A, B, C>>::vpshldvd_mask(self, op0, op1, op2);
3343    }
3344    /// `VPSHLDVD_MASKZ` (VPSHLDVD). 
3345    /// Concatenate packed data, extract result shifted to the left by variable value.
3346    ///
3347    ///
3348    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3349    ///
3350    /// Supported operand variants:
3351    ///
3352    /// ```text
3353    /// +---+---------------+
3354    /// | # | Operands      |
3355    /// +---+---------------+
3356    /// | 1 | Xmm, Xmm, Mem |
3357    /// | 2 | Xmm, Xmm, Xmm |
3358    /// | 3 | Ymm, Ymm, Mem |
3359    /// | 4 | Ymm, Ymm, Ymm |
3360    /// | 5 | Zmm, Zmm, Mem |
3361    /// | 6 | Zmm, Zmm, Zmm |
3362    /// +---+---------------+
3363    /// ```
3364    #[inline]
3365    pub fn vpshldvd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3366    where Assembler<'a>: VpshldvdMaskzEmitter<A, B, C> {
3367        <Self as VpshldvdMaskzEmitter<A, B, C>>::vpshldvd_maskz(self, op0, op1, op2);
3368    }
3369    /// `VPSHLDVQ` (VPSHLDVQ). 
3370    /// Concatenate packed data, extract result shifted to the left by variable value.
3371    ///
3372    ///
3373    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3374    ///
3375    /// Supported operand variants:
3376    ///
3377    /// ```text
3378    /// +---+---------------+
3379    /// | # | Operands      |
3380    /// +---+---------------+
3381    /// | 1 | Xmm, Xmm, Mem |
3382    /// | 2 | Xmm, Xmm, Xmm |
3383    /// | 3 | Ymm, Ymm, Mem |
3384    /// | 4 | Ymm, Ymm, Ymm |
3385    /// | 5 | Zmm, Zmm, Mem |
3386    /// | 6 | Zmm, Zmm, Zmm |
3387    /// +---+---------------+
3388    /// ```
3389    #[inline]
3390    pub fn vpshldvq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3391    where Assembler<'a>: VpshldvqEmitter<A, B, C> {
3392        <Self as VpshldvqEmitter<A, B, C>>::vpshldvq(self, op0, op1, op2);
3393    }
3394    /// `VPSHLDVQ_MASK` (VPSHLDVQ). 
3395    /// Concatenate packed data, extract result shifted to the left by variable value.
3396    ///
3397    ///
3398    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3399    ///
3400    /// Supported operand variants:
3401    ///
3402    /// ```text
3403    /// +---+---------------+
3404    /// | # | Operands      |
3405    /// +---+---------------+
3406    /// | 1 | Xmm, Xmm, Mem |
3407    /// | 2 | Xmm, Xmm, Xmm |
3408    /// | 3 | Ymm, Ymm, Mem |
3409    /// | 4 | Ymm, Ymm, Ymm |
3410    /// | 5 | Zmm, Zmm, Mem |
3411    /// | 6 | Zmm, Zmm, Zmm |
3412    /// +---+---------------+
3413    /// ```
3414    #[inline]
3415    pub fn vpshldvq_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3416    where Assembler<'a>: VpshldvqMaskEmitter<A, B, C> {
3417        <Self as VpshldvqMaskEmitter<A, B, C>>::vpshldvq_mask(self, op0, op1, op2);
3418    }
3419    /// `VPSHLDVQ_MASKZ` (VPSHLDVQ). 
3420    /// Concatenate packed data, extract result shifted to the left by variable value.
3421    ///
3422    ///
3423    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3424    ///
3425    /// Supported operand variants:
3426    ///
3427    /// ```text
3428    /// +---+---------------+
3429    /// | # | Operands      |
3430    /// +---+---------------+
3431    /// | 1 | Xmm, Xmm, Mem |
3432    /// | 2 | Xmm, Xmm, Xmm |
3433    /// | 3 | Ymm, Ymm, Mem |
3434    /// | 4 | Ymm, Ymm, Ymm |
3435    /// | 5 | Zmm, Zmm, Mem |
3436    /// | 6 | Zmm, Zmm, Zmm |
3437    /// +---+---------------+
3438    /// ```
3439    #[inline]
3440    pub fn vpshldvq_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3441    where Assembler<'a>: VpshldvqMaskzEmitter<A, B, C> {
3442        <Self as VpshldvqMaskzEmitter<A, B, C>>::vpshldvq_maskz(self, op0, op1, op2);
3443    }
3444    /// `VPSHLDVW` (VPSHLDVW). 
3445    /// Concatenate packed data, extract result shifted to the left by variable value.
3446    ///
3447    ///
3448    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3449    ///
3450    /// Supported operand variants:
3451    ///
3452    /// ```text
3453    /// +---+---------------+
3454    /// | # | Operands      |
3455    /// +---+---------------+
3456    /// | 1 | Xmm, Xmm, Mem |
3457    /// | 2 | Xmm, Xmm, Xmm |
3458    /// | 3 | Ymm, Ymm, Mem |
3459    /// | 4 | Ymm, Ymm, Ymm |
3460    /// | 5 | Zmm, Zmm, Mem |
3461    /// | 6 | Zmm, Zmm, Zmm |
3462    /// +---+---------------+
3463    /// ```
3464    #[inline]
3465    pub fn vpshldvw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3466    where Assembler<'a>: VpshldvwEmitter<A, B, C> {
3467        <Self as VpshldvwEmitter<A, B, C>>::vpshldvw(self, op0, op1, op2);
3468    }
3469    /// `VPSHLDVW_MASK` (VPSHLDVW). 
3470    /// Concatenate packed data, extract result shifted to the left by variable value.
3471    ///
3472    ///
3473    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3474    ///
3475    /// Supported operand variants:
3476    ///
3477    /// ```text
3478    /// +---+---------------+
3479    /// | # | Operands      |
3480    /// +---+---------------+
3481    /// | 1 | Xmm, Xmm, Mem |
3482    /// | 2 | Xmm, Xmm, Xmm |
3483    /// | 3 | Ymm, Ymm, Mem |
3484    /// | 4 | Ymm, Ymm, Ymm |
3485    /// | 5 | Zmm, Zmm, Mem |
3486    /// | 6 | Zmm, Zmm, Zmm |
3487    /// +---+---------------+
3488    /// ```
3489    #[inline]
3490    pub fn vpshldvw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3491    where Assembler<'a>: VpshldvwMaskEmitter<A, B, C> {
3492        <Self as VpshldvwMaskEmitter<A, B, C>>::vpshldvw_mask(self, op0, op1, op2);
3493    }
3494    /// `VPSHLDVW_MASKZ` (VPSHLDVW). 
3495    /// Concatenate packed data, extract result shifted to the left by variable value.
3496    ///
3497    ///
3498    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLDV.html).
3499    ///
3500    /// Supported operand variants:
3501    ///
3502    /// ```text
3503    /// +---+---------------+
3504    /// | # | Operands      |
3505    /// +---+---------------+
3506    /// | 1 | Xmm, Xmm, Mem |
3507    /// | 2 | Xmm, Xmm, Xmm |
3508    /// | 3 | Ymm, Ymm, Mem |
3509    /// | 4 | Ymm, Ymm, Ymm |
3510    /// | 5 | Zmm, Zmm, Mem |
3511    /// | 6 | Zmm, Zmm, Zmm |
3512    /// +---+---------------+
3513    /// ```
3514    #[inline]
3515    pub fn vpshldvw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3516    where Assembler<'a>: VpshldvwMaskzEmitter<A, B, C> {
3517        <Self as VpshldvwMaskzEmitter<A, B, C>>::vpshldvw_maskz(self, op0, op1, op2);
3518    }
3519    /// `VPSHLDW` (VPSHLDW). 
3520    /// Concatenate packed data, extract result shifted to the left by constant value.
3521    ///
3522    ///
3523    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3524    ///
3525    /// Supported operand variants:
3526    ///
3527    /// ```text
3528    /// +---+--------------------+
3529    /// | # | Operands           |
3530    /// +---+--------------------+
3531    /// | 1 | Xmm, Xmm, Mem, Imm |
3532    /// | 2 | Xmm, Xmm, Xmm, Imm |
3533    /// | 3 | Ymm, Ymm, Mem, Imm |
3534    /// | 4 | Ymm, Ymm, Ymm, Imm |
3535    /// | 5 | Zmm, Zmm, Mem, Imm |
3536    /// | 6 | Zmm, Zmm, Zmm, Imm |
3537    /// +---+--------------------+
3538    /// ```
3539    #[inline]
3540    pub fn vpshldw<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3541    where Assembler<'a>: VpshldwEmitter<A, B, C, D> {
3542        <Self as VpshldwEmitter<A, B, C, D>>::vpshldw(self, op0, op1, op2, op3);
3543    }
3544    /// `VPSHLDW_MASK` (VPSHLDW). 
3545    /// Concatenate packed data, extract result shifted to the left by constant value.
3546    ///
3547    ///
3548    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3549    ///
3550    /// Supported operand variants:
3551    ///
3552    /// ```text
3553    /// +---+--------------------+
3554    /// | # | Operands           |
3555    /// +---+--------------------+
3556    /// | 1 | Xmm, Xmm, Mem, Imm |
3557    /// | 2 | Xmm, Xmm, Xmm, Imm |
3558    /// | 3 | Ymm, Ymm, Mem, Imm |
3559    /// | 4 | Ymm, Ymm, Ymm, Imm |
3560    /// | 5 | Zmm, Zmm, Mem, Imm |
3561    /// | 6 | Zmm, Zmm, Zmm, Imm |
3562    /// +---+--------------------+
3563    /// ```
3564    #[inline]
3565    pub fn vpshldw_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3566    where Assembler<'a>: VpshldwMaskEmitter<A, B, C, D> {
3567        <Self as VpshldwMaskEmitter<A, B, C, D>>::vpshldw_mask(self, op0, op1, op2, op3);
3568    }
3569    /// `VPSHLDW_MASKZ` (VPSHLDW). 
3570    /// Concatenate packed data, extract result shifted to the left by constant value.
3571    ///
3572    ///
3573    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHLD.html).
3574    ///
3575    /// Supported operand variants:
3576    ///
3577    /// ```text
3578    /// +---+--------------------+
3579    /// | # | Operands           |
3580    /// +---+--------------------+
3581    /// | 1 | Xmm, Xmm, Mem, Imm |
3582    /// | 2 | Xmm, Xmm, Xmm, Imm |
3583    /// | 3 | Ymm, Ymm, Mem, Imm |
3584    /// | 4 | Ymm, Ymm, Ymm, Imm |
3585    /// | 5 | Zmm, Zmm, Mem, Imm |
3586    /// | 6 | Zmm, Zmm, Zmm, Imm |
3587    /// +---+--------------------+
3588    /// ```
3589    #[inline]
3590    pub fn vpshldw_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3591    where Assembler<'a>: VpshldwMaskzEmitter<A, B, C, D> {
3592        <Self as VpshldwMaskzEmitter<A, B, C, D>>::vpshldw_maskz(self, op0, op1, op2, op3);
3593    }
3594    /// `VPSHRDD` (VPSHRDD). 
3595    /// Concatenate packed data, extract result shifted to the right by constant value.
3596    ///
3597    ///
3598    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3599    ///
3600    /// Supported operand variants:
3601    ///
3602    /// ```text
3603    /// +---+--------------------+
3604    /// | # | Operands           |
3605    /// +---+--------------------+
3606    /// | 1 | Xmm, Xmm, Mem, Imm |
3607    /// | 2 | Xmm, Xmm, Xmm, Imm |
3608    /// | 3 | Ymm, Ymm, Mem, Imm |
3609    /// | 4 | Ymm, Ymm, Ymm, Imm |
3610    /// | 5 | Zmm, Zmm, Mem, Imm |
3611    /// | 6 | Zmm, Zmm, Zmm, Imm |
3612    /// +---+--------------------+
3613    /// ```
3614    #[inline]
3615    pub fn vpshrdd<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3616    where Assembler<'a>: VpshrddEmitter<A, B, C, D> {
3617        <Self as VpshrddEmitter<A, B, C, D>>::vpshrdd(self, op0, op1, op2, op3);
3618    }
3619    /// `VPSHRDD_MASK` (VPSHRDD). 
3620    /// Concatenate packed data, extract result shifted to the right by constant value.
3621    ///
3622    ///
3623    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3624    ///
3625    /// Supported operand variants:
3626    ///
3627    /// ```text
3628    /// +---+--------------------+
3629    /// | # | Operands           |
3630    /// +---+--------------------+
3631    /// | 1 | Xmm, Xmm, Mem, Imm |
3632    /// | 2 | Xmm, Xmm, Xmm, Imm |
3633    /// | 3 | Ymm, Ymm, Mem, Imm |
3634    /// | 4 | Ymm, Ymm, Ymm, Imm |
3635    /// | 5 | Zmm, Zmm, Mem, Imm |
3636    /// | 6 | Zmm, Zmm, Zmm, Imm |
3637    /// +---+--------------------+
3638    /// ```
3639    #[inline]
3640    pub fn vpshrdd_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3641    where Assembler<'a>: VpshrddMaskEmitter<A, B, C, D> {
3642        <Self as VpshrddMaskEmitter<A, B, C, D>>::vpshrdd_mask(self, op0, op1, op2, op3);
3643    }
3644    /// `VPSHRDD_MASKZ` (VPSHRDD). 
3645    /// Concatenate packed data, extract result shifted to the right by constant value.
3646    ///
3647    ///
3648    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3649    ///
3650    /// Supported operand variants:
3651    ///
3652    /// ```text
3653    /// +---+--------------------+
3654    /// | # | Operands           |
3655    /// +---+--------------------+
3656    /// | 1 | Xmm, Xmm, Mem, Imm |
3657    /// | 2 | Xmm, Xmm, Xmm, Imm |
3658    /// | 3 | Ymm, Ymm, Mem, Imm |
3659    /// | 4 | Ymm, Ymm, Ymm, Imm |
3660    /// | 5 | Zmm, Zmm, Mem, Imm |
3661    /// | 6 | Zmm, Zmm, Zmm, Imm |
3662    /// +---+--------------------+
3663    /// ```
3664    #[inline]
3665    pub fn vpshrdd_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3666    where Assembler<'a>: VpshrddMaskzEmitter<A, B, C, D> {
3667        <Self as VpshrddMaskzEmitter<A, B, C, D>>::vpshrdd_maskz(self, op0, op1, op2, op3);
3668    }
3669    /// `VPSHRDQ` (VPSHRDQ). 
3670    /// Concatenate packed data, extract result shifted to the right by constant value.
3671    ///
3672    ///
3673    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3674    ///
3675    /// Supported operand variants:
3676    ///
3677    /// ```text
3678    /// +---+--------------------+
3679    /// | # | Operands           |
3680    /// +---+--------------------+
3681    /// | 1 | Xmm, Xmm, Mem, Imm |
3682    /// | 2 | Xmm, Xmm, Xmm, Imm |
3683    /// | 3 | Ymm, Ymm, Mem, Imm |
3684    /// | 4 | Ymm, Ymm, Ymm, Imm |
3685    /// | 5 | Zmm, Zmm, Mem, Imm |
3686    /// | 6 | Zmm, Zmm, Zmm, Imm |
3687    /// +---+--------------------+
3688    /// ```
3689    #[inline]
3690    pub fn vpshrdq<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3691    where Assembler<'a>: VpshrdqEmitter<A, B, C, D> {
3692        <Self as VpshrdqEmitter<A, B, C, D>>::vpshrdq(self, op0, op1, op2, op3);
3693    }
3694    /// `VPSHRDQ_MASK` (VPSHRDQ). 
3695    /// Concatenate packed data, extract result shifted to the right by constant value.
3696    ///
3697    ///
3698    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3699    ///
3700    /// Supported operand variants:
3701    ///
3702    /// ```text
3703    /// +---+--------------------+
3704    /// | # | Operands           |
3705    /// +---+--------------------+
3706    /// | 1 | Xmm, Xmm, Mem, Imm |
3707    /// | 2 | Xmm, Xmm, Xmm, Imm |
3708    /// | 3 | Ymm, Ymm, Mem, Imm |
3709    /// | 4 | Ymm, Ymm, Ymm, Imm |
3710    /// | 5 | Zmm, Zmm, Mem, Imm |
3711    /// | 6 | Zmm, Zmm, Zmm, Imm |
3712    /// +---+--------------------+
3713    /// ```
3714    #[inline]
3715    pub fn vpshrdq_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3716    where Assembler<'a>: VpshrdqMaskEmitter<A, B, C, D> {
3717        <Self as VpshrdqMaskEmitter<A, B, C, D>>::vpshrdq_mask(self, op0, op1, op2, op3);
3718    }
3719    /// `VPSHRDQ_MASKZ` (VPSHRDQ). 
3720    /// Concatenate packed data, extract result shifted to the right by constant value.
3721    ///
3722    ///
3723    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3724    ///
3725    /// Supported operand variants:
3726    ///
3727    /// ```text
3728    /// +---+--------------------+
3729    /// | # | Operands           |
3730    /// +---+--------------------+
3731    /// | 1 | Xmm, Xmm, Mem, Imm |
3732    /// | 2 | Xmm, Xmm, Xmm, Imm |
3733    /// | 3 | Ymm, Ymm, Mem, Imm |
3734    /// | 4 | Ymm, Ymm, Ymm, Imm |
3735    /// | 5 | Zmm, Zmm, Mem, Imm |
3736    /// | 6 | Zmm, Zmm, Zmm, Imm |
3737    /// +---+--------------------+
3738    /// ```
3739    #[inline]
3740    pub fn vpshrdq_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3741    where Assembler<'a>: VpshrdqMaskzEmitter<A, B, C, D> {
3742        <Self as VpshrdqMaskzEmitter<A, B, C, D>>::vpshrdq_maskz(self, op0, op1, op2, op3);
3743    }
3744    /// `VPSHRDVD` (VPSHRDVD). 
3745    /// Concatenate packed data, extract result shifted to the right by variable value.
3746    ///
3747    ///
3748    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3749    ///
3750    /// Supported operand variants:
3751    ///
3752    /// ```text
3753    /// +---+---------------+
3754    /// | # | Operands      |
3755    /// +---+---------------+
3756    /// | 1 | Xmm, Xmm, Mem |
3757    /// | 2 | Xmm, Xmm, Xmm |
3758    /// | 3 | Ymm, Ymm, Mem |
3759    /// | 4 | Ymm, Ymm, Ymm |
3760    /// | 5 | Zmm, Zmm, Mem |
3761    /// | 6 | Zmm, Zmm, Zmm |
3762    /// +---+---------------+
3763    /// ```
3764    #[inline]
3765    pub fn vpshrdvd<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3766    where Assembler<'a>: VpshrdvdEmitter<A, B, C> {
3767        <Self as VpshrdvdEmitter<A, B, C>>::vpshrdvd(self, op0, op1, op2);
3768    }
3769    /// `VPSHRDVD_MASK` (VPSHRDVD). 
3770    /// Concatenate packed data, extract result shifted to the right by variable value.
3771    ///
3772    ///
3773    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3774    ///
3775    /// Supported operand variants:
3776    ///
3777    /// ```text
3778    /// +---+---------------+
3779    /// | # | Operands      |
3780    /// +---+---------------+
3781    /// | 1 | Xmm, Xmm, Mem |
3782    /// | 2 | Xmm, Xmm, Xmm |
3783    /// | 3 | Ymm, Ymm, Mem |
3784    /// | 4 | Ymm, Ymm, Ymm |
3785    /// | 5 | Zmm, Zmm, Mem |
3786    /// | 6 | Zmm, Zmm, Zmm |
3787    /// +---+---------------+
3788    /// ```
3789    #[inline]
3790    pub fn vpshrdvd_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3791    where Assembler<'a>: VpshrdvdMaskEmitter<A, B, C> {
3792        <Self as VpshrdvdMaskEmitter<A, B, C>>::vpshrdvd_mask(self, op0, op1, op2);
3793    }
3794    /// `VPSHRDVD_MASKZ` (VPSHRDVD). 
3795    /// Concatenate packed data, extract result shifted to the right by variable value.
3796    ///
3797    ///
3798    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3799    ///
3800    /// Supported operand variants:
3801    ///
3802    /// ```text
3803    /// +---+---------------+
3804    /// | # | Operands      |
3805    /// +---+---------------+
3806    /// | 1 | Xmm, Xmm, Mem |
3807    /// | 2 | Xmm, Xmm, Xmm |
3808    /// | 3 | Ymm, Ymm, Mem |
3809    /// | 4 | Ymm, Ymm, Ymm |
3810    /// | 5 | Zmm, Zmm, Mem |
3811    /// | 6 | Zmm, Zmm, Zmm |
3812    /// +---+---------------+
3813    /// ```
3814    #[inline]
3815    pub fn vpshrdvd_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3816    where Assembler<'a>: VpshrdvdMaskzEmitter<A, B, C> {
3817        <Self as VpshrdvdMaskzEmitter<A, B, C>>::vpshrdvd_maskz(self, op0, op1, op2);
3818    }
3819    /// `VPSHRDVQ` (VPSHRDVQ). 
3820    /// Concatenate packed data, extract result shifted to the right by variable value.
3821    ///
3822    ///
3823    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3824    ///
3825    /// Supported operand variants:
3826    ///
3827    /// ```text
3828    /// +---+---------------+
3829    /// | # | Operands      |
3830    /// +---+---------------+
3831    /// | 1 | Xmm, Xmm, Mem |
3832    /// | 2 | Xmm, Xmm, Xmm |
3833    /// | 3 | Ymm, Ymm, Mem |
3834    /// | 4 | Ymm, Ymm, Ymm |
3835    /// | 5 | Zmm, Zmm, Mem |
3836    /// | 6 | Zmm, Zmm, Zmm |
3837    /// +---+---------------+
3838    /// ```
3839    #[inline]
3840    pub fn vpshrdvq<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3841    where Assembler<'a>: VpshrdvqEmitter<A, B, C> {
3842        <Self as VpshrdvqEmitter<A, B, C>>::vpshrdvq(self, op0, op1, op2);
3843    }
3844    /// `VPSHRDVQ_MASK` (VPSHRDVQ). 
3845    /// Concatenate packed data, extract result shifted to the right by variable value.
3846    ///
3847    ///
3848    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3849    ///
3850    /// Supported operand variants:
3851    ///
3852    /// ```text
3853    /// +---+---------------+
3854    /// | # | Operands      |
3855    /// +---+---------------+
3856    /// | 1 | Xmm, Xmm, Mem |
3857    /// | 2 | Xmm, Xmm, Xmm |
3858    /// | 3 | Ymm, Ymm, Mem |
3859    /// | 4 | Ymm, Ymm, Ymm |
3860    /// | 5 | Zmm, Zmm, Mem |
3861    /// | 6 | Zmm, Zmm, Zmm |
3862    /// +---+---------------+
3863    /// ```
3864    #[inline]
3865    pub fn vpshrdvq_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3866    where Assembler<'a>: VpshrdvqMaskEmitter<A, B, C> {
3867        <Self as VpshrdvqMaskEmitter<A, B, C>>::vpshrdvq_mask(self, op0, op1, op2);
3868    }
3869    /// `VPSHRDVQ_MASKZ` (VPSHRDVQ). 
3870    /// Concatenate packed data, extract result shifted to the right by variable value.
3871    ///
3872    ///
3873    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3874    ///
3875    /// Supported operand variants:
3876    ///
3877    /// ```text
3878    /// +---+---------------+
3879    /// | # | Operands      |
3880    /// +---+---------------+
3881    /// | 1 | Xmm, Xmm, Mem |
3882    /// | 2 | Xmm, Xmm, Xmm |
3883    /// | 3 | Ymm, Ymm, Mem |
3884    /// | 4 | Ymm, Ymm, Ymm |
3885    /// | 5 | Zmm, Zmm, Mem |
3886    /// | 6 | Zmm, Zmm, Zmm |
3887    /// +---+---------------+
3888    /// ```
3889    #[inline]
3890    pub fn vpshrdvq_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3891    where Assembler<'a>: VpshrdvqMaskzEmitter<A, B, C> {
3892        <Self as VpshrdvqMaskzEmitter<A, B, C>>::vpshrdvq_maskz(self, op0, op1, op2);
3893    }
3894    /// `VPSHRDVW` (VPSHRDVW). 
3895    /// Concatenate packed data, extract result shifted to the right by variable value.
3896    ///
3897    ///
3898    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3899    ///
3900    /// Supported operand variants:
3901    ///
3902    /// ```text
3903    /// +---+---------------+
3904    /// | # | Operands      |
3905    /// +---+---------------+
3906    /// | 1 | Xmm, Xmm, Mem |
3907    /// | 2 | Xmm, Xmm, Xmm |
3908    /// | 3 | Ymm, Ymm, Mem |
3909    /// | 4 | Ymm, Ymm, Ymm |
3910    /// | 5 | Zmm, Zmm, Mem |
3911    /// | 6 | Zmm, Zmm, Zmm |
3912    /// +---+---------------+
3913    /// ```
3914    #[inline]
3915    pub fn vpshrdvw<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3916    where Assembler<'a>: VpshrdvwEmitter<A, B, C> {
3917        <Self as VpshrdvwEmitter<A, B, C>>::vpshrdvw(self, op0, op1, op2);
3918    }
3919    /// `VPSHRDVW_MASK` (VPSHRDVW). 
3920    /// Concatenate packed data, extract result shifted to the right by variable value.
3921    ///
3922    ///
3923    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3924    ///
3925    /// Supported operand variants:
3926    ///
3927    /// ```text
3928    /// +---+---------------+
3929    /// | # | Operands      |
3930    /// +---+---------------+
3931    /// | 1 | Xmm, Xmm, Mem |
3932    /// | 2 | Xmm, Xmm, Xmm |
3933    /// | 3 | Ymm, Ymm, Mem |
3934    /// | 4 | Ymm, Ymm, Ymm |
3935    /// | 5 | Zmm, Zmm, Mem |
3936    /// | 6 | Zmm, Zmm, Zmm |
3937    /// +---+---------------+
3938    /// ```
3939    #[inline]
3940    pub fn vpshrdvw_mask<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3941    where Assembler<'a>: VpshrdvwMaskEmitter<A, B, C> {
3942        <Self as VpshrdvwMaskEmitter<A, B, C>>::vpshrdvw_mask(self, op0, op1, op2);
3943    }
3944    /// `VPSHRDVW_MASKZ` (VPSHRDVW). 
3945    /// Concatenate packed data, extract result shifted to the right by variable value.
3946    ///
3947    ///
3948    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRDV.html).
3949    ///
3950    /// Supported operand variants:
3951    ///
3952    /// ```text
3953    /// +---+---------------+
3954    /// | # | Operands      |
3955    /// +---+---------------+
3956    /// | 1 | Xmm, Xmm, Mem |
3957    /// | 2 | Xmm, Xmm, Xmm |
3958    /// | 3 | Ymm, Ymm, Mem |
3959    /// | 4 | Ymm, Ymm, Ymm |
3960    /// | 5 | Zmm, Zmm, Mem |
3961    /// | 6 | Zmm, Zmm, Zmm |
3962    /// +---+---------------+
3963    /// ```
3964    #[inline]
3965    pub fn vpshrdvw_maskz<A, B, C>(&mut self, op0: A, op1: B, op2: C)
3966    where Assembler<'a>: VpshrdvwMaskzEmitter<A, B, C> {
3967        <Self as VpshrdvwMaskzEmitter<A, B, C>>::vpshrdvw_maskz(self, op0, op1, op2);
3968    }
3969    /// `VPSHRDW` (VPSHRDW). 
3970    /// Concatenate packed data, extract result shifted to the right by constant value.
3971    ///
3972    ///
3973    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3974    ///
3975    /// Supported operand variants:
3976    ///
3977    /// ```text
3978    /// +---+--------------------+
3979    /// | # | Operands           |
3980    /// +---+--------------------+
3981    /// | 1 | Xmm, Xmm, Mem, Imm |
3982    /// | 2 | Xmm, Xmm, Xmm, Imm |
3983    /// | 3 | Ymm, Ymm, Mem, Imm |
3984    /// | 4 | Ymm, Ymm, Ymm, Imm |
3985    /// | 5 | Zmm, Zmm, Mem, Imm |
3986    /// | 6 | Zmm, Zmm, Zmm, Imm |
3987    /// +---+--------------------+
3988    /// ```
3989    #[inline]
3990    pub fn vpshrdw<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
3991    where Assembler<'a>: VpshrdwEmitter<A, B, C, D> {
3992        <Self as VpshrdwEmitter<A, B, C, D>>::vpshrdw(self, op0, op1, op2, op3);
3993    }
3994    /// `VPSHRDW_MASK` (VPSHRDW). 
3995    /// Concatenate packed data, extract result shifted to the right by constant value.
3996    ///
3997    ///
3998    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
3999    ///
4000    /// Supported operand variants:
4001    ///
4002    /// ```text
4003    /// +---+--------------------+
4004    /// | # | Operands           |
4005    /// +---+--------------------+
4006    /// | 1 | Xmm, Xmm, Mem, Imm |
4007    /// | 2 | Xmm, Xmm, Xmm, Imm |
4008    /// | 3 | Ymm, Ymm, Mem, Imm |
4009    /// | 4 | Ymm, Ymm, Ymm, Imm |
4010    /// | 5 | Zmm, Zmm, Mem, Imm |
4011    /// | 6 | Zmm, Zmm, Zmm, Imm |
4012    /// +---+--------------------+
4013    /// ```
4014    #[inline]
4015    pub fn vpshrdw_mask<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
4016    where Assembler<'a>: VpshrdwMaskEmitter<A, B, C, D> {
4017        <Self as VpshrdwMaskEmitter<A, B, C, D>>::vpshrdw_mask(self, op0, op1, op2, op3);
4018    }
4019    /// `VPSHRDW_MASKZ` (VPSHRDW). 
4020    /// Concatenate packed data, extract result shifted to the right by constant value.
4021    ///
4022    ///
4023    /// For more details, see the [Intel manual](https://www.felixcloutier.com/x86/VPSHRD.html).
4024    ///
4025    /// Supported operand variants:
4026    ///
4027    /// ```text
4028    /// +---+--------------------+
4029    /// | # | Operands           |
4030    /// +---+--------------------+
4031    /// | 1 | Xmm, Xmm, Mem, Imm |
4032    /// | 2 | Xmm, Xmm, Xmm, Imm |
4033    /// | 3 | Ymm, Ymm, Mem, Imm |
4034    /// | 4 | Ymm, Ymm, Ymm, Imm |
4035    /// | 5 | Zmm, Zmm, Mem, Imm |
4036    /// | 6 | Zmm, Zmm, Zmm, Imm |
4037    /// +---+--------------------+
4038    /// ```
4039    #[inline]
4040    pub fn vpshrdw_maskz<A, B, C, D>(&mut self, op0: A, op1: B, op2: C, op3: D)
4041    where Assembler<'a>: VpshrdwMaskzEmitter<A, B, C, D> {
4042        <Self as VpshrdwMaskzEmitter<A, B, C, D>>::vpshrdw_maskz(self, op0, op1, op2, op3);
4043    }
4044}