asmkit/x86/macroassembler.rs

//! MacroAssembler for X86/64

use crate::core::{
    buffer::CodeBuffer,
    operand::{Imm, OperandCast},
};

use super::*;

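/// Macro assembler built on top of the raw [`Assembler`]: each helper inspects
/// its operands (GP register, vector register, memory, immediate) and emits a
/// short instruction sequence, choosing AVX or SSE encodings based on
/// [`MacroAssembler::supports_avx`].
///
/// A minimal usage sketch (illustrative only; the `CodeBuffer` constructor and
/// the register constants `RDI`/`RSI` are assumed here, not defined in this
/// file):
///
/// ```ignore
/// let mut buffer = CodeBuffer::new(); // assumed constructor
/// let mut masm = MacroAssembler::new(&mut buffer);
/// masm.mov64(RAX, RDI);      // dst, src
/// masm.add64(RAX, RAX, RSI); // dst = src1 + src2
/// ```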
pub struct MacroAssembler<'a> {
    pub asm: Assembler<'a>,
}

impl<'a> MacroAssembler<'a> {
    /// Scratch GPR reserved for macro-op expansions that need a temporary.
    pub const SCRATCH_REGISTER: Gpq = R11;
    /// Vector temporary used by the SSE fallback paths (register swaps and
    /// aliased subtraction/division).
    pub const FP_TEMP_REGISTER: Xmm = XMM15;

    pub fn new(buffer: &'a mut CodeBuffer) -> Self {
        Self {
            asm: Assembler::new(buffer),
        }
    }

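    /// Whether AVX encodings are emitted. Currently hard-coded to `true`, so
    /// the SSE fallback paths below are only exercised if this is changed
    /// (for example, to a runtime CPU-feature check).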
    pub fn supports_avx(&self) -> bool {
        true
    }

    #[cold]
    #[inline(never)]
    fn unsupported_operands(&self, s: &str) {
        unreachable!("{:?}", s);
    }

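    /// Swaps two 32-bit values: GP/GP and MEM/GP pairs use `xchg`, while
    /// vector/vector pairs are rotated through [`Self::FP_TEMP_REGISTER`].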
    pub fn swap32(&mut self, src1: impl OperandCast, src2: impl OperandCast) {
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if src1.is_gp() && src2.is_gp() {
            if src1.id() == src2.id() {
                return;
            }
            self.asm.xchg32rr(src1, src2);
        } else if src1.is_mem() && src2.is_gp() {
            self.asm.xchg32mr(src1, src2);
        } else if src1.is_vec() && src2.is_vec() {
            if src1.id() == src2.id() {
                return;
            }
            self.mov64(Self::FP_TEMP_REGISTER, src1);
            self.mov64(src1, src2);
            self.mov64(src2, Self::FP_TEMP_REGISTER);
        } else {
            self.unsupported_operands("RR or RM pair expected");
        }
    }

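    /// 64-bit counterpart of [`Self::swap32`]: `xchg` for GP/GP and MEM/GP
    /// pairs, a three-move rotation through [`Self::FP_TEMP_REGISTER`] for
    /// vector pairs.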
    pub fn swap64(&mut self, src1: impl OperandCast, src2: impl OperandCast) {
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if src1.is_gp() && src2.is_gp() {
            if src1.id() == src2.id() {
                return;
            }
            self.asm.xchg64rr(src1, src2);
        } else if src1.is_mem() && src2.is_gp() {
            self.asm.xchg64mr(src1, src2);
        } else if src1.is_vec() && src2.is_vec() {
            if src1.id() == src2.id() {
                return;
            }
            self.mov64(Self::FP_TEMP_REGISTER, src1);
            self.mov64(src1, src2);
            self.mov64(src2, Self::FP_TEMP_REGISTER);
        } else {
            self.unsupported_operands("RR or RM pair expected");
        }
    }

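    /// 32-bit move between GP registers, from an immediate into a GP register,
    /// between vector registers, or between a GP and a vector register.
    /// Moves between identical registers are elided.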
    pub fn mov32(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_gp() && src.is_gp() {
            if dst.id() == src.id() {
                return;
            }
            self.asm.mov32rr(dst, src);
        } else if dst.is_gp() && src.is_imm() {
            self.asm.mov32ri(dst, src);
        } else if dst.is_vec() && src.is_vec() {
            if dst.id() == src.id() {
                return;
            }
            if self.supports_avx() {
                self.asm.vmovaps128rr(dst, src);
            } else {
                self.asm.sse_movapsrr(dst, src);
            }
        } else if dst.is_vec() && src.is_gp() {
            if self.supports_avx() {
                self.asm.vmovd_g2xrr(dst, src);
            } else {
                self.asm.sse_movd_g2xrr(dst, src);
            }
        } else if dst.is_gp() && src.is_vec() {
            if self.supports_avx() {
                self.asm.vmovd_x2grr(dst, src);
            } else {
                self.asm.sse_movd_x2grr(dst, src);
            }
        } else {
            self.unsupported_operands("RR or RI pair expected");
        }
    }

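    /// 64-bit move between GP registers, from an immediate into a GP register,
    /// between vector registers, or between a GP and a vector register.
    /// Moves between identical registers are elided.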
    pub fn mov64(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_gp() && src.is_gp() {
            if dst.id() == src.id() {
                return;
            }
            self.asm.mov64rr(dst, src);
        } else if dst.is_gp() && src.is_imm() {
            self.asm.mov64ri(dst, src);
        } else if dst.is_vec() && src.is_vec() {
            if dst.id() == src.id() {
                return;
            }
            if self.supports_avx() {
                self.asm.vmovaps128rr(dst, src);
            } else {
                self.asm.sse_movapsrr(dst, src);
            }
        } else if dst.is_vec() && src.is_gp() {
            if self.supports_avx() {
                self.asm.vmovq_g2xrr(dst, src);
            } else {
                self.asm.sse_movq_g2xrr(dst, src);
            }
        } else if dst.is_gp() && src.is_vec() {
            if self.supports_avx() {
                self.asm.vmovq_x2grr(dst, src);
            } else {
                self.asm.sse_movq_x2grr(dst, src);
            }
        } else {
            self.unsupported_operands("RR or RI pair expected");
        }
    }

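    /// Loads an 8-bit value from memory into a GP register.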
    pub fn load8(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_reg() && src.is_mem() {
            self.asm.mov8rm(dst, src);
        } else {
            self.unsupported_operands("RM or MR pair expected");
        }
    }

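    /// Loads a 16-bit value from memory into a GP register.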
    pub fn load16(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_reg() && src.is_mem() {
            self.asm.mov16rm(dst, src);
        } else {
            self.unsupported_operands("RM or MR pair expected");
        }
    }

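    /// Loads a 32-bit value from memory into a GP register, or a scalar single
    /// (`movss`) into a vector register.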
    pub fn load32(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_gp() && src.is_mem() {
            self.asm.mov32rm(dst, src);
        } else if dst.is_vec() && src.is_mem() {
            if self.supports_avx() {
                self.asm.vmovssrm(dst, src);
            } else {
                self.asm.sse_movssrm(dst, src);
            }
        } else {
            self.unsupported_operands("RM or MR pair expected");
        }
    }

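    /// Loads a 64-bit value from memory into a GP register, or a scalar double
    /// (`movsd`) into a vector register.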
    pub fn load64(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_gp() && src.is_mem() {
            self.asm.mov64rm(dst, src);
        } else if dst.is_vec() && src.is_mem() {
            if self.supports_avx() {
                self.asm.vmovsdrm(dst, src);
            } else {
                self.asm.sse_movsdrm(dst, src);
            }
        } else {
            self.unsupported_operands("RM or MR pair expected");
        }
    }

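    /// Stores an 8-bit register or immediate value to memory.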
    pub fn store8(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_mem() && src.is_reg() {
            self.asm.mov8mr(dst, src);
        } else if dst.is_mem() && src.is_imm() {
            self.asm.mov8mi(dst, src);
        } else {
            self.unsupported_operands("MR or MI pair expected");
        }
    }

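    /// Stores a 16-bit register or immediate value to memory.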
    pub fn store16(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_mem() && src.is_reg() {
            self.asm.mov16mr(dst, src);
        } else if dst.is_mem() && src.is_imm() {
            self.asm.mov16mi(dst, src);
        } else {
            self.unsupported_operands("MR or MI pair expected");
        }
    }

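    /// Stores a 32-bit register or immediate value to memory.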
    pub fn store32(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_mem() && src.is_reg() {
            self.asm.mov32mr(dst, src);
        } else if dst.is_mem() && src.is_imm() {
            self.asm.mov32mi(dst, src);
        } else {
            self.unsupported_operands("MR or MI pair expected");
        }
    }

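    /// Stores a 64-bit register or immediate value to memory.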
    pub fn store64(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = *src.as_operand();
        if dst.is_mem() && src.is_reg() {
            self.asm.mov64mr(dst, src);
        } else if dst.is_mem() && src.is_imm() {
            self.asm.mov64mi(dst, src);
        } else {
            self.unsupported_operands("MR or MI pair expected");
        }
    }

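    /// Zeroes a register: `xor` for GP registers, `xorps` for vector registers.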
    pub fn zero32(&mut self, dst: impl OperandCast) {
        let dst = *dst.as_operand();

        if dst.is_gp() {
            self.asm.xor32rr(dst, dst);
        } else if dst.is_vec() {
            if self.supports_avx() {
                self.asm.vxorps128rrr(dst, dst, dst);
            } else {
                self.asm.sse_xorpsrr(dst, dst);
            }
        } else {
            self.unsupported_operands("GP or VEC expected");
        }
    }

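    /// 64-bit counterpart of [`Self::zero32`]: `xor` for GP registers, `xorps`
    /// for vector registers.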
    pub fn zero64(&mut self, dst: impl OperandCast) {
        let dst = *dst.as_operand();
        if dst.is_gp() {
            self.asm.xor64rr(dst, dst);
        } else if dst.is_vec() {
            if self.supports_avx() {
                self.asm.vxorps128rrr(dst, dst, dst);
            } else {
                self.asm.sse_xorpsrr(dst, dst);
            }
        } else {
            self.unsupported_operands("GP or VEC expected");
        }
    }

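    /// Three-operand 32-bit add, `dst = src1 + src2`. GP forms are lowered to
    /// `lea`/`add` so that `dst` may freely alias either source; vector forms
    /// use scalar-single `addss`.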
    pub fn add32(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            self.x86_lea32(dst, ptr32_index(src1.as_::<Gpq>(), src2.as_::<Gpq>(), 0, 0));
        } else if dst.is_gp() && src1.is_gp() && src2.is_imm() {
            if src1.id() == dst.id() {
                self.asm.add32ri(dst, src2);
            } else {
                self.x86_lea32(
                    dst,
                    ptr32(src1.as_::<Gpq>(), src2.as_::<Imm>().value() as i32),
                );
            }
        } else if dst.is_mem() && src1.is_gp() && src2.is_reg() {
            self.asm.mov32mr(dst, src1);
            self.asm.add32mr(dst, src2);
        } else if dst.is_mem() && src1.is_gp() && src2.is_imm() {
            self.asm.mov32mr(dst, src1);
            self.asm.add32mi(dst, src2);
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vaddssrrr(dst, src1, src2);
            } else if dst.id() == src1.id() {
                self.asm.sse_addssrr(dst, src2);
            } else if dst.id() == src2.id() {
                // Addition commutes, so fold src1 into dst instead.
                self.asm.sse_addssrr(dst, src1);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_addssrr(dst, src2);
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vaddssrrm(dst, src1, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_addssrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR, RRI, or VVV expected");
        }
    }

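    /// Three-operand 64-bit add, `dst = src1 + src2`. GP forms are lowered to
    /// `lea`/`add` so that `dst` may freely alias either source; vector forms
    /// use scalar-double `addsd`.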
    pub fn add64(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            self.x86_lea64(dst, ptr64_index(src1.as_::<Gpq>(), src2.as_::<Gpq>(), 0, 0));
        } else if dst.is_gp() && src1.is_gp() && src2.is_imm() {
            if src1.id() == dst.id() {
                self.asm.add64ri(dst, src2);
            } else {
                self.x86_lea64(
                    dst,
                    ptr64(src1.as_::<Gpq>(), src2.as_::<Imm>().value() as i32),
                );
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_mem() {
            if src1.id() == dst.id() {
                self.asm.add64rm(dst, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.add64rm(dst, src2);
            }
        } else if dst.is_mem() && src1.is_gp() && src2.is_reg() {
            self.asm.mov64mr(dst, src1);
            self.asm.add64mr(dst, src2);
        } else if dst.is_mem() && src1.is_gp() && src2.is_imm() {
            self.asm.mov64mr(dst, src1);
            self.asm.add64mi(dst, src2);
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vaddsdrrr(dst, src1, src2);
            } else if dst.id() == src1.id() {
                self.asm.sse_addsdrr(dst, src2);
            } else if dst.id() == src2.id() {
                // Addition commutes, so fold src1 into dst instead.
                self.asm.sse_addsdrr(dst, src1);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_addsdrr(dst, src2);
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vaddsdrrm(dst, src1, src2);
            } else if dst.id() == src1.id() {
                self.asm.sse_addsdrm(dst, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_addsdrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR, RRI, or VVV expected");
        }
    }

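    /// Emits a 32-bit `lea`, but collapses to a plain `add` when the address
    /// is a bare base+index expression whose base or index already aliases
    /// `dst`.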
    pub fn x86_lea32(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = src.as_operand().as_::<Mem>();

        if !src.has_offset() && !src.has_shift() {
            if src.base_id() == dst.id() {
                self.asm.add32rr(dst, Gpq::from_id(src.index_id()));
                return;
            }

            if src.index_id() == dst.id() {
                self.asm.add32rr(dst, Gpq::from_id(src.base_id()));
                return;
            }
        }

        self.asm.lea32rm(dst, src);
    }

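    /// Emits a 64-bit `lea`, but collapses to a plain `add` when the address
    /// is a bare base+index expression whose base or index already aliases
    /// `dst`.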
    pub fn x86_lea64(&mut self, dst: impl OperandCast, src: impl OperandCast) {
        let dst = *dst.as_operand();
        let src = src.as_operand().as_::<Mem>();

        if !src.has_offset() && !src.has_shift() {
            if src.base_id() == dst.id() {
                self.asm.add64rr(dst, Gpq::from_id(src.index_id()));
                return;
            }

            if src.index_id() == dst.id() {
                self.asm.add64rr(dst, Gpq::from_id(src.base_id()));
                return;
            }
        }

        self.asm.lea64rm(dst, src);
    }

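    /// Three-operand 32-bit subtract, `dst = src1 - src2`, handling the cases
    /// where `dst` aliases either source; vector forms use scalar-single
    /// `subss` (with [`Self::FP_TEMP_REGISTER`] on the SSE path when `dst`
    /// aliases `src2`).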
    pub fn sub32(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            if dst.id() == src1.id() {
                self.asm.sub32rr(dst, src2);
            } else if dst.id() == src2.id() {
                // dst aliases src2: compute src1 - src2 as src1 + (-src2).
                self.neg32(dst);
                self.add32(dst, dst, src1);
            } else {
                self.mov32(dst, src1);
                self.asm.sub32rr(dst, src2);
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_imm() {
            if dst.id() == src1.id() {
                self.asm.sub32ri(dst, src2);
            } else {
                self.x86_lea32(
                    dst,
                    ptr32(
                        src1.as_::<Gpq>(),
                        (src2.as_::<Imm>().value() as i32).wrapping_neg(),
                    ),
                );
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_mem() {
            if dst.id() == src1.id() {
                self.asm.sub32rm(dst, src2);
            } else {
                self.asm.mov32rr(dst, src1);
                self.asm.sub32rm(dst, src2);
            }
        } else if dst.is_mem() && src1.is_gp() && src2.is_gp() {
            self.asm.mov32mr(dst, src1);
            self.asm.sub32mr(dst, src2);
        } else if dst.is_mem() && src1.is_gp() && src2.is_imm() {
            self.asm.mov32mr(dst, src1);
            self.asm.sub32mi(dst, src2);
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vsubssrrr(dst, src1, src2);
            } else {
                // B := A - B is invalid, so stash src2 before overwriting dst.
                if src1.id() != dst.id() && src2.id() == dst.id() {
                    self.mov32(Self::FP_TEMP_REGISTER, src2);
                    self.mov64(dst, src1);
                    self.asm.sse_subssrr(dst, Self::FP_TEMP_REGISTER);
                } else {
                    self.mov64(dst, src1);
                    self.asm.sse_subssrr(dst, src2);
                }
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vsubssrrm(dst, src1, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_subssrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR or RRI expected");
        }
    }

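    /// Three-operand 64-bit subtract, `dst = src1 - src2`, handling the cases
    /// where `dst` aliases either source; vector forms use scalar-double
    /// `subsd` (with [`Self::FP_TEMP_REGISTER`] on the SSE path when `dst`
    /// aliases `src2`).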
    pub fn sub64(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            if dst.id() == src1.id() {
                self.asm.sub64rr(dst, src2);
            } else if dst.id() == src2.id() {
                // dst aliases src2: compute src1 - src2 as src1 + (-src2).
                self.neg64(dst);
                self.add64(dst, dst, src1);
            } else {
                self.mov64(dst, src1);
                self.asm.sub64rr(dst, src2);
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_imm() {
            if dst.id() == src1.id() {
                self.asm.sub64ri(dst, src2);
            } else {
                self.x86_lea64(
                    dst,
                    ptr64(
                        src1.as_::<Gpq>(),
                        (src2.as_::<Imm>().value() as i32).wrapping_neg(),
                    ),
                );
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_mem() {
            if dst.id() == src1.id() {
                self.asm.sub64rm(dst, src2);
            } else {
                self.asm.mov64rr(dst, src1);
                self.asm.sub64rm(dst, src2);
            }
        } else if dst.is_mem() && src1.is_gp() && src2.is_gp() {
            self.asm.mov64mr(dst, src1);
            self.asm.sub64mr(dst, src2);
        } else if dst.is_mem() && src1.is_gp() && src2.is_imm() {
            self.asm.mov64mr(dst, src1);
            self.asm.sub64mi(dst, src2);
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vsubsdrrr(dst, src1, src2);
            } else {
                // B := A - B is invalid, so stash src2 before overwriting dst.
                if src1.id() != dst.id() && src2.id() == dst.id() {
                    self.mov64(Self::FP_TEMP_REGISTER, src2);
                    self.mov64(dst, src1);
                    self.asm.sse_subsdrr(dst, Self::FP_TEMP_REGISTER);
                } else {
                    self.mov64(dst, src1);
                    self.asm.sse_subsdrr(dst, src2);
                }
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vsubsdrrm(dst, src1, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_subsdrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR or RRI expected");
        }
    }

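    /// Negates a 32-bit GP register or memory operand in place.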
    pub fn neg32(&mut self, srcdst: impl OperandCast) {
        let srcdst = *srcdst.as_operand();
        if srcdst.is_gp() {
            self.asm.neg32r(srcdst);
        } else if srcdst.is_mem() {
            self.asm.neg32m(srcdst);
        } else {
            self.unsupported_operands("GP or MEM expected");
        }
    }

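    /// Negates a 64-bit GP register or memory operand in place.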
    pub fn neg64(&mut self, srcdst: impl OperandCast) {
        let srcdst = *srcdst.as_operand();
        if srcdst.is_gp() {
            self.asm.neg64r(srcdst);
        } else if srcdst.is_mem() {
            self.asm.neg64m(srcdst);
        } else {
            self.unsupported_operands("GP or MEM expected");
        }
    }

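    /// Three-operand 64-bit multiply, `dst = src1 * src2`, using `imul` for GP
    /// operands and scalar-double `mulsd` for vector operands.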
    pub fn mul64(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            if dst.id() == src1.id() {
                self.asm.imul64rr(dst, src2);
            } else if dst.id() == src2.id() {
                self.asm.imul64rr(dst, src1);
            } else {
                self.mov64(dst, src1);
                self.asm.imul64rr(dst, src2);
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_mem() {
            self.mov64(dst, src1);
            self.asm.imul64rm(dst, src2);
        } else if dst.is_gp() && src1.is_mem() && src2.is_gp() {
            self.mov64(dst, src2);
            self.asm.imul64rm(dst, src1);
        } else if dst.is_gp() && src1.is_gp() && src2.is_imm() {
            if dst.id() == src1.id() {
                // dst aliases src1: stage the immediate in the scratch register
                // so the multiplicand is not overwritten.
                self.asm.mov64ri(Self::SCRATCH_REGISTER, src2);
                self.asm.imul64rr(dst, Self::SCRATCH_REGISTER);
            } else {
                self.asm.mov64ri(dst, src2);
                self.asm.imul64rr(dst, src1);
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vmulsdrrr(dst, src1, src2);
            } else if dst.id() == src1.id() {
                self.asm.sse_mulsdrr(dst, src2);
            } else if dst.id() == src2.id() {
                // Multiplication commutes, so fold src1 into dst instead.
                self.asm.sse_mulsdrr(dst, src1);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_mulsdrr(dst, src2);
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vmulsdrrm(dst, src1, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_mulsdrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR expected");
        }
    }

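    /// Three-operand 32-bit multiply, `dst = src1 * src2`, using `imul` for GP
    /// operands and scalar-single `mulss` for vector operands.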
    pub fn mul32(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            if dst.id() == src1.id() {
                self.asm.imul32rr(dst, src2);
            } else if dst.id() == src2.id() {
                self.asm.imul32rr(dst, src1);
            } else {
                self.mov32(dst, src1);
                self.asm.imul32rr(dst, src2);
            }
        } else if dst.is_gp() && src1.is_gp() && src2.is_mem() {
            self.mov32(dst, src1);
            self.asm.imul32rm(dst, src2);
        } else if dst.is_gp() && src1.is_mem() && src2.is_gp() {
            self.mov32(dst, src2);
            self.asm.imul32rm(dst, src1);
        } else if dst.is_gp() && src1.is_gp() && src2.is_imm() {
            if dst.id() == src1.id() {
                // dst aliases src1: stage the immediate in the scratch register
                // so the multiplicand is not overwritten.
                self.asm.mov32ri(Self::SCRATCH_REGISTER, src2);
                self.asm.imul32rr(dst, Self::SCRATCH_REGISTER);
            } else {
                self.asm.mov32ri(dst, src2);
                self.asm.imul32rr(dst, src1);
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vmulssrrr(dst, src1, src2);
            } else if dst.id() == src1.id() {
                self.asm.sse_mulssrr(dst, src2);
            } else if dst.id() == src2.id() {
                // Multiplication commutes, so fold src1 into dst instead.
                self.asm.sse_mulssrr(dst, src1);
            } else {
                self.mov32(dst, src1);
                self.asm.sse_mulssrr(dst, src2);
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vmulssrrm(dst, src1, src2);
            } else {
                self.mov32(dst, src1);
                self.asm.sse_mulssrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR expected");
        }
    }

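    /// Three-operand 64-bit signed division, `dst = src1 / src2`. GP forms go
    /// through `cqo`/`idiv` and therefore clobber RAX and RDX; vector forms
    /// use scalar-double `divsd`.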
    pub fn div64(&mut self, dst: impl OperandCast, src1: impl OperandCast, src2: impl OperandCast) {
        let dst = *dst.as_operand();
        let src1 = *src1.as_operand();
        let src2 = *src2.as_operand();

        if dst.is_gp() && src1.is_gp() && src2.is_gp() {
            // cqo sign-extends RAX into RDX, so RAX and RDX are clobbered here.
            self.mov64(RAX, src1);
            self.asm.cqo();
            self.asm.idiv64r(src2);
            self.mov64(dst, RAX);
        } else if dst.is_gp() && src1.is_gp() && src2.is_mem() {
            self.mov64(RAX, src1);
            self.asm.cqo();
            self.asm.idiv64m(src2);
            self.mov64(dst, RAX);
        } else if dst.is_gp() && src1.is_mem() && src2.is_gp() {
            self.asm.mov64rm(RAX, src1);
            self.asm.cqo();
            self.asm.idiv64r(src2);
            self.mov64(dst, RAX);
        } else if dst.is_vec() && src1.is_vec() && src2.is_vec() {
            if self.supports_avx() {
                self.asm.vdivsdrrr(dst, src1, src2);
            } else {
                // B := A / B is invalid, so stash src2 before overwriting dst.
                if src1.id() != dst.id() && src2.id() == dst.id() {
                    self.mov64(Self::FP_TEMP_REGISTER, src2);
                    self.mov64(dst, src1);
                    self.asm.sse_divsdrr(dst, Self::FP_TEMP_REGISTER);
                } else {
                    self.mov64(dst, src1);
                    self.asm.sse_divsdrr(dst, src2);
                }
            }
        } else if dst.is_vec() && src1.is_vec() && src2.is_mem() {
            if self.supports_avx() {
                self.asm.vdivsdrrm(dst, src1, src2);
            } else {
                self.mov64(dst, src1);
                self.asm.sse_divsdrm(dst, src2);
            }
        } else {
            self.unsupported_operands("RRR or VVV expected");
        }
    }
}