1use super::opcodes::*;
2use crate::core::emitter::*;
3use crate::core::operand::*;
4
5pub trait EmitterExplicit: Emitter {
8 fn add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9 self.emit_n(
10 Opcode::ADD as i64,
11 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
12 )
13 }
14
15 fn add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
16 self.emit_n(
17 Opcode::ADDUW as i64,
18 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
19 )
20 }
21
22 fn addi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
23 self.emit_n(
24 Opcode::ADDI as i64,
25 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
26 )
27 }
28
29 fn addiw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
30 self.emit_n(
31 Opcode::ADDIW as i64,
32 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
33 )
34 }
35
36 fn addw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
37 self.emit_n(
38 Opcode::ADDW as i64,
39 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
40 )
41 }
42
43 fn aes32dsi(
44 &mut self,
45 op0: impl OperandCast,
46 op1: impl OperandCast,
47 op2: impl OperandCast,
48 op3: impl OperandCast,
49 ) {
50 self.emit_n(
51 Opcode::AES32DSI as i64,
52 &[
53 op0.as_operand(),
54 op1.as_operand(),
55 op2.as_operand(),
56 op3.as_operand(),
57 ],
58 )
59 }
60
61 fn aes32dsmi(
62 &mut self,
63 op0: impl OperandCast,
64 op1: impl OperandCast,
65 op2: impl OperandCast,
66 op3: impl OperandCast,
67 ) {
68 self.emit_n(
69 Opcode::AES32DSMI as i64,
70 &[
71 op0.as_operand(),
72 op1.as_operand(),
73 op2.as_operand(),
74 op3.as_operand(),
75 ],
76 )
77 }
78
79 fn aes32esi(
80 &mut self,
81 op0: impl OperandCast,
82 op1: impl OperandCast,
83 op2: impl OperandCast,
84 op3: impl OperandCast,
85 ) {
86 self.emit_n(
87 Opcode::AES32ESI as i64,
88 &[
89 op0.as_operand(),
90 op1.as_operand(),
91 op2.as_operand(),
92 op3.as_operand(),
93 ],
94 )
95 }
96
97 fn aes32esmi(
98 &mut self,
99 op0: impl OperandCast,
100 op1: impl OperandCast,
101 op2: impl OperandCast,
102 op3: impl OperandCast,
103 ) {
104 self.emit_n(
105 Opcode::AES32ESMI as i64,
106 &[
107 op0.as_operand(),
108 op1.as_operand(),
109 op2.as_operand(),
110 op3.as_operand(),
111 ],
112 )
113 }
114
115 fn aes64ds(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
116 self.emit_n(
117 Opcode::AES64DS as i64,
118 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
119 )
120 }
121
122 fn aes64dsm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
123 self.emit_n(
124 Opcode::AES64DSM as i64,
125 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
126 )
127 }
128
129 fn aes64es(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
130 self.emit_n(
131 Opcode::AES64ES as i64,
132 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
133 )
134 }
135
136 fn aes64esm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
137 self.emit_n(
138 Opcode::AES64ESM as i64,
139 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
140 )
141 }
142
143 fn aes64im(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
144 self.emit_n(
145 Opcode::AES64IM as i64,
146 &[op0.as_operand(), op1.as_operand()],
147 )
148 }
149
150 fn aes64ks1i(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
151 self.emit_n(
152 Opcode::AES64KS1I as i64,
153 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
154 )
155 }
156
157 fn aes64ks2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
158 self.emit_n(
159 Opcode::AES64KS2 as i64,
160 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
161 )
162 }
163
164 fn amoadd_b(
165 &mut self,
166 op0: impl OperandCast,
167 op1: impl OperandCast,
168 op2: impl OperandCast,
169 op3: impl OperandCast,
170 op4: impl OperandCast,
171 ) {
172 self.emit_n(
173 Opcode::AMOADDB as i64,
174 &[
175 op0.as_operand(),
176 op1.as_operand(),
177 op2.as_operand(),
178 op3.as_operand(),
179 op4.as_operand(),
180 ],
181 )
182 }
183
184 fn amoadd_d(
185 &mut self,
186 op0: impl OperandCast,
187 op1: impl OperandCast,
188 op2: impl OperandCast,
189 op3: impl OperandCast,
190 op4: impl OperandCast,
191 ) {
192 self.emit_n(
193 Opcode::AMOADDD as i64,
194 &[
195 op0.as_operand(),
196 op1.as_operand(),
197 op2.as_operand(),
198 op3.as_operand(),
199 op4.as_operand(),
200 ],
201 )
202 }
203
204 fn amoadd_h(
205 &mut self,
206 op0: impl OperandCast,
207 op1: impl OperandCast,
208 op2: impl OperandCast,
209 op3: impl OperandCast,
210 op4: impl OperandCast,
211 ) {
212 self.emit_n(
213 Opcode::AMOADDH as i64,
214 &[
215 op0.as_operand(),
216 op1.as_operand(),
217 op2.as_operand(),
218 op3.as_operand(),
219 op4.as_operand(),
220 ],
221 )
222 }
223
224 fn amoadd_w(
225 &mut self,
226 op0: impl OperandCast,
227 op1: impl OperandCast,
228 op2: impl OperandCast,
229 op3: impl OperandCast,
230 op4: impl OperandCast,
231 ) {
232 self.emit_n(
233 Opcode::AMOADDW as i64,
234 &[
235 op0.as_operand(),
236 op1.as_operand(),
237 op2.as_operand(),
238 op3.as_operand(),
239 op4.as_operand(),
240 ],
241 )
242 }
243
244 fn amoand_b(
245 &mut self,
246 op0: impl OperandCast,
247 op1: impl OperandCast,
248 op2: impl OperandCast,
249 op3: impl OperandCast,
250 op4: impl OperandCast,
251 ) {
252 self.emit_n(
253 Opcode::AMOANDB as i64,
254 &[
255 op0.as_operand(),
256 op1.as_operand(),
257 op2.as_operand(),
258 op3.as_operand(),
259 op4.as_operand(),
260 ],
261 )
262 }
263
264 fn amoand_d(
265 &mut self,
266 op0: impl OperandCast,
267 op1: impl OperandCast,
268 op2: impl OperandCast,
269 op3: impl OperandCast,
270 op4: impl OperandCast,
271 ) {
272 self.emit_n(
273 Opcode::AMOANDD as i64,
274 &[
275 op0.as_operand(),
276 op1.as_operand(),
277 op2.as_operand(),
278 op3.as_operand(),
279 op4.as_operand(),
280 ],
281 )
282 }
283
284 fn amoand_h(
285 &mut self,
286 op0: impl OperandCast,
287 op1: impl OperandCast,
288 op2: impl OperandCast,
289 op3: impl OperandCast,
290 op4: impl OperandCast,
291 ) {
292 self.emit_n(
293 Opcode::AMOANDH as i64,
294 &[
295 op0.as_operand(),
296 op1.as_operand(),
297 op2.as_operand(),
298 op3.as_operand(),
299 op4.as_operand(),
300 ],
301 )
302 }
303
304 fn amoand_w(
305 &mut self,
306 op0: impl OperandCast,
307 op1: impl OperandCast,
308 op2: impl OperandCast,
309 op3: impl OperandCast,
310 op4: impl OperandCast,
311 ) {
312 self.emit_n(
313 Opcode::AMOANDW as i64,
314 &[
315 op0.as_operand(),
316 op1.as_operand(),
317 op2.as_operand(),
318 op3.as_operand(),
319 op4.as_operand(),
320 ],
321 )
322 }
323
324 fn amocas_b(
325 &mut self,
326 op0: impl OperandCast,
327 op1: impl OperandCast,
328 op2: impl OperandCast,
329 op3: impl OperandCast,
330 op4: impl OperandCast,
331 ) {
332 self.emit_n(
333 Opcode::AMOCASB as i64,
334 &[
335 op0.as_operand(),
336 op1.as_operand(),
337 op2.as_operand(),
338 op3.as_operand(),
339 op4.as_operand(),
340 ],
341 )
342 }
343
344 fn amocas_d(
345 &mut self,
346 op0: impl OperandCast,
347 op1: impl OperandCast,
348 op2: impl OperandCast,
349 op3: impl OperandCast,
350 op4: impl OperandCast,
351 ) {
352 self.emit_n(
353 Opcode::AMOCASD as i64,
354 &[
355 op0.as_operand(),
356 op1.as_operand(),
357 op2.as_operand(),
358 op3.as_operand(),
359 op4.as_operand(),
360 ],
361 )
362 }
363
364 fn amocas_h(
365 &mut self,
366 op0: impl OperandCast,
367 op1: impl OperandCast,
368 op2: impl OperandCast,
369 op3: impl OperandCast,
370 op4: impl OperandCast,
371 ) {
372 self.emit_n(
373 Opcode::AMOCASH as i64,
374 &[
375 op0.as_operand(),
376 op1.as_operand(),
377 op2.as_operand(),
378 op3.as_operand(),
379 op4.as_operand(),
380 ],
381 )
382 }
383
384 fn amocas_q(
385 &mut self,
386 op0: impl OperandCast,
387 op1: impl OperandCast,
388 op2: impl OperandCast,
389 op3: impl OperandCast,
390 op4: impl OperandCast,
391 ) {
392 self.emit_n(
393 Opcode::AMOCASQ as i64,
394 &[
395 op0.as_operand(),
396 op1.as_operand(),
397 op2.as_operand(),
398 op3.as_operand(),
399 op4.as_operand(),
400 ],
401 )
402 }
403
404 fn amocas_w(
405 &mut self,
406 op0: impl OperandCast,
407 op1: impl OperandCast,
408 op2: impl OperandCast,
409 op3: impl OperandCast,
410 op4: impl OperandCast,
411 ) {
412 self.emit_n(
413 Opcode::AMOCASW as i64,
414 &[
415 op0.as_operand(),
416 op1.as_operand(),
417 op2.as_operand(),
418 op3.as_operand(),
419 op4.as_operand(),
420 ],
421 )
422 }
423
424 fn amomax_b(
425 &mut self,
426 op0: impl OperandCast,
427 op1: impl OperandCast,
428 op2: impl OperandCast,
429 op3: impl OperandCast,
430 op4: impl OperandCast,
431 ) {
432 self.emit_n(
433 Opcode::AMOMAXB as i64,
434 &[
435 op0.as_operand(),
436 op1.as_operand(),
437 op2.as_operand(),
438 op3.as_operand(),
439 op4.as_operand(),
440 ],
441 )
442 }
443
444 fn amomax_d(
445 &mut self,
446 op0: impl OperandCast,
447 op1: impl OperandCast,
448 op2: impl OperandCast,
449 op3: impl OperandCast,
450 op4: impl OperandCast,
451 ) {
452 self.emit_n(
453 Opcode::AMOMAXD as i64,
454 &[
455 op0.as_operand(),
456 op1.as_operand(),
457 op2.as_operand(),
458 op3.as_operand(),
459 op4.as_operand(),
460 ],
461 )
462 }
463
464 fn amomax_h(
465 &mut self,
466 op0: impl OperandCast,
467 op1: impl OperandCast,
468 op2: impl OperandCast,
469 op3: impl OperandCast,
470 op4: impl OperandCast,
471 ) {
472 self.emit_n(
473 Opcode::AMOMAXH as i64,
474 &[
475 op0.as_operand(),
476 op1.as_operand(),
477 op2.as_operand(),
478 op3.as_operand(),
479 op4.as_operand(),
480 ],
481 )
482 }
483
484 fn amomax_w(
485 &mut self,
486 op0: impl OperandCast,
487 op1: impl OperandCast,
488 op2: impl OperandCast,
489 op3: impl OperandCast,
490 op4: impl OperandCast,
491 ) {
492 self.emit_n(
493 Opcode::AMOMAXW as i64,
494 &[
495 op0.as_operand(),
496 op1.as_operand(),
497 op2.as_operand(),
498 op3.as_operand(),
499 op4.as_operand(),
500 ],
501 )
502 }
503
504 fn amomaxu_b(
505 &mut self,
506 op0: impl OperandCast,
507 op1: impl OperandCast,
508 op2: impl OperandCast,
509 op3: impl OperandCast,
510 op4: impl OperandCast,
511 ) {
512 self.emit_n(
513 Opcode::AMOMAXUB as i64,
514 &[
515 op0.as_operand(),
516 op1.as_operand(),
517 op2.as_operand(),
518 op3.as_operand(),
519 op4.as_operand(),
520 ],
521 )
522 }
523
524 fn amomaxu_d(
525 &mut self,
526 op0: impl OperandCast,
527 op1: impl OperandCast,
528 op2: impl OperandCast,
529 op3: impl OperandCast,
530 op4: impl OperandCast,
531 ) {
532 self.emit_n(
533 Opcode::AMOMAXUD as i64,
534 &[
535 op0.as_operand(),
536 op1.as_operand(),
537 op2.as_operand(),
538 op3.as_operand(),
539 op4.as_operand(),
540 ],
541 )
542 }
543
544 fn amomaxu_h(
545 &mut self,
546 op0: impl OperandCast,
547 op1: impl OperandCast,
548 op2: impl OperandCast,
549 op3: impl OperandCast,
550 op4: impl OperandCast,
551 ) {
552 self.emit_n(
553 Opcode::AMOMAXUH as i64,
554 &[
555 op0.as_operand(),
556 op1.as_operand(),
557 op2.as_operand(),
558 op3.as_operand(),
559 op4.as_operand(),
560 ],
561 )
562 }
563
564 fn amomaxu_w(
565 &mut self,
566 op0: impl OperandCast,
567 op1: impl OperandCast,
568 op2: impl OperandCast,
569 op3: impl OperandCast,
570 op4: impl OperandCast,
571 ) {
572 self.emit_n(
573 Opcode::AMOMAXUW as i64,
574 &[
575 op0.as_operand(),
576 op1.as_operand(),
577 op2.as_operand(),
578 op3.as_operand(),
579 op4.as_operand(),
580 ],
581 )
582 }
583
584 fn amomin_b(
585 &mut self,
586 op0: impl OperandCast,
587 op1: impl OperandCast,
588 op2: impl OperandCast,
589 op3: impl OperandCast,
590 op4: impl OperandCast,
591 ) {
592 self.emit_n(
593 Opcode::AMOMINB as i64,
594 &[
595 op0.as_operand(),
596 op1.as_operand(),
597 op2.as_operand(),
598 op3.as_operand(),
599 op4.as_operand(),
600 ],
601 )
602 }
603
604 fn amomin_d(
605 &mut self,
606 op0: impl OperandCast,
607 op1: impl OperandCast,
608 op2: impl OperandCast,
609 op3: impl OperandCast,
610 op4: impl OperandCast,
611 ) {
612 self.emit_n(
613 Opcode::AMOMIND as i64,
614 &[
615 op0.as_operand(),
616 op1.as_operand(),
617 op2.as_operand(),
618 op3.as_operand(),
619 op4.as_operand(),
620 ],
621 )
622 }
623
624 fn amomin_h(
625 &mut self,
626 op0: impl OperandCast,
627 op1: impl OperandCast,
628 op2: impl OperandCast,
629 op3: impl OperandCast,
630 op4: impl OperandCast,
631 ) {
632 self.emit_n(
633 Opcode::AMOMINH as i64,
634 &[
635 op0.as_operand(),
636 op1.as_operand(),
637 op2.as_operand(),
638 op3.as_operand(),
639 op4.as_operand(),
640 ],
641 )
642 }
643
644 fn amomin_w(
645 &mut self,
646 op0: impl OperandCast,
647 op1: impl OperandCast,
648 op2: impl OperandCast,
649 op3: impl OperandCast,
650 op4: impl OperandCast,
651 ) {
652 self.emit_n(
653 Opcode::AMOMINW as i64,
654 &[
655 op0.as_operand(),
656 op1.as_operand(),
657 op2.as_operand(),
658 op3.as_operand(),
659 op4.as_operand(),
660 ],
661 )
662 }
663
664 fn amominu_b(
665 &mut self,
666 op0: impl OperandCast,
667 op1: impl OperandCast,
668 op2: impl OperandCast,
669 op3: impl OperandCast,
670 op4: impl OperandCast,
671 ) {
672 self.emit_n(
673 Opcode::AMOMINUB as i64,
674 &[
675 op0.as_operand(),
676 op1.as_operand(),
677 op2.as_operand(),
678 op3.as_operand(),
679 op4.as_operand(),
680 ],
681 )
682 }
683
684 fn amominu_d(
685 &mut self,
686 op0: impl OperandCast,
687 op1: impl OperandCast,
688 op2: impl OperandCast,
689 op3: impl OperandCast,
690 op4: impl OperandCast,
691 ) {
692 self.emit_n(
693 Opcode::AMOMINUD as i64,
694 &[
695 op0.as_operand(),
696 op1.as_operand(),
697 op2.as_operand(),
698 op3.as_operand(),
699 op4.as_operand(),
700 ],
701 )
702 }
703
704 fn amominu_h(
705 &mut self,
706 op0: impl OperandCast,
707 op1: impl OperandCast,
708 op2: impl OperandCast,
709 op3: impl OperandCast,
710 op4: impl OperandCast,
711 ) {
712 self.emit_n(
713 Opcode::AMOMINUH as i64,
714 &[
715 op0.as_operand(),
716 op1.as_operand(),
717 op2.as_operand(),
718 op3.as_operand(),
719 op4.as_operand(),
720 ],
721 )
722 }
723
724 fn amominu_w(
725 &mut self,
726 op0: impl OperandCast,
727 op1: impl OperandCast,
728 op2: impl OperandCast,
729 op3: impl OperandCast,
730 op4: impl OperandCast,
731 ) {
732 self.emit_n(
733 Opcode::AMOMINUW as i64,
734 &[
735 op0.as_operand(),
736 op1.as_operand(),
737 op2.as_operand(),
738 op3.as_operand(),
739 op4.as_operand(),
740 ],
741 )
742 }
743
744 fn amoor_b(
745 &mut self,
746 op0: impl OperandCast,
747 op1: impl OperandCast,
748 op2: impl OperandCast,
749 op3: impl OperandCast,
750 op4: impl OperandCast,
751 ) {
752 self.emit_n(
753 Opcode::AMOORB as i64,
754 &[
755 op0.as_operand(),
756 op1.as_operand(),
757 op2.as_operand(),
758 op3.as_operand(),
759 op4.as_operand(),
760 ],
761 )
762 }
763
764 fn amoor_d(
765 &mut self,
766 op0: impl OperandCast,
767 op1: impl OperandCast,
768 op2: impl OperandCast,
769 op3: impl OperandCast,
770 op4: impl OperandCast,
771 ) {
772 self.emit_n(
773 Opcode::AMOORD as i64,
774 &[
775 op0.as_operand(),
776 op1.as_operand(),
777 op2.as_operand(),
778 op3.as_operand(),
779 op4.as_operand(),
780 ],
781 )
782 }
783
784 fn amoor_h(
785 &mut self,
786 op0: impl OperandCast,
787 op1: impl OperandCast,
788 op2: impl OperandCast,
789 op3: impl OperandCast,
790 op4: impl OperandCast,
791 ) {
792 self.emit_n(
793 Opcode::AMOORH as i64,
794 &[
795 op0.as_operand(),
796 op1.as_operand(),
797 op2.as_operand(),
798 op3.as_operand(),
799 op4.as_operand(),
800 ],
801 )
802 }
803
804 fn amoor_w(
805 &mut self,
806 op0: impl OperandCast,
807 op1: impl OperandCast,
808 op2: impl OperandCast,
809 op3: impl OperandCast,
810 op4: impl OperandCast,
811 ) {
812 self.emit_n(
813 Opcode::AMOORW as i64,
814 &[
815 op0.as_operand(),
816 op1.as_operand(),
817 op2.as_operand(),
818 op3.as_operand(),
819 op4.as_operand(),
820 ],
821 )
822 }
823
824 fn amoswap_b(
825 &mut self,
826 op0: impl OperandCast,
827 op1: impl OperandCast,
828 op2: impl OperandCast,
829 op3: impl OperandCast,
830 op4: impl OperandCast,
831 ) {
832 self.emit_n(
833 Opcode::AMOSWAPB as i64,
834 &[
835 op0.as_operand(),
836 op1.as_operand(),
837 op2.as_operand(),
838 op3.as_operand(),
839 op4.as_operand(),
840 ],
841 )
842 }
843
844 fn amoswap_d(
845 &mut self,
846 op0: impl OperandCast,
847 op1: impl OperandCast,
848 op2: impl OperandCast,
849 op3: impl OperandCast,
850 op4: impl OperandCast,
851 ) {
852 self.emit_n(
853 Opcode::AMOSWAPD as i64,
854 &[
855 op0.as_operand(),
856 op1.as_operand(),
857 op2.as_operand(),
858 op3.as_operand(),
859 op4.as_operand(),
860 ],
861 )
862 }
863
864 fn amoswap_h(
865 &mut self,
866 op0: impl OperandCast,
867 op1: impl OperandCast,
868 op2: impl OperandCast,
869 op3: impl OperandCast,
870 op4: impl OperandCast,
871 ) {
872 self.emit_n(
873 Opcode::AMOSWAPH as i64,
874 &[
875 op0.as_operand(),
876 op1.as_operand(),
877 op2.as_operand(),
878 op3.as_operand(),
879 op4.as_operand(),
880 ],
881 )
882 }
883
884 fn amoswap_w(
885 &mut self,
886 op0: impl OperandCast,
887 op1: impl OperandCast,
888 op2: impl OperandCast,
889 op3: impl OperandCast,
890 op4: impl OperandCast,
891 ) {
892 self.emit_n(
893 Opcode::AMOSWAPW as i64,
894 &[
895 op0.as_operand(),
896 op1.as_operand(),
897 op2.as_operand(),
898 op3.as_operand(),
899 op4.as_operand(),
900 ],
901 )
902 }
903
904 fn amoxor_b(
905 &mut self,
906 op0: impl OperandCast,
907 op1: impl OperandCast,
908 op2: impl OperandCast,
909 op3: impl OperandCast,
910 op4: impl OperandCast,
911 ) {
912 self.emit_n(
913 Opcode::AMOXORB as i64,
914 &[
915 op0.as_operand(),
916 op1.as_operand(),
917 op2.as_operand(),
918 op3.as_operand(),
919 op4.as_operand(),
920 ],
921 )
922 }
923
924 fn amoxor_d(
925 &mut self,
926 op0: impl OperandCast,
927 op1: impl OperandCast,
928 op2: impl OperandCast,
929 op3: impl OperandCast,
930 op4: impl OperandCast,
931 ) {
932 self.emit_n(
933 Opcode::AMOXORD as i64,
934 &[
935 op0.as_operand(),
936 op1.as_operand(),
937 op2.as_operand(),
938 op3.as_operand(),
939 op4.as_operand(),
940 ],
941 )
942 }
943
944 fn amoxor_h(
945 &mut self,
946 op0: impl OperandCast,
947 op1: impl OperandCast,
948 op2: impl OperandCast,
949 op3: impl OperandCast,
950 op4: impl OperandCast,
951 ) {
952 self.emit_n(
953 Opcode::AMOXORH as i64,
954 &[
955 op0.as_operand(),
956 op1.as_operand(),
957 op2.as_operand(),
958 op3.as_operand(),
959 op4.as_operand(),
960 ],
961 )
962 }
963
964 fn amoxor_w(
965 &mut self,
966 op0: impl OperandCast,
967 op1: impl OperandCast,
968 op2: impl OperandCast,
969 op3: impl OperandCast,
970 op4: impl OperandCast,
971 ) {
972 self.emit_n(
973 Opcode::AMOXORW as i64,
974 &[
975 op0.as_operand(),
976 op1.as_operand(),
977 op2.as_operand(),
978 op3.as_operand(),
979 op4.as_operand(),
980 ],
981 )
982 }
983
984 fn and(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
985 self.emit_n(
986 Opcode::AND as i64,
987 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
988 )
989 }
990
991 fn andi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
992 self.emit_n(
993 Opcode::ANDI as i64,
994 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
995 )
996 }
997
998 fn andn(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
999 self.emit_n(
1000 Opcode::ANDN as i64,
1001 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1002 )
1003 }
1004
1005 fn auipc(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1006 self.emit_n(Opcode::AUIPC as i64, &[op0.as_operand(), op1.as_operand()])
1007 }
1008
1009 fn bclr(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1010 self.emit_n(
1011 Opcode::BCLR as i64,
1012 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1013 )
1014 }
1015
1016 fn bclri(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1017 self.emit_n(
1018 Opcode::BCLRI as i64,
1019 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1020 )
1021 }
1022
1023 fn bclri_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1024 self.emit_n(
1025 Opcode::BCLRIRV32 as i64,
1026 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1027 )
1028 }
1029
1030 fn beq(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1031 self.emit_n(
1032 Opcode::BEQ as i64,
1033 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1034 )
1035 }
1036
1037 fn beqz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1038 self.emit_n(Opcode::BEQZ as i64, &[op0.as_operand(), op1.as_operand()])
1039 }
1040
1041 fn bext(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1042 self.emit_n(
1043 Opcode::BEXT as i64,
1044 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1045 )
1046 }
1047
1048 fn bexti(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1049 self.emit_n(
1050 Opcode::BEXTI as i64,
1051 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1052 )
1053 }
1054
1055 fn bexti_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1056 self.emit_n(
1057 Opcode::BEXTIRV32 as i64,
1058 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1059 )
1060 }
1061
1062 fn bge(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1063 self.emit_n(
1064 Opcode::BGE as i64,
1065 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1066 )
1067 }
1068
1069 fn bgeu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1070 self.emit_n(
1071 Opcode::BGEU as i64,
1072 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1073 )
1074 }
1075
1076 fn bgez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1077 self.emit_n(Opcode::BGEZ as i64, &[op0.as_operand(), op1.as_operand()])
1078 }
1079
1080 fn bgt(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1081 self.emit_n(
1082 Opcode::BGT as i64,
1083 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1084 )
1085 }
1086
1087 fn bgtu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1088 self.emit_n(
1089 Opcode::BGTU as i64,
1090 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1091 )
1092 }
1093
1094 fn bgtz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1095 self.emit_n(Opcode::BGTZ as i64, &[op0.as_operand(), op1.as_operand()])
1096 }
1097
1098 fn binv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1099 self.emit_n(
1100 Opcode::BINV as i64,
1101 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1102 )
1103 }
1104
1105 fn binvi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1106 self.emit_n(
1107 Opcode::BINVI as i64,
1108 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1109 )
1110 }
1111
1112 fn binvi_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1113 self.emit_n(
1114 Opcode::BINVIRV32 as i64,
1115 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1116 )
1117 }
1118
1119 fn ble(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1120 self.emit_n(
1121 Opcode::BLE as i64,
1122 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1123 )
1124 }
1125
1126 fn bleu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1127 self.emit_n(
1128 Opcode::BLEU as i64,
1129 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1130 )
1131 }
1132
1133 fn blez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1134 self.emit_n(Opcode::BLEZ as i64, &[op0.as_operand(), op1.as_operand()])
1135 }
1136
1137 fn blt(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1138 self.emit_n(
1139 Opcode::BLT as i64,
1140 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1141 )
1142 }
1143
1144 fn bltu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1145 self.emit_n(
1146 Opcode::BLTU as i64,
1147 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1148 )
1149 }
1150
1151 fn bltz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1152 self.emit_n(Opcode::BLTZ as i64, &[op0.as_operand(), op1.as_operand()])
1153 }
1154
1155 fn bne(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1156 self.emit_n(
1157 Opcode::BNE as i64,
1158 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1159 )
1160 }
1161
1162 fn bnez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1163 self.emit_n(Opcode::BNEZ as i64, &[op0.as_operand(), op1.as_operand()])
1164 }
1165
1166 fn brev8(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1167 self.emit_n(Opcode::BREV8 as i64, &[op0.as_operand(), op1.as_operand()])
1168 }
1169
1170 fn bset(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1171 self.emit_n(
1172 Opcode::BSET as i64,
1173 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1174 )
1175 }
1176
1177 fn bseti(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1178 self.emit_n(
1179 Opcode::BSETI as i64,
1180 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1181 )
1182 }
1183
1184 fn bseti_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1185 self.emit_n(
1186 Opcode::BSETIRV32 as i64,
1187 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1188 )
1189 }
1190
1191 fn c_add(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1192 self.emit_n(Opcode::CADD as i64, &[op0.as_operand(), op1.as_operand()])
1193 }
1194
1195 fn c_addi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1196 self.emit_n(Opcode::CADDI as i64, &[op0.as_operand(), op1.as_operand()])
1197 }
1198
1199 fn c_addi16sp(&mut self, op0: impl OperandCast) {
1200 self.emit_n(Opcode::CADDI16SP as i64, &[op0.as_operand()])
1201 }
1202
1203 fn c_addi4spn(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1204 self.emit_n(
1205 Opcode::CADDI4SPN as i64,
1206 &[op0.as_operand(), op1.as_operand()],
1207 )
1208 }
1209
1210 fn c_addiw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1211 self.emit_n(Opcode::CADDIW as i64, &[op0.as_operand(), op1.as_operand()])
1212 }
1213
1214 fn c_addw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1215 self.emit_n(Opcode::CADDW as i64, &[op0.as_operand(), op1.as_operand()])
1216 }
1217
1218 fn c_and(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1219 self.emit_n(Opcode::CAND as i64, &[op0.as_operand(), op1.as_operand()])
1220 }
1221
1222 fn c_andi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1223 self.emit_n(Opcode::CANDI as i64, &[op0.as_operand(), op1.as_operand()])
1224 }
1225
1226 fn c_beqz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1227 self.emit_n(Opcode::CBEQZ as i64, &[op0.as_operand(), op1.as_operand()])
1228 }
1229
1230 fn c_bnez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1231 self.emit_n(Opcode::CBNEZ as i64, &[op0.as_operand(), op1.as_operand()])
1232 }
1233
1234 fn c_ebreak(&mut self) {
1235 self.emit_n(Opcode::CEBREAK as i64, &[])
1236 }
1237
1238 fn c_fld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1239 self.emit_n(
1240 Opcode::CFLD as i64,
1241 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1242 )
1243 }
1244
1245 fn c_fldsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1246 self.emit_n(Opcode::CFLDSP as i64, &[op0.as_operand(), op1.as_operand()])
1247 }
1248
1249 fn c_flw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1250 self.emit_n(
1251 Opcode::CFLW as i64,
1252 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1253 )
1254 }
1255
1256 fn c_flwsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1257 self.emit_n(Opcode::CFLWSP as i64, &[op0.as_operand(), op1.as_operand()])
1258 }
1259
1260 fn c_fsd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1261 self.emit_n(
1262 Opcode::CFSD as i64,
1263 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1264 )
1265 }
1266
1267 fn c_fsdsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1268 self.emit_n(Opcode::CFSDSP as i64, &[op0.as_operand(), op1.as_operand()])
1269 }
1270
1271 fn c_fsw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1272 self.emit_n(
1273 Opcode::CFSW as i64,
1274 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1275 )
1276 }
1277
1278 fn c_fswsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1279 self.emit_n(Opcode::CFSWSP as i64, &[op0.as_operand(), op1.as_operand()])
1280 }
1281
1282 fn c_j(&mut self, op0: impl OperandCast) {
1283 self.emit_n(Opcode::CJ as i64, &[op0.as_operand()])
1284 }
1285
1286 fn c_jal(&mut self, op0: impl OperandCast) {
1287 self.emit_n(Opcode::CJAL as i64, &[op0.as_operand()])
1288 }
1289
1290 fn c_jalr(&mut self, op0: impl OperandCast) {
1291 self.emit_n(Opcode::CJALR as i64, &[op0.as_operand()])
1292 }
1293
1294 fn c_jr(&mut self, op0: impl OperandCast) {
1295 self.emit_n(Opcode::CJR as i64, &[op0.as_operand()])
1296 }
1297
1298 fn c_lbu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1299 self.emit_n(
1300 Opcode::CLBU as i64,
1301 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1302 )
1303 }
1304
1305 fn c_ld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1306 self.emit_n(
1307 Opcode::CLD as i64,
1308 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1309 )
1310 }
1311
1312 fn c_ldsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1313 self.emit_n(Opcode::CLDSP as i64, &[op0.as_operand(), op1.as_operand()])
1314 }
1315
1316 fn c_lh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1317 self.emit_n(
1318 Opcode::CLH as i64,
1319 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1320 )
1321 }
1322
1323 fn c_lhu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1324 self.emit_n(
1325 Opcode::CLHU as i64,
1326 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1327 )
1328 }
1329
1330 fn c_li(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1331 self.emit_n(Opcode::CLI as i64, &[op0.as_operand(), op1.as_operand()])
1332 }
1333
1334 fn c_lui(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1335 self.emit_n(Opcode::CLUI as i64, &[op0.as_operand(), op1.as_operand()])
1336 }
1337
1338 fn c_lw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1339 self.emit_n(
1340 Opcode::CLW as i64,
1341 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1342 )
1343 }
1344
1345 fn c_lwsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1346 self.emit_n(Opcode::CLWSP as i64, &[op0.as_operand(), op1.as_operand()])
1347 }
1348
1349 fn c_mop_1(&mut self) {
1350 self.emit_n(Opcode::CMOP1 as i64, &[])
1351 }
1352
1353 fn c_mop_11(&mut self) {
1354 self.emit_n(Opcode::CMOP11 as i64, &[])
1355 }
1356
1357 fn c_mop_13(&mut self) {
1358 self.emit_n(Opcode::CMOP13 as i64, &[])
1359 }
1360
1361 fn c_mop_15(&mut self) {
1362 self.emit_n(Opcode::CMOP15 as i64, &[])
1363 }
1364
1365 fn c_mop_3(&mut self) {
1366 self.emit_n(Opcode::CMOP3 as i64, &[])
1367 }
1368
1369 fn c_mop_5(&mut self) {
1370 self.emit_n(Opcode::CMOP5 as i64, &[])
1371 }
1372
1373 fn c_mop_7(&mut self) {
1374 self.emit_n(Opcode::CMOP7 as i64, &[])
1375 }
1376
1377 fn c_mop_9(&mut self) {
1378 self.emit_n(Opcode::CMOP9 as i64, &[])
1379 }
1380
1381 fn c_mop_n(&mut self, op0: impl OperandCast) {
1382 self.emit_n(Opcode::CMOPN as i64, &[op0.as_operand()])
1383 }
1384
1385 fn c_mul(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1386 self.emit_n(Opcode::CMUL as i64, &[op0.as_operand(), op1.as_operand()])
1387 }
1388
1389 fn c_mv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1390 self.emit_n(Opcode::CMV as i64, &[op0.as_operand(), op1.as_operand()])
1391 }
1392
1393 fn c_nop(&mut self, op0: impl OperandCast) {
1394 self.emit_n(Opcode::CNOP as i64, &[op0.as_operand()])
1395 }
1396
1397 fn c_not(&mut self, op0: impl OperandCast) {
1398 self.emit_n(Opcode::CNOT as i64, &[op0.as_operand()])
1399 }
1400
1401 fn c_ntl_all(&mut self) {
1402 self.emit_n(Opcode::CNTLALL as i64, &[])
1403 }
1404
1405 fn c_ntl_p1(&mut self) {
1406 self.emit_n(Opcode::CNTLP1 as i64, &[])
1407 }
1408
1409 fn c_ntl_pall(&mut self) {
1410 self.emit_n(Opcode::CNTLPALL as i64, &[])
1411 }
1412
1413 fn c_ntl_s1(&mut self) {
1414 self.emit_n(Opcode::CNTLS1 as i64, &[])
1415 }
1416
1417 fn c_or(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1418 self.emit_n(Opcode::COR as i64, &[op0.as_operand(), op1.as_operand()])
1419 }
1420
1421 fn c_sb(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1422 self.emit_n(
1423 Opcode::CSB as i64,
1424 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1425 )
1426 }
1427
1428 fn c_sd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1429 self.emit_n(
1430 Opcode::CSD as i64,
1431 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1432 )
1433 }
1434
1435 fn c_sdsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1436 self.emit_n(Opcode::CSDSP as i64, &[op0.as_operand(), op1.as_operand()])
1437 }
1438
1439 fn c_sext_b(&mut self, op0: impl OperandCast) {
1440 self.emit_n(Opcode::CSEXTB as i64, &[op0.as_operand()])
1441 }
1442
1443 fn c_sext_h(&mut self, op0: impl OperandCast) {
1444 self.emit_n(Opcode::CSEXTH as i64, &[op0.as_operand()])
1445 }
1446
1447 fn c_sext_w(&mut self, op0: impl OperandCast) {
1448 self.emit_n(Opcode::CSEXTW as i64, &[op0.as_operand()])
1449 }
1450
1451 fn c_sh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1452 self.emit_n(
1453 Opcode::CSH as i64,
1454 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1455 )
1456 }
1457
1458 fn c_slli(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1459 self.emit_n(Opcode::CSLLI as i64, &[op0.as_operand(), op1.as_operand()])
1460 }
1461
1462 fn c_slli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1463 self.emit_n(
1464 Opcode::CSLLIRV32 as i64,
1465 &[op0.as_operand(), op1.as_operand()],
1466 )
1467 }
1468
1469 fn c_srai(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1470 self.emit_n(Opcode::CSRAI as i64, &[op0.as_operand(), op1.as_operand()])
1471 }
1472
1473 fn c_srai_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1474 self.emit_n(
1475 Opcode::CSRAIRV32 as i64,
1476 &[op0.as_operand(), op1.as_operand()],
1477 )
1478 }
1479
1480 fn c_srli(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1481 self.emit_n(Opcode::CSRLI as i64, &[op0.as_operand(), op1.as_operand()])
1482 }
1483
1484 fn c_srli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1485 self.emit_n(
1486 Opcode::CSRLIRV32 as i64,
1487 &[op0.as_operand(), op1.as_operand()],
1488 )
1489 }
1490
1491 fn c_sub(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1492 self.emit_n(Opcode::CSUB as i64, &[op0.as_operand(), op1.as_operand()])
1493 }
1494
1495 fn c_subw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1496 self.emit_n(Opcode::CSUBW as i64, &[op0.as_operand(), op1.as_operand()])
1497 }
1498
1499 fn c_sw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1500 self.emit_n(
1501 Opcode::CSW as i64,
1502 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1503 )
1504 }
1505
1506 fn c_swsp(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1507 self.emit_n(Opcode::CSWSP as i64, &[op0.as_operand(), op1.as_operand()])
1508 }
1509
1510 fn c_xor(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1511 self.emit_n(Opcode::CXOR as i64, &[op0.as_operand(), op1.as_operand()])
1512 }
1513
1514 fn c_zext_b(&mut self, op0: impl OperandCast) {
1515 self.emit_n(Opcode::CZEXTB as i64, &[op0.as_operand()])
1516 }
1517
1518 fn c_zext_h(&mut self, op0: impl OperandCast) {
1519 self.emit_n(Opcode::CZEXTH as i64, &[op0.as_operand()])
1520 }
1521
1522 fn c_zext_w(&mut self, op0: impl OperandCast) {
1523 self.emit_n(Opcode::CZEXTW as i64, &[op0.as_operand()])
1524 }
1525
1526 fn cbo_clean(&mut self, op0: impl OperandCast) {
1527 self.emit_n(Opcode::CBOCLEAN as i64, &[op0.as_operand()])
1528 }
1529
1530 fn cbo_flush(&mut self, op0: impl OperandCast) {
1531 self.emit_n(Opcode::CBOFLUSH as i64, &[op0.as_operand()])
1532 }
1533
1534 fn cbo_inval(&mut self, op0: impl OperandCast) {
1535 self.emit_n(Opcode::CBOINVAL as i64, &[op0.as_operand()])
1536 }
1537
1538 fn cbo_zero(&mut self, op0: impl OperandCast) {
1539 self.emit_n(Opcode::CBOZERO as i64, &[op0.as_operand()])
1540 }
1541
1542 fn clmul(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1543 self.emit_n(
1544 Opcode::CLMUL as i64,
1545 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1546 )
1547 }
1548
1549 fn clmulh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1550 self.emit_n(
1551 Opcode::CLMULH as i64,
1552 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1553 )
1554 }
1555
1556 fn clmulr(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1557 self.emit_n(
1558 Opcode::CLMULR as i64,
1559 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1560 )
1561 }
1562
1563 fn clz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1564 self.emit_n(Opcode::CLZ as i64, &[op0.as_operand(), op1.as_operand()])
1565 }
1566
1567 fn clzw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1568 self.emit_n(Opcode::CLZW as i64, &[op0.as_operand(), op1.as_operand()])
1569 }
1570
1571 fn cm_jalt(&mut self, op0: impl OperandCast) {
1572 self.emit_n(Opcode::CMJALT as i64, &[op0.as_operand()])
1573 }
1574
1575 fn cm_mva01s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1576 self.emit_n(
1577 Opcode::CMMVA01S as i64,
1578 &[op0.as_operand(), op1.as_operand()],
1579 )
1580 }
1581
1582 fn cm_mvsa01(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1583 self.emit_n(
1584 Opcode::CMMVSA01 as i64,
1585 &[op0.as_operand(), op1.as_operand()],
1586 )
1587 }
1588
1589 fn cm_pop(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1590 self.emit_n(Opcode::CMPOP as i64, &[op0.as_operand(), op1.as_operand()])
1591 }
1592
1593 fn cm_popret(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1594 self.emit_n(
1595 Opcode::CMPOPRET as i64,
1596 &[op0.as_operand(), op1.as_operand()],
1597 )
1598 }
1599
1600 fn cm_popretz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1601 self.emit_n(
1602 Opcode::CMPOPRETZ as i64,
1603 &[op0.as_operand(), op1.as_operand()],
1604 )
1605 }
1606
1607 fn cm_push(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1608 self.emit_n(Opcode::CMPUSH as i64, &[op0.as_operand(), op1.as_operand()])
1609 }
1610
1611 fn cpop(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1612 self.emit_n(Opcode::CPOP as i64, &[op0.as_operand(), op1.as_operand()])
1613 }
1614
1615 fn cpopw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1616 self.emit_n(Opcode::CPOPW as i64, &[op0.as_operand(), op1.as_operand()])
1617 }
1618
1619 fn csrc(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1620 self.emit_n(Opcode::CSRC as i64, &[op0.as_operand(), op1.as_operand()])
1621 }
1622
1623 fn csrci(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1624 self.emit_n(Opcode::CSRCI as i64, &[op0.as_operand(), op1.as_operand()])
1625 }
1626
1627 fn csrr(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1628 self.emit_n(Opcode::CSRR as i64, &[op0.as_operand(), op1.as_operand()])
1629 }
1630
1631 fn csrrc(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1632 self.emit_n(
1633 Opcode::CSRRC as i64,
1634 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1635 )
1636 }
1637
1638 fn csrrci(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1639 self.emit_n(
1640 Opcode::CSRRCI as i64,
1641 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1642 )
1643 }
1644
1645 fn csrrs(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1646 self.emit_n(
1647 Opcode::CSRRS as i64,
1648 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1649 )
1650 }
1651
1652 fn csrrsi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1653 self.emit_n(
1654 Opcode::CSRRSI as i64,
1655 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1656 )
1657 }
1658
1659 fn csrrw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1660 self.emit_n(
1661 Opcode::CSRRW as i64,
1662 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1663 )
1664 }
1665
1666 fn csrrwi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1667 self.emit_n(
1668 Opcode::CSRRWI as i64,
1669 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1670 )
1671 }
1672
1673 fn csrs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1674 self.emit_n(Opcode::CSRS as i64, &[op0.as_operand(), op1.as_operand()])
1675 }
1676
1677 fn csrsi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1678 self.emit_n(Opcode::CSRSI as i64, &[op0.as_operand(), op1.as_operand()])
1679 }
1680
1681 fn csrw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1682 self.emit_n(Opcode::CSRW as i64, &[op0.as_operand(), op1.as_operand()])
1683 }
1684
1685 fn csrwi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1686 self.emit_n(Opcode::CSRWI as i64, &[op0.as_operand(), op1.as_operand()])
1687 }
1688
1689 fn ctz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1690 self.emit_n(Opcode::CTZ as i64, &[op0.as_operand(), op1.as_operand()])
1691 }
1692
1693 fn ctzw(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1694 self.emit_n(Opcode::CTZW as i64, &[op0.as_operand(), op1.as_operand()])
1695 }
1696
1697 fn czero_eqz(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1698 self.emit_n(
1699 Opcode::CZEROEQZ as i64,
1700 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1701 )
1702 }
1703
1704 fn czero_nez(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1705 self.emit_n(
1706 Opcode::CZERONEZ as i64,
1707 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1708 )
1709 }
1710
1711 fn div(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1712 self.emit_n(
1713 Opcode::DIV as i64,
1714 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1715 )
1716 }
1717
1718 fn divu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1719 self.emit_n(
1720 Opcode::DIVU as i64,
1721 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1722 )
1723 }
1724
1725 fn divuw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1726 self.emit_n(
1727 Opcode::DIVUW as i64,
1728 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1729 )
1730 }
1731
1732 fn divw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1733 self.emit_n(
1734 Opcode::DIVW as i64,
1735 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1736 )
1737 }
1738
1739 fn dret(&mut self) {
1740 self.emit_n(Opcode::DRET as i64, &[])
1741 }
1742
1743 fn ebreak(&mut self) {
1744 self.emit_n(Opcode::EBREAK as i64, &[])
1745 }
1746
1747 fn ecall(&mut self) {
1748 self.emit_n(Opcode::ECALL as i64, &[])
1749 }
1750
1751 fn fabs_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1752 self.emit_n(
1753 Opcode::FABSD as i64,
1754 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1755 )
1756 }
1757
1758 fn fabs_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1759 self.emit_n(
1760 Opcode::FABSH as i64,
1761 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1762 )
1763 }
1764
1765 fn fabs_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1766 self.emit_n(
1767 Opcode::FABSQ as i64,
1768 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1769 )
1770 }
1771
1772 fn fabs_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1773 self.emit_n(
1774 Opcode::FABSS as i64,
1775 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1776 )
1777 }
1778
1779 fn fadd_d(
1780 &mut self,
1781 op0: impl OperandCast,
1782 op1: impl OperandCast,
1783 op2: impl OperandCast,
1784 op3: impl OperandCast,
1785 ) {
1786 self.emit_n(
1787 Opcode::FADDD as i64,
1788 &[
1789 op0.as_operand(),
1790 op1.as_operand(),
1791 op2.as_operand(),
1792 op3.as_operand(),
1793 ],
1794 )
1795 }
1796
1797 fn fadd_h(
1798 &mut self,
1799 op0: impl OperandCast,
1800 op1: impl OperandCast,
1801 op2: impl OperandCast,
1802 op3: impl OperandCast,
1803 ) {
1804 self.emit_n(
1805 Opcode::FADDH as i64,
1806 &[
1807 op0.as_operand(),
1808 op1.as_operand(),
1809 op2.as_operand(),
1810 op3.as_operand(),
1811 ],
1812 )
1813 }
1814
1815 fn fadd_q(
1816 &mut self,
1817 op0: impl OperandCast,
1818 op1: impl OperandCast,
1819 op2: impl OperandCast,
1820 op3: impl OperandCast,
1821 ) {
1822 self.emit_n(
1823 Opcode::FADDQ as i64,
1824 &[
1825 op0.as_operand(),
1826 op1.as_operand(),
1827 op2.as_operand(),
1828 op3.as_operand(),
1829 ],
1830 )
1831 }
1832
1833 fn fadd_s(
1834 &mut self,
1835 op0: impl OperandCast,
1836 op1: impl OperandCast,
1837 op2: impl OperandCast,
1838 op3: impl OperandCast,
1839 ) {
1840 self.emit_n(
1841 Opcode::FADDS as i64,
1842 &[
1843 op0.as_operand(),
1844 op1.as_operand(),
1845 op2.as_operand(),
1846 op3.as_operand(),
1847 ],
1848 )
1849 }
1850
1851 fn fclass_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1852 self.emit_n(
1853 Opcode::FCLASSD as i64,
1854 &[op0.as_operand(), op1.as_operand()],
1855 )
1856 }
1857
1858 fn fclass_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1859 self.emit_n(
1860 Opcode::FCLASSH as i64,
1861 &[op0.as_operand(), op1.as_operand()],
1862 )
1863 }
1864
1865 fn fclass_q(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1866 self.emit_n(
1867 Opcode::FCLASSQ as i64,
1868 &[op0.as_operand(), op1.as_operand()],
1869 )
1870 }
1871
1872 fn fclass_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
1873 self.emit_n(
1874 Opcode::FCLASSS as i64,
1875 &[op0.as_operand(), op1.as_operand()],
1876 )
1877 }
1878
1879 fn fcvt_d_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1880 self.emit_n(
1881 Opcode::FCVTDH as i64,
1882 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1883 )
1884 }
1885
1886 fn fcvt_d_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1887 self.emit_n(
1888 Opcode::FCVTDL as i64,
1889 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1890 )
1891 }
1892
1893 fn fcvt_d_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1894 self.emit_n(
1895 Opcode::FCVTDLU as i64,
1896 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1897 )
1898 }
1899
1900 fn fcvt_d_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1901 self.emit_n(
1902 Opcode::FCVTDQ as i64,
1903 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1904 )
1905 }
1906
1907 fn fcvt_d_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1908 self.emit_n(
1909 Opcode::FCVTDS as i64,
1910 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1911 )
1912 }
1913
1914 fn fcvt_d_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1915 self.emit_n(
1916 Opcode::FCVTDW as i64,
1917 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1918 )
1919 }
1920
1921 fn fcvt_d_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1922 self.emit_n(
1923 Opcode::FCVTDWU as i64,
1924 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1925 )
1926 }
1927
1928 fn fcvt_h_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1929 self.emit_n(
1930 Opcode::FCVTHD as i64,
1931 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1932 )
1933 }
1934
1935 fn fcvt_h_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1936 self.emit_n(
1937 Opcode::FCVTHL as i64,
1938 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1939 )
1940 }
1941
1942 fn fcvt_h_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1943 self.emit_n(
1944 Opcode::FCVTHLU as i64,
1945 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1946 )
1947 }
1948
1949 fn fcvt_h_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1950 self.emit_n(
1951 Opcode::FCVTHQ as i64,
1952 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1953 )
1954 }
1955
1956 fn fcvt_h_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1957 self.emit_n(
1958 Opcode::FCVTHS as i64,
1959 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1960 )
1961 }
1962
1963 fn fcvt_h_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1964 self.emit_n(
1965 Opcode::FCVTHW as i64,
1966 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1967 )
1968 }
1969
1970 fn fcvt_h_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1971 self.emit_n(
1972 Opcode::FCVTHWU as i64,
1973 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1974 )
1975 }
1976
1977 fn fcvt_l_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1978 self.emit_n(
1979 Opcode::FCVTLD as i64,
1980 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1981 )
1982 }
1983
1984 fn fcvt_l_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1985 self.emit_n(
1986 Opcode::FCVTLH as i64,
1987 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1988 )
1989 }
1990
1991 fn fcvt_l_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1992 self.emit_n(
1993 Opcode::FCVTLQ as i64,
1994 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
1995 )
1996 }
1997
1998 fn fcvt_l_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
1999 self.emit_n(
2000 Opcode::FCVTLS as i64,
2001 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2002 )
2003 }
2004
2005 fn fcvt_lu_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2006 self.emit_n(
2007 Opcode::FCVTLUD as i64,
2008 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2009 )
2010 }
2011
2012 fn fcvt_lu_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2013 self.emit_n(
2014 Opcode::FCVTLUH as i64,
2015 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2016 )
2017 }
2018
2019 fn fcvt_lu_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2020 self.emit_n(
2021 Opcode::FCVTLUQ as i64,
2022 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2023 )
2024 }
2025
2026 fn fcvt_lu_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2027 self.emit_n(
2028 Opcode::FCVTLUS as i64,
2029 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2030 )
2031 }
2032
2033 fn fcvt_q_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2034 self.emit_n(
2035 Opcode::FCVTQD as i64,
2036 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2037 )
2038 }
2039
2040 fn fcvt_q_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2041 self.emit_n(
2042 Opcode::FCVTQH as i64,
2043 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2044 )
2045 }
2046
2047 fn fcvt_q_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2048 self.emit_n(
2049 Opcode::FCVTQL as i64,
2050 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2051 )
2052 }
2053
2054 fn fcvt_q_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2055 self.emit_n(
2056 Opcode::FCVTQLU as i64,
2057 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2058 )
2059 }
2060
2061 fn fcvt_q_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2062 self.emit_n(
2063 Opcode::FCVTQS as i64,
2064 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2065 )
2066 }
2067
2068 fn fcvt_q_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2069 self.emit_n(
2070 Opcode::FCVTQW as i64,
2071 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2072 )
2073 }
2074
2075 fn fcvt_q_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2076 self.emit_n(
2077 Opcode::FCVTQWU as i64,
2078 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2079 )
2080 }
2081
2082 fn fcvt_s_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2083 self.emit_n(
2084 Opcode::FCVTSD as i64,
2085 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2086 )
2087 }
2088
2089 fn fcvt_s_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2090 self.emit_n(
2091 Opcode::FCVTSH as i64,
2092 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2093 )
2094 }
2095
2096 fn fcvt_s_l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2097 self.emit_n(
2098 Opcode::FCVTSL as i64,
2099 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2100 )
2101 }
2102
2103 fn fcvt_s_lu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2104 self.emit_n(
2105 Opcode::FCVTSLU as i64,
2106 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2107 )
2108 }
2109
2110 fn fcvt_s_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2111 self.emit_n(
2112 Opcode::FCVTSQ as i64,
2113 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2114 )
2115 }
2116
2117 fn fcvt_s_w(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2118 self.emit_n(
2119 Opcode::FCVTSW as i64,
2120 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2121 )
2122 }
2123
2124 fn fcvt_s_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2125 self.emit_n(
2126 Opcode::FCVTSWU as i64,
2127 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2128 )
2129 }
2130
2131 fn fcvt_w_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2132 self.emit_n(
2133 Opcode::FCVTWD as i64,
2134 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2135 )
2136 }
2137
2138 fn fcvt_w_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2139 self.emit_n(
2140 Opcode::FCVTWH as i64,
2141 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2142 )
2143 }
2144
2145 fn fcvt_w_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2146 self.emit_n(
2147 Opcode::FCVTWQ as i64,
2148 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2149 )
2150 }
2151
2152 fn fcvt_w_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2153 self.emit_n(
2154 Opcode::FCVTWS as i64,
2155 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2156 )
2157 }
2158
2159 fn fcvt_wu_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2160 self.emit_n(
2161 Opcode::FCVTWUD as i64,
2162 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2163 )
2164 }
2165
2166 fn fcvt_wu_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2167 self.emit_n(
2168 Opcode::FCVTWUH as i64,
2169 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2170 )
2171 }
2172
2173 fn fcvt_wu_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2174 self.emit_n(
2175 Opcode::FCVTWUQ as i64,
2176 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2177 )
2178 }
2179
2180 fn fcvt_wu_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
2181 self.emit_n(
2182 Opcode::FCVTWUS as i64,
2183 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
2184 )
2185 }
2186
2187 fn fcvtmod_w_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
2188 self.emit_n(
2189 Opcode::FCVTMODWD as i64,
2190 &[op0.as_operand(), op1.as_operand()],
2191 )
2192 }
2193
    fn fdiv_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FDIVD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fdiv_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FDIVH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fdiv_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FDIVQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fdiv_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FDIVS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

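    // fence takes five operands, which presumably map onto the fields of the FENCE
    // encoding (fm, predecessor set, successor set, rs1, rd); the exact ordering is
    // defined by the generated opcode table rather than by this trait.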
    fn fence(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FENCE as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fence_i(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FENCEI as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fence_tso(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::FENCETSO as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn feq_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FEQD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn feq_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FEQH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn feq_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FEQQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn feq_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FEQS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fle_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLED as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fle_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLEH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fle_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLEQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fle_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLES as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fleq_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLEQD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fleq_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLEQH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fleq_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLEQQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fleq_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLEQS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn flh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fli_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FLID as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fli_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FLIH as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fli_q(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FLIQ as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fli_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FLIS as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn flq(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn flt_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn flt_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn flt_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn flt_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fltq_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTQD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fltq_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTQH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fltq_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTQQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fltq_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLTQS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn flw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FLW as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

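    // The fused multiply-add family (fmadd_*, fmsub_*, fnmadd_*, fnmsub_*) uses five
    // operands; presumably rd, rs1, rs2, rs3 plus a trailing rounding-mode operand,
    // mirroring the R4-type encoding (an assumption based on the operand count).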
    fn fmadd_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMADDD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmadd_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMADDH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmadd_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMADDQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmadd_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMADDS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmax_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmax_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmax_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmax_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmaxm_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXMD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmaxm_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXMH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmaxm_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXMQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmaxm_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMAXMS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmin_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMIND as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmin_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmin_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmin_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fminm_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINMD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fminm_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINMH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fminm_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINMQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fminm_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMINMS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmsub_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMSUBD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmsub_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMSUBH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmsub_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMSUBQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmsub_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMSUBS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fmul_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMULD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fmul_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMULH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fmul_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMULQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fmul_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FMULS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fmv_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMVD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmv_d_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVDX as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMVH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmv_h_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVHX as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMVQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmv_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMVS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmv_s_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVSX as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_w_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVWX as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_x_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVXD as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_x_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVXH as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_x_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVXS as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmv_x_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVXW as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmvh_x_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVHXD as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmvh_x_q(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FMVHXQ as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fmvp_d_x(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMVPDX as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fmvp_q_x(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FMVPQX as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fneg_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FNEGD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fneg_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FNEGH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fneg_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FNEGQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fneg_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FNEGS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fnmadd_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMADDD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmadd_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMADDH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmadd_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMADDQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmadd_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMADDS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmsub_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMSUBD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmsub_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMSUBH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmsub_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMSUBQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn fnmsub_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FNMSUBS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn frcsr(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::FRCSR as i64, &[op0.as_operand()])
    }

    fn frflags(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::FRFLAGS as i64, &[op0.as_operand()])
    }

    fn fround_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fround_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fround_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fround_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn froundnx_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDNXD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn froundnx_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDNXH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn froundnx_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDNXQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn froundnx_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FROUNDNXS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn frrm(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::FRRM as i64, &[op0.as_operand()])
    }

    fn fscsr(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FSCSR as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fsd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsflags(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::FSFLAGS as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn fsflagsi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::FSFLAGSI as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn fsgnj_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnj_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnj_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnj_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjn_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJND as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjn_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJNH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjn_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJNQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjn_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJNS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjx_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJXD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjx_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJXH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjx_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJXQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsgnjx_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSGNJXS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsq(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsqrt_d(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSQRTD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsqrt_h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSQRTH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsqrt_q(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSQRTQ as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsqrt_s(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSQRTS as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn fsrm(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FSRM as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fsrmi(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::FSRMI as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn fsub_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FSUBD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fsub_h(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FSUBH as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fsub_q(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FSUBQ as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fsub_s(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::FSUBS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn fsw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::FSW as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

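    // Hypervisor-extension emitters: hfence_*/hinval_* flush or invalidate guest
    // address translations, and hlv_*/hlvx_*/hsv_* are the hypervisor virtual-machine
    // load/store instructions; the two operands are presumably rd/rs1 for loads and
    // rs2/rs1 for stores (assumption; the opcode table defines the exact layout).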
    fn hfence_gvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::HFENCEGVMA as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn hfence_vvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::HFENCEVVMA as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn hinval_gvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::HINVALGVMA as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn hinval_vvma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::HINVALVVMA as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn hlv_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVB as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlv_bu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVBU as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlv_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVD as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlv_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVH as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlv_hu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVHU as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlv_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVW as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlv_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVWU as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlvx_hu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVXHU as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hlvx_wu(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HLVXWU as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hsv_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HSVB as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hsv_d(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HSVD as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hsv_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HSVH as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn hsv_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::HSVW as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn j(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::J as i64, &[op0.as_operand()])
    }

    fn jal(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::JAL as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn jal_pseudo(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::JALPSEUDO as i64, &[op0.as_operand()])
    }

    fn jalr(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::JALR as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn jalr_pseudo(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::JALRPSEUDO as i64, &[op0.as_operand()])
    }

    fn jr(&mut self, op0: impl OperandCast) {
        self.emit_n(Opcode::JR as i64, &[op0.as_operand()])
    }

    fn lb(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LB as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn lbu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LBU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn ld(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LD as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn lh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn lhu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LHU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

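    // lr_*/sc_* (load-reserved/store-conditional, A extension) carry extra operands
    // beyond the register operands; presumably these are the aq/rl ordering bits.
    // This is an assumption inferred from the operand count, not stated here.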
    fn lr_d(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::LRD as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn lr_w(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::LRW as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn lui(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::LUI as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn lw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LW as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn lwu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::LWU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn max(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MAX as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn maxu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MAXU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn min(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MIN as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn minu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MINU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

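    // mop_r_0..=mop_r_31 and mop_rr_0..=mop_rr_7 emit the Zimop "may-be-operations".
    // mop_r_n / mop_rr_n are the parameterized forms that take the MOP number through
    // explicit operands; the exact operand layout is defined by the opcode table.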
    fn mop_r_0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR0 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR1 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_10(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR10 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_11(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR11 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_12(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR12 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_13(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR13 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_14(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR14 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_15(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR15 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_16(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR16 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_17(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR17 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_18(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR18 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_19(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR19 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_2(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR2 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_20(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR20 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_21(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR21 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_22(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR22 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_23(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR23 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_24(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR24 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_25(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR25 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_26(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR26 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_27(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR27 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_28(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR28 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_29(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR29 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_3(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR3 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_30(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR30 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_31(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR31 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_4(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR4 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_5(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR5 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_6(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR6 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_7(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR7 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_8(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR8 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_9(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MOPR9 as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn mop_r_n(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::MOPRN as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn mop_rr_0(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR0 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_1(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR1 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR2 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_3(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR3 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR4 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_5(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR5 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_6(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR6 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_7(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MOPRR7 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mop_rr_n(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::MOPRRN as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn mret(&mut self) {
        self.emit_n(Opcode::MRET as i64, &[])
    }

    fn mul(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MUL as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mulh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MULH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mulhsu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MULHSU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mulhu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MULHU as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mulw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::MULW as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn mv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::MV as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn neg(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::NEG as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn nop(&mut self) {
        self.emit_n(Opcode::NOP as i64, &[])
    }

    fn ntl_all(&mut self) {
        self.emit_n(Opcode::NTLALL as i64, &[])
    }

    fn ntl_p1(&mut self) {
        self.emit_n(Opcode::NTLP1 as i64, &[])
    }

    fn ntl_pall(&mut self) {
        self.emit_n(Opcode::NTLPALL as i64, &[])
    }

    fn ntl_s1(&mut self) {
        self.emit_n(Opcode::NTLS1 as i64, &[])
    }

    fn or(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::OR as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn orc_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::ORCB as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn ori(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::ORI as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn orn(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::ORN as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn pack(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::PACK as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn packh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::PACKH as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn packw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::PACKW as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn pause(&mut self) {
        self.emit_n(Opcode::PAUSE as i64, &[])
    }

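    // prefetch_i/prefetch_r/prefetch_w emit the Zicbop cache-block prefetch hints;
    // the two operands are presumably the offset and base register of the
    // `prefetch.{i,r,w} offset(base)` assembly form (assumption, order unverified).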
3969 fn prefetch_i(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
3970 self.emit_n(
3971 Opcode::PREFETCHI as i64,
3972 &[op0.as_operand(), op1.as_operand()],
3973 )
3974 }
3975
3976 fn prefetch_r(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
3977 self.emit_n(
3978 Opcode::PREFETCHR as i64,
3979 &[op0.as_operand(), op1.as_operand()],
3980 )
3981 }
3982
3983 fn prefetch_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
3984 self.emit_n(
3985 Opcode::PREFETCHW as i64,
3986 &[op0.as_operand(), op1.as_operand()],
3987 )
3988 }
3989
3990 fn rdcycle(&mut self, op0: impl OperandCast) {
3991 self.emit_n(Opcode::RDCYCLE as i64, &[op0.as_operand()])
3992 }
3993
3994 fn rdcycleh(&mut self, op0: impl OperandCast) {
3995 self.emit_n(Opcode::RDCYCLEH as i64, &[op0.as_operand()])
3996 }
3997
3998 fn rdinstret(&mut self, op0: impl OperandCast) {
3999 self.emit_n(Opcode::RDINSTRET as i64, &[op0.as_operand()])
4000 }
4001
4002 fn rdinstreth(&mut self, op0: impl OperandCast) {
4003 self.emit_n(Opcode::RDINSTRETH as i64, &[op0.as_operand()])
4004 }
4005
4006 fn rdtime(&mut self, op0: impl OperandCast) {
4007 self.emit_n(Opcode::RDTIME as i64, &[op0.as_operand()])
4008 }
4009
4010 fn rdtimeh(&mut self, op0: impl OperandCast) {
4011 self.emit_n(Opcode::RDTIMEH as i64, &[op0.as_operand()])
4012 }
4013
4014 fn rem(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4015 self.emit_n(
4016 Opcode::REM as i64,
4017 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4018 )
4019 }
4020
4021 fn remu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4022 self.emit_n(
4023 Opcode::REMU as i64,
4024 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4025 )
4026 }
4027
4028 fn remuw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4029 self.emit_n(
4030 Opcode::REMUW as i64,
4031 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4032 )
4033 }
4034
4035 fn remw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4036 self.emit_n(
4037 Opcode::REMW as i64,
4038 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4039 )
4040 }
4041
4042 fn ret(&mut self) {
4043 self.emit_n(Opcode::RET as i64, &[])
4044 }
4045
4046 fn rev8(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4047 self.emit_n(Opcode::REV8 as i64, &[op0.as_operand(), op1.as_operand()])
4048 }
4049
4050 fn rev8_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4051 self.emit_n(
4052 Opcode::REV8RV32 as i64,
4053 &[op0.as_operand(), op1.as_operand()],
4054 )
4055 }
4056
4057 fn rol(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4058 self.emit_n(
4059 Opcode::ROL as i64,
4060 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4061 )
4062 }
4063
4064 fn rolw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4065 self.emit_n(
4066 Opcode::ROLW as i64,
4067 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4068 )
4069 }
4070
4071 fn ror(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4072 self.emit_n(
4073 Opcode::ROR as i64,
4074 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4075 )
4076 }
4077
4078 fn rori(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4079 self.emit_n(
4080 Opcode::RORI as i64,
4081 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4082 )
4083 }
4084
4085 fn rori_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4086 self.emit_n(
4087 Opcode::RORIRV32 as i64,
4088 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4089 )
4090 }
4091
4092 fn roriw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4093 self.emit_n(
4094 Opcode::RORIW as i64,
4095 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4096 )
4097 }
4098
4099 fn rorw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4100 self.emit_n(
4101 Opcode::RORW as i64,
4102 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4103 )
4104 }
4105
4106 fn sb(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4107 self.emit_n(
4108 Opcode::SB as i64,
4109 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4110 )
4111 }
4112
4113 fn sbreak(&mut self) {
4114 self.emit_n(Opcode::SBREAK as i64, &[])
4115 }
4116
4117 fn sc_d(
4118 &mut self,
4119 op0: impl OperandCast,
4120 op1: impl OperandCast,
4121 op2: impl OperandCast,
4122 op3: impl OperandCast,
4123 op4: impl OperandCast,
4124 ) {
4125 self.emit_n(
4126 Opcode::SCD as i64,
4127 &[
4128 op0.as_operand(),
4129 op1.as_operand(),
4130 op2.as_operand(),
4131 op3.as_operand(),
4132 op4.as_operand(),
4133 ],
4134 )
4135 }
4136
4137 fn sc_w(
4138 &mut self,
4139 op0: impl OperandCast,
4140 op1: impl OperandCast,
4141 op2: impl OperandCast,
4142 op3: impl OperandCast,
4143 op4: impl OperandCast,
4144 ) {
4145 self.emit_n(
4146 Opcode::SCW as i64,
4147 &[
4148 op0.as_operand(),
4149 op1.as_operand(),
4150 op2.as_operand(),
4151 op3.as_operand(),
4152 op4.as_operand(),
4153 ],
4154 )
4155 }
4156
4157 fn scall(&mut self) {
4158 self.emit_n(Opcode::SCALL as i64, &[])
4159 }
4160
4161 fn sd(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4162 self.emit_n(
4163 Opcode::SD as i64,
4164 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4165 )
4166 }
4167
4168 fn seqz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4169 self.emit_n(Opcode::SEQZ as i64, &[op0.as_operand(), op1.as_operand()])
4170 }
4171
4172 fn sext_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4173 self.emit_n(Opcode::SEXTB as i64, &[op0.as_operand(), op1.as_operand()])
4174 }
4175
4176 fn sext_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4177 self.emit_n(Opcode::SEXTH as i64, &[op0.as_operand(), op1.as_operand()])
4178 }
4179
4180 fn sext_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4181 self.emit_n(Opcode::SEXTW as i64, &[op0.as_operand(), op1.as_operand()])
4182 }
4183
4184 fn sfence_inval_ir(&mut self) {
4185 self.emit_n(Opcode::SFENCEINVALIR as i64, &[])
4186 }
4187
4188 fn sfence_vma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4189 self.emit_n(
4190 Opcode::SFENCEVMA as i64,
4191 &[op0.as_operand(), op1.as_operand()],
4192 )
4193 }
4194
4195 fn sfence_w_inval(&mut self) {
4196 self.emit_n(Opcode::SFENCEWINVAL as i64, &[])
4197 }
4198
4199 fn sgtz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4200 self.emit_n(Opcode::SGTZ as i64, &[op0.as_operand(), op1.as_operand()])
4201 }
4202
4203 fn sh(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4204 self.emit_n(
4205 Opcode::SH as i64,
4206 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4207 )
4208 }
4209
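    // sh1add/sh2add/sh3add and their *_uw forms are the Zba address-generation
    // instructions: shNadd adds rs1 shifted left by N to rs2, and the *_uw variants
    // zero-extend the low 32 bits of rs1 first. As a hedged usage sketch (operand
    // names hypothetical, assuming operands follow assembly order), indexing an
    // 8-byte-element array might look like `e.sh3add(t0, a1, a0)` for t0 = a0 + (a1 << 3).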
4210 fn sh1add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4211 self.emit_n(
4212 Opcode::SH1ADD as i64,
4213 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4214 )
4215 }
4216
4217 fn sh1add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4218 self.emit_n(
4219 Opcode::SH1ADDUW as i64,
4220 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4221 )
4222 }
4223
4224 fn sh2add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4225 self.emit_n(
4226 Opcode::SH2ADD as i64,
4227 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4228 )
4229 }
4230
4231 fn sh2add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4232 self.emit_n(
4233 Opcode::SH2ADDUW as i64,
4234 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4235 )
4236 }
4237
4238 fn sh3add(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4239 self.emit_n(
4240 Opcode::SH3ADD as i64,
4241 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4242 )
4243 }
4244
4245 fn sh3add_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4246 self.emit_n(
4247 Opcode::SH3ADDUW as i64,
4248 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4249 )
4250 }
4251
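    // Zknh scalar SHA-2 helpers. The two-operand sha256*/sha512* forms compute the
    // sigma/sum functions directly; the three-operand sha512*{h,l,r} forms are the
    // RV32 variants that operate on 64-bit values split across register pairs.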
4252 fn sha256sig0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4253 self.emit_n(
4254 Opcode::SHA256SIG0 as i64,
4255 &[op0.as_operand(), op1.as_operand()],
4256 )
4257 }
4258
4259 fn sha256sig1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4260 self.emit_n(
4261 Opcode::SHA256SIG1 as i64,
4262 &[op0.as_operand(), op1.as_operand()],
4263 )
4264 }
4265
4266 fn sha256sum0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4267 self.emit_n(
4268 Opcode::SHA256SUM0 as i64,
4269 &[op0.as_operand(), op1.as_operand()],
4270 )
4271 }
4272
4273 fn sha256sum1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4274 self.emit_n(
4275 Opcode::SHA256SUM1 as i64,
4276 &[op0.as_operand(), op1.as_operand()],
4277 )
4278 }
4279
4280 fn sha512sig0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4281 self.emit_n(
4282 Opcode::SHA512SIG0 as i64,
4283 &[op0.as_operand(), op1.as_operand()],
4284 )
4285 }
4286
4287 fn sha512sig0h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4288 self.emit_n(
4289 Opcode::SHA512SIG0H as i64,
4290 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4291 )
4292 }
4293
4294 fn sha512sig0l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4295 self.emit_n(
4296 Opcode::SHA512SIG0L as i64,
4297 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4298 )
4299 }
4300
4301 fn sha512sig1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4302 self.emit_n(
4303 Opcode::SHA512SIG1 as i64,
4304 &[op0.as_operand(), op1.as_operand()],
4305 )
4306 }
4307
4308 fn sha512sig1h(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4309 self.emit_n(
4310 Opcode::SHA512SIG1H as i64,
4311 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4312 )
4313 }
4314
4315 fn sha512sig1l(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4316 self.emit_n(
4317 Opcode::SHA512SIG1L as i64,
4318 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4319 )
4320 }
4321
4322 fn sha512sum0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4323 self.emit_n(
4324 Opcode::SHA512SUM0 as i64,
4325 &[op0.as_operand(), op1.as_operand()],
4326 )
4327 }
4328
4329 fn sha512sum0r(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4330 self.emit_n(
4331 Opcode::SHA512SUM0R as i64,
4332 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4333 )
4334 }
4335
4336 fn sha512sum1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4337 self.emit_n(
4338 Opcode::SHA512SUM1 as i64,
4339 &[op0.as_operand(), op1.as_operand()],
4340 )
4341 }
4342
4343 fn sha512sum1r(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4344 self.emit_n(
4345 Opcode::SHA512SUM1R as i64,
4346 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4347 )
4348 }
4349
4350 fn sinval_vma(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4351 self.emit_n(
4352 Opcode::SINVALVMA as i64,
4353 &[op0.as_operand(), op1.as_operand()],
4354 )
4355 }
4356
4357 fn sll(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4358 self.emit_n(
4359 Opcode::SLL as i64,
4360 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4361 )
4362 }
4363
4364 fn slli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4365 self.emit_n(
4366 Opcode::SLLI as i64,
4367 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4368 )
4369 }
4370
4371 fn slli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4372 self.emit_n(
4373 Opcode::SLLIRV32 as i64,
4374 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4375 )
4376 }
4377
4378 fn slli_uw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4379 self.emit_n(
4380 Opcode::SLLIUW as i64,
4381 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4382 )
4383 }
4384
4385 fn slliw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4386 self.emit_n(
4387 Opcode::SLLIW as i64,
4388 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4389 )
4390 }
4391
4392 fn sllw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4393 self.emit_n(
4394 Opcode::SLLW as i64,
4395 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4396 )
4397 }
4398
4399 fn slt(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4400 self.emit_n(
4401 Opcode::SLT as i64,
4402 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4403 )
4404 }
4405
4406 fn slti(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4407 self.emit_n(
4408 Opcode::SLTI as i64,
4409 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4410 )
4411 }
4412
4413 fn sltiu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4414 self.emit_n(
4415 Opcode::SLTIU as i64,
4416 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4417 )
4418 }
4419
4420 fn sltu(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4421 self.emit_n(
4422 Opcode::SLTU as i64,
4423 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4424 )
4425 }
4426
4427 fn sltz(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4428 self.emit_n(Opcode::SLTZ as i64, &[op0.as_operand(), op1.as_operand()])
4429 }
4430
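    // Scalar SM3/SM4 crypto: sm3p0/sm3p1 are the Zksh permutations, and sm4ed/sm4ks
    // below are the Zksed instructions, whose fourth operand is the byte-select
    // immediate (assuming operands follow assembly order).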
4431 fn sm3p0(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4432 self.emit_n(Opcode::SM3P0 as i64, &[op0.as_operand(), op1.as_operand()])
4433 }
4434
4435 fn sm3p1(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4436 self.emit_n(Opcode::SM3P1 as i64, &[op0.as_operand(), op1.as_operand()])
4437 }
4438
4439 fn sm4ed(
4440 &mut self,
4441 op0: impl OperandCast,
4442 op1: impl OperandCast,
4443 op2: impl OperandCast,
4444 op3: impl OperandCast,
4445 ) {
4446 self.emit_n(
4447 Opcode::SM4ED as i64,
4448 &[
4449 op0.as_operand(),
4450 op1.as_operand(),
4451 op2.as_operand(),
4452 op3.as_operand(),
4453 ],
4454 )
4455 }
4456
4457 fn sm4ks(
4458 &mut self,
4459 op0: impl OperandCast,
4460 op1: impl OperandCast,
4461 op2: impl OperandCast,
4462 op3: impl OperandCast,
4463 ) {
4464 self.emit_n(
4465 Opcode::SM4KS as i64,
4466 &[
4467 op0.as_operand(),
4468 op1.as_operand(),
4469 op2.as_operand(),
4470 op3.as_operand(),
4471 ],
4472 )
4473 }
4474
4475 fn snez(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4476 self.emit_n(Opcode::SNEZ as i64, &[op0.as_operand(), op1.as_operand()])
4477 }
4478
4479 fn sra(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4480 self.emit_n(
4481 Opcode::SRA as i64,
4482 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4483 )
4484 }
4485
4486 fn srai(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4487 self.emit_n(
4488 Opcode::SRAI as i64,
4489 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4490 )
4491 }
4492
4493 fn srai_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4494 self.emit_n(
4495 Opcode::SRAIRV32 as i64,
4496 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4497 )
4498 }
4499
4500 fn sraiw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4501 self.emit_n(
4502 Opcode::SRAIW as i64,
4503 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4504 )
4505 }
4506
4507 fn sraw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4508 self.emit_n(
4509 Opcode::SRAW as i64,
4510 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4511 )
4512 }
4513
4514 fn sret(&mut self) {
4515 self.emit_n(Opcode::SRET as i64, &[])
4516 }
4517
4518 fn srl(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4519 self.emit_n(
4520 Opcode::SRL as i64,
4521 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4522 )
4523 }
4524
4525 fn srli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4526 self.emit_n(
4527 Opcode::SRLI as i64,
4528 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4529 )
4530 }
4531
4532 fn srli_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4533 self.emit_n(
4534 Opcode::SRLIRV32 as i64,
4535 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4536 )
4537 }
4538
4539 fn srliw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4540 self.emit_n(
4541 Opcode::SRLIW as i64,
4542 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4543 )
4544 }
4545
4546 fn srlw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4547 self.emit_n(
4548 Opcode::SRLW as i64,
4549 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4550 )
4551 }
4552
4553 fn sub(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4554 self.emit_n(
4555 Opcode::SUB as i64,
4556 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4557 )
4558 }
4559
4560 fn subw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4561 self.emit_n(
4562 Opcode::SUBW as i64,
4563 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4564 )
4565 }
4566
4567 fn sw(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4568 self.emit_n(
4569 Opcode::SW as i64,
4570 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4571 )
4572 }
4573
4574 fn unzip(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4575 self.emit_n(Opcode::UNZIP as i64, &[op0.as_operand(), op1.as_operand()])
4576 }
4577
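    // From here on the emitters cover the RVV vector instructions. vaadd/vaaddu and
    // vasub/vasubu are the fixed-point averaging add/subtract ops, which round
    // according to vxrm. The trailing operand of the four-operand forms is presumably
    // the vector mask; the exact operand order is not documented in this file.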
4578 fn vaadd_vv(
4579 &mut self,
4580 op0: impl OperandCast,
4581 op1: impl OperandCast,
4582 op2: impl OperandCast,
4583 op3: impl OperandCast,
4584 ) {
4585 self.emit_n(
4586 Opcode::VAADDVV as i64,
4587 &[
4588 op0.as_operand(),
4589 op1.as_operand(),
4590 op2.as_operand(),
4591 op3.as_operand(),
4592 ],
4593 )
4594 }
4595
4596 fn vaadd_vx(
4597 &mut self,
4598 op0: impl OperandCast,
4599 op1: impl OperandCast,
4600 op2: impl OperandCast,
4601 op3: impl OperandCast,
4602 ) {
4603 self.emit_n(
4604 Opcode::VAADDVX as i64,
4605 &[
4606 op0.as_operand(),
4607 op1.as_operand(),
4608 op2.as_operand(),
4609 op3.as_operand(),
4610 ],
4611 )
4612 }
4613
4614 fn vaaddu_vv(
4615 &mut self,
4616 op0: impl OperandCast,
4617 op1: impl OperandCast,
4618 op2: impl OperandCast,
4619 op3: impl OperandCast,
4620 ) {
4621 self.emit_n(
4622 Opcode::VAADDUVV as i64,
4623 &[
4624 op0.as_operand(),
4625 op1.as_operand(),
4626 op2.as_operand(),
4627 op3.as_operand(),
4628 ],
4629 )
4630 }
4631
4632 fn vaaddu_vx(
4633 &mut self,
4634 op0: impl OperandCast,
4635 op1: impl OperandCast,
4636 op2: impl OperandCast,
4637 op3: impl OperandCast,
4638 ) {
4639 self.emit_n(
4640 Opcode::VAADDUVX as i64,
4641 &[
4642 op0.as_operand(),
4643 op1.as_operand(),
4644 op2.as_operand(),
4645 op3.as_operand(),
4646 ],
4647 )
4648 }
4649
4650 fn vadc_vim(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4651 self.emit_n(
4652 Opcode::VADCVIM as i64,
4653 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4654 )
4655 }
4656
4657 fn vadc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4658 self.emit_n(
4659 Opcode::VADCVVM as i64,
4660 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4661 )
4662 }
4663
4664 fn vadc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4665 self.emit_n(
4666 Opcode::VADCVXM as i64,
4667 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4668 )
4669 }
4670
4671 fn vadd_vi(
4672 &mut self,
4673 op0: impl OperandCast,
4674 op1: impl OperandCast,
4675 op2: impl OperandCast,
4676 op3: impl OperandCast,
4677 ) {
4678 self.emit_n(
4679 Opcode::VADDVI as i64,
4680 &[
4681 op0.as_operand(),
4682 op1.as_operand(),
4683 op2.as_operand(),
4684 op3.as_operand(),
4685 ],
4686 )
4687 }
4688
4689 fn vadd_vv(
4690 &mut self,
4691 op0: impl OperandCast,
4692 op1: impl OperandCast,
4693 op2: impl OperandCast,
4694 op3: impl OperandCast,
4695 ) {
4696 self.emit_n(
4697 Opcode::VADDVV as i64,
4698 &[
4699 op0.as_operand(),
4700 op1.as_operand(),
4701 op2.as_operand(),
4702 op3.as_operand(),
4703 ],
4704 )
4705 }
4706
4707 fn vadd_vx(
4708 &mut self,
4709 op0: impl OperandCast,
4710 op1: impl OperandCast,
4711 op2: impl OperandCast,
4712 op3: impl OperandCast,
4713 ) {
4714 self.emit_n(
4715 Opcode::VADDVX as i64,
4716 &[
4717 op0.as_operand(),
4718 op1.as_operand(),
4719 op2.as_operand(),
4720 op3.as_operand(),
4721 ],
4722 )
4723 }
4724
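    // Zvkned vector AES: single-round encrypt/decrypt (vaesef/vaesem/vaesdf/vaesdm),
    // key-schedule steps (vaeskf1/vaeskf2) and round-zero add (vaesz). The .vv forms
    // use a round key per element group, while .vs broadcasts one scalar element group.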
4725 fn vaesdf_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4726 self.emit_n(
4727 Opcode::VAESDFVS as i64,
4728 &[op0.as_operand(), op1.as_operand()],
4729 )
4730 }
4731
4732 fn vaesdf_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4733 self.emit_n(
4734 Opcode::VAESDFVV as i64,
4735 &[op0.as_operand(), op1.as_operand()],
4736 )
4737 }
4738
4739 fn vaesdm_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4740 self.emit_n(
4741 Opcode::VAESDMVS as i64,
4742 &[op0.as_operand(), op1.as_operand()],
4743 )
4744 }
4745
4746 fn vaesdm_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4747 self.emit_n(
4748 Opcode::VAESDMVV as i64,
4749 &[op0.as_operand(), op1.as_operand()],
4750 )
4751 }
4752
4753 fn vaesef_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4754 self.emit_n(
4755 Opcode::VAESEFVS as i64,
4756 &[op0.as_operand(), op1.as_operand()],
4757 )
4758 }
4759
4760 fn vaesef_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4761 self.emit_n(
4762 Opcode::VAESEFVV as i64,
4763 &[op0.as_operand(), op1.as_operand()],
4764 )
4765 }
4766
4767 fn vaesem_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4768 self.emit_n(
4769 Opcode::VAESEMVS as i64,
4770 &[op0.as_operand(), op1.as_operand()],
4771 )
4772 }
4773
4774 fn vaesem_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4775 self.emit_n(
4776 Opcode::VAESEMVV as i64,
4777 &[op0.as_operand(), op1.as_operand()],
4778 )
4779 }
4780
4781 fn vaeskf1_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4782 self.emit_n(
4783 Opcode::VAESKF1VI as i64,
4784 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4785 )
4786 }
4787
4788 fn vaeskf2_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4789 self.emit_n(
4790 Opcode::VAESKF2VI as i64,
4791 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4792 )
4793 }
4794
4795 fn vaesz_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
4796 self.emit_n(
4797 Opcode::VAESZVS as i64,
4798 &[op0.as_operand(), op1.as_operand()],
4799 )
4800 }
4801
4802 fn vand_vi(
4803 &mut self,
4804 op0: impl OperandCast,
4805 op1: impl OperandCast,
4806 op2: impl OperandCast,
4807 op3: impl OperandCast,
4808 ) {
4809 self.emit_n(
4810 Opcode::VANDVI as i64,
4811 &[
4812 op0.as_operand(),
4813 op1.as_operand(),
4814 op2.as_operand(),
4815 op3.as_operand(),
4816 ],
4817 )
4818 }
4819
4820 fn vand_vv(
4821 &mut self,
4822 op0: impl OperandCast,
4823 op1: impl OperandCast,
4824 op2: impl OperandCast,
4825 op3: impl OperandCast,
4826 ) {
4827 self.emit_n(
4828 Opcode::VANDVV as i64,
4829 &[
4830 op0.as_operand(),
4831 op1.as_operand(),
4832 op2.as_operand(),
4833 op3.as_operand(),
4834 ],
4835 )
4836 }
4837
4838 fn vand_vx(
4839 &mut self,
4840 op0: impl OperandCast,
4841 op1: impl OperandCast,
4842 op2: impl OperandCast,
4843 op3: impl OperandCast,
4844 ) {
4845 self.emit_n(
4846 Opcode::VANDVX as i64,
4847 &[
4848 op0.as_operand(),
4849 op1.as_operand(),
4850 op2.as_operand(),
4851 op3.as_operand(),
4852 ],
4853 )
4854 }
4855
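    // Vector bit-manipulation and carryless multiply: vandn, vbrev8, vbrev, vclz,
    // vctz and vcpop.v come from Zvbb/Zvkb, vclmul/vclmulh from Zvbc, while vcpop.m
    // is the base-V mask population count.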
4856 fn vandn_vv(
4857 &mut self,
4858 op0: impl OperandCast,
4859 op1: impl OperandCast,
4860 op2: impl OperandCast,
4861 op3: impl OperandCast,
4862 ) {
4863 self.emit_n(
4864 Opcode::VANDNVV as i64,
4865 &[
4866 op0.as_operand(),
4867 op1.as_operand(),
4868 op2.as_operand(),
4869 op3.as_operand(),
4870 ],
4871 )
4872 }
4873
4874 fn vandn_vx(
4875 &mut self,
4876 op0: impl OperandCast,
4877 op1: impl OperandCast,
4878 op2: impl OperandCast,
4879 op3: impl OperandCast,
4880 ) {
4881 self.emit_n(
4882 Opcode::VANDNVX as i64,
4883 &[
4884 op0.as_operand(),
4885 op1.as_operand(),
4886 op2.as_operand(),
4887 op3.as_operand(),
4888 ],
4889 )
4890 }
4891
4892 fn vasub_vv(
4893 &mut self,
4894 op0: impl OperandCast,
4895 op1: impl OperandCast,
4896 op2: impl OperandCast,
4897 op3: impl OperandCast,
4898 ) {
4899 self.emit_n(
4900 Opcode::VASUBVV as i64,
4901 &[
4902 op0.as_operand(),
4903 op1.as_operand(),
4904 op2.as_operand(),
4905 op3.as_operand(),
4906 ],
4907 )
4908 }
4909
4910 fn vasub_vx(
4911 &mut self,
4912 op0: impl OperandCast,
4913 op1: impl OperandCast,
4914 op2: impl OperandCast,
4915 op3: impl OperandCast,
4916 ) {
4917 self.emit_n(
4918 Opcode::VASUBVX as i64,
4919 &[
4920 op0.as_operand(),
4921 op1.as_operand(),
4922 op2.as_operand(),
4923 op3.as_operand(),
4924 ],
4925 )
4926 }
4927
4928 fn vasubu_vv(
4929 &mut self,
4930 op0: impl OperandCast,
4931 op1: impl OperandCast,
4932 op2: impl OperandCast,
4933 op3: impl OperandCast,
4934 ) {
4935 self.emit_n(
4936 Opcode::VASUBUVV as i64,
4937 &[
4938 op0.as_operand(),
4939 op1.as_operand(),
4940 op2.as_operand(),
4941 op3.as_operand(),
4942 ],
4943 )
4944 }
4945
4946 fn vasubu_vx(
4947 &mut self,
4948 op0: impl OperandCast,
4949 op1: impl OperandCast,
4950 op2: impl OperandCast,
4951 op3: impl OperandCast,
4952 ) {
4953 self.emit_n(
4954 Opcode::VASUBUVX as i64,
4955 &[
4956 op0.as_operand(),
4957 op1.as_operand(),
4958 op2.as_operand(),
4959 op3.as_operand(),
4960 ],
4961 )
4962 }
4963
4964 fn vbrev8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4965 self.emit_n(
4966 Opcode::VBREV8V as i64,
4967 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4968 )
4969 }
4970
4971 fn vbrev_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
4972 self.emit_n(
4973 Opcode::VBREVV as i64,
4974 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
4975 )
4976 }
4977
4978 fn vclmul_vv(
4979 &mut self,
4980 op0: impl OperandCast,
4981 op1: impl OperandCast,
4982 op2: impl OperandCast,
4983 op3: impl OperandCast,
4984 ) {
4985 self.emit_n(
4986 Opcode::VCLMULVV as i64,
4987 &[
4988 op0.as_operand(),
4989 op1.as_operand(),
4990 op2.as_operand(),
4991 op3.as_operand(),
4992 ],
4993 )
4994 }
4995
4996 fn vclmul_vx(
4997 &mut self,
4998 op0: impl OperandCast,
4999 op1: impl OperandCast,
5000 op2: impl OperandCast,
5001 op3: impl OperandCast,
5002 ) {
5003 self.emit_n(
5004 Opcode::VCLMULVX as i64,
5005 &[
5006 op0.as_operand(),
5007 op1.as_operand(),
5008 op2.as_operand(),
5009 op3.as_operand(),
5010 ],
5011 )
5012 }
5013
5014 fn vclmulh_vv(
5015 &mut self,
5016 op0: impl OperandCast,
5017 op1: impl OperandCast,
5018 op2: impl OperandCast,
5019 op3: impl OperandCast,
5020 ) {
5021 self.emit_n(
5022 Opcode::VCLMULHVV as i64,
5023 &[
5024 op0.as_operand(),
5025 op1.as_operand(),
5026 op2.as_operand(),
5027 op3.as_operand(),
5028 ],
5029 )
5030 }
5031
5032 fn vclmulh_vx(
5033 &mut self,
5034 op0: impl OperandCast,
5035 op1: impl OperandCast,
5036 op2: impl OperandCast,
5037 op3: impl OperandCast,
5038 ) {
5039 self.emit_n(
5040 Opcode::VCLMULHVX as i64,
5041 &[
5042 op0.as_operand(),
5043 op1.as_operand(),
5044 op2.as_operand(),
5045 op3.as_operand(),
5046 ],
5047 )
5048 }
5049
5050 fn vclz_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5051 self.emit_n(
5052 Opcode::VCLZV as i64,
5053 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5054 )
5055 }
5056
5057 fn vcompress_vm(
5058 &mut self,
5059 op0: impl OperandCast,
5060 op1: impl OperandCast,
5061 op2: impl OperandCast,
5062 ) {
5063 self.emit_n(
5064 Opcode::VCOMPRESSVM as i64,
5065 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5066 )
5067 }
5068
5069 fn vcpop_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5070 self.emit_n(
5071 Opcode::VCPOPM as i64,
5072 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5073 )
5074 }
5075
5076 fn vcpop_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5077 self.emit_n(
5078 Opcode::VCPOPV as i64,
5079 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5080 )
5081 }
5082
5083 fn vctz_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5084 self.emit_n(
5085 Opcode::VCTZV as i64,
5086 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5087 )
5088 }
5089
5090 fn vdiv_vv(
5091 &mut self,
5092 op0: impl OperandCast,
5093 op1: impl OperandCast,
5094 op2: impl OperandCast,
5095 op3: impl OperandCast,
5096 ) {
5097 self.emit_n(
5098 Opcode::VDIVVV as i64,
5099 &[
5100 op0.as_operand(),
5101 op1.as_operand(),
5102 op2.as_operand(),
5103 op3.as_operand(),
5104 ],
5105 )
5106 }
5107
5108 fn vdiv_vx(
5109 &mut self,
5110 op0: impl OperandCast,
5111 op1: impl OperandCast,
5112 op2: impl OperandCast,
5113 op3: impl OperandCast,
5114 ) {
5115 self.emit_n(
5116 Opcode::VDIVVX as i64,
5117 &[
5118 op0.as_operand(),
5119 op1.as_operand(),
5120 op2.as_operand(),
5121 op3.as_operand(),
5122 ],
5123 )
5124 }
5125
5126 fn vdivu_vv(
5127 &mut self,
5128 op0: impl OperandCast,
5129 op1: impl OperandCast,
5130 op2: impl OperandCast,
5131 op3: impl OperandCast,
5132 ) {
5133 self.emit_n(
5134 Opcode::VDIVUVV as i64,
5135 &[
5136 op0.as_operand(),
5137 op1.as_operand(),
5138 op2.as_operand(),
5139 op3.as_operand(),
5140 ],
5141 )
5142 }
5143
5144 fn vdivu_vx(
5145 &mut self,
5146 op0: impl OperandCast,
5147 op1: impl OperandCast,
5148 op2: impl OperandCast,
5149 op3: impl OperandCast,
5150 ) {
5151 self.emit_n(
5152 Opcode::VDIVUVX as i64,
5153 &[
5154 op0.as_operand(),
5155 op1.as_operand(),
5156 op2.as_operand(),
5157 op3.as_operand(),
5158 ],
5159 )
5160 }
5161
5162 fn vfadd_vf(
5163 &mut self,
5164 op0: impl OperandCast,
5165 op1: impl OperandCast,
5166 op2: impl OperandCast,
5167 op3: impl OperandCast,
5168 ) {
5169 self.emit_n(
5170 Opcode::VFADDVF as i64,
5171 &[
5172 op0.as_operand(),
5173 op1.as_operand(),
5174 op2.as_operand(),
5175 op3.as_operand(),
5176 ],
5177 )
5178 }
5179
5180 fn vfadd_vv(
5181 &mut self,
5182 op0: impl OperandCast,
5183 op1: impl OperandCast,
5184 op2: impl OperandCast,
5185 op3: impl OperandCast,
5186 ) {
5187 self.emit_n(
5188 Opcode::VFADDVV as i64,
5189 &[
5190 op0.as_operand(),
5191 op1.as_operand(),
5192 op2.as_operand(),
5193 op3.as_operand(),
5194 ],
5195 )
5196 }
5197
5198 fn vfclass_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5199 self.emit_n(
5200 Opcode::VFCLASSV as i64,
5201 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5202 )
5203 }
5204
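    // Floating-point/integer conversions: the vfcvt_* family is single-width, the
    // vfncvt_* family (further below) narrows and the vfwcvt_* family widens. The
    // rtz variants round toward zero regardless of frm, and vfncvt_rod rounds to odd.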
5205 fn vfcvt_f_x_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5206 self.emit_n(
5207 Opcode::VFCVTFXV as i64,
5208 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5209 )
5210 }
5211
5212 fn vfcvt_f_xu_v(
5213 &mut self,
5214 op0: impl OperandCast,
5215 op1: impl OperandCast,
5216 op2: impl OperandCast,
5217 ) {
5218 self.emit_n(
5219 Opcode::VFCVTFXUV as i64,
5220 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5221 )
5222 }
5223
5224 fn vfcvt_rtz_x_f_v(
5225 &mut self,
5226 op0: impl OperandCast,
5227 op1: impl OperandCast,
5228 op2: impl OperandCast,
5229 ) {
5230 self.emit_n(
5231 Opcode::VFCVTRTZXFV as i64,
5232 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5233 )
5234 }
5235
5236 fn vfcvt_rtz_xu_f_v(
5237 &mut self,
5238 op0: impl OperandCast,
5239 op1: impl OperandCast,
5240 op2: impl OperandCast,
5241 ) {
5242 self.emit_n(
5243 Opcode::VFCVTRTZXUFV as i64,
5244 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5245 )
5246 }
5247
5248 fn vfcvt_x_f_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5249 self.emit_n(
5250 Opcode::VFCVTXFV as i64,
5251 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5252 )
5253 }
5254
5255 fn vfcvt_xu_f_v(
5256 &mut self,
5257 op0: impl OperandCast,
5258 op1: impl OperandCast,
5259 op2: impl OperandCast,
5260 ) {
5261 self.emit_n(
5262 Opcode::VFCVTXUFV as i64,
5263 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5264 )
5265 }
5266
5267 fn vfdiv_vf(
5268 &mut self,
5269 op0: impl OperandCast,
5270 op1: impl OperandCast,
5271 op2: impl OperandCast,
5272 op3: impl OperandCast,
5273 ) {
5274 self.emit_n(
5275 Opcode::VFDIVVF as i64,
5276 &[
5277 op0.as_operand(),
5278 op1.as_operand(),
5279 op2.as_operand(),
5280 op3.as_operand(),
5281 ],
5282 )
5283 }
5284
5285 fn vfdiv_vv(
5286 &mut self,
5287 op0: impl OperandCast,
5288 op1: impl OperandCast,
5289 op2: impl OperandCast,
5290 op3: impl OperandCast,
5291 ) {
5292 self.emit_n(
5293 Opcode::VFDIVVV as i64,
5294 &[
5295 op0.as_operand(),
5296 op1.as_operand(),
5297 op2.as_operand(),
5298 op3.as_operand(),
5299 ],
5300 )
5301 }
5302
5303 fn vfirst_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5304 self.emit_n(
5305 Opcode::VFIRSTM as i64,
5306 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5307 )
5308 }
5309
5310 fn vfmacc_vf(
5311 &mut self,
5312 op0: impl OperandCast,
5313 op1: impl OperandCast,
5314 op2: impl OperandCast,
5315 op3: impl OperandCast,
5316 ) {
5317 self.emit_n(
5318 Opcode::VFMACCVF as i64,
5319 &[
5320 op0.as_operand(),
5321 op1.as_operand(),
5322 op2.as_operand(),
5323 op3.as_operand(),
5324 ],
5325 )
5326 }
5327
5328 fn vfmacc_vv(
5329 &mut self,
5330 op0: impl OperandCast,
5331 op1: impl OperandCast,
5332 op2: impl OperandCast,
5333 op3: impl OperandCast,
5334 ) {
5335 self.emit_n(
5336 Opcode::VFMACCVV as i64,
5337 &[
5338 op0.as_operand(),
5339 op1.as_operand(),
5340 op2.as_operand(),
5341 op3.as_operand(),
5342 ],
5343 )
5344 }
5345
5346 fn vfmadd_vf(
5347 &mut self,
5348 op0: impl OperandCast,
5349 op1: impl OperandCast,
5350 op2: impl OperandCast,
5351 op3: impl OperandCast,
5352 ) {
5353 self.emit_n(
5354 Opcode::VFMADDVF as i64,
5355 &[
5356 op0.as_operand(),
5357 op1.as_operand(),
5358 op2.as_operand(),
5359 op3.as_operand(),
5360 ],
5361 )
5362 }
5363
5364 fn vfmadd_vv(
5365 &mut self,
5366 op0: impl OperandCast,
5367 op1: impl OperandCast,
5368 op2: impl OperandCast,
5369 op3: impl OperandCast,
5370 ) {
5371 self.emit_n(
5372 Opcode::VFMADDVV as i64,
5373 &[
5374 op0.as_operand(),
5375 op1.as_operand(),
5376 op2.as_operand(),
5377 op3.as_operand(),
5378 ],
5379 )
5380 }
5381
5382 fn vfmax_vf(
5383 &mut self,
5384 op0: impl OperandCast,
5385 op1: impl OperandCast,
5386 op2: impl OperandCast,
5387 op3: impl OperandCast,
5388 ) {
5389 self.emit_n(
5390 Opcode::VFMAXVF as i64,
5391 &[
5392 op0.as_operand(),
5393 op1.as_operand(),
5394 op2.as_operand(),
5395 op3.as_operand(),
5396 ],
5397 )
5398 }
5399
5400 fn vfmax_vv(
5401 &mut self,
5402 op0: impl OperandCast,
5403 op1: impl OperandCast,
5404 op2: impl OperandCast,
5405 op3: impl OperandCast,
5406 ) {
5407 self.emit_n(
5408 Opcode::VFMAXVV as i64,
5409 &[
5410 op0.as_operand(),
5411 op1.as_operand(),
5412 op2.as_operand(),
5413 op3.as_operand(),
5414 ],
5415 )
5416 }
5417
5418 fn vfmerge_vfm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5419 self.emit_n(
5420 Opcode::VFMERGEVFM as i64,
5421 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5422 )
5423 }
5424
5425 fn vfmin_vf(
5426 &mut self,
5427 op0: impl OperandCast,
5428 op1: impl OperandCast,
5429 op2: impl OperandCast,
5430 op3: impl OperandCast,
5431 ) {
5432 self.emit_n(
5433 Opcode::VFMINVF as i64,
5434 &[
5435 op0.as_operand(),
5436 op1.as_operand(),
5437 op2.as_operand(),
5438 op3.as_operand(),
5439 ],
5440 )
5441 }
5442
5443 fn vfmin_vv(
5444 &mut self,
5445 op0: impl OperandCast,
5446 op1: impl OperandCast,
5447 op2: impl OperandCast,
5448 op3: impl OperandCast,
5449 ) {
5450 self.emit_n(
5451 Opcode::VFMINVV as i64,
5452 &[
5453 op0.as_operand(),
5454 op1.as_operand(),
5455 op2.as_operand(),
5456 op3.as_operand(),
5457 ],
5458 )
5459 }
5460
5461 fn vfmsac_vf(
5462 &mut self,
5463 op0: impl OperandCast,
5464 op1: impl OperandCast,
5465 op2: impl OperandCast,
5466 op3: impl OperandCast,
5467 ) {
5468 self.emit_n(
5469 Opcode::VFMSACVF as i64,
5470 &[
5471 op0.as_operand(),
5472 op1.as_operand(),
5473 op2.as_operand(),
5474 op3.as_operand(),
5475 ],
5476 )
5477 }
5478
5479 fn vfmsac_vv(
5480 &mut self,
5481 op0: impl OperandCast,
5482 op1: impl OperandCast,
5483 op2: impl OperandCast,
5484 op3: impl OperandCast,
5485 ) {
5486 self.emit_n(
5487 Opcode::VFMSACVV as i64,
5488 &[
5489 op0.as_operand(),
5490 op1.as_operand(),
5491 op2.as_operand(),
5492 op3.as_operand(),
5493 ],
5494 )
5495 }
5496
5497 fn vfmsub_vf(
5498 &mut self,
5499 op0: impl OperandCast,
5500 op1: impl OperandCast,
5501 op2: impl OperandCast,
5502 op3: impl OperandCast,
5503 ) {
5504 self.emit_n(
5505 Opcode::VFMSUBVF as i64,
5506 &[
5507 op0.as_operand(),
5508 op1.as_operand(),
5509 op2.as_operand(),
5510 op3.as_operand(),
5511 ],
5512 )
5513 }
5514
5515 fn vfmsub_vv(
5516 &mut self,
5517 op0: impl OperandCast,
5518 op1: impl OperandCast,
5519 op2: impl OperandCast,
5520 op3: impl OperandCast,
5521 ) {
5522 self.emit_n(
5523 Opcode::VFMSUBVV as i64,
5524 &[
5525 op0.as_operand(),
5526 op1.as_operand(),
5527 op2.as_operand(),
5528 op3.as_operand(),
5529 ],
5530 )
5531 }
5532
5533 fn vfmul_vf(
5534 &mut self,
5535 op0: impl OperandCast,
5536 op1: impl OperandCast,
5537 op2: impl OperandCast,
5538 op3: impl OperandCast,
5539 ) {
5540 self.emit_n(
5541 Opcode::VFMULVF as i64,
5542 &[
5543 op0.as_operand(),
5544 op1.as_operand(),
5545 op2.as_operand(),
5546 op3.as_operand(),
5547 ],
5548 )
5549 }
5550
5551 fn vfmul_vv(
5552 &mut self,
5553 op0: impl OperandCast,
5554 op1: impl OperandCast,
5555 op2: impl OperandCast,
5556 op3: impl OperandCast,
5557 ) {
5558 self.emit_n(
5559 Opcode::VFMULVV as i64,
5560 &[
5561 op0.as_operand(),
5562 op1.as_operand(),
5563 op2.as_operand(),
5564 op3.as_operand(),
5565 ],
5566 )
5567 }
5568
5569 fn vfmv_f_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
5570 self.emit_n(Opcode::VFMVFS as i64, &[op0.as_operand(), op1.as_operand()])
5571 }
5572
5573 fn vfmv_s_f(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
5574 self.emit_n(Opcode::VFMVSF as i64, &[op0.as_operand(), op1.as_operand()])
5575 }
5576
5577 fn vfmv_v_f(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
5578 self.emit_n(Opcode::VFMVVF as i64, &[op0.as_operand(), op1.as_operand()])
5579 }
5580
5581 fn vfncvt_f_f_w(
5582 &mut self,
5583 op0: impl OperandCast,
5584 op1: impl OperandCast,
5585 op2: impl OperandCast,
5586 ) {
5587 self.emit_n(
5588 Opcode::VFNCVTFFW as i64,
5589 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5590 )
5591 }
5592
5593 fn vfncvt_f_x_w(
5594 &mut self,
5595 op0: impl OperandCast,
5596 op1: impl OperandCast,
5597 op2: impl OperandCast,
5598 ) {
5599 self.emit_n(
5600 Opcode::VFNCVTFXW as i64,
5601 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5602 )
5603 }
5604
5605 fn vfncvt_f_xu_w(
5606 &mut self,
5607 op0: impl OperandCast,
5608 op1: impl OperandCast,
5609 op2: impl OperandCast,
5610 ) {
5611 self.emit_n(
5612 Opcode::VFNCVTFXUW as i64,
5613 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5614 )
5615 }
5616
5617 fn vfncvt_rod_f_f_w(
5618 &mut self,
5619 op0: impl OperandCast,
5620 op1: impl OperandCast,
5621 op2: impl OperandCast,
5622 ) {
5623 self.emit_n(
5624 Opcode::VFNCVTRODFFW as i64,
5625 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5626 )
5627 }
5628
5629 fn vfncvt_rtz_x_f_w(
5630 &mut self,
5631 op0: impl OperandCast,
5632 op1: impl OperandCast,
5633 op2: impl OperandCast,
5634 ) {
5635 self.emit_n(
5636 Opcode::VFNCVTRTZXFW as i64,
5637 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5638 )
5639 }
5640
5641 fn vfncvt_rtz_xu_f_w(
5642 &mut self,
5643 op0: impl OperandCast,
5644 op1: impl OperandCast,
5645 op2: impl OperandCast,
5646 ) {
5647 self.emit_n(
5648 Opcode::VFNCVTRTZXUFW as i64,
5649 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5650 )
5651 }
5652
5653 fn vfncvt_x_f_w(
5654 &mut self,
5655 op0: impl OperandCast,
5656 op1: impl OperandCast,
5657 op2: impl OperandCast,
5658 ) {
5659 self.emit_n(
5660 Opcode::VFNCVTXFW as i64,
5661 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5662 )
5663 }
5664
5665 fn vfncvt_xu_f_w(
5666 &mut self,
5667 op0: impl OperandCast,
5668 op1: impl OperandCast,
5669 op2: impl OperandCast,
5670 ) {
5671 self.emit_n(
5672 Opcode::VFNCVTXUFW as i64,
5673 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5674 )
5675 }
5676
5677 fn vfnmacc_vf(
5678 &mut self,
5679 op0: impl OperandCast,
5680 op1: impl OperandCast,
5681 op2: impl OperandCast,
5682 op3: impl OperandCast,
5683 ) {
5684 self.emit_n(
5685 Opcode::VFNMACCVF as i64,
5686 &[
5687 op0.as_operand(),
5688 op1.as_operand(),
5689 op2.as_operand(),
5690 op3.as_operand(),
5691 ],
5692 )
5693 }
5694
5695 fn vfnmacc_vv(
5696 &mut self,
5697 op0: impl OperandCast,
5698 op1: impl OperandCast,
5699 op2: impl OperandCast,
5700 op3: impl OperandCast,
5701 ) {
5702 self.emit_n(
5703 Opcode::VFNMACCVV as i64,
5704 &[
5705 op0.as_operand(),
5706 op1.as_operand(),
5707 op2.as_operand(),
5708 op3.as_operand(),
5709 ],
5710 )
5711 }
5712
5713 fn vfnmadd_vf(
5714 &mut self,
5715 op0: impl OperandCast,
5716 op1: impl OperandCast,
5717 op2: impl OperandCast,
5718 op3: impl OperandCast,
5719 ) {
5720 self.emit_n(
5721 Opcode::VFNMADDVF as i64,
5722 &[
5723 op0.as_operand(),
5724 op1.as_operand(),
5725 op2.as_operand(),
5726 op3.as_operand(),
5727 ],
5728 )
5729 }
5730
5731 fn vfnmadd_vv(
5732 &mut self,
5733 op0: impl OperandCast,
5734 op1: impl OperandCast,
5735 op2: impl OperandCast,
5736 op3: impl OperandCast,
5737 ) {
5738 self.emit_n(
5739 Opcode::VFNMADDVV as i64,
5740 &[
5741 op0.as_operand(),
5742 op1.as_operand(),
5743 op2.as_operand(),
5744 op3.as_operand(),
5745 ],
5746 )
5747 }
5748
5749 fn vfnmsac_vf(
5750 &mut self,
5751 op0: impl OperandCast,
5752 op1: impl OperandCast,
5753 op2: impl OperandCast,
5754 op3: impl OperandCast,
5755 ) {
5756 self.emit_n(
5757 Opcode::VFNMSACVF as i64,
5758 &[
5759 op0.as_operand(),
5760 op1.as_operand(),
5761 op2.as_operand(),
5762 op3.as_operand(),
5763 ],
5764 )
5765 }
5766
5767 fn vfnmsac_vv(
5768 &mut self,
5769 op0: impl OperandCast,
5770 op1: impl OperandCast,
5771 op2: impl OperandCast,
5772 op3: impl OperandCast,
5773 ) {
5774 self.emit_n(
5775 Opcode::VFNMSACVV as i64,
5776 &[
5777 op0.as_operand(),
5778 op1.as_operand(),
5779 op2.as_operand(),
5780 op3.as_operand(),
5781 ],
5782 )
5783 }
5784
5785 fn vfnmsub_vf(
5786 &mut self,
5787 op0: impl OperandCast,
5788 op1: impl OperandCast,
5789 op2: impl OperandCast,
5790 op3: impl OperandCast,
5791 ) {
5792 self.emit_n(
5793 Opcode::VFNMSUBVF as i64,
5794 &[
5795 op0.as_operand(),
5796 op1.as_operand(),
5797 op2.as_operand(),
5798 op3.as_operand(),
5799 ],
5800 )
5801 }
5802
5803 fn vfnmsub_vv(
5804 &mut self,
5805 op0: impl OperandCast,
5806 op1: impl OperandCast,
5807 op2: impl OperandCast,
5808 op3: impl OperandCast,
5809 ) {
5810 self.emit_n(
5811 Opcode::VFNMSUBVV as i64,
5812 &[
5813 op0.as_operand(),
5814 op1.as_operand(),
5815 op2.as_operand(),
5816 op3.as_operand(),
5817 ],
5818 )
5819 }
5820
5821 fn vfrdiv_vf(
5822 &mut self,
5823 op0: impl OperandCast,
5824 op1: impl OperandCast,
5825 op2: impl OperandCast,
5826 op3: impl OperandCast,
5827 ) {
5828 self.emit_n(
5829 Opcode::VFRDIVVF as i64,
5830 &[
5831 op0.as_operand(),
5832 op1.as_operand(),
5833 op2.as_operand(),
5834 op3.as_operand(),
5835 ],
5836 )
5837 }
5838
5839 fn vfrec7_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5840 self.emit_n(
5841 Opcode::VFREC7V as i64,
5842 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5843 )
5844 }
5845
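    // vfrec7 (above) and vfrsqrt7 (below) emit the ~7-bit reciprocal and
    // reciprocal-square-root estimates. The vfred* methods emit the floating-point
    // reductions: vfredosum is the ordered sum, vfredusum the unordered sum, and
    // vfredsum appears to be the older pre-1.0 name kept alongside it (likewise
    // vfwredsum/vfwredusum further below).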
5846 fn vfredmax_vs(
5847 &mut self,
5848 op0: impl OperandCast,
5849 op1: impl OperandCast,
5850 op2: impl OperandCast,
5851 op3: impl OperandCast,
5852 ) {
5853 self.emit_n(
5854 Opcode::VFREDMAXVS as i64,
5855 &[
5856 op0.as_operand(),
5857 op1.as_operand(),
5858 op2.as_operand(),
5859 op3.as_operand(),
5860 ],
5861 )
5862 }
5863
5864 fn vfredmin_vs(
5865 &mut self,
5866 op0: impl OperandCast,
5867 op1: impl OperandCast,
5868 op2: impl OperandCast,
5869 op3: impl OperandCast,
5870 ) {
5871 self.emit_n(
5872 Opcode::VFREDMINVS as i64,
5873 &[
5874 op0.as_operand(),
5875 op1.as_operand(),
5876 op2.as_operand(),
5877 op3.as_operand(),
5878 ],
5879 )
5880 }
5881
5882 fn vfredosum_vs(
5883 &mut self,
5884 op0: impl OperandCast,
5885 op1: impl OperandCast,
5886 op2: impl OperandCast,
5887 op3: impl OperandCast,
5888 ) {
5889 self.emit_n(
5890 Opcode::VFREDOSUMVS as i64,
5891 &[
5892 op0.as_operand(),
5893 op1.as_operand(),
5894 op2.as_operand(),
5895 op3.as_operand(),
5896 ],
5897 )
5898 }
5899
5900 fn vfredsum_vs(
5901 &mut self,
5902 op0: impl OperandCast,
5903 op1: impl OperandCast,
5904 op2: impl OperandCast,
5905 op3: impl OperandCast,
5906 ) {
5907 self.emit_n(
5908 Opcode::VFREDSUMVS as i64,
5909 &[
5910 op0.as_operand(),
5911 op1.as_operand(),
5912 op2.as_operand(),
5913 op3.as_operand(),
5914 ],
5915 )
5916 }
5917
5918 fn vfredusum_vs(
5919 &mut self,
5920 op0: impl OperandCast,
5921 op1: impl OperandCast,
5922 op2: impl OperandCast,
5923 op3: impl OperandCast,
5924 ) {
5925 self.emit_n(
5926 Opcode::VFREDUSUMVS as i64,
5927 &[
5928 op0.as_operand(),
5929 op1.as_operand(),
5930 op2.as_operand(),
5931 op3.as_operand(),
5932 ],
5933 )
5934 }
5935
5936 fn vfrsqrt7_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
5937 self.emit_n(
5938 Opcode::VFRSQRT7V as i64,
5939 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
5940 )
5941 }
5942
5943 fn vfrsub_vf(
5944 &mut self,
5945 op0: impl OperandCast,
5946 op1: impl OperandCast,
5947 op2: impl OperandCast,
5948 op3: impl OperandCast,
5949 ) {
5950 self.emit_n(
5951 Opcode::VFRSUBVF as i64,
5952 &[
5953 op0.as_operand(),
5954 op1.as_operand(),
5955 op2.as_operand(),
5956 op3.as_operand(),
5957 ],
5958 )
5959 }
5960
5961 fn vfsgnj_vf(
5962 &mut self,
5963 op0: impl OperandCast,
5964 op1: impl OperandCast,
5965 op2: impl OperandCast,
5966 op3: impl OperandCast,
5967 ) {
5968 self.emit_n(
5969 Opcode::VFSGNJVF as i64,
5970 &[
5971 op0.as_operand(),
5972 op1.as_operand(),
5973 op2.as_operand(),
5974 op3.as_operand(),
5975 ],
5976 )
5977 }
5978
5979 fn vfsgnj_vv(
5980 &mut self,
5981 op0: impl OperandCast,
5982 op1: impl OperandCast,
5983 op2: impl OperandCast,
5984 op3: impl OperandCast,
5985 ) {
5986 self.emit_n(
5987 Opcode::VFSGNJVV as i64,
5988 &[
5989 op0.as_operand(),
5990 op1.as_operand(),
5991 op2.as_operand(),
5992 op3.as_operand(),
5993 ],
5994 )
5995 }
5996
5997 fn vfsgnjn_vf(
5998 &mut self,
5999 op0: impl OperandCast,
6000 op1: impl OperandCast,
6001 op2: impl OperandCast,
6002 op3: impl OperandCast,
6003 ) {
6004 self.emit_n(
6005 Opcode::VFSGNJNVF as i64,
6006 &[
6007 op0.as_operand(),
6008 op1.as_operand(),
6009 op2.as_operand(),
6010 op3.as_operand(),
6011 ],
6012 )
6013 }
6014
6015 fn vfsgnjn_vv(
6016 &mut self,
6017 op0: impl OperandCast,
6018 op1: impl OperandCast,
6019 op2: impl OperandCast,
6020 op3: impl OperandCast,
6021 ) {
6022 self.emit_n(
6023 Opcode::VFSGNJNVV as i64,
6024 &[
6025 op0.as_operand(),
6026 op1.as_operand(),
6027 op2.as_operand(),
6028 op3.as_operand(),
6029 ],
6030 )
6031 }
6032
6033 fn vfsgnjx_vf(
6034 &mut self,
6035 op0: impl OperandCast,
6036 op1: impl OperandCast,
6037 op2: impl OperandCast,
6038 op3: impl OperandCast,
6039 ) {
6040 self.emit_n(
6041 Opcode::VFSGNJXVF as i64,
6042 &[
6043 op0.as_operand(),
6044 op1.as_operand(),
6045 op2.as_operand(),
6046 op3.as_operand(),
6047 ],
6048 )
6049 }
6050
6051 fn vfsgnjx_vv(
6052 &mut self,
6053 op0: impl OperandCast,
6054 op1: impl OperandCast,
6055 op2: impl OperandCast,
6056 op3: impl OperandCast,
6057 ) {
6058 self.emit_n(
6059 Opcode::VFSGNJXVV as i64,
6060 &[
6061 op0.as_operand(),
6062 op1.as_operand(),
6063 op2.as_operand(),
6064 op3.as_operand(),
6065 ],
6066 )
6067 }
6068
6069 fn vfslide1down_vf(
6070 &mut self,
6071 op0: impl OperandCast,
6072 op1: impl OperandCast,
6073 op2: impl OperandCast,
6074 op3: impl OperandCast,
6075 ) {
6076 self.emit_n(
6077 Opcode::VFSLIDE1DOWNVF as i64,
6078 &[
6079 op0.as_operand(),
6080 op1.as_operand(),
6081 op2.as_operand(),
6082 op3.as_operand(),
6083 ],
6084 )
6085 }
6086
6087 fn vfslide1up_vf(
6088 &mut self,
6089 op0: impl OperandCast,
6090 op1: impl OperandCast,
6091 op2: impl OperandCast,
6092 op3: impl OperandCast,
6093 ) {
6094 self.emit_n(
6095 Opcode::VFSLIDE1UPVF as i64,
6096 &[
6097 op0.as_operand(),
6098 op1.as_operand(),
6099 op2.as_operand(),
6100 op3.as_operand(),
6101 ],
6102 )
6103 }
6104
6105 fn vfsqrt_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
6106 self.emit_n(
6107 Opcode::VFSQRTV as i64,
6108 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6109 )
6110 }
6111
6112 fn vfsub_vf(
6113 &mut self,
6114 op0: impl OperandCast,
6115 op1: impl OperandCast,
6116 op2: impl OperandCast,
6117 op3: impl OperandCast,
6118 ) {
6119 self.emit_n(
6120 Opcode::VFSUBVF as i64,
6121 &[
6122 op0.as_operand(),
6123 op1.as_operand(),
6124 op2.as_operand(),
6125 op3.as_operand(),
6126 ],
6127 )
6128 }
6129
6130 fn vfsub_vv(
6131 &mut self,
6132 op0: impl OperandCast,
6133 op1: impl OperandCast,
6134 op2: impl OperandCast,
6135 op3: impl OperandCast,
6136 ) {
6137 self.emit_n(
6138 Opcode::VFSUBVV as i64,
6139 &[
6140 op0.as_operand(),
6141 op1.as_operand(),
6142 op2.as_operand(),
6143 op3.as_operand(),
6144 ],
6145 )
6146 }
6147
6148 fn vfwadd_vf(
6149 &mut self,
6150 op0: impl OperandCast,
6151 op1: impl OperandCast,
6152 op2: impl OperandCast,
6153 op3: impl OperandCast,
6154 ) {
6155 self.emit_n(
6156 Opcode::VFWADDVF as i64,
6157 &[
6158 op0.as_operand(),
6159 op1.as_operand(),
6160 op2.as_operand(),
6161 op3.as_operand(),
6162 ],
6163 )
6164 }
6165
6166 fn vfwadd_vv(
6167 &mut self,
6168 op0: impl OperandCast,
6169 op1: impl OperandCast,
6170 op2: impl OperandCast,
6171 op3: impl OperandCast,
6172 ) {
6173 self.emit_n(
6174 Opcode::VFWADDVV as i64,
6175 &[
6176 op0.as_operand(),
6177 op1.as_operand(),
6178 op2.as_operand(),
6179 op3.as_operand(),
6180 ],
6181 )
6182 }
6183
6184 fn vfwadd_wf(
6185 &mut self,
6186 op0: impl OperandCast,
6187 op1: impl OperandCast,
6188 op2: impl OperandCast,
6189 op3: impl OperandCast,
6190 ) {
6191 self.emit_n(
6192 Opcode::VFWADDWF as i64,
6193 &[
6194 op0.as_operand(),
6195 op1.as_operand(),
6196 op2.as_operand(),
6197 op3.as_operand(),
6198 ],
6199 )
6200 }
6201
6202 fn vfwadd_wv(
6203 &mut self,
6204 op0: impl OperandCast,
6205 op1: impl OperandCast,
6206 op2: impl OperandCast,
6207 op3: impl OperandCast,
6208 ) {
6209 self.emit_n(
6210 Opcode::VFWADDWV as i64,
6211 &[
6212 op0.as_operand(),
6213 op1.as_operand(),
6214 op2.as_operand(),
6215 op3.as_operand(),
6216 ],
6217 )
6218 }
6219
6220 fn vfwcvt_f_f_v(
6221 &mut self,
6222 op0: impl OperandCast,
6223 op1: impl OperandCast,
6224 op2: impl OperandCast,
6225 ) {
6226 self.emit_n(
6227 Opcode::VFWCVTFFV as i64,
6228 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6229 )
6230 }
6231
6232 fn vfwcvt_f_x_v(
6233 &mut self,
6234 op0: impl OperandCast,
6235 op1: impl OperandCast,
6236 op2: impl OperandCast,
6237 ) {
6238 self.emit_n(
6239 Opcode::VFWCVTFXV as i64,
6240 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6241 )
6242 }
6243
6244 fn vfwcvt_f_xu_v(
6245 &mut self,
6246 op0: impl OperandCast,
6247 op1: impl OperandCast,
6248 op2: impl OperandCast,
6249 ) {
6250 self.emit_n(
6251 Opcode::VFWCVTFXUV as i64,
6252 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6253 )
6254 }
6255
6256 fn vfwcvt_rtz_x_f_v(
6257 &mut self,
6258 op0: impl OperandCast,
6259 op1: impl OperandCast,
6260 op2: impl OperandCast,
6261 ) {
6262 self.emit_n(
6263 Opcode::VFWCVTRTZXFV as i64,
6264 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6265 )
6266 }
6267
6268 fn vfwcvt_rtz_xu_f_v(
6269 &mut self,
6270 op0: impl OperandCast,
6271 op1: impl OperandCast,
6272 op2: impl OperandCast,
6273 ) {
6274 self.emit_n(
6275 Opcode::VFWCVTRTZXUFV as i64,
6276 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6277 )
6278 }
6279
6280 fn vfwcvt_x_f_v(
6281 &mut self,
6282 op0: impl OperandCast,
6283 op1: impl OperandCast,
6284 op2: impl OperandCast,
6285 ) {
6286 self.emit_n(
6287 Opcode::VFWCVTXFV as i64,
6288 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6289 )
6290 }
6291
6292 fn vfwcvt_xu_f_v(
6293 &mut self,
6294 op0: impl OperandCast,
6295 op1: impl OperandCast,
6296 op2: impl OperandCast,
6297 ) {
6298 self.emit_n(
6299 Opcode::VFWCVTXUFV as i64,
6300 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6301 )
6302 }
6303
6304 fn vfwmacc_vf(
6305 &mut self,
6306 op0: impl OperandCast,
6307 op1: impl OperandCast,
6308 op2: impl OperandCast,
6309 op3: impl OperandCast,
6310 ) {
6311 self.emit_n(
6312 Opcode::VFWMACCVF as i64,
6313 &[
6314 op0.as_operand(),
6315 op1.as_operand(),
6316 op2.as_operand(),
6317 op3.as_operand(),
6318 ],
6319 )
6320 }
6321
6322 fn vfwmacc_vv(
6323 &mut self,
6324 op0: impl OperandCast,
6325 op1: impl OperandCast,
6326 op2: impl OperandCast,
6327 op3: impl OperandCast,
6328 ) {
6329 self.emit_n(
6330 Opcode::VFWMACCVV as i64,
6331 &[
6332 op0.as_operand(),
6333 op1.as_operand(),
6334 op2.as_operand(),
6335 op3.as_operand(),
6336 ],
6337 )
6338 }
6339
6340 fn vfwmsac_vf(
6341 &mut self,
6342 op0: impl OperandCast,
6343 op1: impl OperandCast,
6344 op2: impl OperandCast,
6345 op3: impl OperandCast,
6346 ) {
6347 self.emit_n(
6348 Opcode::VFWMSACVF as i64,
6349 &[
6350 op0.as_operand(),
6351 op1.as_operand(),
6352 op2.as_operand(),
6353 op3.as_operand(),
6354 ],
6355 )
6356 }
6357
6358 fn vfwmsac_vv(
6359 &mut self,
6360 op0: impl OperandCast,
6361 op1: impl OperandCast,
6362 op2: impl OperandCast,
6363 op3: impl OperandCast,
6364 ) {
6365 self.emit_n(
6366 Opcode::VFWMSACVV as i64,
6367 &[
6368 op0.as_operand(),
6369 op1.as_operand(),
6370 op2.as_operand(),
6371 op3.as_operand(),
6372 ],
6373 )
6374 }
6375
6376 fn vfwmul_vf(
6377 &mut self,
6378 op0: impl OperandCast,
6379 op1: impl OperandCast,
6380 op2: impl OperandCast,
6381 op3: impl OperandCast,
6382 ) {
6383 self.emit_n(
6384 Opcode::VFWMULVF as i64,
6385 &[
6386 op0.as_operand(),
6387 op1.as_operand(),
6388 op2.as_operand(),
6389 op3.as_operand(),
6390 ],
6391 )
6392 }
6393
6394 fn vfwmul_vv(
6395 &mut self,
6396 op0: impl OperandCast,
6397 op1: impl OperandCast,
6398 op2: impl OperandCast,
6399 op3: impl OperandCast,
6400 ) {
6401 self.emit_n(
6402 Opcode::VFWMULVV as i64,
6403 &[
6404 op0.as_operand(),
6405 op1.as_operand(),
6406 op2.as_operand(),
6407 op3.as_operand(),
6408 ],
6409 )
6410 }
6411
6412 fn vfwnmacc_vf(
6413 &mut self,
6414 op0: impl OperandCast,
6415 op1: impl OperandCast,
6416 op2: impl OperandCast,
6417 op3: impl OperandCast,
6418 ) {
6419 self.emit_n(
6420 Opcode::VFWNMACCVF as i64,
6421 &[
6422 op0.as_operand(),
6423 op1.as_operand(),
6424 op2.as_operand(),
6425 op3.as_operand(),
6426 ],
6427 )
6428 }
6429
6430 fn vfwnmacc_vv(
6431 &mut self,
6432 op0: impl OperandCast,
6433 op1: impl OperandCast,
6434 op2: impl OperandCast,
6435 op3: impl OperandCast,
6436 ) {
6437 self.emit_n(
6438 Opcode::VFWNMACCVV as i64,
6439 &[
6440 op0.as_operand(),
6441 op1.as_operand(),
6442 op2.as_operand(),
6443 op3.as_operand(),
6444 ],
6445 )
6446 }
6447
6448 fn vfwnmsac_vf(
6449 &mut self,
6450 op0: impl OperandCast,
6451 op1: impl OperandCast,
6452 op2: impl OperandCast,
6453 op3: impl OperandCast,
6454 ) {
6455 self.emit_n(
6456 Opcode::VFWNMSACVF as i64,
6457 &[
6458 op0.as_operand(),
6459 op1.as_operand(),
6460 op2.as_operand(),
6461 op3.as_operand(),
6462 ],
6463 )
6464 }
6465
6466 fn vfwnmsac_vv(
6467 &mut self,
6468 op0: impl OperandCast,
6469 op1: impl OperandCast,
6470 op2: impl OperandCast,
6471 op3: impl OperandCast,
6472 ) {
6473 self.emit_n(
6474 Opcode::VFWNMSACVV as i64,
6475 &[
6476 op0.as_operand(),
6477 op1.as_operand(),
6478 op2.as_operand(),
6479 op3.as_operand(),
6480 ],
6481 )
6482 }
6483
6484 fn vfwredosum_vs(
6485 &mut self,
6486 op0: impl OperandCast,
6487 op1: impl OperandCast,
6488 op2: impl OperandCast,
6489 op3: impl OperandCast,
6490 ) {
6491 self.emit_n(
6492 Opcode::VFWREDOSUMVS as i64,
6493 &[
6494 op0.as_operand(),
6495 op1.as_operand(),
6496 op2.as_operand(),
6497 op3.as_operand(),
6498 ],
6499 )
6500 }
6501
6502 fn vfwredsum_vs(
6503 &mut self,
6504 op0: impl OperandCast,
6505 op1: impl OperandCast,
6506 op2: impl OperandCast,
6507 op3: impl OperandCast,
6508 ) {
6509 self.emit_n(
6510 Opcode::VFWREDSUMVS as i64,
6511 &[
6512 op0.as_operand(),
6513 op1.as_operand(),
6514 op2.as_operand(),
6515 op3.as_operand(),
6516 ],
6517 )
6518 }
6519
6520 fn vfwredusum_vs(
6521 &mut self,
6522 op0: impl OperandCast,
6523 op1: impl OperandCast,
6524 op2: impl OperandCast,
6525 op3: impl OperandCast,
6526 ) {
6527 self.emit_n(
6528 Opcode::VFWREDUSUMVS as i64,
6529 &[
6530 op0.as_operand(),
6531 op1.as_operand(),
6532 op2.as_operand(),
6533 op3.as_operand(),
6534 ],
6535 )
6536 }
6537
6538 fn vfwsub_vf(
6539 &mut self,
6540 op0: impl OperandCast,
6541 op1: impl OperandCast,
6542 op2: impl OperandCast,
6543 op3: impl OperandCast,
6544 ) {
6545 self.emit_n(
6546 Opcode::VFWSUBVF as i64,
6547 &[
6548 op0.as_operand(),
6549 op1.as_operand(),
6550 op2.as_operand(),
6551 op3.as_operand(),
6552 ],
6553 )
6554 }
6555
6556 fn vfwsub_vv(
6557 &mut self,
6558 op0: impl OperandCast,
6559 op1: impl OperandCast,
6560 op2: impl OperandCast,
6561 op3: impl OperandCast,
6562 ) {
6563 self.emit_n(
6564 Opcode::VFWSUBVV as i64,
6565 &[
6566 op0.as_operand(),
6567 op1.as_operand(),
6568 op2.as_operand(),
6569 op3.as_operand(),
6570 ],
6571 )
6572 }
6573
6574 fn vfwsub_wf(
6575 &mut self,
6576 op0: impl OperandCast,
6577 op1: impl OperandCast,
6578 op2: impl OperandCast,
6579 op3: impl OperandCast,
6580 ) {
6581 self.emit_n(
6582 Opcode::VFWSUBWF as i64,
6583 &[
6584 op0.as_operand(),
6585 op1.as_operand(),
6586 op2.as_operand(),
6587 op3.as_operand(),
6588 ],
6589 )
6590 }
6591
6592 fn vfwsub_wv(
6593 &mut self,
6594 op0: impl OperandCast,
6595 op1: impl OperandCast,
6596 op2: impl OperandCast,
6597 op3: impl OperandCast,
6598 ) {
6599 self.emit_n(
6600 Opcode::VFWSUBWV as i64,
6601 &[
6602 op0.as_operand(),
6603 op1.as_operand(),
6604 op2.as_operand(),
6605 op3.as_operand(),
6606 ],
6607 )
6608 }
6609
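    // vghsh/vgmul are the Zvkg GHASH instructions used for AES-GCM. vid_v writes
    // each element's index, and viota_m writes the exclusive prefix sum of the mask.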
6610 fn vghsh_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
6611 self.emit_n(
6612 Opcode::VGHSHVV as i64,
6613 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6614 )
6615 }
6616
6617 fn vgmul_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6618 self.emit_n(
6619 Opcode::VGMULVV as i64,
6620 &[op0.as_operand(), op1.as_operand()],
6621 )
6622 }
6623
6624 fn vid_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6625 self.emit_n(Opcode::VIDV as i64, &[op0.as_operand(), op1.as_operand()])
6626 }
6627
6628 fn viota_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
6629 self.emit_n(
6630 Opcode::VIOTAM as i64,
6631 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
6632 )
6633 }
6634
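    // Whole-register loads: vlNreW_v loads N vector registers with element-width
    // hint W, and vlNr_v is the assembler alias for the 8-bit-width form.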
6635 fn vl1r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6636 self.emit_n(Opcode::VL1RV as i64, &[op0.as_operand(), op1.as_operand()])
6637 }
6638
6639 fn vl1re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6640 self.emit_n(
6641 Opcode::VL1RE16V as i64,
6642 &[op0.as_operand(), op1.as_operand()],
6643 )
6644 }
6645
6646 fn vl1re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6647 self.emit_n(
6648 Opcode::VL1RE32V as i64,
6649 &[op0.as_operand(), op1.as_operand()],
6650 )
6651 }
6652
6653 fn vl1re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6654 self.emit_n(
6655 Opcode::VL1RE64V as i64,
6656 &[op0.as_operand(), op1.as_operand()],
6657 )
6658 }
6659
6660 fn vl1re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6661 self.emit_n(
6662 Opcode::VL1RE8V as i64,
6663 &[op0.as_operand(), op1.as_operand()],
6664 )
6665 }
6666
6667 fn vl2r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6668 self.emit_n(Opcode::VL2RV as i64, &[op0.as_operand(), op1.as_operand()])
6669 }
6670
6671 fn vl2re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6672 self.emit_n(
6673 Opcode::VL2RE16V as i64,
6674 &[op0.as_operand(), op1.as_operand()],
6675 )
6676 }
6677
6678 fn vl2re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6679 self.emit_n(
6680 Opcode::VL2RE32V as i64,
6681 &[op0.as_operand(), op1.as_operand()],
6682 )
6683 }
6684
6685 fn vl2re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6686 self.emit_n(
6687 Opcode::VL2RE64V as i64,
6688 &[op0.as_operand(), op1.as_operand()],
6689 )
6690 }
6691
6692 fn vl2re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6693 self.emit_n(
6694 Opcode::VL2RE8V as i64,
6695 &[op0.as_operand(), op1.as_operand()],
6696 )
6697 }
6698
6699 fn vl4r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6700 self.emit_n(Opcode::VL4RV as i64, &[op0.as_operand(), op1.as_operand()])
6701 }
6702
6703 fn vl4re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6704 self.emit_n(
6705 Opcode::VL4RE16V as i64,
6706 &[op0.as_operand(), op1.as_operand()],
6707 )
6708 }
6709
6710 fn vl4re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6711 self.emit_n(
6712 Opcode::VL4RE32V as i64,
6713 &[op0.as_operand(), op1.as_operand()],
6714 )
6715 }
6716
6717 fn vl4re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6718 self.emit_n(
6719 Opcode::VL4RE64V as i64,
6720 &[op0.as_operand(), op1.as_operand()],
6721 )
6722 }
6723
6724 fn vl4re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6725 self.emit_n(
6726 Opcode::VL4RE8V as i64,
6727 &[op0.as_operand(), op1.as_operand()],
6728 )
6729 }
6730
6731 fn vl8r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6732 self.emit_n(Opcode::VL8RV as i64, &[op0.as_operand(), op1.as_operand()])
6733 }
6734
6735 fn vl8re16_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6736 self.emit_n(
6737 Opcode::VL8RE16V as i64,
6738 &[op0.as_operand(), op1.as_operand()],
6739 )
6740 }
6741
6742 fn vl8re32_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6743 self.emit_n(
6744 Opcode::VL8RE32V as i64,
6745 &[op0.as_operand(), op1.as_operand()],
6746 )
6747 }
6748
6749 fn vl8re64_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6750 self.emit_n(
6751 Opcode::VL8RE64V as i64,
6752 &[op0.as_operand(), op1.as_operand()],
6753 )
6754 }
6755
6756 fn vl8re8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6757 self.emit_n(
6758 Opcode::VL8RE8V as i64,
6759 &[op0.as_operand(), op1.as_operand()],
6760 )
6761 }
6762
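    // Unit-stride loads and their fault-only-first (ff) variants, which trim vl
    // instead of trapping when a fault occurs past element 0. vle1_v appears to be
    // the pre-1.0 name of the mask load vlm_v, which is also emitted below.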
6763 fn vle16_v(
6764 &mut self,
6765 op0: impl OperandCast,
6766 op1: impl OperandCast,
6767 op2: impl OperandCast,
6768 op3: impl OperandCast,
6769 ) {
6770 self.emit_n(
6771 Opcode::VLE16V as i64,
6772 &[
6773 op0.as_operand(),
6774 op1.as_operand(),
6775 op2.as_operand(),
6776 op3.as_operand(),
6777 ],
6778 )
6779 }
6780
6781 fn vle16ff_v(
6782 &mut self,
6783 op0: impl OperandCast,
6784 op1: impl OperandCast,
6785 op2: impl OperandCast,
6786 op3: impl OperandCast,
6787 ) {
6788 self.emit_n(
6789 Opcode::VLE16FFV as i64,
6790 &[
6791 op0.as_operand(),
6792 op1.as_operand(),
6793 op2.as_operand(),
6794 op3.as_operand(),
6795 ],
6796 )
6797 }
6798
6799 fn vle1_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6800 self.emit_n(Opcode::VLE1V as i64, &[op0.as_operand(), op1.as_operand()])
6801 }
6802
6803 fn vle32_v(
6804 &mut self,
6805 op0: impl OperandCast,
6806 op1: impl OperandCast,
6807 op2: impl OperandCast,
6808 op3: impl OperandCast,
6809 ) {
6810 self.emit_n(
6811 Opcode::VLE32V as i64,
6812 &[
6813 op0.as_operand(),
6814 op1.as_operand(),
6815 op2.as_operand(),
6816 op3.as_operand(),
6817 ],
6818 )
6819 }
6820
6821 fn vle32ff_v(
6822 &mut self,
6823 op0: impl OperandCast,
6824 op1: impl OperandCast,
6825 op2: impl OperandCast,
6826 op3: impl OperandCast,
6827 ) {
6828 self.emit_n(
6829 Opcode::VLE32FFV as i64,
6830 &[
6831 op0.as_operand(),
6832 op1.as_operand(),
6833 op2.as_operand(),
6834 op3.as_operand(),
6835 ],
6836 )
6837 }
6838
6839 fn vle64_v(
6840 &mut self,
6841 op0: impl OperandCast,
6842 op1: impl OperandCast,
6843 op2: impl OperandCast,
6844 op3: impl OperandCast,
6845 ) {
6846 self.emit_n(
6847 Opcode::VLE64V as i64,
6848 &[
6849 op0.as_operand(),
6850 op1.as_operand(),
6851 op2.as_operand(),
6852 op3.as_operand(),
6853 ],
6854 )
6855 }
6856
6857 fn vle64ff_v(
6858 &mut self,
6859 op0: impl OperandCast,
6860 op1: impl OperandCast,
6861 op2: impl OperandCast,
6862 op3: impl OperandCast,
6863 ) {
6864 self.emit_n(
6865 Opcode::VLE64FFV as i64,
6866 &[
6867 op0.as_operand(),
6868 op1.as_operand(),
6869 op2.as_operand(),
6870 op3.as_operand(),
6871 ],
6872 )
6873 }
6874
6875 fn vle8_v(
6876 &mut self,
6877 op0: impl OperandCast,
6878 op1: impl OperandCast,
6879 op2: impl OperandCast,
6880 op3: impl OperandCast,
6881 ) {
6882 self.emit_n(
6883 Opcode::VLE8V as i64,
6884 &[
6885 op0.as_operand(),
6886 op1.as_operand(),
6887 op2.as_operand(),
6888 op3.as_operand(),
6889 ],
6890 )
6891 }
6892
6893 fn vle8ff_v(
6894 &mut self,
6895 op0: impl OperandCast,
6896 op1: impl OperandCast,
6897 op2: impl OperandCast,
6898 op3: impl OperandCast,
6899 ) {
6900 self.emit_n(
6901 Opcode::VLE8FFV as i64,
6902 &[
6903 op0.as_operand(),
6904 op1.as_operand(),
6905 op2.as_operand(),
6906 op3.as_operand(),
6907 ],
6908 )
6909 }
6910
6911 fn vlm_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
6912 self.emit_n(Opcode::VLMV as i64, &[op0.as_operand(), op1.as_operand()])
6913 }
6914
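    // Indexed-ordered loads (vloxei{8,16,32,64}.v); the unordered vluxei*
    // variants follow further below. The EI suffix is the index element width.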
6915 fn vloxei16_v(
6916 &mut self,
6917 op0: impl OperandCast,
6918 op1: impl OperandCast,
6919 op2: impl OperandCast,
6920 op3: impl OperandCast,
6921 op4: impl OperandCast,
6922 ) {
6923 self.emit_n(
6924 Opcode::VLOXEI16V as i64,
6925 &[
6926 op0.as_operand(),
6927 op1.as_operand(),
6928 op2.as_operand(),
6929 op3.as_operand(),
6930 op4.as_operand(),
6931 ],
6932 )
6933 }
6934
6935 fn vloxei32_v(
6936 &mut self,
6937 op0: impl OperandCast,
6938 op1: impl OperandCast,
6939 op2: impl OperandCast,
6940 op3: impl OperandCast,
6941 op4: impl OperandCast,
6942 ) {
6943 self.emit_n(
6944 Opcode::VLOXEI32V as i64,
6945 &[
6946 op0.as_operand(),
6947 op1.as_operand(),
6948 op2.as_operand(),
6949 op3.as_operand(),
6950 op4.as_operand(),
6951 ],
6952 )
6953 }
6954
6955 fn vloxei64_v(
6956 &mut self,
6957 op0: impl OperandCast,
6958 op1: impl OperandCast,
6959 op2: impl OperandCast,
6960 op3: impl OperandCast,
6961 op4: impl OperandCast,
6962 ) {
6963 self.emit_n(
6964 Opcode::VLOXEI64V as i64,
6965 &[
6966 op0.as_operand(),
6967 op1.as_operand(),
6968 op2.as_operand(),
6969 op3.as_operand(),
6970 op4.as_operand(),
6971 ],
6972 )
6973 }
6974
6975 fn vloxei8_v(
6976 &mut self,
6977 op0: impl OperandCast,
6978 op1: impl OperandCast,
6979 op2: impl OperandCast,
6980 op3: impl OperandCast,
6981 op4: impl OperandCast,
6982 ) {
6983 self.emit_n(
6984 Opcode::VLOXEI8V as i64,
6985 &[
6986 op0.as_operand(),
6987 op1.as_operand(),
6988 op2.as_operand(),
6989 op3.as_operand(),
6990 op4.as_operand(),
6991 ],
6992 )
6993 }
6994
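    // Strided loads (vlse{8,16,32,64}.v): consecutive elements are separated by
    // a byte stride taken from a scalar register.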
6995 fn vlse16_v(
6996 &mut self,
6997 op0: impl OperandCast,
6998 op1: impl OperandCast,
6999 op2: impl OperandCast,
7000 op3: impl OperandCast,
7001 op4: impl OperandCast,
7002 ) {
7003 self.emit_n(
7004 Opcode::VLSE16V as i64,
7005 &[
7006 op0.as_operand(),
7007 op1.as_operand(),
7008 op2.as_operand(),
7009 op3.as_operand(),
7010 op4.as_operand(),
7011 ],
7012 )
7013 }
7014
7015 fn vlse32_v(
7016 &mut self,
7017 op0: impl OperandCast,
7018 op1: impl OperandCast,
7019 op2: impl OperandCast,
7020 op3: impl OperandCast,
7021 op4: impl OperandCast,
7022 ) {
7023 self.emit_n(
7024 Opcode::VLSE32V as i64,
7025 &[
7026 op0.as_operand(),
7027 op1.as_operand(),
7028 op2.as_operand(),
7029 op3.as_operand(),
7030 op4.as_operand(),
7031 ],
7032 )
7033 }
7034
7035 fn vlse64_v(
7036 &mut self,
7037 op0: impl OperandCast,
7038 op1: impl OperandCast,
7039 op2: impl OperandCast,
7040 op3: impl OperandCast,
7041 op4: impl OperandCast,
7042 ) {
7043 self.emit_n(
7044 Opcode::VLSE64V as i64,
7045 &[
7046 op0.as_operand(),
7047 op1.as_operand(),
7048 op2.as_operand(),
7049 op3.as_operand(),
7050 op4.as_operand(),
7051 ],
7052 )
7053 }
7054
7055 fn vlse8_v(
7056 &mut self,
7057 op0: impl OperandCast,
7058 op1: impl OperandCast,
7059 op2: impl OperandCast,
7060 op3: impl OperandCast,
7061 op4: impl OperandCast,
7062 ) {
7063 self.emit_n(
7064 Opcode::VLSE8V as i64,
7065 &[
7066 op0.as_operand(),
7067 op1.as_operand(),
7068 op2.as_operand(),
7069 op3.as_operand(),
7070 op4.as_operand(),
7071 ],
7072 )
7073 }
7074
7075 fn vluxei16_v(
7076 &mut self,
7077 op0: impl OperandCast,
7078 op1: impl OperandCast,
7079 op2: impl OperandCast,
7080 op3: impl OperandCast,
7081 op4: impl OperandCast,
7082 ) {
7083 self.emit_n(
7084 Opcode::VLUXEI16V as i64,
7085 &[
7086 op0.as_operand(),
7087 op1.as_operand(),
7088 op2.as_operand(),
7089 op3.as_operand(),
7090 op4.as_operand(),
7091 ],
7092 )
7093 }
7094
7095 fn vluxei32_v(
7096 &mut self,
7097 op0: impl OperandCast,
7098 op1: impl OperandCast,
7099 op2: impl OperandCast,
7100 op3: impl OperandCast,
7101 op4: impl OperandCast,
7102 ) {
7103 self.emit_n(
7104 Opcode::VLUXEI32V as i64,
7105 &[
7106 op0.as_operand(),
7107 op1.as_operand(),
7108 op2.as_operand(),
7109 op3.as_operand(),
7110 op4.as_operand(),
7111 ],
7112 )
7113 }
7114
7115 fn vluxei64_v(
7116 &mut self,
7117 op0: impl OperandCast,
7118 op1: impl OperandCast,
7119 op2: impl OperandCast,
7120 op3: impl OperandCast,
7121 op4: impl OperandCast,
7122 ) {
7123 self.emit_n(
7124 Opcode::VLUXEI64V as i64,
7125 &[
7126 op0.as_operand(),
7127 op1.as_operand(),
7128 op2.as_operand(),
7129 op3.as_operand(),
7130 op4.as_operand(),
7131 ],
7132 )
7133 }
7134
7135 fn vluxei8_v(
7136 &mut self,
7137 op0: impl OperandCast,
7138 op1: impl OperandCast,
7139 op2: impl OperandCast,
7140 op3: impl OperandCast,
7141 op4: impl OperandCast,
7142 ) {
7143 self.emit_n(
7144 Opcode::VLUXEI8V as i64,
7145 &[
7146 op0.as_operand(),
7147 op1.as_operand(),
7148 op2.as_operand(),
7149 op3.as_operand(),
7150 op4.as_operand(),
7151 ],
7152 )
7153 }
7154
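    // Integer multiply-add family: vmacc/vnmsac overwrite the addend operand,
    // while vmadd/vnmsub overwrite the multiplicand.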
7155 fn vmacc_vv(
7156 &mut self,
7157 op0: impl OperandCast,
7158 op1: impl OperandCast,
7159 op2: impl OperandCast,
7160 op3: impl OperandCast,
7161 ) {
7162 self.emit_n(
7163 Opcode::VMACCVV as i64,
7164 &[
7165 op0.as_operand(),
7166 op1.as_operand(),
7167 op2.as_operand(),
7168 op3.as_operand(),
7169 ],
7170 )
7171 }
7172
7173 fn vmacc_vx(
7174 &mut self,
7175 op0: impl OperandCast,
7176 op1: impl OperandCast,
7177 op2: impl OperandCast,
7178 op3: impl OperandCast,
7179 ) {
7180 self.emit_n(
7181 Opcode::VMACCVX as i64,
7182 &[
7183 op0.as_operand(),
7184 op1.as_operand(),
7185 op2.as_operand(),
7186 op3.as_operand(),
7187 ],
7188 )
7189 }
7190
7191 fn vmadc_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7192 self.emit_n(
7193 Opcode::VMADCVI as i64,
7194 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7195 )
7196 }
7197
7198 fn vmadc_vim(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7199 self.emit_n(
7200 Opcode::VMADCVIM as i64,
7201 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7202 )
7203 }
7204
7205 fn vmadc_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7206 self.emit_n(
7207 Opcode::VMADCVV as i64,
7208 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7209 )
7210 }
7211
7212 fn vmadc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7213 self.emit_n(
7214 Opcode::VMADCVVM as i64,
7215 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7216 )
7217 }
7218
7219 fn vmadc_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7220 self.emit_n(
7221 Opcode::VMADCVX as i64,
7222 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7223 )
7224 }
7225
7226 fn vmadc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7227 self.emit_n(
7228 Opcode::VMADCVXM as i64,
7229 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7230 )
7231 }
7232
7233 fn vmadd_vv(
7234 &mut self,
7235 op0: impl OperandCast,
7236 op1: impl OperandCast,
7237 op2: impl OperandCast,
7238 op3: impl OperandCast,
7239 ) {
7240 self.emit_n(
7241 Opcode::VMADDVV as i64,
7242 &[
7243 op0.as_operand(),
7244 op1.as_operand(),
7245 op2.as_operand(),
7246 op3.as_operand(),
7247 ],
7248 )
7249 }
7250
7251 fn vmadd_vx(
7252 &mut self,
7253 op0: impl OperandCast,
7254 op1: impl OperandCast,
7255 op2: impl OperandCast,
7256 op3: impl OperandCast,
7257 ) {
7258 self.emit_n(
7259 Opcode::VMADDVX as i64,
7260 &[
7261 op0.as_operand(),
7262 op1.as_operand(),
7263 op2.as_operand(),
7264 op3.as_operand(),
7265 ],
7266 )
7267 }
7268
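    // Mask-register logical ops (vmand.mm, vmnand.mm, ...). Both the renamed
    // vmandn/vmorn mnemonics and the legacy vmandnot/vmornot spellings are exposed.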
7269 fn vmand_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7270 self.emit_n(
7271 Opcode::VMANDMM as i64,
7272 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7273 )
7274 }
7275
7276 fn vmandn_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7277 self.emit_n(
7278 Opcode::VMANDNMM as i64,
7279 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7280 )
7281 }
7282
7283 fn vmandnot_mm(
7284 &mut self,
7285 op0: impl OperandCast,
7286 op1: impl OperandCast,
7287 op2: impl OperandCast,
7288 op3: impl OperandCast,
7289 ) {
7290 self.emit_n(
7291 Opcode::VMANDNOTMM as i64,
7292 &[
7293 op0.as_operand(),
7294 op1.as_operand(),
7295 op2.as_operand(),
7296 op3.as_operand(),
7297 ],
7298 )
7299 }
7300
7301 fn vmax_vv(
7302 &mut self,
7303 op0: impl OperandCast,
7304 op1: impl OperandCast,
7305 op2: impl OperandCast,
7306 op3: impl OperandCast,
7307 ) {
7308 self.emit_n(
7309 Opcode::VMAXVV as i64,
7310 &[
7311 op0.as_operand(),
7312 op1.as_operand(),
7313 op2.as_operand(),
7314 op3.as_operand(),
7315 ],
7316 )
7317 }
7318
7319 fn vmax_vx(
7320 &mut self,
7321 op0: impl OperandCast,
7322 op1: impl OperandCast,
7323 op2: impl OperandCast,
7324 op3: impl OperandCast,
7325 ) {
7326 self.emit_n(
7327 Opcode::VMAXVX as i64,
7328 &[
7329 op0.as_operand(),
7330 op1.as_operand(),
7331 op2.as_operand(),
7332 op3.as_operand(),
7333 ],
7334 )
7335 }
7336
7337 fn vmaxu_vv(
7338 &mut self,
7339 op0: impl OperandCast,
7340 op1: impl OperandCast,
7341 op2: impl OperandCast,
7342 op3: impl OperandCast,
7343 ) {
7344 self.emit_n(
7345 Opcode::VMAXUVV as i64,
7346 &[
7347 op0.as_operand(),
7348 op1.as_operand(),
7349 op2.as_operand(),
7350 op3.as_operand(),
7351 ],
7352 )
7353 }
7354
7355 fn vmaxu_vx(
7356 &mut self,
7357 op0: impl OperandCast,
7358 op1: impl OperandCast,
7359 op2: impl OperandCast,
7360 op3: impl OperandCast,
7361 ) {
7362 self.emit_n(
7363 Opcode::VMAXUVX as i64,
7364 &[
7365 op0.as_operand(),
7366 op1.as_operand(),
7367 op2.as_operand(),
7368 op3.as_operand(),
7369 ],
7370 )
7371 }
7372
7373 fn vmerge_vim(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7374 self.emit_n(
7375 Opcode::VMERGEVIM as i64,
7376 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7377 )
7378 }
7379
7380 fn vmerge_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7381 self.emit_n(
7382 Opcode::VMERGEVVM as i64,
7383 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7384 )
7385 }
7386
7387 fn vmerge_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7388 self.emit_n(
7389 Opcode::VMERGEVXM as i64,
7390 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7391 )
7392 }
7393
7394 fn vmfeq_vf(
7395 &mut self,
7396 op0: impl OperandCast,
7397 op1: impl OperandCast,
7398 op2: impl OperandCast,
7399 op3: impl OperandCast,
7400 ) {
7401 self.emit_n(
7402 Opcode::VMFEQVF as i64,
7403 &[
7404 op0.as_operand(),
7405 op1.as_operand(),
7406 op2.as_operand(),
7407 op3.as_operand(),
7408 ],
7409 )
7410 }
7411
7412 fn vmfeq_vv(
7413 &mut self,
7414 op0: impl OperandCast,
7415 op1: impl OperandCast,
7416 op2: impl OperandCast,
7417 op3: impl OperandCast,
7418 ) {
7419 self.emit_n(
7420 Opcode::VMFEQVV as i64,
7421 &[
7422 op0.as_operand(),
7423 op1.as_operand(),
7424 op2.as_operand(),
7425 op3.as_operand(),
7426 ],
7427 )
7428 }
7429
7430 fn vmfge_vf(
7431 &mut self,
7432 op0: impl OperandCast,
7433 op1: impl OperandCast,
7434 op2: impl OperandCast,
7435 op3: impl OperandCast,
7436 ) {
7437 self.emit_n(
7438 Opcode::VMFGEVF as i64,
7439 &[
7440 op0.as_operand(),
7441 op1.as_operand(),
7442 op2.as_operand(),
7443 op3.as_operand(),
7444 ],
7445 )
7446 }
7447
7448 fn vmfgt_vf(
7449 &mut self,
7450 op0: impl OperandCast,
7451 op1: impl OperandCast,
7452 op2: impl OperandCast,
7453 op3: impl OperandCast,
7454 ) {
7455 self.emit_n(
7456 Opcode::VMFGTVF as i64,
7457 &[
7458 op0.as_operand(),
7459 op1.as_operand(),
7460 op2.as_operand(),
7461 op3.as_operand(),
7462 ],
7463 )
7464 }
7465
7466 fn vmfle_vf(
7467 &mut self,
7468 op0: impl OperandCast,
7469 op1: impl OperandCast,
7470 op2: impl OperandCast,
7471 op3: impl OperandCast,
7472 ) {
7473 self.emit_n(
7474 Opcode::VMFLEVF as i64,
7475 &[
7476 op0.as_operand(),
7477 op1.as_operand(),
7478 op2.as_operand(),
7479 op3.as_operand(),
7480 ],
7481 )
7482 }
7483
7484 fn vmfle_vv(
7485 &mut self,
7486 op0: impl OperandCast,
7487 op1: impl OperandCast,
7488 op2: impl OperandCast,
7489 op3: impl OperandCast,
7490 ) {
7491 self.emit_n(
7492 Opcode::VMFLEVV as i64,
7493 &[
7494 op0.as_operand(),
7495 op1.as_operand(),
7496 op2.as_operand(),
7497 op3.as_operand(),
7498 ],
7499 )
7500 }
7501
7502 fn vmflt_vf(
7503 &mut self,
7504 op0: impl OperandCast,
7505 op1: impl OperandCast,
7506 op2: impl OperandCast,
7507 op3: impl OperandCast,
7508 ) {
7509 self.emit_n(
7510 Opcode::VMFLTVF as i64,
7511 &[
7512 op0.as_operand(),
7513 op1.as_operand(),
7514 op2.as_operand(),
7515 op3.as_operand(),
7516 ],
7517 )
7518 }
7519
7520 fn vmflt_vv(
7521 &mut self,
7522 op0: impl OperandCast,
7523 op1: impl OperandCast,
7524 op2: impl OperandCast,
7525 op3: impl OperandCast,
7526 ) {
7527 self.emit_n(
7528 Opcode::VMFLTVV as i64,
7529 &[
7530 op0.as_operand(),
7531 op1.as_operand(),
7532 op2.as_operand(),
7533 op3.as_operand(),
7534 ],
7535 )
7536 }
7537
7538 fn vmfne_vf(
7539 &mut self,
7540 op0: impl OperandCast,
7541 op1: impl OperandCast,
7542 op2: impl OperandCast,
7543 op3: impl OperandCast,
7544 ) {
7545 self.emit_n(
7546 Opcode::VMFNEVF as i64,
7547 &[
7548 op0.as_operand(),
7549 op1.as_operand(),
7550 op2.as_operand(),
7551 op3.as_operand(),
7552 ],
7553 )
7554 }
7555
7556 fn vmfne_vv(
7557 &mut self,
7558 op0: impl OperandCast,
7559 op1: impl OperandCast,
7560 op2: impl OperandCast,
7561 op3: impl OperandCast,
7562 ) {
7563 self.emit_n(
7564 Opcode::VMFNEVV as i64,
7565 &[
7566 op0.as_operand(),
7567 op1.as_operand(),
7568 op2.as_operand(),
7569 op3.as_operand(),
7570 ],
7571 )
7572 }
7573
7574 fn vmin_vv(
7575 &mut self,
7576 op0: impl OperandCast,
7577 op1: impl OperandCast,
7578 op2: impl OperandCast,
7579 op3: impl OperandCast,
7580 ) {
7581 self.emit_n(
7582 Opcode::VMINVV as i64,
7583 &[
7584 op0.as_operand(),
7585 op1.as_operand(),
7586 op2.as_operand(),
7587 op3.as_operand(),
7588 ],
7589 )
7590 }
7591
7592 fn vmin_vx(
7593 &mut self,
7594 op0: impl OperandCast,
7595 op1: impl OperandCast,
7596 op2: impl OperandCast,
7597 op3: impl OperandCast,
7598 ) {
7599 self.emit_n(
7600 Opcode::VMINVX as i64,
7601 &[
7602 op0.as_operand(),
7603 op1.as_operand(),
7604 op2.as_operand(),
7605 op3.as_operand(),
7606 ],
7607 )
7608 }
7609
7610 fn vminu_vv(
7611 &mut self,
7612 op0: impl OperandCast,
7613 op1: impl OperandCast,
7614 op2: impl OperandCast,
7615 op3: impl OperandCast,
7616 ) {
7617 self.emit_n(
7618 Opcode::VMINUVV as i64,
7619 &[
7620 op0.as_operand(),
7621 op1.as_operand(),
7622 op2.as_operand(),
7623 op3.as_operand(),
7624 ],
7625 )
7626 }
7627
7628 fn vminu_vx(
7629 &mut self,
7630 op0: impl OperandCast,
7631 op1: impl OperandCast,
7632 op2: impl OperandCast,
7633 op3: impl OperandCast,
7634 ) {
7635 self.emit_n(
7636 Opcode::VMINUVX as i64,
7637 &[
7638 op0.as_operand(),
7639 op1.as_operand(),
7640 op2.as_operand(),
7641 op3.as_operand(),
7642 ],
7643 )
7644 }
7645
7646 fn vmnand_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7647 self.emit_n(
7648 Opcode::VMNANDMM as i64,
7649 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7650 )
7651 }
7652
7653 fn vmnor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7654 self.emit_n(
7655 Opcode::VMNORMM as i64,
7656 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7657 )
7658 }
7659
7660 fn vmor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7661 self.emit_n(
7662 Opcode::VMORMM as i64,
7663 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7664 )
7665 }
7666
7667 fn vmorn_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7668 self.emit_n(
7669 Opcode::VMORNMM as i64,
7670 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7671 )
7672 }
7673
7674 fn vmornot_mm(
7675 &mut self,
7676 op0: impl OperandCast,
7677 op1: impl OperandCast,
7678 op2: impl OperandCast,
7679 op3: impl OperandCast,
7680 ) {
7681 self.emit_n(
7682 Opcode::VMORNOTMM as i64,
7683 &[
7684 op0.as_operand(),
7685 op1.as_operand(),
7686 op2.as_operand(),
7687 op3.as_operand(),
7688 ],
7689 )
7690 }
7691
7692 fn vmsbc_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7693 self.emit_n(
7694 Opcode::VMSBCVV as i64,
7695 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7696 )
7697 }
7698
7699 fn vmsbc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7700 self.emit_n(
7701 Opcode::VMSBCVVM as i64,
7702 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7703 )
7704 }
7705
7706 fn vmsbc_vx(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7707 self.emit_n(
7708 Opcode::VMSBCVX as i64,
7709 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7710 )
7711 }
7712
7713 fn vmsbc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7714 self.emit_n(
7715 Opcode::VMSBCVXM as i64,
7716 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7717 )
7718 }
7719
7720 fn vmsbf_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7721 self.emit_n(
7722 Opcode::VMSBFM as i64,
7723 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7724 )
7725 }
7726
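    // Integer compares (vmseq, vmsne, vmslt[u], vmsle[u], vmsgt[u]); each writes
    // its per-element result into a mask register.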
7727 fn vmseq_vi(
7728 &mut self,
7729 op0: impl OperandCast,
7730 op1: impl OperandCast,
7731 op2: impl OperandCast,
7732 op3: impl OperandCast,
7733 ) {
7734 self.emit_n(
7735 Opcode::VMSEQVI as i64,
7736 &[
7737 op0.as_operand(),
7738 op1.as_operand(),
7739 op2.as_operand(),
7740 op3.as_operand(),
7741 ],
7742 )
7743 }
7744
7745 fn vmseq_vv(
7746 &mut self,
7747 op0: impl OperandCast,
7748 op1: impl OperandCast,
7749 op2: impl OperandCast,
7750 op3: impl OperandCast,
7751 ) {
7752 self.emit_n(
7753 Opcode::VMSEQVV as i64,
7754 &[
7755 op0.as_operand(),
7756 op1.as_operand(),
7757 op2.as_operand(),
7758 op3.as_operand(),
7759 ],
7760 )
7761 }
7762
7763 fn vmseq_vx(
7764 &mut self,
7765 op0: impl OperandCast,
7766 op1: impl OperandCast,
7767 op2: impl OperandCast,
7768 op3: impl OperandCast,
7769 ) {
7770 self.emit_n(
7771 Opcode::VMSEQVX as i64,
7772 &[
7773 op0.as_operand(),
7774 op1.as_operand(),
7775 op2.as_operand(),
7776 op3.as_operand(),
7777 ],
7778 )
7779 }
7780
7781 fn vmsgt_vi(
7782 &mut self,
7783 op0: impl OperandCast,
7784 op1: impl OperandCast,
7785 op2: impl OperandCast,
7786 op3: impl OperandCast,
7787 ) {
7788 self.emit_n(
7789 Opcode::VMSGTVI as i64,
7790 &[
7791 op0.as_operand(),
7792 op1.as_operand(),
7793 op2.as_operand(),
7794 op3.as_operand(),
7795 ],
7796 )
7797 }
7798
7799 fn vmsgt_vx(
7800 &mut self,
7801 op0: impl OperandCast,
7802 op1: impl OperandCast,
7803 op2: impl OperandCast,
7804 op3: impl OperandCast,
7805 ) {
7806 self.emit_n(
7807 Opcode::VMSGTVX as i64,
7808 &[
7809 op0.as_operand(),
7810 op1.as_operand(),
7811 op2.as_operand(),
7812 op3.as_operand(),
7813 ],
7814 )
7815 }
7816
7817 fn vmsgtu_vi(
7818 &mut self,
7819 op0: impl OperandCast,
7820 op1: impl OperandCast,
7821 op2: impl OperandCast,
7822 op3: impl OperandCast,
7823 ) {
7824 self.emit_n(
7825 Opcode::VMSGTUVI as i64,
7826 &[
7827 op0.as_operand(),
7828 op1.as_operand(),
7829 op2.as_operand(),
7830 op3.as_operand(),
7831 ],
7832 )
7833 }
7834
7835 fn vmsgtu_vx(
7836 &mut self,
7837 op0: impl OperandCast,
7838 op1: impl OperandCast,
7839 op2: impl OperandCast,
7840 op3: impl OperandCast,
7841 ) {
7842 self.emit_n(
7843 Opcode::VMSGTUVX as i64,
7844 &[
7845 op0.as_operand(),
7846 op1.as_operand(),
7847 op2.as_operand(),
7848 op3.as_operand(),
7849 ],
7850 )
7851 }
7852
7853 fn vmsif_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
7854 self.emit_n(
7855 Opcode::VMSIFM as i64,
7856 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
7857 )
7858 }
7859
7860 fn vmsle_vi(
7861 &mut self,
7862 op0: impl OperandCast,
7863 op1: impl OperandCast,
7864 op2: impl OperandCast,
7865 op3: impl OperandCast,
7866 ) {
7867 self.emit_n(
7868 Opcode::VMSLEVI as i64,
7869 &[
7870 op0.as_operand(),
7871 op1.as_operand(),
7872 op2.as_operand(),
7873 op3.as_operand(),
7874 ],
7875 )
7876 }
7877
7878 fn vmsle_vv(
7879 &mut self,
7880 op0: impl OperandCast,
7881 op1: impl OperandCast,
7882 op2: impl OperandCast,
7883 op3: impl OperandCast,
7884 ) {
7885 self.emit_n(
7886 Opcode::VMSLEVV as i64,
7887 &[
7888 op0.as_operand(),
7889 op1.as_operand(),
7890 op2.as_operand(),
7891 op3.as_operand(),
7892 ],
7893 )
7894 }
7895
7896 fn vmsle_vx(
7897 &mut self,
7898 op0: impl OperandCast,
7899 op1: impl OperandCast,
7900 op2: impl OperandCast,
7901 op3: impl OperandCast,
7902 ) {
7903 self.emit_n(
7904 Opcode::VMSLEVX as i64,
7905 &[
7906 op0.as_operand(),
7907 op1.as_operand(),
7908 op2.as_operand(),
7909 op3.as_operand(),
7910 ],
7911 )
7912 }
7913
7914 fn vmsleu_vi(
7915 &mut self,
7916 op0: impl OperandCast,
7917 op1: impl OperandCast,
7918 op2: impl OperandCast,
7919 op3: impl OperandCast,
7920 ) {
7921 self.emit_n(
7922 Opcode::VMSLEUVI as i64,
7923 &[
7924 op0.as_operand(),
7925 op1.as_operand(),
7926 op2.as_operand(),
7927 op3.as_operand(),
7928 ],
7929 )
7930 }
7931
7932 fn vmsleu_vv(
7933 &mut self,
7934 op0: impl OperandCast,
7935 op1: impl OperandCast,
7936 op2: impl OperandCast,
7937 op3: impl OperandCast,
7938 ) {
7939 self.emit_n(
7940 Opcode::VMSLEUVV as i64,
7941 &[
7942 op0.as_operand(),
7943 op1.as_operand(),
7944 op2.as_operand(),
7945 op3.as_operand(),
7946 ],
7947 )
7948 }
7949
7950 fn vmsleu_vx(
7951 &mut self,
7952 op0: impl OperandCast,
7953 op1: impl OperandCast,
7954 op2: impl OperandCast,
7955 op3: impl OperandCast,
7956 ) {
7957 self.emit_n(
7958 Opcode::VMSLEUVX as i64,
7959 &[
7960 op0.as_operand(),
7961 op1.as_operand(),
7962 op2.as_operand(),
7963 op3.as_operand(),
7964 ],
7965 )
7966 }
7967
7968 fn vmslt_vv(
7969 &mut self,
7970 op0: impl OperandCast,
7971 op1: impl OperandCast,
7972 op2: impl OperandCast,
7973 op3: impl OperandCast,
7974 ) {
7975 self.emit_n(
7976 Opcode::VMSLTVV as i64,
7977 &[
7978 op0.as_operand(),
7979 op1.as_operand(),
7980 op2.as_operand(),
7981 op3.as_operand(),
7982 ],
7983 )
7984 }
7985
7986 fn vmslt_vx(
7987 &mut self,
7988 op0: impl OperandCast,
7989 op1: impl OperandCast,
7990 op2: impl OperandCast,
7991 op3: impl OperandCast,
7992 ) {
7993 self.emit_n(
7994 Opcode::VMSLTVX as i64,
7995 &[
7996 op0.as_operand(),
7997 op1.as_operand(),
7998 op2.as_operand(),
7999 op3.as_operand(),
8000 ],
8001 )
8002 }
8003
8004 fn vmsltu_vv(
8005 &mut self,
8006 op0: impl OperandCast,
8007 op1: impl OperandCast,
8008 op2: impl OperandCast,
8009 op3: impl OperandCast,
8010 ) {
8011 self.emit_n(
8012 Opcode::VMSLTUVV as i64,
8013 &[
8014 op0.as_operand(),
8015 op1.as_operand(),
8016 op2.as_operand(),
8017 op3.as_operand(),
8018 ],
8019 )
8020 }
8021
8022 fn vmsltu_vx(
8023 &mut self,
8024 op0: impl OperandCast,
8025 op1: impl OperandCast,
8026 op2: impl OperandCast,
8027 op3: impl OperandCast,
8028 ) {
8029 self.emit_n(
8030 Opcode::VMSLTUVX as i64,
8031 &[
8032 op0.as_operand(),
8033 op1.as_operand(),
8034 op2.as_operand(),
8035 op3.as_operand(),
8036 ],
8037 )
8038 }
8039
8040 fn vmsne_vi(
8041 &mut self,
8042 op0: impl OperandCast,
8043 op1: impl OperandCast,
8044 op2: impl OperandCast,
8045 op3: impl OperandCast,
8046 ) {
8047 self.emit_n(
8048 Opcode::VMSNEVI as i64,
8049 &[
8050 op0.as_operand(),
8051 op1.as_operand(),
8052 op2.as_operand(),
8053 op3.as_operand(),
8054 ],
8055 )
8056 }
8057
8058 fn vmsne_vv(
8059 &mut self,
8060 op0: impl OperandCast,
8061 op1: impl OperandCast,
8062 op2: impl OperandCast,
8063 op3: impl OperandCast,
8064 ) {
8065 self.emit_n(
8066 Opcode::VMSNEVV as i64,
8067 &[
8068 op0.as_operand(),
8069 op1.as_operand(),
8070 op2.as_operand(),
8071 op3.as_operand(),
8072 ],
8073 )
8074 }
8075
8076 fn vmsne_vx(
8077 &mut self,
8078 op0: impl OperandCast,
8079 op1: impl OperandCast,
8080 op2: impl OperandCast,
8081 op3: impl OperandCast,
8082 ) {
8083 self.emit_n(
8084 Opcode::VMSNEVX as i64,
8085 &[
8086 op0.as_operand(),
8087 op1.as_operand(),
8088 op2.as_operand(),
8089 op3.as_operand(),
8090 ],
8091 )
8092 }
8093
8094 fn vmsof_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
8095 self.emit_n(
8096 Opcode::VMSOFM as i64,
8097 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
8098 )
8099 }
8100
8101 fn vmul_vv(
8102 &mut self,
8103 op0: impl OperandCast,
8104 op1: impl OperandCast,
8105 op2: impl OperandCast,
8106 op3: impl OperandCast,
8107 ) {
8108 self.emit_n(
8109 Opcode::VMULVV as i64,
8110 &[
8111 op0.as_operand(),
8112 op1.as_operand(),
8113 op2.as_operand(),
8114 op3.as_operand(),
8115 ],
8116 )
8117 }
8118
8119 fn vmul_vx(
8120 &mut self,
8121 op0: impl OperandCast,
8122 op1: impl OperandCast,
8123 op2: impl OperandCast,
8124 op3: impl OperandCast,
8125 ) {
8126 self.emit_n(
8127 Opcode::VMULVX as i64,
8128 &[
8129 op0.as_operand(),
8130 op1.as_operand(),
8131 op2.as_operand(),
8132 op3.as_operand(),
8133 ],
8134 )
8135 }
8136
8137 fn vmulh_vv(
8138 &mut self,
8139 op0: impl OperandCast,
8140 op1: impl OperandCast,
8141 op2: impl OperandCast,
8142 op3: impl OperandCast,
8143 ) {
8144 self.emit_n(
8145 Opcode::VMULHVV as i64,
8146 &[
8147 op0.as_operand(),
8148 op1.as_operand(),
8149 op2.as_operand(),
8150 op3.as_operand(),
8151 ],
8152 )
8153 }
8154
8155 fn vmulh_vx(
8156 &mut self,
8157 op0: impl OperandCast,
8158 op1: impl OperandCast,
8159 op2: impl OperandCast,
8160 op3: impl OperandCast,
8161 ) {
8162 self.emit_n(
8163 Opcode::VMULHVX as i64,
8164 &[
8165 op0.as_operand(),
8166 op1.as_operand(),
8167 op2.as_operand(),
8168 op3.as_operand(),
8169 ],
8170 )
8171 }
8172
8173 fn vmulhsu_vv(
8174 &mut self,
8175 op0: impl OperandCast,
8176 op1: impl OperandCast,
8177 op2: impl OperandCast,
8178 op3: impl OperandCast,
8179 ) {
8180 self.emit_n(
8181 Opcode::VMULHSUVV as i64,
8182 &[
8183 op0.as_operand(),
8184 op1.as_operand(),
8185 op2.as_operand(),
8186 op3.as_operand(),
8187 ],
8188 )
8189 }
8190
8191 fn vmulhsu_vx(
8192 &mut self,
8193 op0: impl OperandCast,
8194 op1: impl OperandCast,
8195 op2: impl OperandCast,
8196 op3: impl OperandCast,
8197 ) {
8198 self.emit_n(
8199 Opcode::VMULHSUVX as i64,
8200 &[
8201 op0.as_operand(),
8202 op1.as_operand(),
8203 op2.as_operand(),
8204 op3.as_operand(),
8205 ],
8206 )
8207 }
8208
8209 fn vmulhu_vv(
8210 &mut self,
8211 op0: impl OperandCast,
8212 op1: impl OperandCast,
8213 op2: impl OperandCast,
8214 op3: impl OperandCast,
8215 ) {
8216 self.emit_n(
8217 Opcode::VMULHUVV as i64,
8218 &[
8219 op0.as_operand(),
8220 op1.as_operand(),
8221 op2.as_operand(),
8222 op3.as_operand(),
8223 ],
8224 )
8225 }
8226
8227 fn vmulhu_vx(
8228 &mut self,
8229 op0: impl OperandCast,
8230 op1: impl OperandCast,
8231 op2: impl OperandCast,
8232 op3: impl OperandCast,
8233 ) {
8234 self.emit_n(
8235 Opcode::VMULHUVX as i64,
8236 &[
8237 op0.as_operand(),
8238 op1.as_operand(),
8239 op2.as_operand(),
8240 op3.as_operand(),
8241 ],
8242 )
8243 }
8244
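    // Whole-register moves (vmv<n>r.v) plus the scalar/vector moves
    // (vmv.s.x, vmv.x.s, vmv.v.{i,v,x}).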
8245 fn vmv1r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8246 self.emit_n(Opcode::VMV1RV as i64, &[op0.as_operand(), op1.as_operand()])
8247 }
8248
8249 fn vmv2r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8250 self.emit_n(Opcode::VMV2RV as i64, &[op0.as_operand(), op1.as_operand()])
8251 }
8252
8253 fn vmv4r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8254 self.emit_n(Opcode::VMV4RV as i64, &[op0.as_operand(), op1.as_operand()])
8255 }
8256
8257 fn vmv8r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8258 self.emit_n(Opcode::VMV8RV as i64, &[op0.as_operand(), op1.as_operand()])
8259 }
8260
8261 fn vmv_s_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8262 self.emit_n(Opcode::VMVSX as i64, &[op0.as_operand(), op1.as_operand()])
8263 }
8264
8265 fn vmv_v_i(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8266 self.emit_n(Opcode::VMVVI as i64, &[op0.as_operand(), op1.as_operand()])
8267 }
8268
8269 fn vmv_v_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8270 self.emit_n(Opcode::VMVVV as i64, &[op0.as_operand(), op1.as_operand()])
8271 }
8272
8273 fn vmv_v_x(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8274 self.emit_n(Opcode::VMVVX as i64, &[op0.as_operand(), op1.as_operand()])
8275 }
8276
8277 fn vmv_x_s(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
8278 self.emit_n(Opcode::VMVXS as i64, &[op0.as_operand(), op1.as_operand()])
8279 }
8280
8281 fn vmxnor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
8282 self.emit_n(
8283 Opcode::VMXNORMM as i64,
8284 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
8285 )
8286 }
8287
8288 fn vmxor_mm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
8289 self.emit_n(
8290 Opcode::VMXORMM as i64,
8291 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
8292 )
8293 }
8294
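    // Narrowing fixed-point clips (vnclip[u].w*): 2*SEW-wide sources are
    // right-shifted, rounded, and saturated down to SEW.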
8295 fn vnclip_wi(
8296 &mut self,
8297 op0: impl OperandCast,
8298 op1: impl OperandCast,
8299 op2: impl OperandCast,
8300 op3: impl OperandCast,
8301 ) {
8302 self.emit_n(
8303 Opcode::VNCLIPWI as i64,
8304 &[
8305 op0.as_operand(),
8306 op1.as_operand(),
8307 op2.as_operand(),
8308 op3.as_operand(),
8309 ],
8310 )
8311 }
8312
8313 fn vnclip_wv(
8314 &mut self,
8315 op0: impl OperandCast,
8316 op1: impl OperandCast,
8317 op2: impl OperandCast,
8318 op3: impl OperandCast,
8319 ) {
8320 self.emit_n(
8321 Opcode::VNCLIPWV as i64,
8322 &[
8323 op0.as_operand(),
8324 op1.as_operand(),
8325 op2.as_operand(),
8326 op3.as_operand(),
8327 ],
8328 )
8329 }
8330
8331 fn vnclip_wx(
8332 &mut self,
8333 op0: impl OperandCast,
8334 op1: impl OperandCast,
8335 op2: impl OperandCast,
8336 op3: impl OperandCast,
8337 ) {
8338 self.emit_n(
8339 Opcode::VNCLIPWX as i64,
8340 &[
8341 op0.as_operand(),
8342 op1.as_operand(),
8343 op2.as_operand(),
8344 op3.as_operand(),
8345 ],
8346 )
8347 }
8348
8349 fn vnclipu_wi(
8350 &mut self,
8351 op0: impl OperandCast,
8352 op1: impl OperandCast,
8353 op2: impl OperandCast,
8354 op3: impl OperandCast,
8355 ) {
8356 self.emit_n(
8357 Opcode::VNCLIPUWI as i64,
8358 &[
8359 op0.as_operand(),
8360 op1.as_operand(),
8361 op2.as_operand(),
8362 op3.as_operand(),
8363 ],
8364 )
8365 }
8366
8367 fn vnclipu_wv(
8368 &mut self,
8369 op0: impl OperandCast,
8370 op1: impl OperandCast,
8371 op2: impl OperandCast,
8372 op3: impl OperandCast,
8373 ) {
8374 self.emit_n(
8375 Opcode::VNCLIPUWV as i64,
8376 &[
8377 op0.as_operand(),
8378 op1.as_operand(),
8379 op2.as_operand(),
8380 op3.as_operand(),
8381 ],
8382 )
8383 }
8384
8385 fn vnclipu_wx(
8386 &mut self,
8387 op0: impl OperandCast,
8388 op1: impl OperandCast,
8389 op2: impl OperandCast,
8390 op3: impl OperandCast,
8391 ) {
8392 self.emit_n(
8393 Opcode::VNCLIPUWX as i64,
8394 &[
8395 op0.as_operand(),
8396 op1.as_operand(),
8397 op2.as_operand(),
8398 op3.as_operand(),
8399 ],
8400 )
8401 }
8402
8403 fn vnmsac_vv(
8404 &mut self,
8405 op0: impl OperandCast,
8406 op1: impl OperandCast,
8407 op2: impl OperandCast,
8408 op3: impl OperandCast,
8409 ) {
8410 self.emit_n(
8411 Opcode::VNMSACVV as i64,
8412 &[
8413 op0.as_operand(),
8414 op1.as_operand(),
8415 op2.as_operand(),
8416 op3.as_operand(),
8417 ],
8418 )
8419 }
8420
8421 fn vnmsac_vx(
8422 &mut self,
8423 op0: impl OperandCast,
8424 op1: impl OperandCast,
8425 op2: impl OperandCast,
8426 op3: impl OperandCast,
8427 ) {
8428 self.emit_n(
8429 Opcode::VNMSACVX as i64,
8430 &[
8431 op0.as_operand(),
8432 op1.as_operand(),
8433 op2.as_operand(),
8434 op3.as_operand(),
8435 ],
8436 )
8437 }
8438
8439 fn vnmsub_vv(
8440 &mut self,
8441 op0: impl OperandCast,
8442 op1: impl OperandCast,
8443 op2: impl OperandCast,
8444 op3: impl OperandCast,
8445 ) {
8446 self.emit_n(
8447 Opcode::VNMSUBVV as i64,
8448 &[
8449 op0.as_operand(),
8450 op1.as_operand(),
8451 op2.as_operand(),
8452 op3.as_operand(),
8453 ],
8454 )
8455 }
8456
8457 fn vnmsub_vx(
8458 &mut self,
8459 op0: impl OperandCast,
8460 op1: impl OperandCast,
8461 op2: impl OperandCast,
8462 op3: impl OperandCast,
8463 ) {
8464 self.emit_n(
8465 Opcode::VNMSUBVX as i64,
8466 &[
8467 op0.as_operand(),
8468 op1.as_operand(),
8469 op2.as_operand(),
8470 op3.as_operand(),
8471 ],
8472 )
8473 }
8474
8475 fn vnsra_wi(
8476 &mut self,
8477 op0: impl OperandCast,
8478 op1: impl OperandCast,
8479 op2: impl OperandCast,
8480 op3: impl OperandCast,
8481 ) {
8482 self.emit_n(
8483 Opcode::VNSRAWI as i64,
8484 &[
8485 op0.as_operand(),
8486 op1.as_operand(),
8487 op2.as_operand(),
8488 op3.as_operand(),
8489 ],
8490 )
8491 }
8492
8493 fn vnsra_wv(
8494 &mut self,
8495 op0: impl OperandCast,
8496 op1: impl OperandCast,
8497 op2: impl OperandCast,
8498 op3: impl OperandCast,
8499 ) {
8500 self.emit_n(
8501 Opcode::VNSRAWV as i64,
8502 &[
8503 op0.as_operand(),
8504 op1.as_operand(),
8505 op2.as_operand(),
8506 op3.as_operand(),
8507 ],
8508 )
8509 }
8510
8511 fn vnsra_wx(
8512 &mut self,
8513 op0: impl OperandCast,
8514 op1: impl OperandCast,
8515 op2: impl OperandCast,
8516 op3: impl OperandCast,
8517 ) {
8518 self.emit_n(
8519 Opcode::VNSRAWX as i64,
8520 &[
8521 op0.as_operand(),
8522 op1.as_operand(),
8523 op2.as_operand(),
8524 op3.as_operand(),
8525 ],
8526 )
8527 }
8528
8529 fn vnsrl_wi(
8530 &mut self,
8531 op0: impl OperandCast,
8532 op1: impl OperandCast,
8533 op2: impl OperandCast,
8534 op3: impl OperandCast,
8535 ) {
8536 self.emit_n(
8537 Opcode::VNSRLWI as i64,
8538 &[
8539 op0.as_operand(),
8540 op1.as_operand(),
8541 op2.as_operand(),
8542 op3.as_operand(),
8543 ],
8544 )
8545 }
8546
8547 fn vnsrl_wv(
8548 &mut self,
8549 op0: impl OperandCast,
8550 op1: impl OperandCast,
8551 op2: impl OperandCast,
8552 op3: impl OperandCast,
8553 ) {
8554 self.emit_n(
8555 Opcode::VNSRLWV as i64,
8556 &[
8557 op0.as_operand(),
8558 op1.as_operand(),
8559 op2.as_operand(),
8560 op3.as_operand(),
8561 ],
8562 )
8563 }
8564
8565 fn vnsrl_wx(
8566 &mut self,
8567 op0: impl OperandCast,
8568 op1: impl OperandCast,
8569 op2: impl OperandCast,
8570 op3: impl OperandCast,
8571 ) {
8572 self.emit_n(
8573 Opcode::VNSRLWX as i64,
8574 &[
8575 op0.as_operand(),
8576 op1.as_operand(),
8577 op2.as_operand(),
8578 op3.as_operand(),
8579 ],
8580 )
8581 }
8582
8583 fn vor_vi(
8584 &mut self,
8585 op0: impl OperandCast,
8586 op1: impl OperandCast,
8587 op2: impl OperandCast,
8588 op3: impl OperandCast,
8589 ) {
8590 self.emit_n(
8591 Opcode::VORVI as i64,
8592 &[
8593 op0.as_operand(),
8594 op1.as_operand(),
8595 op2.as_operand(),
8596 op3.as_operand(),
8597 ],
8598 )
8599 }
8600
8601 fn vor_vv(
8602 &mut self,
8603 op0: impl OperandCast,
8604 op1: impl OperandCast,
8605 op2: impl OperandCast,
8606 op3: impl OperandCast,
8607 ) {
8608 self.emit_n(
8609 Opcode::VORVV as i64,
8610 &[
8611 op0.as_operand(),
8612 op1.as_operand(),
8613 op2.as_operand(),
8614 op3.as_operand(),
8615 ],
8616 )
8617 }
8618
8619 fn vor_vx(
8620 &mut self,
8621 op0: impl OperandCast,
8622 op1: impl OperandCast,
8623 op2: impl OperandCast,
8624 op3: impl OperandCast,
8625 ) {
8626 self.emit_n(
8627 Opcode::VORVX as i64,
8628 &[
8629 op0.as_operand(),
8630 op1.as_operand(),
8631 op2.as_operand(),
8632 op3.as_operand(),
8633 ],
8634 )
8635 }
8636
8637 fn vpopc_m(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
8638 self.emit_n(
8639 Opcode::VPOPCM as i64,
8640 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
8641 )
8642 }
8643
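    // Single-width integer reductions (vred*.vs): the vector is reduced into
    // element 0 of the destination, seeded from element 0 of the scalar source.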
8644 fn vredand_vs(
8645 &mut self,
8646 op0: impl OperandCast,
8647 op1: impl OperandCast,
8648 op2: impl OperandCast,
8649 op3: impl OperandCast,
8650 ) {
8651 self.emit_n(
8652 Opcode::VREDANDVS as i64,
8653 &[
8654 op0.as_operand(),
8655 op1.as_operand(),
8656 op2.as_operand(),
8657 op3.as_operand(),
8658 ],
8659 )
8660 }
8661
8662 fn vredmax_vs(
8663 &mut self,
8664 op0: impl OperandCast,
8665 op1: impl OperandCast,
8666 op2: impl OperandCast,
8667 op3: impl OperandCast,
8668 ) {
8669 self.emit_n(
8670 Opcode::VREDMAXVS as i64,
8671 &[
8672 op0.as_operand(),
8673 op1.as_operand(),
8674 op2.as_operand(),
8675 op3.as_operand(),
8676 ],
8677 )
8678 }
8679
8680 fn vredmaxu_vs(
8681 &mut self,
8682 op0: impl OperandCast,
8683 op1: impl OperandCast,
8684 op2: impl OperandCast,
8685 op3: impl OperandCast,
8686 ) {
8687 self.emit_n(
8688 Opcode::VREDMAXUVS as i64,
8689 &[
8690 op0.as_operand(),
8691 op1.as_operand(),
8692 op2.as_operand(),
8693 op3.as_operand(),
8694 ],
8695 )
8696 }
8697
8698 fn vredmin_vs(
8699 &mut self,
8700 op0: impl OperandCast,
8701 op1: impl OperandCast,
8702 op2: impl OperandCast,
8703 op3: impl OperandCast,
8704 ) {
8705 self.emit_n(
8706 Opcode::VREDMINVS as i64,
8707 &[
8708 op0.as_operand(),
8709 op1.as_operand(),
8710 op2.as_operand(),
8711 op3.as_operand(),
8712 ],
8713 )
8714 }
8715
8716 fn vredminu_vs(
8717 &mut self,
8718 op0: impl OperandCast,
8719 op1: impl OperandCast,
8720 op2: impl OperandCast,
8721 op3: impl OperandCast,
8722 ) {
8723 self.emit_n(
8724 Opcode::VREDMINUVS as i64,
8725 &[
8726 op0.as_operand(),
8727 op1.as_operand(),
8728 op2.as_operand(),
8729 op3.as_operand(),
8730 ],
8731 )
8732 }
8733
8734 fn vredor_vs(
8735 &mut self,
8736 op0: impl OperandCast,
8737 op1: impl OperandCast,
8738 op2: impl OperandCast,
8739 op3: impl OperandCast,
8740 ) {
8741 self.emit_n(
8742 Opcode::VREDORVS as i64,
8743 &[
8744 op0.as_operand(),
8745 op1.as_operand(),
8746 op2.as_operand(),
8747 op3.as_operand(),
8748 ],
8749 )
8750 }
8751
8752 fn vredsum_vs(
8753 &mut self,
8754 op0: impl OperandCast,
8755 op1: impl OperandCast,
8756 op2: impl OperandCast,
8757 op3: impl OperandCast,
8758 ) {
8759 self.emit_n(
8760 Opcode::VREDSUMVS as i64,
8761 &[
8762 op0.as_operand(),
8763 op1.as_operand(),
8764 op2.as_operand(),
8765 op3.as_operand(),
8766 ],
8767 )
8768 }
8769
8770 fn vredxor_vs(
8771 &mut self,
8772 op0: impl OperandCast,
8773 op1: impl OperandCast,
8774 op2: impl OperandCast,
8775 op3: impl OperandCast,
8776 ) {
8777 self.emit_n(
8778 Opcode::VREDXORVS as i64,
8779 &[
8780 op0.as_operand(),
8781 op1.as_operand(),
8782 op2.as_operand(),
8783 op3.as_operand(),
8784 ],
8785 )
8786 }
8787
8788 fn vrem_vv(
8789 &mut self,
8790 op0: impl OperandCast,
8791 op1: impl OperandCast,
8792 op2: impl OperandCast,
8793 op3: impl OperandCast,
8794 ) {
8795 self.emit_n(
8796 Opcode::VREMVV as i64,
8797 &[
8798 op0.as_operand(),
8799 op1.as_operand(),
8800 op2.as_operand(),
8801 op3.as_operand(),
8802 ],
8803 )
8804 }
8805
8806 fn vrem_vx(
8807 &mut self,
8808 op0: impl OperandCast,
8809 op1: impl OperandCast,
8810 op2: impl OperandCast,
8811 op3: impl OperandCast,
8812 ) {
8813 self.emit_n(
8814 Opcode::VREMVX as i64,
8815 &[
8816 op0.as_operand(),
8817 op1.as_operand(),
8818 op2.as_operand(),
8819 op3.as_operand(),
8820 ],
8821 )
8822 }
8823
8824 fn vremu_vv(
8825 &mut self,
8826 op0: impl OperandCast,
8827 op1: impl OperandCast,
8828 op2: impl OperandCast,
8829 op3: impl OperandCast,
8830 ) {
8831 self.emit_n(
8832 Opcode::VREMUVV as i64,
8833 &[
8834 op0.as_operand(),
8835 op1.as_operand(),
8836 op2.as_operand(),
8837 op3.as_operand(),
8838 ],
8839 )
8840 }
8841
8842 fn vremu_vx(
8843 &mut self,
8844 op0: impl OperandCast,
8845 op1: impl OperandCast,
8846 op2: impl OperandCast,
8847 op3: impl OperandCast,
8848 ) {
8849 self.emit_n(
8850 Opcode::VREMUVX as i64,
8851 &[
8852 op0.as_operand(),
8853 op1.as_operand(),
8854 op2.as_operand(),
8855 op3.as_operand(),
8856 ],
8857 )
8858 }
8859
8860 fn vrev8_v(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
8861 self.emit_n(
8862 Opcode::VREV8V as i64,
8863 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
8864 )
8865 }
8866
8867 fn vrgather_vi(
8868 &mut self,
8869 op0: impl OperandCast,
8870 op1: impl OperandCast,
8871 op2: impl OperandCast,
8872 op3: impl OperandCast,
8873 ) {
8874 self.emit_n(
8875 Opcode::VRGATHERVI as i64,
8876 &[
8877 op0.as_operand(),
8878 op1.as_operand(),
8879 op2.as_operand(),
8880 op3.as_operand(),
8881 ],
8882 )
8883 }
8884
8885 fn vrgather_vv(
8886 &mut self,
8887 op0: impl OperandCast,
8888 op1: impl OperandCast,
8889 op2: impl OperandCast,
8890 op3: impl OperandCast,
8891 ) {
8892 self.emit_n(
8893 Opcode::VRGATHERVV as i64,
8894 &[
8895 op0.as_operand(),
8896 op1.as_operand(),
8897 op2.as_operand(),
8898 op3.as_operand(),
8899 ],
8900 )
8901 }
8902
8903 fn vrgather_vx(
8904 &mut self,
8905 op0: impl OperandCast,
8906 op1: impl OperandCast,
8907 op2: impl OperandCast,
8908 op3: impl OperandCast,
8909 ) {
8910 self.emit_n(
8911 Opcode::VRGATHERVX as i64,
8912 &[
8913 op0.as_operand(),
8914 op1.as_operand(),
8915 op2.as_operand(),
8916 op3.as_operand(),
8917 ],
8918 )
8919 }
8920
8921 fn vrgatherei16_vv(
8922 &mut self,
8923 op0: impl OperandCast,
8924 op1: impl OperandCast,
8925 op2: impl OperandCast,
8926 op3: impl OperandCast,
8927 ) {
8928 self.emit_n(
8929 Opcode::VRGATHEREI16VV as i64,
8930 &[
8931 op0.as_operand(),
8932 op1.as_operand(),
8933 op2.as_operand(),
8934 op3.as_operand(),
8935 ],
8936 )
8937 }
8938
8939 fn vrol_vv(
8940 &mut self,
8941 op0: impl OperandCast,
8942 op1: impl OperandCast,
8943 op2: impl OperandCast,
8944 op3: impl OperandCast,
8945 ) {
8946 self.emit_n(
8947 Opcode::VROLVV as i64,
8948 &[
8949 op0.as_operand(),
8950 op1.as_operand(),
8951 op2.as_operand(),
8952 op3.as_operand(),
8953 ],
8954 )
8955 }
8956
8957 fn vrol_vx(
8958 &mut self,
8959 op0: impl OperandCast,
8960 op1: impl OperandCast,
8961 op2: impl OperandCast,
8962 op3: impl OperandCast,
8963 ) {
8964 self.emit_n(
8965 Opcode::VROLVX as i64,
8966 &[
8967 op0.as_operand(),
8968 op1.as_operand(),
8969 op2.as_operand(),
8970 op3.as_operand(),
8971 ],
8972 )
8973 }
8974
8975 fn vror_vi(
8976 &mut self,
8977 op0: impl OperandCast,
8978 op1: impl OperandCast,
8979 op2: impl OperandCast,
8980 op3: impl OperandCast,
8981 ) {
8982 self.emit_n(
8983 Opcode::VRORVI as i64,
8984 &[
8985 op0.as_operand(),
8986 op1.as_operand(),
8987 op2.as_operand(),
8988 op3.as_operand(),
8989 ],
8990 )
8991 }
8992
8993 fn vror_vv(
8994 &mut self,
8995 op0: impl OperandCast,
8996 op1: impl OperandCast,
8997 op2: impl OperandCast,
8998 op3: impl OperandCast,
8999 ) {
9000 self.emit_n(
9001 Opcode::VRORVV as i64,
9002 &[
9003 op0.as_operand(),
9004 op1.as_operand(),
9005 op2.as_operand(),
9006 op3.as_operand(),
9007 ],
9008 )
9009 }
9010
9011 fn vror_vx(
9012 &mut self,
9013 op0: impl OperandCast,
9014 op1: impl OperandCast,
9015 op2: impl OperandCast,
9016 op3: impl OperandCast,
9017 ) {
9018 self.emit_n(
9019 Opcode::VRORVX as i64,
9020 &[
9021 op0.as_operand(),
9022 op1.as_operand(),
9023 op2.as_operand(),
9024 op3.as_operand(),
9025 ],
9026 )
9027 }
9028
9029 fn vrsub_vi(
9030 &mut self,
9031 op0: impl OperandCast,
9032 op1: impl OperandCast,
9033 op2: impl OperandCast,
9034 op3: impl OperandCast,
9035 ) {
9036 self.emit_n(
9037 Opcode::VRSUBVI as i64,
9038 &[
9039 op0.as_operand(),
9040 op1.as_operand(),
9041 op2.as_operand(),
9042 op3.as_operand(),
9043 ],
9044 )
9045 }
9046
9047 fn vrsub_vx(
9048 &mut self,
9049 op0: impl OperandCast,
9050 op1: impl OperandCast,
9051 op2: impl OperandCast,
9052 op3: impl OperandCast,
9053 ) {
9054 self.emit_n(
9055 Opcode::VRSUBVX as i64,
9056 &[
9057 op0.as_operand(),
9058 op1.as_operand(),
9059 op2.as_operand(),
9060 op3.as_operand(),
9061 ],
9062 )
9063 }
9064
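    // Whole-register stores (vs<n>r.v), mirroring the vl<n>r* loads above.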
9065 fn vs1r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9066 self.emit_n(Opcode::VS1RV as i64, &[op0.as_operand(), op1.as_operand()])
9067 }
9068
9069 fn vs2r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9070 self.emit_n(Opcode::VS2RV as i64, &[op0.as_operand(), op1.as_operand()])
9071 }
9072
9073 fn vs4r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9074 self.emit_n(Opcode::VS4RV as i64, &[op0.as_operand(), op1.as_operand()])
9075 }
9076
9077 fn vs8r_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9078 self.emit_n(Opcode::VS8RV as i64, &[op0.as_operand(), op1.as_operand()])
9079 }
9080
9081 fn vsadd_vi(
9082 &mut self,
9083 op0: impl OperandCast,
9084 op1: impl OperandCast,
9085 op2: impl OperandCast,
9086 op3: impl OperandCast,
9087 ) {
9088 self.emit_n(
9089 Opcode::VSADDVI as i64,
9090 &[
9091 op0.as_operand(),
9092 op1.as_operand(),
9093 op2.as_operand(),
9094 op3.as_operand(),
9095 ],
9096 )
9097 }
9098
9099 fn vsadd_vv(
9100 &mut self,
9101 op0: impl OperandCast,
9102 op1: impl OperandCast,
9103 op2: impl OperandCast,
9104 op3: impl OperandCast,
9105 ) {
9106 self.emit_n(
9107 Opcode::VSADDVV as i64,
9108 &[
9109 op0.as_operand(),
9110 op1.as_operand(),
9111 op2.as_operand(),
9112 op3.as_operand(),
9113 ],
9114 )
9115 }
9116
9117 fn vsadd_vx(
9118 &mut self,
9119 op0: impl OperandCast,
9120 op1: impl OperandCast,
9121 op2: impl OperandCast,
9122 op3: impl OperandCast,
9123 ) {
9124 self.emit_n(
9125 Opcode::VSADDVX as i64,
9126 &[
9127 op0.as_operand(),
9128 op1.as_operand(),
9129 op2.as_operand(),
9130 op3.as_operand(),
9131 ],
9132 )
9133 }
9134
9135 fn vsaddu_vi(
9136 &mut self,
9137 op0: impl OperandCast,
9138 op1: impl OperandCast,
9139 op2: impl OperandCast,
9140 op3: impl OperandCast,
9141 ) {
9142 self.emit_n(
9143 Opcode::VSADDUVI as i64,
9144 &[
9145 op0.as_operand(),
9146 op1.as_operand(),
9147 op2.as_operand(),
9148 op3.as_operand(),
9149 ],
9150 )
9151 }
9152
9153 fn vsaddu_vv(
9154 &mut self,
9155 op0: impl OperandCast,
9156 op1: impl OperandCast,
9157 op2: impl OperandCast,
9158 op3: impl OperandCast,
9159 ) {
9160 self.emit_n(
9161 Opcode::VSADDUVV as i64,
9162 &[
9163 op0.as_operand(),
9164 op1.as_operand(),
9165 op2.as_operand(),
9166 op3.as_operand(),
9167 ],
9168 )
9169 }
9170
9171 fn vsaddu_vx(
9172 &mut self,
9173 op0: impl OperandCast,
9174 op1: impl OperandCast,
9175 op2: impl OperandCast,
9176 op3: impl OperandCast,
9177 ) {
9178 self.emit_n(
9179 Opcode::VSADDUVX as i64,
9180 &[
9181 op0.as_operand(),
9182 op1.as_operand(),
9183 op2.as_operand(),
9184 op3.as_operand(),
9185 ],
9186 )
9187 }
9188
9189 fn vsbc_vvm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9190 self.emit_n(
9191 Opcode::VSBCVVM as i64,
9192 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9193 )
9194 }
9195
9196 fn vsbc_vxm(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9197 self.emit_n(
9198 Opcode::VSBCVXM as i64,
9199 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9200 )
9201 }
9202
9203 fn vse16_v(
9204 &mut self,
9205 op0: impl OperandCast,
9206 op1: impl OperandCast,
9207 op2: impl OperandCast,
9208 op3: impl OperandCast,
9209 ) {
9210 self.emit_n(
9211 Opcode::VSE16V as i64,
9212 &[
9213 op0.as_operand(),
9214 op1.as_operand(),
9215 op2.as_operand(),
9216 op3.as_operand(),
9217 ],
9218 )
9219 }
9220
9221 fn vse1_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9222 self.emit_n(Opcode::VSE1V as i64, &[op0.as_operand(), op1.as_operand()])
9223 }
9224
9225 fn vse32_v(
9226 &mut self,
9227 op0: impl OperandCast,
9228 op1: impl OperandCast,
9229 op2: impl OperandCast,
9230 op3: impl OperandCast,
9231 ) {
9232 self.emit_n(
9233 Opcode::VSE32V as i64,
9234 &[
9235 op0.as_operand(),
9236 op1.as_operand(),
9237 op2.as_operand(),
9238 op3.as_operand(),
9239 ],
9240 )
9241 }
9242
9243 fn vse64_v(
9244 &mut self,
9245 op0: impl OperandCast,
9246 op1: impl OperandCast,
9247 op2: impl OperandCast,
9248 op3: impl OperandCast,
9249 ) {
9250 self.emit_n(
9251 Opcode::VSE64V as i64,
9252 &[
9253 op0.as_operand(),
9254 op1.as_operand(),
9255 op2.as_operand(),
9256 op3.as_operand(),
9257 ],
9258 )
9259 }
9260
9261 fn vse8_v(
9262 &mut self,
9263 op0: impl OperandCast,
9264 op1: impl OperandCast,
9265 op2: impl OperandCast,
9266 op3: impl OperandCast,
9267 ) {
9268 self.emit_n(
9269 Opcode::VSE8V as i64,
9270 &[
9271 op0.as_operand(),
9272 op1.as_operand(),
9273 op2.as_operand(),
9274 op3.as_operand(),
9275 ],
9276 )
9277 }
9278
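    // Vector configuration instructions: vsetivli (immediate AVL), vsetvli
    // (AVL in a register, vtype as an immediate), vsetvl (AVL and vtype both
    // in registers).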
9279 fn vsetivli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9280 self.emit_n(
9281 Opcode::VSETIVLI as i64,
9282 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9283 )
9284 }
9285
9286 fn vsetvl(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9287 self.emit_n(
9288 Opcode::VSETVL as i64,
9289 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9290 )
9291 }
9292
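    /// A minimal usage sketch, not part of the generated API: it assumes the
    /// operand order matches the assembly forms (`vsetvli rd, rs1, vtypei`,
    /// `vle8.v vd, (rs1)`, `vse8.v vs3, (rs1)`) and that the values shown
    /// (`t0`, `a2`, `v0`, `off`, `vm`, ...) are placeholders for whatever
    /// `OperandCast` types the surrounding crate provides; consult the real
    /// opcode table before relying on it.
    ///
    /// ```ignore
    /// // Configure vl/vtype for 8-bit elements, then copy one strip of bytes.
    /// emitter.vsetvli(t0, a2, e8_vtype);   // t0 = new vl, a2 = requested AVL
    /// emitter.vle8_v(v0, a0, off, vm);     // trailing operands are placeholders
    /// emitter.vse8_v(v0, a1, off, vm);
    /// ```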
9293 fn vsetvli(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9294 self.emit_n(
9295 Opcode::VSETVLI as i64,
9296 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9297 )
9298 }
9299
9300 fn vsext_vf2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9301 self.emit_n(
9302 Opcode::VSEXTVF2 as i64,
9303 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9304 )
9305 }
9306
9307 fn vsext_vf4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9308 self.emit_n(
9309 Opcode::VSEXTVF4 as i64,
9310 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9311 )
9312 }
9313
9314 fn vsext_vf8(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9315 self.emit_n(
9316 Opcode::VSEXTVF8 as i64,
9317 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9318 )
9319 }
9320
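    // Vector SHA-2 crypto emitters (vsha2ch/vsha2cl/vsha2ms); the SM3/SM4
    // emitters (vsm3*, vsm4*) appear further down the list.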
9321 fn vsha2ch_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9322 self.emit_n(
9323 Opcode::VSHA2CHVV as i64,
9324 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9325 )
9326 }
9327
9328 fn vsha2cl_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9329 self.emit_n(
9330 Opcode::VSHA2CLVV as i64,
9331 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9332 )
9333 }
9334
9335 fn vsha2ms_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9336 self.emit_n(
9337 Opcode::VSHA2MSVV as i64,
9338 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9339 )
9340 }
9341
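    // Slide instructions: vslide1up/vslide1down move by one element and insert a
    // scalar; vslideup/vslidedown shift by a register or immediate offset.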
9342 fn vslide1down_vx(
9343 &mut self,
9344 op0: impl OperandCast,
9345 op1: impl OperandCast,
9346 op2: impl OperandCast,
9347 op3: impl OperandCast,
9348 ) {
9349 self.emit_n(
9350 Opcode::VSLIDE1DOWNVX as i64,
9351 &[
9352 op0.as_operand(),
9353 op1.as_operand(),
9354 op2.as_operand(),
9355 op3.as_operand(),
9356 ],
9357 )
9358 }
9359
9360 fn vslide1up_vx(
9361 &mut self,
9362 op0: impl OperandCast,
9363 op1: impl OperandCast,
9364 op2: impl OperandCast,
9365 op3: impl OperandCast,
9366 ) {
9367 self.emit_n(
9368 Opcode::VSLIDE1UPVX as i64,
9369 &[
9370 op0.as_operand(),
9371 op1.as_operand(),
9372 op2.as_operand(),
9373 op3.as_operand(),
9374 ],
9375 )
9376 }
9377
9378 fn vslidedown_vi(
9379 &mut self,
9380 op0: impl OperandCast,
9381 op1: impl OperandCast,
9382 op2: impl OperandCast,
9383 op3: impl OperandCast,
9384 ) {
9385 self.emit_n(
9386 Opcode::VSLIDEDOWNVI as i64,
9387 &[
9388 op0.as_operand(),
9389 op1.as_operand(),
9390 op2.as_operand(),
9391 op3.as_operand(),
9392 ],
9393 )
9394 }
9395
9396 fn vslidedown_vx(
9397 &mut self,
9398 op0: impl OperandCast,
9399 op1: impl OperandCast,
9400 op2: impl OperandCast,
9401 op3: impl OperandCast,
9402 ) {
9403 self.emit_n(
9404 Opcode::VSLIDEDOWNVX as i64,
9405 &[
9406 op0.as_operand(),
9407 op1.as_operand(),
9408 op2.as_operand(),
9409 op3.as_operand(),
9410 ],
9411 )
9412 }
9413
9414 fn vslideup_vi(
9415 &mut self,
9416 op0: impl OperandCast,
9417 op1: impl OperandCast,
9418 op2: impl OperandCast,
9419 op3: impl OperandCast,
9420 ) {
9421 self.emit_n(
9422 Opcode::VSLIDEUPVI as i64,
9423 &[
9424 op0.as_operand(),
9425 op1.as_operand(),
9426 op2.as_operand(),
9427 op3.as_operand(),
9428 ],
9429 )
9430 }
9431
9432 fn vslideup_vx(
9433 &mut self,
9434 op0: impl OperandCast,
9435 op1: impl OperandCast,
9436 op2: impl OperandCast,
9437 op3: impl OperandCast,
9438 ) {
9439 self.emit_n(
9440 Opcode::VSLIDEUPVX as i64,
9441 &[
9442 op0.as_operand(),
9443 op1.as_operand(),
9444 op2.as_operand(),
9445 op3.as_operand(),
9446 ],
9447 )
9448 }
9449
9450 fn vsll_vi(
9451 &mut self,
9452 op0: impl OperandCast,
9453 op1: impl OperandCast,
9454 op2: impl OperandCast,
9455 op3: impl OperandCast,
9456 ) {
9457 self.emit_n(
9458 Opcode::VSLLVI as i64,
9459 &[
9460 op0.as_operand(),
9461 op1.as_operand(),
9462 op2.as_operand(),
9463 op3.as_operand(),
9464 ],
9465 )
9466 }
9467
9468 fn vsll_vv(
9469 &mut self,
9470 op0: impl OperandCast,
9471 op1: impl OperandCast,
9472 op2: impl OperandCast,
9473 op3: impl OperandCast,
9474 ) {
9475 self.emit_n(
9476 Opcode::VSLLVV as i64,
9477 &[
9478 op0.as_operand(),
9479 op1.as_operand(),
9480 op2.as_operand(),
9481 op3.as_operand(),
9482 ],
9483 )
9484 }
9485
9486 fn vsll_vx(
9487 &mut self,
9488 op0: impl OperandCast,
9489 op1: impl OperandCast,
9490 op2: impl OperandCast,
9491 op3: impl OperandCast,
9492 ) {
9493 self.emit_n(
9494 Opcode::VSLLVX as i64,
9495 &[
9496 op0.as_operand(),
9497 op1.as_operand(),
9498 op2.as_operand(),
9499 op3.as_operand(),
9500 ],
9501 )
9502 }
9503
9504 fn vsm3c_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9505 self.emit_n(
9506 Opcode::VSM3CVI as i64,
9507 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9508 )
9509 }
9510
9511 fn vsm3me_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9512 self.emit_n(
9513 Opcode::VSM3MEVV as i64,
9514 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9515 )
9516 }
9517
9518 fn vsm4k_vi(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
9519 self.emit_n(
9520 Opcode::VSM4KVI as i64,
9521 &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
9522 )
9523 }
9524
9525 fn vsm4r_vs(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9526 self.emit_n(
9527 Opcode::VSM4RVS as i64,
9528 &[op0.as_operand(), op1.as_operand()],
9529 )
9530 }
9531
9532 fn vsm4r_vv(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9533 self.emit_n(
9534 Opcode::VSM4RVV as i64,
9535 &[op0.as_operand(), op1.as_operand()],
9536 )
9537 }
9538
9539 fn vsm_v(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
9540 self.emit_n(Opcode::VSMV as i64, &[op0.as_operand(), op1.as_operand()])
9541 }
9542
    fn vsmul_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSMULVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vsmul_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSMULVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

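    // vsoxei{8,16,32,64}.v: indexed-ordered vector stores; the number in the
    // mnemonic is the index element width.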
    fn vsoxei16_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSOXEI16V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsoxei32_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSOXEI32V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsoxei64_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSOXEI64V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsoxei8_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSOXEI8V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

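    // Vector arithmetic shift-right (vsra.vi / vsra.vv / vsra.vx).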
    fn vsra_vi(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSRAVI as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vsra_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSRAVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vsra_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSRAVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

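    // Vector logical shift-right (vsrl.vi / vsrl.vv / vsrl.vx).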
    fn vsrl_vi(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSRLVI as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vsrl_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSRLVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vsrl_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSRLVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

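    // vsse{8,16,32,64}.v: strided vector stores.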
    fn vsse16_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSE16V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsse32_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSE32V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsse64_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSE64V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsse8_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSE8V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

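    // vssra.*: scaling (rounding) arithmetic shift-right.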
    fn vssra_vi(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSRAVI as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssra_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSRAVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssra_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSRAVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

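    // vssrl.*: scaling (rounding) logical shift-right.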
    fn vssrl_vi(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSRLVI as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssrl_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSRLVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssrl_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSRLVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

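    // Saturating subtract: vssub.* (signed) and vssubu.* (unsigned).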
    fn vssub_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSUBVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssub_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSUBVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssubu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSUBUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vssubu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSSUBUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

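    // Vector integer subtract (vsub.vv / vsub.vx).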
    fn vsub_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSUBVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vsub_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSUBVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

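    // vsuxei{8,16,32,64}.v: indexed-unordered vector stores.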
    fn vsuxei16_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSUXEI16V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsuxei32_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSUXEI32V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsuxei64_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSUXEI64V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

    fn vsuxei8_v(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
        op4: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VSUXEI8V as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
                op4.as_operand(),
            ],
        )
    }

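    // Widening add: vwadd.* (signed) and vwaddu.* (unsigned); the .wv/.wx forms
    // take a double-width first source operand.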
    fn vwadd_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwadd_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwadd_wv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDWV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwadd_wx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDWX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwaddu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwaddu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwaddu_wv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDUWV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwaddu_wx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWADDUWX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

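    // Widening multiply-accumulate: vwmacc (signed), vwmaccu (unsigned),
    // vwmaccsu / vwmaccus (mixed signedness).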
    fn vwmacc_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmacc_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmaccsu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCSUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmaccsu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCSUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmaccu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmaccu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmaccus_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMACCUSVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

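    // Widening multiply: vwmul (signed), vwmulu (unsigned), vwmulsu (signed x unsigned).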
    fn vwmul_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMULVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmul_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMULVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmulsu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMULSUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmulsu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMULSUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmulu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMULUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwmulu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWMULUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

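    // Widening sum reductions: vwredsum.vs (signed) and vwredsumu.vs (unsigned).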
    fn vwredsum_vs(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWREDSUMVS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwredsumu_vs(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWREDSUMUVS as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

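    // Zvbb widening shift-left-logical (vwsll.vi / vwsll.vv / vwsll.vx).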
    fn vwsll_vi(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSLLVI as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsll_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSLLVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsll_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSLLVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

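    // Widening subtract: vwsub.* (signed) and vwsubu.* (unsigned); .wv/.wx forms
    // take a double-width first source operand.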
    fn vwsub_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsub_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsub_wv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBWV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsub_wx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBWX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsubu_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBUVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsubu_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBUVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsubu_wv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBUWV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vwsubu_wx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VWSUBUWX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

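    // Vector bitwise XOR (vxor.vi / vxor.vv / vxor.vx).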
    fn vxor_vi(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VXORVI as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vxor_vv(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VXORVV as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

    fn vxor_vx(
        &mut self,
        op0: impl OperandCast,
        op1: impl OperandCast,
        op2: impl OperandCast,
        op3: impl OperandCast,
    ) {
        self.emit_n(
            Opcode::VXORVX as i64,
            &[
                op0.as_operand(),
                op1.as_operand(),
                op2.as_operand(),
                op3.as_operand(),
            ],
        )
    }

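    // Vector zero-extension: vzext.vf2 / vzext.vf4 / vzext.vf8 widen each source
    // element by a factor of 2, 4, or 8.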
    fn vzext_vf2(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::VZEXTVF2 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn vzext_vf4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::VZEXTVF4 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn vzext_vf8(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::VZEXTVF8 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

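    // wfi: wait for interrupt (privileged).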
    fn wfi(&mut self) {
        self.emit_n(Opcode::WFI as i64, &[])
    }

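    // Zawrs wait-on-reservation-set: wrs.nto (no timeout) and wrs.sto (short timeout).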
    fn wrs_nto(&mut self) {
        self.emit_n(Opcode::WRSNTO as i64, &[])
    }

    fn wrs_sto(&mut self) {
        self.emit_n(Opcode::WRSSTO as i64, &[])
    }

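    // Scalar bitwise ops: xnor (Zbb), xor, and xori.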
    fn xnor(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::XNOR as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn xor(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::XOR as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn xori(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::XORI as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

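    // Zbkx crossbar permutation: xperm4 (nibbles) and xperm8 (bytes).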
    fn xperm4(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::XPERM4 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

    fn xperm8(&mut self, op0: impl OperandCast, op1: impl OperandCast, op2: impl OperandCast) {
        self.emit_n(
            Opcode::XPERM8 as i64,
            &[op0.as_operand(), op1.as_operand(), op2.as_operand()],
        )
    }

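    // Zero-extension helpers: zext.b, zext.h (with a separate RV32 encoding), and zext.w.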
    fn zext_b(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::ZEXTB as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn zext_h(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::ZEXTH as i64, &[op0.as_operand(), op1.as_operand()])
    }

    fn zext_h_rv32(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(
            Opcode::ZEXTHRV32 as i64,
            &[op0.as_operand(), op1.as_operand()],
        )
    }

    fn zext_w(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::ZEXTW as i64, &[op0.as_operand(), op1.as_operand()])
    }

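    // zip: Zbkb bit-interleave (RV32 only).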
    fn zip(&mut self, op0: impl OperandCast, op1: impl OperandCast) {
        self.emit_n(Opcode::ZIP as i64, &[op0.as_operand(), op1.as_operand()])
    }
}