1#[allow(unused_imports)]
2use crate::Instruction;
3use crate::{
4 encode_vec, BlockType, Catch, Encode, Handle, HeapType, Ieee32, Ieee64, Lane, MemArg, Ordering,
5 RefType, ValType,
6};
7use alloc::vec::Vec;
8
/// A low-level sink for encoding WebAssembly instructions directly into a
/// byte buffer.
///
/// Each method appends the binary encoding of one instruction to the wrapped
/// `Vec<u8>` and returns `&mut Self` so calls can be chained fluently.
#[derive(Debug)]
pub struct InstructionSink<'a> {
    // The output buffer that instruction bytes are appended to.
    sink: &'a mut Vec<u8>,
}
14
15impl<'a> InstructionSink<'a> {
    /// Create a new instruction sink that appends encoded instructions to `sink`.
    pub fn new(sink: &'a mut Vec<u8>) -> Self {
        Self { sink }
    }
20
    /// Encode the `unreachable` instruction (opcode 0x00).
    pub fn unreachable(&mut self) -> &mut Self {
        self.sink.push(0x00);
        self
    }

    /// Encode the `nop` instruction (opcode 0x01).
    pub fn nop(&mut self) -> &mut Self {
        self.sink.push(0x01);
        self
    }

    /// Encode the `block` instruction, opening a block with type `bt`.
    pub fn block(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x02);
        bt.encode(self.sink);
        self
    }

    /// Encode the `loop` instruction, opening a loop with type `bt`.
    pub fn loop_(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x03);
        bt.encode(self.sink);
        self
    }

    /// Encode the `if` instruction, opening a conditional with type `bt`.
    pub fn if_(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x04);
        bt.encode(self.sink);
        self
    }

    /// Encode the `else` instruction.
    pub fn else_(&mut self) -> &mut Self {
        self.sink.push(0x05);
        self
    }

    /// Encode the `end` instruction, closing the innermost open frame.
    pub fn end(&mut self) -> &mut Self {
        self.sink.push(0x0B);
        self
    }
67
    /// Encode the `br` instruction, branching to relative label depth `l`.
    pub fn br(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x0C);
        l.encode(self.sink);
        self
    }

    /// Encode the `br_if` instruction, conditionally branching to label `l`.
    pub fn br_if(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x0D);
        l.encode(self.sink);
        self
    }

    /// Encode the `br_table` instruction with jump targets `ls` and default
    /// label `l`; the binary format places the target vector before the
    /// default label.
    pub fn br_table<V: IntoIterator<Item = u32>>(&mut self, ls: V, l: u32) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0x0E);
        encode_vec(ls, self.sink);
        l.encode(self.sink);
        self
    }

    /// Encode the `br_on_null` instruction, branching to `l` if the reference
    /// on the stack is null.
    pub fn br_on_null(&mut self, l: u32) -> &mut Self {
        self.sink.push(0xD5);
        l.encode(self.sink);
        self
    }

    /// Encode the `br_on_non_null` instruction, branching to `l` if the
    /// reference on the stack is non-null.
    pub fn br_on_non_null(&mut self, l: u32) -> &mut Self {
        self.sink.push(0xD6);
        l.encode(self.sink);
        self
    }
106
    /// Encode the `return` instruction.
    pub fn return_(&mut self) -> &mut Self {
        self.sink.push(0x0F);
        self
    }

    /// Encode the `call` instruction, calling function index `f`.
    pub fn call(&mut self, f: u32) -> &mut Self {
        self.sink.push(0x10);
        f.encode(self.sink);
        self
    }

    /// Encode the `call_ref` instruction for a typed function reference of
    /// type index `ty`.
    pub fn call_ref(&mut self, ty: u32) -> &mut Self {
        self.sink.push(0x14);
        ty.encode(self.sink);
        self
    }

    /// Encode the `call_indirect` instruction. Note the binary format encodes
    /// the type index *before* the table index, opposite to the parameter
    /// order here.
    pub fn call_indirect(&mut self, table_index: u32, type_index: u32) -> &mut Self {
        self.sink.push(0x11);
        type_index.encode(self.sink);
        table_index.encode(self.sink);
        self
    }

    /// Encode the `return_call_ref` (tail-call) instruction for type index `ty`.
    pub fn return_call_ref(&mut self, ty: u32) -> &mut Self {
        self.sink.push(0x15);
        ty.encode(self.sink);
        self
    }

    /// Encode the `return_call` (tail-call) instruction for function index `f`.
    pub fn return_call(&mut self, f: u32) -> &mut Self {
        self.sink.push(0x12);
        f.encode(self.sink);
        self
    }

    /// Encode the `return_call_indirect` (tail-call) instruction; like
    /// `call_indirect`, the type index is encoded before the table index.
    pub fn return_call_indirect(&mut self, table_index: u32, type_index: u32) -> &mut Self {
        self.sink.push(0x13);
        type_index.encode(self.sink);
        table_index.encode(self.sink);
        self
    }
156
    /// Encode the `try_table` instruction with block type `ty` and the given
    /// catch clauses (exception-handling proposal).
    pub fn try_table<V: IntoIterator<Item = Catch>>(
        &mut self,
        ty: BlockType,
        catches: V,
    ) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0x1f);
        ty.encode(self.sink);
        encode_vec(catches, self.sink);
        self
    }

    /// Encode the `throw` instruction for exception tag index `t`.
    pub fn throw(&mut self, t: u32) -> &mut Self {
        self.sink.push(0x08);
        t.encode(self.sink);
        self
    }

    /// Encode the `throw_ref` instruction, re-throwing an `exnref`.
    pub fn throw_ref(&mut self) -> &mut Self {
        self.sink.push(0x0A);
        self
    }

    /// Encode the legacy `try` instruction with block type `bt`.
    pub fn try_(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x06);
        bt.encode(self.sink);
        self
    }

    /// Encode the legacy `delegate` instruction, delegating to label depth `l`.
    pub fn delegate(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x18);
        l.encode(self.sink);
        self
    }

    /// Encode the legacy `catch` instruction for exception tag index `t`.
    pub fn catch(&mut self, t: u32) -> &mut Self {
        self.sink.push(0x07);
        t.encode(self.sink);
        self
    }

    /// Encode the legacy `catch_all` instruction.
    pub fn catch_all(&mut self) -> &mut Self {
        self.sink.push(0x19);
        self
    }

    /// Encode the legacy `rethrow` instruction for label depth `l`.
    pub fn rethrow(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x09);
        l.encode(self.sink);
        self
    }
220
    /// Encode the `drop` instruction, discarding the top stack value.
    pub fn drop(&mut self) -> &mut Self {
        self.sink.push(0x1A);
        self
    }

    /// Encode the untyped `select` instruction.
    pub fn select(&mut self) -> &mut Self {
        self.sink.push(0x1B);
        self
    }

    /// Encode the `local.get` instruction for local index `l`.
    pub fn local_get(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x20);
        l.encode(self.sink);
        self
    }

    /// Encode the `local.set` instruction for local index `l`.
    pub fn local_set(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x21);
        l.encode(self.sink);
        self
    }

    /// Encode the `local.tee` instruction for local index `l`.
    pub fn local_tee(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x22);
        l.encode(self.sink);
        self
    }

    /// Encode the `global.get` instruction for global index `g`.
    pub fn global_get(&mut self, g: u32) -> &mut Self {
        self.sink.push(0x23);
        g.encode(self.sink);
        self
    }

    /// Encode the `global.set` instruction for global index `g`.
    pub fn global_set(&mut self, g: u32) -> &mut Self {
        self.sink.push(0x24);
        g.encode(self.sink);
        self
    }
271
    /// Encode `i32.load` with memory argument `m`.
    pub fn i32_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x28);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load` with memory argument `m`.
    pub fn i64_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x29);
        m.encode(self.sink);
        self
    }

    /// Encode `f32.load` with memory argument `m`.
    pub fn f32_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2A);
        m.encode(self.sink);
        self
    }

    /// Encode `f64.load` with memory argument `m`.
    pub fn f64_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2B);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load8_s` with memory argument `m`.
    pub fn i32_load8_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2C);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load8_u` with memory argument `m`.
    pub fn i32_load8_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2D);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load16_s` with memory argument `m`.
    pub fn i32_load16_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2E);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load16_u` with memory argument `m`.
    pub fn i32_load16_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2F);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load8_s` with memory argument `m`.
    pub fn i64_load8_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x30);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load8_u` with memory argument `m`.
    pub fn i64_load8_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x31);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load16_s` with memory argument `m`.
    pub fn i64_load16_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x32);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load16_u` with memory argument `m`.
    pub fn i64_load16_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x33);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load32_s` with memory argument `m`.
    pub fn i64_load32_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x34);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load32_u` with memory argument `m`.
    pub fn i64_load32_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x35);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.store` with memory argument `m`.
    pub fn i32_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x36);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store` with memory argument `m`.
    pub fn i64_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x37);
        m.encode(self.sink);
        self
    }

    /// Encode `f32.store` with memory argument `m`.
    pub fn f32_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x38);
        m.encode(self.sink);
        self
    }

    /// Encode `f64.store` with memory argument `m`.
    pub fn f64_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x39);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.store8` with memory argument `m`.
    pub fn i32_store8(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3A);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.store16` with memory argument `m`.
    pub fn i32_store16(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3B);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store8` with memory argument `m`.
    pub fn i64_store8(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3C);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store16` with memory argument `m`.
    pub fn i64_store16(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3D);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store32` with memory argument `m`.
    pub fn i64_store32(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3E);
        m.encode(self.sink);
        self
    }
434
    /// Encode `memory.size` for memory index `i`.
    pub fn memory_size(&mut self, i: u32) -> &mut Self {
        self.sink.push(0x3F);
        i.encode(self.sink);
        self
    }

    /// Encode `memory.grow` for memory index `i`.
    pub fn memory_grow(&mut self, i: u32) -> &mut Self {
        self.sink.push(0x40);
        i.encode(self.sink);
        self
    }

    /// Encode `memory.init` (0xFC 0x08). The binary format encodes the data
    /// segment index *before* the memory index, opposite to the parameter
    /// order here.
    pub fn memory_init(&mut self, mem: u32, data_index: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x08);
        data_index.encode(self.sink);
        mem.encode(self.sink);
        self
    }

    /// Encode `data.drop` (0xFC 0x09) for data segment index `data`.
    pub fn data_drop(&mut self, data: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x09);
        data.encode(self.sink);
        self
    }

    /// Encode `memory.copy` (0xFC 0x0A); destination memory index is encoded
    /// first, then source.
    pub fn memory_copy(&mut self, dst_mem: u32, src_mem: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0a);
        dst_mem.encode(self.sink);
        src_mem.encode(self.sink);
        self
    }

    /// Encode `memory.fill` (0xFC 0x0B) for memory index `mem`.
    pub fn memory_fill(&mut self, mem: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0b);
        mem.encode(self.sink);
        self
    }

    /// Encode `memory.discard` (0xFC 0x12) for memory index `mem`
    /// (memory-control proposal).
    pub fn memory_discard(&mut self, mem: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x12);
        mem.encode(self.sink);
        self
    }
490
    /// Encode `i32.const` with value `x` (signed LEB128-encoded).
    pub fn i32_const(&mut self, x: i32) -> &mut Self {
        self.sink.push(0x41);
        x.encode(self.sink);
        self
    }

    /// Encode `i64.const` with value `x` (signed LEB128-encoded).
    pub fn i64_const(&mut self, x: i64) -> &mut Self {
        self.sink.push(0x42);
        x.encode(self.sink);
        self
    }
506
507 pub fn f32_const(&mut self, x: Ieee32) -> &mut Self {
509 self.sink.push(0x43);
510 let x = x.bits();
511 self.sink.extend(x.to_le_bytes().iter().copied());
512 self
513 }
514
515 pub fn f64_const(&mut self, x: Ieee64) -> &mut Self {
517 self.sink.push(0x44);
518 let x = x.bits();
519 self.sink.extend(x.to_le_bytes().iter().copied());
520 self
521 }
522
    /// Encode `i32.eqz`.
    pub fn i32_eqz(&mut self) -> &mut Self {
        self.sink.push(0x45);
        self
    }

    /// Encode `i32.eq`.
    pub fn i32_eq(&mut self) -> &mut Self {
        self.sink.push(0x46);
        self
    }

    /// Encode `i32.ne`.
    pub fn i32_ne(&mut self) -> &mut Self {
        self.sink.push(0x47);
        self
    }

    /// Encode `i32.lt_s`.
    pub fn i32_lt_s(&mut self) -> &mut Self {
        self.sink.push(0x48);
        self
    }

    /// Encode `i32.lt_u`.
    pub fn i32_lt_u(&mut self) -> &mut Self {
        self.sink.push(0x49);
        self
    }

    /// Encode `i32.gt_s`.
    pub fn i32_gt_s(&mut self) -> &mut Self {
        self.sink.push(0x4A);
        self
    }

    /// Encode `i32.gt_u`.
    pub fn i32_gt_u(&mut self) -> &mut Self {
        self.sink.push(0x4B);
        self
    }

    /// Encode `i32.le_s`.
    pub fn i32_le_s(&mut self) -> &mut Self {
        self.sink.push(0x4C);
        self
    }

    /// Encode `i32.le_u`.
    pub fn i32_le_u(&mut self) -> &mut Self {
        self.sink.push(0x4D);
        self
    }

    /// Encode `i32.ge_s`.
    pub fn i32_ge_s(&mut self) -> &mut Self {
        self.sink.push(0x4E);
        self
    }

    /// Encode `i32.ge_u`.
    pub fn i32_ge_u(&mut self) -> &mut Self {
        self.sink.push(0x4F);
        self
    }

    /// Encode `i64.eqz`.
    pub fn i64_eqz(&mut self) -> &mut Self {
        self.sink.push(0x50);
        self
    }

    /// Encode `i64.eq`.
    pub fn i64_eq(&mut self) -> &mut Self {
        self.sink.push(0x51);
        self
    }

    /// Encode `i64.ne`.
    pub fn i64_ne(&mut self) -> &mut Self {
        self.sink.push(0x52);
        self
    }

    /// Encode `i64.lt_s`.
    pub fn i64_lt_s(&mut self) -> &mut Self {
        self.sink.push(0x53);
        self
    }

    /// Encode `i64.lt_u`.
    pub fn i64_lt_u(&mut self) -> &mut Self {
        self.sink.push(0x54);
        self
    }

    /// Encode `i64.gt_s`.
    pub fn i64_gt_s(&mut self) -> &mut Self {
        self.sink.push(0x55);
        self
    }

    /// Encode `i64.gt_u`.
    pub fn i64_gt_u(&mut self) -> &mut Self {
        self.sink.push(0x56);
        self
    }

    /// Encode `i64.le_s`.
    pub fn i64_le_s(&mut self) -> &mut Self {
        self.sink.push(0x57);
        self
    }

    /// Encode `i64.le_u`.
    pub fn i64_le_u(&mut self) -> &mut Self {
        self.sink.push(0x58);
        self
    }

    /// Encode `i64.ge_s`.
    pub fn i64_ge_s(&mut self) -> &mut Self {
        self.sink.push(0x59);
        self
    }

    /// Encode `i64.ge_u`.
    pub fn i64_ge_u(&mut self) -> &mut Self {
        self.sink.push(0x5A);
        self
    }

    /// Encode `f32.eq`.
    pub fn f32_eq(&mut self) -> &mut Self {
        self.sink.push(0x5B);
        self
    }

    /// Encode `f32.ne`.
    pub fn f32_ne(&mut self) -> &mut Self {
        self.sink.push(0x5C);
        self
    }

    /// Encode `f32.lt`.
    pub fn f32_lt(&mut self) -> &mut Self {
        self.sink.push(0x5D);
        self
    }

    /// Encode `f32.gt`.
    pub fn f32_gt(&mut self) -> &mut Self {
        self.sink.push(0x5E);
        self
    }

    /// Encode `f32.le`.
    pub fn f32_le(&mut self) -> &mut Self {
        self.sink.push(0x5F);
        self
    }

    /// Encode `f32.ge`.
    pub fn f32_ge(&mut self) -> &mut Self {
        self.sink.push(0x60);
        self
    }

    /// Encode `f64.eq`.
    pub fn f64_eq(&mut self) -> &mut Self {
        self.sink.push(0x61);
        self
    }

    /// Encode `f64.ne`.
    pub fn f64_ne(&mut self) -> &mut Self {
        self.sink.push(0x62);
        self
    }

    /// Encode `f64.lt`.
    pub fn f64_lt(&mut self) -> &mut Self {
        self.sink.push(0x63);
        self
    }

    /// Encode `f64.gt`.
    pub fn f64_gt(&mut self) -> &mut Self {
        self.sink.push(0x64);
        self
    }

    /// Encode `f64.le`.
    pub fn f64_le(&mut self) -> &mut Self {
        self.sink.push(0x65);
        self
    }

    /// Encode `f64.ge`.
    pub fn f64_ge(&mut self) -> &mut Self {
        self.sink.push(0x66);
        self
    }

    /// Encode `i32.clz`.
    pub fn i32_clz(&mut self) -> &mut Self {
        self.sink.push(0x67);
        self
    }

    /// Encode `i32.ctz`.
    pub fn i32_ctz(&mut self) -> &mut Self {
        self.sink.push(0x68);
        self
    }

    /// Encode `i32.popcnt`.
    pub fn i32_popcnt(&mut self) -> &mut Self {
        self.sink.push(0x69);
        self
    }

    /// Encode `i32.add`.
    pub fn i32_add(&mut self) -> &mut Self {
        self.sink.push(0x6A);
        self
    }

    /// Encode `i32.sub`.
    pub fn i32_sub(&mut self) -> &mut Self {
        self.sink.push(0x6B);
        self
    }

    /// Encode `i32.mul`.
    pub fn i32_mul(&mut self) -> &mut Self {
        self.sink.push(0x6C);
        self
    }

    /// Encode `i32.div_s`.
    pub fn i32_div_s(&mut self) -> &mut Self {
        self.sink.push(0x6D);
        self
    }

    /// Encode `i32.div_u`.
    pub fn i32_div_u(&mut self) -> &mut Self {
        self.sink.push(0x6E);
        self
    }

    /// Encode `i32.rem_s`.
    pub fn i32_rem_s(&mut self) -> &mut Self {
        self.sink.push(0x6F);
        self
    }

    /// Encode `i32.rem_u`.
    pub fn i32_rem_u(&mut self) -> &mut Self {
        self.sink.push(0x70);
        self
    }

    /// Encode `i32.and`.
    pub fn i32_and(&mut self) -> &mut Self {
        self.sink.push(0x71);
        self
    }

    /// Encode `i32.or`.
    pub fn i32_or(&mut self) -> &mut Self {
        self.sink.push(0x72);
        self
    }

    /// Encode `i32.xor`.
    pub fn i32_xor(&mut self) -> &mut Self {
        self.sink.push(0x73);
        self
    }

    /// Encode `i32.shl`.
    pub fn i32_shl(&mut self) -> &mut Self {
        self.sink.push(0x74);
        self
    }

    /// Encode `i32.shr_s`.
    pub fn i32_shr_s(&mut self) -> &mut Self {
        self.sink.push(0x75);
        self
    }

    /// Encode `i32.shr_u`.
    pub fn i32_shr_u(&mut self) -> &mut Self {
        self.sink.push(0x76);
        self
    }

    /// Encode `i32.rotl`.
    pub fn i32_rotl(&mut self) -> &mut Self {
        self.sink.push(0x77);
        self
    }

    /// Encode `i32.rotr`.
    pub fn i32_rotr(&mut self) -> &mut Self {
        self.sink.push(0x78);
        self
    }

    /// Encode `i64.clz`.
    pub fn i64_clz(&mut self) -> &mut Self {
        self.sink.push(0x79);
        self
    }

    /// Encode `i64.ctz`.
    pub fn i64_ctz(&mut self) -> &mut Self {
        self.sink.push(0x7A);
        self
    }

    /// Encode `i64.popcnt`.
    pub fn i64_popcnt(&mut self) -> &mut Self {
        self.sink.push(0x7B);
        self
    }

    /// Encode `i64.add`.
    pub fn i64_add(&mut self) -> &mut Self {
        self.sink.push(0x7C);
        self
    }

    /// Encode `i64.sub`.
    pub fn i64_sub(&mut self) -> &mut Self {
        self.sink.push(0x7D);
        self
    }

    /// Encode `i64.mul`.
    pub fn i64_mul(&mut self) -> &mut Self {
        self.sink.push(0x7E);
        self
    }

    /// Encode `i64.div_s`.
    pub fn i64_div_s(&mut self) -> &mut Self {
        self.sink.push(0x7F);
        self
    }

    /// Encode `i64.div_u`.
    pub fn i64_div_u(&mut self) -> &mut Self {
        self.sink.push(0x80);
        self
    }

    /// Encode `i64.rem_s`.
    pub fn i64_rem_s(&mut self) -> &mut Self {
        self.sink.push(0x81);
        self
    }

    /// Encode `i64.rem_u`.
    pub fn i64_rem_u(&mut self) -> &mut Self {
        self.sink.push(0x82);
        self
    }

    /// Encode `i64.and`.
    pub fn i64_and(&mut self) -> &mut Self {
        self.sink.push(0x83);
        self
    }

    /// Encode `i64.or`.
    pub fn i64_or(&mut self) -> &mut Self {
        self.sink.push(0x84);
        self
    }

    /// Encode `i64.xor`.
    pub fn i64_xor(&mut self) -> &mut Self {
        self.sink.push(0x85);
        self
    }

    /// Encode `i64.shl`.
    pub fn i64_shl(&mut self) -> &mut Self {
        self.sink.push(0x86);
        self
    }

    /// Encode `i64.shr_s`.
    pub fn i64_shr_s(&mut self) -> &mut Self {
        self.sink.push(0x87);
        self
    }

    /// Encode `i64.shr_u`.
    pub fn i64_shr_u(&mut self) -> &mut Self {
        self.sink.push(0x88);
        self
    }

    /// Encode `i64.rotl`.
    pub fn i64_rotl(&mut self) -> &mut Self {
        self.sink.push(0x89);
        self
    }

    /// Encode `i64.rotr`.
    pub fn i64_rotr(&mut self) -> &mut Self {
        self.sink.push(0x8A);
        self
    }

    /// Encode `f32.abs`.
    pub fn f32_abs(&mut self) -> &mut Self {
        self.sink.push(0x8B);
        self
    }

    /// Encode `f32.neg`.
    pub fn f32_neg(&mut self) -> &mut Self {
        self.sink.push(0x8C);
        self
    }

    /// Encode `f32.ceil`.
    pub fn f32_ceil(&mut self) -> &mut Self {
        self.sink.push(0x8D);
        self
    }

    /// Encode `f32.floor`.
    pub fn f32_floor(&mut self) -> &mut Self {
        self.sink.push(0x8E);
        self
    }

    /// Encode `f32.trunc`.
    pub fn f32_trunc(&mut self) -> &mut Self {
        self.sink.push(0x8F);
        self
    }

    /// Encode `f32.nearest`.
    pub fn f32_nearest(&mut self) -> &mut Self {
        self.sink.push(0x90);
        self
    }

    /// Encode `f32.sqrt`.
    pub fn f32_sqrt(&mut self) -> &mut Self {
        self.sink.push(0x91);
        self
    }

    /// Encode `f32.add`.
    pub fn f32_add(&mut self) -> &mut Self {
        self.sink.push(0x92);
        self
    }

    /// Encode `f32.sub`.
    pub fn f32_sub(&mut self) -> &mut Self {
        self.sink.push(0x93);
        self
    }

    /// Encode `f32.mul`.
    pub fn f32_mul(&mut self) -> &mut Self {
        self.sink.push(0x94);
        self
    }

    /// Encode `f32.div`.
    pub fn f32_div(&mut self) -> &mut Self {
        self.sink.push(0x95);
        self
    }

    /// Encode `f32.min`.
    pub fn f32_min(&mut self) -> &mut Self {
        self.sink.push(0x96);
        self
    }

    /// Encode `f32.max`.
    pub fn f32_max(&mut self) -> &mut Self {
        self.sink.push(0x97);
        self
    }

    /// Encode `f32.copysign`.
    pub fn f32_copysign(&mut self) -> &mut Self {
        self.sink.push(0x98);
        self
    }

    /// Encode `f64.abs`.
    pub fn f64_abs(&mut self) -> &mut Self {
        self.sink.push(0x99);
        self
    }

    /// Encode `f64.neg`.
    pub fn f64_neg(&mut self) -> &mut Self {
        self.sink.push(0x9A);
        self
    }

    /// Encode `f64.ceil`.
    pub fn f64_ceil(&mut self) -> &mut Self {
        self.sink.push(0x9B);
        self
    }

    /// Encode `f64.floor`.
    pub fn f64_floor(&mut self) -> &mut Self {
        self.sink.push(0x9C);
        self
    }

    /// Encode `f64.trunc`.
    pub fn f64_trunc(&mut self) -> &mut Self {
        self.sink.push(0x9D);
        self
    }

    /// Encode `f64.nearest`.
    pub fn f64_nearest(&mut self) -> &mut Self {
        self.sink.push(0x9E);
        self
    }

    /// Encode `f64.sqrt`.
    pub fn f64_sqrt(&mut self) -> &mut Self {
        self.sink.push(0x9F);
        self
    }

    /// Encode `f64.add`.
    pub fn f64_add(&mut self) -> &mut Self {
        self.sink.push(0xA0);
        self
    }

    /// Encode `f64.sub`.
    pub fn f64_sub(&mut self) -> &mut Self {
        self.sink.push(0xA1);
        self
    }

    /// Encode `f64.mul`.
    pub fn f64_mul(&mut self) -> &mut Self {
        self.sink.push(0xA2);
        self
    }

    /// Encode `f64.div`.
    pub fn f64_div(&mut self) -> &mut Self {
        self.sink.push(0xA3);
        self
    }

    /// Encode `f64.min`.
    pub fn f64_min(&mut self) -> &mut Self {
        self.sink.push(0xA4);
        self
    }

    /// Encode `f64.max`.
    pub fn f64_max(&mut self) -> &mut Self {
        self.sink.push(0xA5);
        self
    }

    /// Encode `f64.copysign`.
    pub fn f64_copysign(&mut self) -> &mut Self {
        self.sink.push(0xA6);
        self
    }

    /// Encode `i32.wrap_i64`.
    pub fn i32_wrap_i64(&mut self) -> &mut Self {
        self.sink.push(0xA7);
        self
    }

    /// Encode `i32.trunc_f32_s`.
    pub fn i32_trunc_f32_s(&mut self) -> &mut Self {
        self.sink.push(0xA8);
        self
    }

    /// Encode `i32.trunc_f32_u`.
    pub fn i32_trunc_f32_u(&mut self) -> &mut Self {
        self.sink.push(0xA9);
        self
    }

    /// Encode `i32.trunc_f64_s`.
    pub fn i32_trunc_f64_s(&mut self) -> &mut Self {
        self.sink.push(0xAA);
        self
    }

    /// Encode `i32.trunc_f64_u`.
    pub fn i32_trunc_f64_u(&mut self) -> &mut Self {
        self.sink.push(0xAB);
        self
    }

    /// Encode `i64.extend_i32_s`.
    pub fn i64_extend_i32_s(&mut self) -> &mut Self {
        self.sink.push(0xAC);
        self
    }

    /// Encode `i64.extend_i32_u`.
    pub fn i64_extend_i32_u(&mut self) -> &mut Self {
        self.sink.push(0xAD);
        self
    }

    /// Encode `i64.trunc_f32_s`.
    pub fn i64_trunc_f32_s(&mut self) -> &mut Self {
        self.sink.push(0xAE);
        self
    }

    /// Encode `i64.trunc_f32_u`.
    pub fn i64_trunc_f32_u(&mut self) -> &mut Self {
        self.sink.push(0xAF);
        self
    }

    /// Encode `i64.trunc_f64_s`.
    pub fn i64_trunc_f64_s(&mut self) -> &mut Self {
        self.sink.push(0xB0);
        self
    }

    /// Encode `i64.trunc_f64_u`.
    pub fn i64_trunc_f64_u(&mut self) -> &mut Self {
        self.sink.push(0xB1);
        self
    }

    /// Encode `f32.convert_i32_s`.
    pub fn f32_convert_i32_s(&mut self) -> &mut Self {
        self.sink.push(0xB2);
        self
    }

    /// Encode `f32.convert_i32_u`.
    pub fn f32_convert_i32_u(&mut self) -> &mut Self {
        self.sink.push(0xB3);
        self
    }

    /// Encode `f32.convert_i64_s`.
    pub fn f32_convert_i64_s(&mut self) -> &mut Self {
        self.sink.push(0xB4);
        self
    }

    /// Encode `f32.convert_i64_u`.
    pub fn f32_convert_i64_u(&mut self) -> &mut Self {
        self.sink.push(0xB5);
        self
    }

    /// Encode `f32.demote_f64`.
    pub fn f32_demote_f64(&mut self) -> &mut Self {
        self.sink.push(0xB6);
        self
    }

    /// Encode `f64.convert_i32_s`.
    pub fn f64_convert_i32_s(&mut self) -> &mut Self {
        self.sink.push(0xB7);
        self
    }

    /// Encode `f64.convert_i32_u`.
    pub fn f64_convert_i32_u(&mut self) -> &mut Self {
        self.sink.push(0xB8);
        self
    }

    /// Encode `f64.convert_i64_s`.
    pub fn f64_convert_i64_s(&mut self) -> &mut Self {
        self.sink.push(0xB9);
        self
    }

    /// Encode `f64.convert_i64_u`.
    pub fn f64_convert_i64_u(&mut self) -> &mut Self {
        self.sink.push(0xBA);
        self
    }

    /// Encode `f64.promote_f32`.
    pub fn f64_promote_f32(&mut self) -> &mut Self {
        self.sink.push(0xBB);
        self
    }

    /// Encode `i32.reinterpret_f32`.
    pub fn i32_reinterpret_f32(&mut self) -> &mut Self {
        self.sink.push(0xBC);
        self
    }

    /// Encode `i64.reinterpret_f64`.
    pub fn i64_reinterpret_f64(&mut self) -> &mut Self {
        self.sink.push(0xBD);
        self
    }

    /// Encode `f32.reinterpret_i32`.
    pub fn f32_reinterpret_i32(&mut self) -> &mut Self {
        self.sink.push(0xBE);
        self
    }

    /// Encode `f64.reinterpret_i64`.
    pub fn f64_reinterpret_i64(&mut self) -> &mut Self {
        self.sink.push(0xBF);
        self
    }

    /// Encode `i32.extend8_s`.
    pub fn i32_extend8_s(&mut self) -> &mut Self {
        self.sink.push(0xC0);
        self
    }

    /// Encode `i32.extend16_s`.
    pub fn i32_extend16_s(&mut self) -> &mut Self {
        self.sink.push(0xC1);
        self
    }

    /// Encode `i64.extend8_s`.
    pub fn i64_extend8_s(&mut self) -> &mut Self {
        self.sink.push(0xC2);
        self
    }

    /// Encode `i64.extend16_s`.
    pub fn i64_extend16_s(&mut self) -> &mut Self {
        self.sink.push(0xC3);
        self
    }

    /// Encode `i64.extend32_s`.
    pub fn i64_extend32_s(&mut self) -> &mut Self {
        self.sink.push(0xC4);
        self
    }
1290
1291 pub fn i32_trunc_sat_f32_s(&mut self) -> &mut Self {
1293 self.sink.push(0xFC);
1294 self.sink.push(0x00);
1295 self
1296 }
1297
1298 pub fn i32_trunc_sat_f32_u(&mut self) -> &mut Self {
1300 self.sink.push(0xFC);
1301 self.sink.push(0x01);
1302 self
1303 }
1304
1305 pub fn i32_trunc_sat_f64_s(&mut self) -> &mut Self {
1307 self.sink.push(0xFC);
1308 self.sink.push(0x02);
1309 self
1310 }
1311
1312 pub fn i32_trunc_sat_f64_u(&mut self) -> &mut Self {
1314 self.sink.push(0xFC);
1315 self.sink.push(0x03);
1316 self
1317 }
1318
1319 pub fn i64_trunc_sat_f32_s(&mut self) -> &mut Self {
1321 self.sink.push(0xFC);
1322 self.sink.push(0x04);
1323 self
1324 }
1325
1326 pub fn i64_trunc_sat_f32_u(&mut self) -> &mut Self {
1328 self.sink.push(0xFC);
1329 self.sink.push(0x05);
1330 self
1331 }
1332
1333 pub fn i64_trunc_sat_f64_s(&mut self) -> &mut Self {
1335 self.sink.push(0xFC);
1336 self.sink.push(0x06);
1337 self
1338 }
1339
1340 pub fn i64_trunc_sat_f64_u(&mut self) -> &mut Self {
1342 self.sink.push(0xFC);
1343 self.sink.push(0x07);
1344 self
1345 }
1346
    /// Encode a typed `select` (0x1C) with a single result type `ty`;
    /// the type is encoded as a one-element vector.
    pub fn typed_select(&mut self, ty: ValType) -> &mut Self {
        self.sink.push(0x1c);
        [ty].encode(self.sink);
        self
    }

    /// Encode a typed `select` (0x1C) with an arbitrary vector of result types.
    pub fn typed_select_multi(&mut self, tys: &[ValType]) -> &mut Self {
        self.sink.push(0x1c);
        tys.encode(self.sink);
        self
    }

    /// Encode `ref.null` for heap type `ty`.
    pub fn ref_null(&mut self, ty: HeapType) -> &mut Self {
        self.sink.push(0xd0);
        ty.encode(self.sink);
        self
    }

    /// Encode `ref.is_null`.
    pub fn ref_is_null(&mut self) -> &mut Self {
        self.sink.push(0xd1);
        self
    }

    /// Encode `ref.func` for function index `f`.
    pub fn ref_func(&mut self, f: u32) -> &mut Self {
        self.sink.push(0xd2);
        f.encode(self.sink);
        self
    }

    /// Encode `ref.eq` (GC proposal).
    pub fn ref_eq(&mut self) -> &mut Self {
        self.sink.push(0xd3);
        self
    }

    /// Encode `ref.as_non_null`.
    pub fn ref_as_non_null(&mut self) -> &mut Self {
        self.sink.push(0xd4);
        self
    }
1394
    /// Encode `struct.new` (0xFB 0x00) for struct type index `type_index`.
    pub fn struct_new(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x00);
        type_index.encode(self.sink);
        self
    }

    /// Encode `struct.new_default` (0xFB 0x01) for struct type index `type_index`.
    pub fn struct_new_default(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x01);
        type_index.encode(self.sink);
        self
    }

    /// Encode `struct.get` (0xFB 0x02) for the given struct type and field.
    pub fn struct_get(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x02);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `struct.get_s` (0xFB 0x03), sign-extending a packed field.
    pub fn struct_get_s(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x03);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `struct.get_u` (0xFB 0x04), zero-extending a packed field.
    pub fn struct_get_u(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x04);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `struct.set` (0xFB 0x05) for the given struct type and field.
    pub fn struct_set(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x05);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }
1448
    /// Encode `array.new` (0xFB 0x06) for array type index `type_index`.
    pub fn array_new(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x06);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.new_default` (0xFB 0x07) for array type index `type_index`.
    pub fn array_new_default(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x07);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.new_fixed` (0xFB 0x08) with `array_size` operands taken
    /// from the stack.
    pub fn array_new_fixed(&mut self, array_type_index: u32, array_size: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x08);
        array_type_index.encode(self.sink);
        array_size.encode(self.sink);
        self
    }

    /// Encode `array.new_data` (0xFB 0x09), initializing from a data segment.
    pub fn array_new_data(&mut self, array_type_index: u32, array_data_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x09);
        array_type_index.encode(self.sink);
        array_data_index.encode(self.sink);
        self
    }

    /// Encode `array.new_elem` (0xFB 0x0A), initializing from an element segment.
    pub fn array_new_elem(&mut self, array_type_index: u32, array_elem_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0a);
        array_type_index.encode(self.sink);
        array_elem_index.encode(self.sink);
        self
    }

    /// Encode `array.get` (0xFB 0x0B) for array type index `type_index`.
    pub fn array_get(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0b);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.get_s` (0xFB 0x0C), sign-extending a packed element.
    pub fn array_get_s(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0c);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.get_u` (0xFB 0x0D), zero-extending a packed element.
    pub fn array_get_u(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0d);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.set` (0xFB 0x0E) for array type index `type_index`.
    pub fn array_set(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0e);
        type_index.encode(self.sink);
        self
    }
1523
1524 pub fn array_len(&mut self) -> &mut Self {
1526 self.sink.push(0xfb);
1527 self.sink.push(0x0f);
1528 self
1529 }
1530
    /// Encode `array.fill` (0xFB 0x10) for array type index `type_index`.
    pub fn array_fill(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x10);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.copy` (0xFB 0x11); destination array type index is
    /// encoded first, then source.
    pub fn array_copy(
        &mut self,
        array_type_index_dst: u32,
        array_type_index_src: u32,
    ) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x11);
        array_type_index_dst.encode(self.sink);
        array_type_index_src.encode(self.sink);
        self
    }

    /// Encode `array.init_data` (0xFB 0x12), filling from a data segment.
    pub fn array_init_data(&mut self, array_type_index: u32, array_data_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x12);
        array_type_index.encode(self.sink);
        array_data_index.encode(self.sink);
        self
    }

    /// Encode `array.init_elem` (0xFB 0x13), filling from an element segment.
    pub fn array_init_elem(&mut self, array_type_index: u32, array_elem_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x13);
        array_type_index.encode(self.sink);
        array_elem_index.encode(self.sink);
        self
    }
1569
    /// Encode `ref.test` (0xFB 0x14) against a non-nullable reference to `heap_type`.
    pub fn ref_test_non_null(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x14);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `ref.test null` (0xFB 0x15) against a nullable reference to `heap_type`.
    pub fn ref_test_nullable(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x15);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `ref.cast` (0xFB 0x16) to a non-nullable reference to `heap_type`.
    pub fn ref_cast_non_null(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x16);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `ref.cast null` (0xFB 0x17) to a nullable reference to `heap_type`.
    pub fn ref_cast_nullable(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x17);
        heap_type.encode(self.sink);
        self
    }
1601
    /// Encode `br_on_cast` (0xFB 0x18), branching to `relative_depth` when the
    /// cast from `from_ref_type` to `to_ref_type` succeeds.
    pub fn br_on_cast(
        &mut self,
        relative_depth: u32,
        from_ref_type: RefType,
        to_ref_type: RefType,
    ) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x18);
        // Flag byte: bit 0 = source nullability, bit 1 = target nullability.
        let cast_flags = (from_ref_type.nullable as u8) | ((to_ref_type.nullable as u8) << 1);
        self.sink.push(cast_flags);
        relative_depth.encode(self.sink);
        // Only the heap types follow; nullability is already in the flag byte.
        from_ref_type.heap_type.encode(self.sink);
        to_ref_type.heap_type.encode(self.sink);
        self
    }

    /// Encode `br_on_cast_fail` (0xFB 0x19), branching to `relative_depth`
    /// when the cast from `from_ref_type` to `to_ref_type` fails.
    pub fn br_on_cast_fail(
        &mut self,
        relative_depth: u32,
        from_ref_type: RefType,
        to_ref_type: RefType,
    ) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x19);
        // Flag byte: bit 0 = source nullability, bit 1 = target nullability.
        let cast_flags = (from_ref_type.nullable as u8) | ((to_ref_type.nullable as u8) << 1);
        self.sink.push(cast_flags);
        relative_depth.encode(self.sink);
        // Only the heap types follow; nullability is already in the flag byte.
        from_ref_type.heap_type.encode(self.sink);
        to_ref_type.heap_type.encode(self.sink);
        self
    }
1635
    /// Encode `any.convert_extern` (0xFB 0x1A).
    pub fn any_convert_extern(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1a);
        self
    }

    /// Encode `extern.convert_any` (0xFB 0x1B).
    pub fn extern_convert_any(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1b);
        self
    }

    /// Encode `ref.i31` (0xFB 0x1C).
    pub fn ref_i31(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1c);
        self
    }

    /// Encode `i31.get_s` (0xFB 0x1D).
    pub fn i31_get_s(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1d);
        self
    }

    /// Encode `i31.get_u` (0xFB 0x1E).
    pub fn i31_get_u(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1e);
        self
    }
1670
    /// Encode `table.init` (0xFC 0x0C). The binary format encodes the element
    /// segment index *before* the table index, opposite to the parameter
    /// order here.
    pub fn table_init(&mut self, table: u32, elem_index: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0c);
        elem_index.encode(self.sink);
        table.encode(self.sink);
        self
    }

    /// Encode `elem.drop` (0xFC 0x0D) for element segment index `segment`.
    pub fn elem_drop(&mut self, segment: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0d);
        segment.encode(self.sink);
        self
    }

    /// Encode `table.fill` (0xFC 0x11) for table index `table`.
    pub fn table_fill(&mut self, table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x11);
        table.encode(self.sink);
        self
    }

    /// Encode `table.set` (0x26) for table index `table`.
    pub fn table_set(&mut self, table: u32) -> &mut Self {
        self.sink.push(0x26);
        table.encode(self.sink);
        self
    }

    /// Encode `table.get` (0x25) for table index `table`.
    pub fn table_get(&mut self, table: u32) -> &mut Self {
        self.sink.push(0x25);
        table.encode(self.sink);
        self
    }

    /// Encode `table.grow` (0xFC 0x0F) for table index `table`.
    pub fn table_grow(&mut self, table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0f);
        table.encode(self.sink);
        self
    }

    /// Encode `table.size` (0xFC 0x10) for table index `table`.
    pub fn table_size(&mut self, table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x10);
        table.encode(self.sink);
        self
    }

    /// Encode `table.copy` (0xFC 0x0E); destination table index is encoded
    /// first, then source.
    pub fn table_copy(&mut self, dst_table: u32, src_table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0e);
        dst_table.encode(self.sink);
        src_table.encode(self.sink);
        self
    }
1736
    /// Encode the `v128.load` instruction.
    ///
    /// All SIMD instructions share the 0xFD prefix byte followed by a
    /// LEB128-encoded sub-opcode.
    pub fn v128_load(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x00u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load8x8_s` instruction.
    pub fn v128_load8x8_s(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x01u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load8x8_u` instruction.
    pub fn v128_load8x8_u(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x02u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load16x4_s` instruction.
    pub fn v128_load16x4_s(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x03u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load16x4_u` instruction.
    pub fn v128_load16x4_u(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x04u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load32x2_s` instruction.
    pub fn v128_load32x2_s(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x05u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load32x2_u` instruction.
    pub fn v128_load32x2_u(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x06u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load8_splat` instruction.
    pub fn v128_load8_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x07u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load16_splat` instruction.
    pub fn v128_load16_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x08u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load32_splat` instruction.
    pub fn v128_load32_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x09u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load64_splat` instruction.
    pub fn v128_load64_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x0Au32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load32_zero` instruction.
    pub fn v128_load32_zero(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x5Cu32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.load64_zero` instruction.
    pub fn v128_load64_zero(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x5Du32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode the `v128.store` instruction.
    pub fn v128_store(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x0Bu32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }
1850
    /// Encode the `v128.load8_lane` instruction.
    ///
    /// The lane index is a single raw byte following the memory argument.
    pub fn v128_load8_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x54u32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.load16_lane` instruction.
    pub fn v128_load16_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x55u32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.load32_lane` instruction.
    pub fn v128_load32_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x56u32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.load64_lane` instruction.
    pub fn v128_load64_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x57u32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store8_lane` instruction.
    pub fn v128_store8_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x58u32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store16_lane` instruction.
    pub fn v128_store16_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x59u32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store32_lane` instruction.
    pub fn v128_store32_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x5Au32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store64_lane` instruction.
    pub fn v128_store64_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x5Bu32.encode(self.sink);
        memarg.encode(self.sink);
        self.sink.push(lane);
        self
    }
1922
1923 pub fn v128_const(&mut self, x: i128) -> &mut Self {
1925 self.sink.push(0xFD);
1926 0x0Cu32.encode(self.sink);
1927 self.sink.extend(x.to_le_bytes().iter().copied());
1928 self
1929 }
1930
1931 pub fn i8x16_shuffle(&mut self, lanes: [Lane; 16]) -> &mut Self {
1933 self.sink.push(0xFD);
1934 0x0Du32.encode(self.sink);
1935 self.sink.extend(lanes.iter().copied());
1936 self
1937 }
1938
    /// Encode the `i8x16.extract_lane_s` instruction.
    pub fn i8x16_extract_lane_s(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x15u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i8x16.extract_lane_u` instruction.
    pub fn i8x16_extract_lane_u(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x16u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i8x16.replace_lane` instruction.
    pub fn i8x16_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x17u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i16x8.extract_lane_s` instruction.
    pub fn i16x8_extract_lane_s(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x18u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i16x8.extract_lane_u` instruction.
    pub fn i16x8_extract_lane_u(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x19u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i16x8.replace_lane` instruction.
    pub fn i16x8_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Au32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i32x4.extract_lane` instruction.
    pub fn i32x4_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Bu32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i32x4.replace_lane` instruction.
    pub fn i32x4_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Cu32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i64x2.extract_lane` instruction.
    pub fn i64x2_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Du32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `i64x2.replace_lane` instruction.
    pub fn i64x2_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Eu32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `f32x4.extract_lane` instruction.
    pub fn f32x4_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Fu32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `f32x4.replace_lane` instruction.
    pub fn f32x4_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x20u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `f64x2.extract_lane` instruction.
    pub fn f64x2_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x21u32.encode(self.sink);
        self.sink.push(lane);
        self
    }

    /// Encode the `f64x2.replace_lane` instruction.
    pub fn f64x2_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x22u32.encode(self.sink);
        self.sink.push(lane);
        self
    }
2050
    /// Encode the `i8x16.swizzle` instruction.
    pub fn i8x16_swizzle(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x0Eu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.splat` instruction.
    pub fn i8x16_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x0Fu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.splat` instruction.
    pub fn i16x8_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.splat` instruction.
    pub fn i32x4_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x11u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.splat` instruction.
    pub fn i64x2_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x12u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.splat` instruction.
    pub fn f32x4_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x13u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.splat` instruction.
    pub fn f64x2_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x14u32.encode(self.sink);
        self
    }
2099
    /// Encode the `i8x16.eq` instruction.
    pub fn i8x16_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x23u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.ne` instruction.
    pub fn i8x16_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x24u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.lt_s` instruction.
    pub fn i8x16_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x25u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.lt_u` instruction.
    pub fn i8x16_lt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x26u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.gt_s` instruction.
    pub fn i8x16_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x27u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.gt_u` instruction.
    pub fn i8x16_gt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x28u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.le_s` instruction.
    pub fn i8x16_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x29u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.le_u` instruction.
    pub fn i8x16_le_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Au32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.ge_s` instruction.
    pub fn i8x16_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Bu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.ge_u` instruction.
    pub fn i8x16_ge_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.eq` instruction.
    pub fn i16x8_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.ne` instruction.
    pub fn i16x8_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Eu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.lt_s` instruction.
    pub fn i16x8_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Fu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.lt_u` instruction.
    pub fn i16x8_lt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x30u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.gt_s` instruction.
    pub fn i16x8_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x31u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.gt_u` instruction.
    pub fn i16x8_gt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x32u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.le_s` instruction.
    pub fn i16x8_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x33u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.le_u` instruction.
    pub fn i16x8_le_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x34u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.ge_s` instruction.
    pub fn i16x8_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x35u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.ge_u` instruction.
    pub fn i16x8_ge_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x36u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.eq` instruction.
    pub fn i32x4_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x37u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.ne` instruction.
    pub fn i32x4_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x38u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.lt_s` instruction.
    pub fn i32x4_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x39u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.lt_u` instruction.
    pub fn i32x4_lt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Au32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.gt_s` instruction.
    pub fn i32x4_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Bu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.gt_u` instruction.
    pub fn i32x4_gt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Cu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.le_s` instruction.
    pub fn i32x4_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Du32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.le_u` instruction.
    pub fn i32x4_le_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Eu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.ge_s` instruction.
    pub fn i32x4_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Fu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.ge_u` instruction.
    pub fn i32x4_ge_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x40u32.encode(self.sink);
        self
    }
2309
    /// Encode the `i64x2.eq` instruction.
    ///
    /// The i64x2 comparisons were added later in the SIMD proposal, hence the
    /// non-contiguous sub-opcodes (0xD6..=0xDB); there are no unsigned forms.
    pub fn i64x2_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD6u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.ne` instruction.
    pub fn i64x2_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD7u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.lt_s` instruction.
    pub fn i64x2_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD8u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.gt_s` instruction.
    pub fn i64x2_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD9u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.le_s` instruction.
    pub fn i64x2_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDAu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.ge_s` instruction.
    pub fn i64x2_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDBu32.encode(self.sink);
        self
    }
2351
    /// Encode the `f32x4.eq` instruction.
    pub fn f32x4_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x41u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.ne` instruction.
    pub fn f32x4_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x42u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.lt` instruction.
    pub fn f32x4_lt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x43u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.gt` instruction.
    pub fn f32x4_gt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x44u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.le` instruction.
    pub fn f32x4_le(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x45u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.ge` instruction.
    pub fn f32x4_ge(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x46u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.eq` instruction.
    pub fn f64x2_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x47u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.ne` instruction.
    pub fn f64x2_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x48u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.lt` instruction.
    pub fn f64x2_lt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x49u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.gt` instruction.
    pub fn f64x2_gt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Au32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.le` instruction.
    pub fn f64x2_le(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Bu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.ge` instruction.
    pub fn f64x2_ge(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Cu32.encode(self.sink);
        self
    }
2435
    /// Encode the `v128.not` instruction.
    pub fn v128_not(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Du32.encode(self.sink);
        self
    }

    /// Encode the `v128.and` instruction.
    pub fn v128_and(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Eu32.encode(self.sink);
        self
    }

    /// Encode the `v128.andnot` instruction.
    pub fn v128_andnot(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Fu32.encode(self.sink);
        self
    }

    /// Encode the `v128.or` instruction.
    pub fn v128_or(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x50u32.encode(self.sink);
        self
    }

    /// Encode the `v128.xor` instruction.
    pub fn v128_xor(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x51u32.encode(self.sink);
        self
    }

    /// Encode the `v128.bitselect` instruction.
    pub fn v128_bitselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x52u32.encode(self.sink);
        self
    }

    /// Encode the `v128.any_true` instruction.
    pub fn v128_any_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x53u32.encode(self.sink);
        self
    }
2484
    /// Encode the `i8x16.abs` instruction.
    pub fn i8x16_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x60u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.neg` instruction.
    pub fn i8x16_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x61u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.popcnt` instruction.
    pub fn i8x16_popcnt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x62u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.all_true` instruction.
    pub fn i8x16_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x63u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.bitmask` instruction.
    pub fn i8x16_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x64u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.narrow_i16x8_s` instruction.
    pub fn i8x16_narrow_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x65u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.narrow_i16x8_u` instruction.
    pub fn i8x16_narrow_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x66u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.shl` instruction.
    pub fn i8x16_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6bu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.shr_s` instruction.
    pub fn i8x16_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6cu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.shr_u` instruction.
    pub fn i8x16_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6du32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.add` instruction.
    pub fn i8x16_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6eu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.add_sat_s` instruction.
    pub fn i8x16_add_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6fu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.add_sat_u` instruction.
    pub fn i8x16_add_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x70u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.sub` instruction.
    pub fn i8x16_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x71u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.sub_sat_s` instruction.
    pub fn i8x16_sub_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x72u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.sub_sat_u` instruction.
    pub fn i8x16_sub_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x73u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.min_s` instruction.
    pub fn i8x16_min_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x76u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.min_u` instruction.
    pub fn i8x16_min_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x77u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.max_s` instruction.
    pub fn i8x16_max_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x78u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.max_u` instruction.
    pub fn i8x16_max_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x79u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.avgr_u` instruction.
    pub fn i8x16_avgr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Bu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extadd_pairwise_i8x16_s` instruction.
    pub fn i16x8_extadd_pairwise_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extadd_pairwise_i8x16_u` instruction.
    pub fn i16x8_extadd_pairwise_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Du32.encode(self.sink);
        self
    }
2645
    /// Encode the `i16x8.abs` instruction.
    pub fn i16x8_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x80u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.neg` instruction.
    pub fn i16x8_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x81u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.q15mulr_sat_s` instruction.
    pub fn i16x8_q15mulr_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x82u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.all_true` instruction.
    pub fn i16x8_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x83u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.bitmask` instruction.
    pub fn i16x8_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x84u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.narrow_i32x4_s` instruction.
    pub fn i16x8_narrow_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x85u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.narrow_i32x4_u` instruction.
    pub fn i16x8_narrow_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x86u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_low_i8x16_s` instruction.
    pub fn i16x8_extend_low_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x87u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_high_i8x16_s` instruction.
    pub fn i16x8_extend_high_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x88u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_low_i8x16_u` instruction.
    pub fn i16x8_extend_low_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x89u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_high_i8x16_u` instruction.
    pub fn i16x8_extend_high_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Au32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.shl` instruction.
    pub fn i16x8_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Bu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.shr_s` instruction.
    pub fn i16x8_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.shr_u` instruction.
    pub fn i16x8_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.add` instruction.
    pub fn i16x8_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Eu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.add_sat_s` instruction.
    pub fn i16x8_add_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Fu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.add_sat_u` instruction.
    pub fn i16x8_add_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x90u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.sub` instruction.
    pub fn i16x8_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x91u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.sub_sat_s` instruction.
    pub fn i16x8_sub_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x92u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.sub_sat_u` instruction.
    pub fn i16x8_sub_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x93u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.mul` instruction.
    pub fn i16x8_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x95u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.min_s` instruction.
    pub fn i16x8_min_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x96u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.min_u` instruction.
    pub fn i16x8_min_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x97u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.max_s` instruction.
    pub fn i16x8_max_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x98u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.max_u` instruction.
    pub fn i16x8_max_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x99u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.avgr_u` instruction.
    pub fn i16x8_avgr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Bu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_low_i8x16_s` instruction.
    pub fn i16x8_extmul_low_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_high_i8x16_s` instruction.
    pub fn i16x8_extmul_high_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_low_i8x16_u` instruction.
    pub fn i16x8_extmul_low_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Eu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_high_i8x16_u` instruction.
    pub fn i16x8_extmul_high_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Fu32.encode(self.sink);
        self
    }
2855
    /// Encode the `i32x4.extadd_pairwise_i16x8_s` instruction.
    pub fn i32x4_extadd_pairwise_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Eu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extadd_pairwise_i16x8_u` instruction.
    pub fn i32x4_extadd_pairwise_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Fu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.abs` instruction.
    pub fn i32x4_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA0u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.neg` instruction.
    pub fn i32x4_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA1u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.all_true` instruction.
    pub fn i32x4_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA3u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.bitmask` instruction.
    pub fn i32x4_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA4u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_low_i16x8_s` instruction.
    pub fn i32x4_extend_low_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA7u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_high_i16x8_s` instruction.
    pub fn i32x4_extend_high_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA8u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_low_i16x8_u` instruction.
    pub fn i32x4_extend_low_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA9u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_high_i16x8_u` instruction.
    pub fn i32x4_extend_high_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xAAu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.shl` instruction.
    pub fn i32x4_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xABu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.shr_s` instruction.
    pub fn i32x4_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xACu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.shr_u` instruction.
    pub fn i32x4_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xADu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.add` instruction.
    pub fn i32x4_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xAEu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.sub` instruction.
    pub fn i32x4_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB1u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.mul` instruction.
    pub fn i32x4_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB5u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.min_s` instruction.
    pub fn i32x4_min_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB6u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.min_u` instruction.
    pub fn i32x4_min_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB7u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.max_s` instruction.
    pub fn i32x4_max_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB8u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.max_u` instruction.
    pub fn i32x4_max_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB9u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.dot_i16x8_s` instruction.
    pub fn i32x4_dot_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBAu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_low_i16x8_s` instruction.
    pub fn i32x4_extmul_low_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBCu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_high_i16x8_s` instruction.
    pub fn i32x4_extmul_high_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBDu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_low_i16x8_u` instruction.
    pub fn i32x4_extmul_low_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBEu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_high_i16x8_u` instruction.
    pub fn i32x4_extmul_high_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBFu32.encode(self.sink);
        self
    }
3030
    /// Encode the `i64x2.abs` instruction.
    pub fn i64x2_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC0u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.neg` instruction.
    pub fn i64x2_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC1u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.all_true` instruction.
    pub fn i64x2_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC3u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.bitmask` instruction.
    pub fn i64x2_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC4u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_low_i32x4_s` instruction.
    pub fn i64x2_extend_low_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC7u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_high_i32x4_s` instruction.
    pub fn i64x2_extend_high_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC8u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_low_i32x4_u` instruction.
    pub fn i64x2_extend_low_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC9u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_high_i32x4_u` instruction.
    pub fn i64x2_extend_high_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCAu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.shl` instruction.
    pub fn i64x2_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCBu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.shr_s` instruction.
    pub fn i64x2_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCCu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.shr_u` instruction.
    pub fn i64x2_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCDu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.add` instruction.
    pub fn i64x2_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCEu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.sub` instruction.
    pub fn i64x2_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD1u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.mul` instruction.
    pub fn i64x2_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD5u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_low_i32x4_s` instruction.
    pub fn i64x2_extmul_low_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDCu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_high_i32x4_s` instruction.
    pub fn i64x2_extmul_high_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDDu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_low_i32x4_u` instruction.
    pub fn i64x2_extmul_low_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDEu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_high_i32x4_u` instruction.
    pub fn i64x2_extmul_high_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDFu32.encode(self.sink);
        self
    }
3156
    /// Encode the `f32x4.ceil` instruction.
    pub fn f32x4_ceil(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x67u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.floor` instruction.
    pub fn f32x4_floor(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x68u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.trunc` instruction.
    pub fn f32x4_trunc(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x69u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.nearest` instruction.
    pub fn f32x4_nearest(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6Au32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.abs` instruction.
    pub fn f32x4_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE0u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.neg` instruction.
    pub fn f32x4_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE1u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.sqrt` instruction.
    pub fn f32x4_sqrt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE3u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.add` instruction.
    pub fn f32x4_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE4u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.sub` instruction.
    pub fn f32x4_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE5u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.mul` instruction.
    pub fn f32x4_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE6u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.div` instruction.
    pub fn f32x4_div(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE7u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.min` instruction.
    pub fn f32x4_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE8u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.max` instruction.
    pub fn f32x4_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE9u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.pmin` instruction.
    pub fn f32x4_pmin(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEAu32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.pmax` instruction.
    pub fn f32x4_pmax(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEBu32.encode(self.sink);
        self
    }
3261
    /// Encode the `f64x2.ceil` instruction.
    ///
    /// The f64x2 rounding sub-opcodes (0x74, 0x75, 0x7A, 0x94) are scattered,
    /// unlike the contiguous f32x4 ones; these values come straight from the
    /// SIMD proposal's opcode table.
    pub fn f64x2_ceil(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x74u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.floor` instruction.
    pub fn f64x2_floor(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x75u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.trunc` instruction.
    pub fn f64x2_trunc(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Au32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.nearest` instruction.
    pub fn f64x2_nearest(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x94u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.abs` instruction.
    pub fn f64x2_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xECu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.neg` instruction.
    pub fn f64x2_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEDu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.sqrt` instruction.
    pub fn f64x2_sqrt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEFu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.add` instruction.
    pub fn f64x2_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF0u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.sub` instruction.
    pub fn f64x2_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF1u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.mul` instruction.
    pub fn f64x2_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF2u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.div` instruction.
    pub fn f64x2_div(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF3u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.min` instruction.
    pub fn f64x2_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF4u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.max` instruction.
    pub fn f64x2_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF5u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.pmin` instruction.
    pub fn f64x2_pmin(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF6u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.pmax` instruction.
    pub fn f64x2_pmax(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF7u32.encode(self.sink);
        self
    }
3366
    /// Encode the `i32x4.trunc_sat_f32x4_s` instruction.
    pub fn i32x4_trunc_sat_f32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF8u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.trunc_sat_f32x4_u` instruction.
    pub fn i32x4_trunc_sat_f32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF9u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.convert_i32x4_s` instruction.
    pub fn f32x4_convert_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xFAu32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.convert_i32x4_u` instruction.
    pub fn f32x4_convert_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xFBu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.trunc_sat_f64x2_s_zero` instruction.
    pub fn i32x4_trunc_sat_f64x2_s_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xFCu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.trunc_sat_f64x2_u_zero` instruction.
    pub fn i32x4_trunc_sat_f64x2_u_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xFDu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.convert_low_i32x4_s` instruction.
    pub fn f64x2_convert_low_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xFEu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.convert_low_i32x4_u` instruction.
    pub fn f64x2_convert_low_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xFFu32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.demote_f64x2_zero` instruction.
    pub fn f32x4_demote_f64x2_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x5Eu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.promote_low_f32x4` instruction.
    pub fn f64x2_promote_low_f32x4(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x5Fu32.encode(self.sink);
        self
    }
3436
    /// Encode an `i8x16.relaxed_swizzle` instruction.
    ///
    /// Sub-opcodes 0x100..=0x113 below are the relaxed SIMD proposal's
    /// instructions; they share the 0xFD SIMD prefix.
    pub fn i8x16_relaxed_swizzle(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x100u32.encode(self.sink);
        self
    }

    /// Encode an `i32x4.relaxed_trunc_f32x4_s` instruction.
    pub fn i32x4_relaxed_trunc_f32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x101u32.encode(self.sink);
        self
    }

    /// Encode an `i32x4.relaxed_trunc_f32x4_u` instruction.
    pub fn i32x4_relaxed_trunc_f32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x102u32.encode(self.sink);
        self
    }

    /// Encode an `i32x4.relaxed_trunc_f64x2_s_zero` instruction.
    pub fn i32x4_relaxed_trunc_f64x2_s_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x103u32.encode(self.sink);
        self
    }

    /// Encode an `i32x4.relaxed_trunc_f64x2_u_zero` instruction.
    pub fn i32x4_relaxed_trunc_f64x2_u_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x104u32.encode(self.sink);
        self
    }

    /// Encode an `f32x4.relaxed_madd` instruction.
    pub fn f32x4_relaxed_madd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x105u32.encode(self.sink);
        self
    }

    /// Encode an `f32x4.relaxed_nmadd` instruction.
    pub fn f32x4_relaxed_nmadd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x106u32.encode(self.sink);
        self
    }

    /// Encode an `f64x2.relaxed_madd` instruction.
    pub fn f64x2_relaxed_madd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x107u32.encode(self.sink);
        self
    }

    /// Encode an `f64x2.relaxed_nmadd` instruction.
    pub fn f64x2_relaxed_nmadd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x108u32.encode(self.sink);
        self
    }

    /// Encode an `i8x16.relaxed_laneselect` instruction.
    pub fn i8x16_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x109u32.encode(self.sink);
        self
    }

    /// Encode an `i16x8.relaxed_laneselect` instruction.
    pub fn i16x8_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Au32.encode(self.sink);
        self
    }

    /// Encode an `i32x4.relaxed_laneselect` instruction.
    pub fn i32x4_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Bu32.encode(self.sink);
        self
    }

    /// Encode an `i64x2.relaxed_laneselect` instruction.
    pub fn i64x2_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Cu32.encode(self.sink);
        self
    }

    /// Encode an `f32x4.relaxed_min` instruction.
    pub fn f32x4_relaxed_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Du32.encode(self.sink);
        self
    }

    /// Encode an `f32x4.relaxed_max` instruction.
    pub fn f32x4_relaxed_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Eu32.encode(self.sink);
        self
    }

    /// Encode an `f64x2.relaxed_min` instruction.
    pub fn f64x2_relaxed_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Fu32.encode(self.sink);
        self
    }

    /// Encode an `f64x2.relaxed_max` instruction.
    pub fn f64x2_relaxed_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x110u32.encode(self.sink);
        self
    }

    /// Encode an `i16x8.relaxed_q15mulr_s` instruction.
    pub fn i16x8_relaxed_q15mulr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x111u32.encode(self.sink);
        self
    }

    /// Encode an `i16x8.relaxed_dot_i8x16_i7x16_s` instruction.
    pub fn i16x8_relaxed_dot_i8x16_i7x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x112u32.encode(self.sink);
        self
    }

    /// Encode an `i32x4.relaxed_dot_i8x16_i7x16_add_s` instruction.
    pub fn i32x4_relaxed_dot_i8x16_i7x16_add_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x113u32.encode(self.sink);
        self
    }
3578
3579 pub fn memory_atomic_notify(&mut self, memarg: MemArg) -> &mut Self {
3583 self.sink.push(0xFE);
3584 self.sink.push(0x00);
3585 memarg.encode(self.sink);
3586 self
3587 }
3588
3589 pub fn memory_atomic_wait32(&mut self, memarg: MemArg) -> &mut Self {
3591 self.sink.push(0xFE);
3592 self.sink.push(0x01);
3593 memarg.encode(self.sink);
3594 self
3595 }
3596
3597 pub fn memory_atomic_wait64(&mut self, memarg: MemArg) -> &mut Self {
3599 self.sink.push(0xFE);
3600 self.sink.push(0x02);
3601 memarg.encode(self.sink);
3602 self
3603 }
3604
3605 pub fn atomic_fence(&mut self) -> &mut Self {
3607 self.sink.push(0xFE);
3608 self.sink.push(0x03);
3609 self.sink.push(0x00);
3610 self
3611 }
3612
3613 pub fn i32_atomic_load(&mut self, memarg: MemArg) -> &mut Self {
3615 self.sink.push(0xFE);
3616 self.sink.push(0x10);
3617 memarg.encode(self.sink);
3618 self
3619 }
3620
3621 pub fn i64_atomic_load(&mut self, memarg: MemArg) -> &mut Self {
3623 self.sink.push(0xFE);
3624 self.sink.push(0x11);
3625 memarg.encode(self.sink);
3626 self
3627 }
3628
3629 pub fn i32_atomic_load8_u(&mut self, memarg: MemArg) -> &mut Self {
3631 self.sink.push(0xFE);
3632 self.sink.push(0x12);
3633 memarg.encode(self.sink);
3634 self
3635 }
3636
3637 pub fn i32_atomic_load16_u(&mut self, memarg: MemArg) -> &mut Self {
3639 self.sink.push(0xFE);
3640 self.sink.push(0x13);
3641 memarg.encode(self.sink);
3642 self
3643 }
3644
3645 pub fn i64_atomic_load8_u(&mut self, memarg: MemArg) -> &mut Self {
3647 self.sink.push(0xFE);
3648 self.sink.push(0x14);
3649 memarg.encode(self.sink);
3650 self
3651 }
3652
3653 pub fn i64_atomic_load16_u(&mut self, memarg: MemArg) -> &mut Self {
3655 self.sink.push(0xFE);
3656 self.sink.push(0x15);
3657 memarg.encode(self.sink);
3658 self
3659 }
3660
3661 pub fn i64_atomic_load32_u(&mut self, memarg: MemArg) -> &mut Self {
3663 self.sink.push(0xFE);
3664 self.sink.push(0x16);
3665 memarg.encode(self.sink);
3666 self
3667 }
3668
3669 pub fn i32_atomic_store(&mut self, memarg: MemArg) -> &mut Self {
3671 self.sink.push(0xFE);
3672 self.sink.push(0x17);
3673 memarg.encode(self.sink);
3674 self
3675 }
3676
3677 pub fn i64_atomic_store(&mut self, memarg: MemArg) -> &mut Self {
3679 self.sink.push(0xFE);
3680 self.sink.push(0x18);
3681 memarg.encode(self.sink);
3682 self
3683 }
3684
3685 pub fn i32_atomic_store8(&mut self, memarg: MemArg) -> &mut Self {
3687 self.sink.push(0xFE);
3688 self.sink.push(0x19);
3689 memarg.encode(self.sink);
3690 self
3691 }
3692
3693 pub fn i32_atomic_store16(&mut self, memarg: MemArg) -> &mut Self {
3695 self.sink.push(0xFE);
3696 self.sink.push(0x1A);
3697 memarg.encode(self.sink);
3698 self
3699 }
3700
3701 pub fn i64_atomic_store8(&mut self, memarg: MemArg) -> &mut Self {
3703 self.sink.push(0xFE);
3704 self.sink.push(0x1B);
3705 memarg.encode(self.sink);
3706 self
3707 }
3708
3709 pub fn i64_atomic_store16(&mut self, memarg: MemArg) -> &mut Self {
3711 self.sink.push(0xFE);
3712 self.sink.push(0x1C);
3713 memarg.encode(self.sink);
3714 self
3715 }
3716
3717 pub fn i64_atomic_store32(&mut self, memarg: MemArg) -> &mut Self {
3719 self.sink.push(0xFE);
3720 self.sink.push(0x1D);
3721 memarg.encode(self.sink);
3722 self
3723 }
3724
3725 pub fn i32_atomic_rmw_add(&mut self, memarg: MemArg) -> &mut Self {
3727 self.sink.push(0xFE);
3728 self.sink.push(0x1E);
3729 memarg.encode(self.sink);
3730 self
3731 }
3732
3733 pub fn i64_atomic_rmw_add(&mut self, memarg: MemArg) -> &mut Self {
3735 self.sink.push(0xFE);
3736 self.sink.push(0x1F);
3737 memarg.encode(self.sink);
3738 self
3739 }
3740
3741 pub fn i32_atomic_rmw8_add_u(&mut self, memarg: MemArg) -> &mut Self {
3743 self.sink.push(0xFE);
3744 self.sink.push(0x20);
3745 memarg.encode(self.sink);
3746 self
3747 }
3748
3749 pub fn i32_atomic_rmw16_add_u(&mut self, memarg: MemArg) -> &mut Self {
3751 self.sink.push(0xFE);
3752 self.sink.push(0x21);
3753 memarg.encode(self.sink);
3754 self
3755 }
3756
3757 pub fn i64_atomic_rmw8_add_u(&mut self, memarg: MemArg) -> &mut Self {
3759 self.sink.push(0xFE);
3760 self.sink.push(0x22);
3761 memarg.encode(self.sink);
3762 self
3763 }
3764
3765 pub fn i64_atomic_rmw16_add_u(&mut self, memarg: MemArg) -> &mut Self {
3767 self.sink.push(0xFE);
3768 self.sink.push(0x23);
3769 memarg.encode(self.sink);
3770 self
3771 }
3772
3773 pub fn i64_atomic_rmw32_add_u(&mut self, memarg: MemArg) -> &mut Self {
3775 self.sink.push(0xFE);
3776 self.sink.push(0x24);
3777 memarg.encode(self.sink);
3778 self
3779 }
3780
3781 pub fn i32_atomic_rmw_sub(&mut self, memarg: MemArg) -> &mut Self {
3783 self.sink.push(0xFE);
3784 self.sink.push(0x25);
3785 memarg.encode(self.sink);
3786 self
3787 }
3788
3789 pub fn i64_atomic_rmw_sub(&mut self, memarg: MemArg) -> &mut Self {
3791 self.sink.push(0xFE);
3792 self.sink.push(0x26);
3793 memarg.encode(self.sink);
3794 self
3795 }
3796
3797 pub fn i32_atomic_rmw8_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3799 self.sink.push(0xFE);
3800 self.sink.push(0x27);
3801 memarg.encode(self.sink);
3802 self
3803 }
3804
3805 pub fn i32_atomic_rmw16_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3807 self.sink.push(0xFE);
3808 self.sink.push(0x28);
3809 memarg.encode(self.sink);
3810 self
3811 }
3812
3813 pub fn i64_atomic_rmw8_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3815 self.sink.push(0xFE);
3816 self.sink.push(0x29);
3817 memarg.encode(self.sink);
3818 self
3819 }
3820
3821 pub fn i64_atomic_rmw16_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3823 self.sink.push(0xFE);
3824 self.sink.push(0x2A);
3825 memarg.encode(self.sink);
3826 self
3827 }
3828
3829 pub fn i64_atomic_rmw32_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3831 self.sink.push(0xFE);
3832 self.sink.push(0x2B);
3833 memarg.encode(self.sink);
3834 self
3835 }
3836
3837 pub fn i32_atomic_rmw_and(&mut self, memarg: MemArg) -> &mut Self {
3839 self.sink.push(0xFE);
3840 self.sink.push(0x2C);
3841 memarg.encode(self.sink);
3842 self
3843 }
3844
3845 pub fn i64_atomic_rmw_and(&mut self, memarg: MemArg) -> &mut Self {
3847 self.sink.push(0xFE);
3848 self.sink.push(0x2D);
3849 memarg.encode(self.sink);
3850 self
3851 }
3852
3853 pub fn i32_atomic_rmw8_and_u(&mut self, memarg: MemArg) -> &mut Self {
3855 self.sink.push(0xFE);
3856 self.sink.push(0x2E);
3857 memarg.encode(self.sink);
3858 self
3859 }
3860
3861 pub fn i32_atomic_rmw16_and_u(&mut self, memarg: MemArg) -> &mut Self {
3863 self.sink.push(0xFE);
3864 self.sink.push(0x2F);
3865 memarg.encode(self.sink);
3866 self
3867 }
3868
3869 pub fn i64_atomic_rmw8_and_u(&mut self, memarg: MemArg) -> &mut Self {
3871 self.sink.push(0xFE);
3872 self.sink.push(0x30);
3873 memarg.encode(self.sink);
3874 self
3875 }
3876
3877 pub fn i64_atomic_rmw16_and_u(&mut self, memarg: MemArg) -> &mut Self {
3879 self.sink.push(0xFE);
3880 self.sink.push(0x31);
3881 memarg.encode(self.sink);
3882 self
3883 }
3884
3885 pub fn i64_atomic_rmw32_and_u(&mut self, memarg: MemArg) -> &mut Self {
3887 self.sink.push(0xFE);
3888 self.sink.push(0x32);
3889 memarg.encode(self.sink);
3890 self
3891 }
3892
3893 pub fn i32_atomic_rmw_or(&mut self, memarg: MemArg) -> &mut Self {
3895 self.sink.push(0xFE);
3896 self.sink.push(0x33);
3897 memarg.encode(self.sink);
3898 self
3899 }
3900
3901 pub fn i64_atomic_rmw_or(&mut self, memarg: MemArg) -> &mut Self {
3903 self.sink.push(0xFE);
3904 self.sink.push(0x34);
3905 memarg.encode(self.sink);
3906 self
3907 }
3908
3909 pub fn i32_atomic_rmw8_or_u(&mut self, memarg: MemArg) -> &mut Self {
3911 self.sink.push(0xFE);
3912 self.sink.push(0x35);
3913 memarg.encode(self.sink);
3914 self
3915 }
3916
3917 pub fn i32_atomic_rmw16_or_u(&mut self, memarg: MemArg) -> &mut Self {
3919 self.sink.push(0xFE);
3920 self.sink.push(0x36);
3921 memarg.encode(self.sink);
3922 self
3923 }
3924
3925 pub fn i64_atomic_rmw8_or_u(&mut self, memarg: MemArg) -> &mut Self {
3927 self.sink.push(0xFE);
3928 self.sink.push(0x37);
3929 memarg.encode(self.sink);
3930 self
3931 }
3932
3933 pub fn i64_atomic_rmw16_or_u(&mut self, memarg: MemArg) -> &mut Self {
3935 self.sink.push(0xFE);
3936 self.sink.push(0x38);
3937 memarg.encode(self.sink);
3938 self
3939 }
3940
3941 pub fn i64_atomic_rmw32_or_u(&mut self, memarg: MemArg) -> &mut Self {
3943 self.sink.push(0xFE);
3944 self.sink.push(0x39);
3945 memarg.encode(self.sink);
3946 self
3947 }
3948
3949 pub fn i32_atomic_rmw_xor(&mut self, memarg: MemArg) -> &mut Self {
3951 self.sink.push(0xFE);
3952 self.sink.push(0x3A);
3953 memarg.encode(self.sink);
3954 self
3955 }
3956
3957 pub fn i64_atomic_rmw_xor(&mut self, memarg: MemArg) -> &mut Self {
3959 self.sink.push(0xFE);
3960 self.sink.push(0x3B);
3961 memarg.encode(self.sink);
3962 self
3963 }
3964
3965 pub fn i32_atomic_rmw8_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3967 self.sink.push(0xFE);
3968 self.sink.push(0x3C);
3969 memarg.encode(self.sink);
3970 self
3971 }
3972
3973 pub fn i32_atomic_rmw16_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3975 self.sink.push(0xFE);
3976 self.sink.push(0x3D);
3977 memarg.encode(self.sink);
3978 self
3979 }
3980
3981 pub fn i64_atomic_rmw8_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3983 self.sink.push(0xFE);
3984 self.sink.push(0x3E);
3985 memarg.encode(self.sink);
3986 self
3987 }
3988
3989 pub fn i64_atomic_rmw16_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3991 self.sink.push(0xFE);
3992 self.sink.push(0x3F);
3993 memarg.encode(self.sink);
3994 self
3995 }
3996
3997 pub fn i64_atomic_rmw32_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3999 self.sink.push(0xFE);
4000 self.sink.push(0x40);
4001 memarg.encode(self.sink);
4002 self
4003 }
4004
4005 pub fn i32_atomic_rmw_xchg(&mut self, memarg: MemArg) -> &mut Self {
4007 self.sink.push(0xFE);
4008 self.sink.push(0x41);
4009 memarg.encode(self.sink);
4010 self
4011 }
4012
4013 pub fn i64_atomic_rmw_xchg(&mut self, memarg: MemArg) -> &mut Self {
4015 self.sink.push(0xFE);
4016 self.sink.push(0x42);
4017 memarg.encode(self.sink);
4018 self
4019 }
4020
4021 pub fn i32_atomic_rmw8_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4023 self.sink.push(0xFE);
4024 self.sink.push(0x43);
4025 memarg.encode(self.sink);
4026 self
4027 }
4028
4029 pub fn i32_atomic_rmw16_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4031 self.sink.push(0xFE);
4032 self.sink.push(0x44);
4033 memarg.encode(self.sink);
4034 self
4035 }
4036
4037 pub fn i64_atomic_rmw8_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4039 self.sink.push(0xFE);
4040 self.sink.push(0x45);
4041 memarg.encode(self.sink);
4042 self
4043 }
4044
4045 pub fn i64_atomic_rmw16_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4047 self.sink.push(0xFE);
4048 self.sink.push(0x46);
4049 memarg.encode(self.sink);
4050 self
4051 }
4052
4053 pub fn i64_atomic_rmw32_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4055 self.sink.push(0xFE);
4056 self.sink.push(0x47);
4057 memarg.encode(self.sink);
4058 self
4059 }
4060
4061 pub fn i32_atomic_rmw_cmpxchg(&mut self, memarg: MemArg) -> &mut Self {
4063 self.sink.push(0xFE);
4064 self.sink.push(0x48);
4065 memarg.encode(self.sink);
4066 self
4067 }
4068
4069 pub fn i64_atomic_rmw_cmpxchg(&mut self, memarg: MemArg) -> &mut Self {
4071 self.sink.push(0xFE);
4072 self.sink.push(0x49);
4073 memarg.encode(self.sink);
4074 self
4075 }
4076
4077 pub fn i32_atomic_rmw8_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4079 self.sink.push(0xFE);
4080 self.sink.push(0x4A);
4081 memarg.encode(self.sink);
4082 self
4083 }
4084
4085 pub fn i32_atomic_rmw16_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4087 self.sink.push(0xFE);
4088 self.sink.push(0x4B);
4089 memarg.encode(self.sink);
4090 self
4091 }
4092
4093 pub fn i64_atomic_rmw8_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4095 self.sink.push(0xFE);
4096 self.sink.push(0x4C);
4097 memarg.encode(self.sink);
4098 self
4099 }
4100
4101 pub fn i64_atomic_rmw16_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4103 self.sink.push(0xFE);
4104 self.sink.push(0x4D);
4105 memarg.encode(self.sink);
4106 self
4107 }
4108
4109 pub fn i64_atomic_rmw32_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4111 self.sink.push(0xFE);
4112 self.sink.push(0x4E);
4113 memarg.encode(self.sink);
4114 self
4115 }
4116
4117 pub fn global_atomic_get(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4121 self.sink.push(0xFE);
4122 self.sink.push(0x4F);
4123 ordering.encode(self.sink);
4124 global_index.encode(self.sink);
4125 self
4126 }
4127
4128 pub fn global_atomic_set(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4130 self.sink.push(0xFE);
4131 self.sink.push(0x50);
4132 ordering.encode(self.sink);
4133 global_index.encode(self.sink);
4134 self
4135 }
4136
4137 pub fn global_atomic_rmw_add(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4139 self.sink.push(0xFE);
4140 self.sink.push(0x51);
4141 ordering.encode(self.sink);
4142 global_index.encode(self.sink);
4143 self
4144 }
4145
4146 pub fn global_atomic_rmw_sub(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4148 self.sink.push(0xFE);
4149 self.sink.push(0x52);
4150 ordering.encode(self.sink);
4151 global_index.encode(self.sink);
4152 self
4153 }
4154
4155 pub fn global_atomic_rmw_and(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4157 self.sink.push(0xFE);
4158 self.sink.push(0x53);
4159 ordering.encode(self.sink);
4160 global_index.encode(self.sink);
4161 self
4162 }
4163
4164 pub fn global_atomic_rmw_or(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4166 self.sink.push(0xFE);
4167 self.sink.push(0x54);
4168 ordering.encode(self.sink);
4169 global_index.encode(self.sink);
4170 self
4171 }
4172
4173 pub fn global_atomic_rmw_xor(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4175 self.sink.push(0xFE);
4176 self.sink.push(0x55);
4177 ordering.encode(self.sink);
4178 global_index.encode(self.sink);
4179 self
4180 }
4181
4182 pub fn global_atomic_rmw_xchg(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4184 self.sink.push(0xFE);
4185 self.sink.push(0x56);
4186 ordering.encode(self.sink);
4187 global_index.encode(self.sink);
4188 self
4189 }
4190
4191 pub fn global_atomic_rmw_cmpxchg(
4193 &mut self,
4194 ordering: Ordering,
4195 global_index: u32,
4196 ) -> &mut Self {
4197 self.sink.push(0xFE);
4198 self.sink.push(0x57);
4199 ordering.encode(self.sink);
4200 global_index.encode(self.sink);
4201 self
4202 }
4203
4204 pub fn table_atomic_get(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4206 self.sink.push(0xFE);
4207 self.sink.push(0x58);
4208 ordering.encode(self.sink);
4209 table_index.encode(self.sink);
4210 self
4211 }
4212
4213 pub fn table_atomic_set(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4215 self.sink.push(0xFE);
4216 self.sink.push(0x59);
4217 ordering.encode(self.sink);
4218 table_index.encode(self.sink);
4219 self
4220 }
4221
4222 pub fn table_atomic_rmw_xchg(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4224 self.sink.push(0xFE);
4225 self.sink.push(0x5A);
4226 ordering.encode(self.sink);
4227 table_index.encode(self.sink);
4228 self
4229 }
4230
4231 pub fn table_atomic_rmw_cmpxchg(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4233 self.sink.push(0xFE);
4234 self.sink.push(0x5B);
4235 ordering.encode(self.sink);
4236 table_index.encode(self.sink);
4237 self
4238 }
4239
4240 pub fn struct_atomic_get(
4242 &mut self,
4243 ordering: Ordering,
4244 struct_type_index: u32,
4245 field_index: u32,
4246 ) -> &mut Self {
4247 self.sink.push(0xFE);
4248 self.sink.push(0x5C);
4249 ordering.encode(self.sink);
4250 struct_type_index.encode(self.sink);
4251 field_index.encode(self.sink);
4252 self
4253 }
4254
4255 pub fn struct_atomic_get_s(
4257 &mut self,
4258 ordering: Ordering,
4259 struct_type_index: u32,
4260 field_index: u32,
4261 ) -> &mut Self {
4262 self.sink.push(0xFE);
4263 self.sink.push(0x5D);
4264 ordering.encode(self.sink);
4265 struct_type_index.encode(self.sink);
4266 field_index.encode(self.sink);
4267 self
4268 }
4269
4270 pub fn struct_atomic_get_u(
4272 &mut self,
4273 ordering: Ordering,
4274 struct_type_index: u32,
4275 field_index: u32,
4276 ) -> &mut Self {
4277 self.sink.push(0xFE);
4278 self.sink.push(0x5E);
4279 ordering.encode(self.sink);
4280 struct_type_index.encode(self.sink);
4281 field_index.encode(self.sink);
4282 self
4283 }
4284
4285 pub fn struct_atomic_set(
4287 &mut self,
4288 ordering: Ordering,
4289 struct_type_index: u32,
4290 field_index: u32,
4291 ) -> &mut Self {
4292 self.sink.push(0xFE);
4293 self.sink.push(0x5F);
4294 ordering.encode(self.sink);
4295 struct_type_index.encode(self.sink);
4296 field_index.encode(self.sink);
4297 self
4298 }
4299
4300 pub fn struct_atomic_rmw_add(
4302 &mut self,
4303 ordering: Ordering,
4304 struct_type_index: u32,
4305 field_index: u32,
4306 ) -> &mut Self {
4307 self.sink.push(0xFE);
4308 self.sink.push(0x60);
4309 ordering.encode(self.sink);
4310 struct_type_index.encode(self.sink);
4311 field_index.encode(self.sink);
4312 self
4313 }
4314
4315 pub fn struct_atomic_rmw_sub(
4317 &mut self,
4318 ordering: Ordering,
4319 struct_type_index: u32,
4320 field_index: u32,
4321 ) -> &mut Self {
4322 self.sink.push(0xFE);
4323 self.sink.push(0x61);
4324 ordering.encode(self.sink);
4325 struct_type_index.encode(self.sink);
4326 field_index.encode(self.sink);
4327 self
4328 }
4329
4330 pub fn struct_atomic_rmw_and(
4332 &mut self,
4333 ordering: Ordering,
4334 struct_type_index: u32,
4335 field_index: u32,
4336 ) -> &mut Self {
4337 self.sink.push(0xFE);
4338 self.sink.push(0x62);
4339 ordering.encode(self.sink);
4340 struct_type_index.encode(self.sink);
4341 field_index.encode(self.sink);
4342 self
4343 }
4344
4345 pub fn struct_atomic_rmw_or(
4347 &mut self,
4348 ordering: Ordering,
4349 struct_type_index: u32,
4350 field_index: u32,
4351 ) -> &mut Self {
4352 self.sink.push(0xFE);
4353 self.sink.push(0x63);
4354 ordering.encode(self.sink);
4355 struct_type_index.encode(self.sink);
4356 field_index.encode(self.sink);
4357 self
4358 }
4359
4360 pub fn struct_atomic_rmw_xor(
4362 &mut self,
4363 ordering: Ordering,
4364 struct_type_index: u32,
4365 field_index: u32,
4366 ) -> &mut Self {
4367 self.sink.push(0xFE);
4368 self.sink.push(0x64);
4369 ordering.encode(self.sink);
4370 struct_type_index.encode(self.sink);
4371 field_index.encode(self.sink);
4372 self
4373 }
4374
4375 pub fn struct_atomic_rmw_xchg(
4377 &mut self,
4378 ordering: Ordering,
4379 struct_type_index: u32,
4380 field_index: u32,
4381 ) -> &mut Self {
4382 self.sink.push(0xFE);
4383 self.sink.push(0x65);
4384 ordering.encode(self.sink);
4385 struct_type_index.encode(self.sink);
4386 field_index.encode(self.sink);
4387 self
4388 }
4389
4390 pub fn struct_atomic_rmw_cmpxchg(
4392 &mut self,
4393 ordering: Ordering,
4394 struct_type_index: u32,
4395 field_index: u32,
4396 ) -> &mut Self {
4397 self.sink.push(0xFE);
4398 self.sink.push(0x66);
4399 ordering.encode(self.sink);
4400 struct_type_index.encode(self.sink);
4401 field_index.encode(self.sink);
4402 self
4403 }
4404
4405 pub fn array_atomic_get(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4407 self.sink.push(0xFE);
4408 self.sink.push(0x67);
4409 ordering.encode(self.sink);
4410 array_type_index.encode(self.sink);
4411 self
4412 }
4413
4414 pub fn array_atomic_get_s(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4416 self.sink.push(0xFE);
4417 self.sink.push(0x68);
4418 ordering.encode(self.sink);
4419 array_type_index.encode(self.sink);
4420 self
4421 }
4422
4423 pub fn array_atomic_get_u(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4425 self.sink.push(0xFE);
4426 self.sink.push(0x69);
4427 ordering.encode(self.sink);
4428 array_type_index.encode(self.sink);
4429 self
4430 }
4431
4432 pub fn array_atomic_set(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4434 self.sink.push(0xFE);
4435 self.sink.push(0x6A);
4436 ordering.encode(self.sink);
4437 array_type_index.encode(self.sink);
4438 self
4439 }
4440
4441 pub fn array_atomic_rmw_add(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4443 self.sink.push(0xFE);
4444 self.sink.push(0x6B);
4445 ordering.encode(self.sink);
4446 array_type_index.encode(self.sink);
4447 self
4448 }
4449
4450 pub fn array_atomic_rmw_sub(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4452 self.sink.push(0xFE);
4453 self.sink.push(0x6C);
4454 ordering.encode(self.sink);
4455 array_type_index.encode(self.sink);
4456 self
4457 }
4458
4459 pub fn array_atomic_rmw_and(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4461 self.sink.push(0xFE);
4462 self.sink.push(0x6D);
4463 ordering.encode(self.sink);
4464 array_type_index.encode(self.sink);
4465 self
4466 }
4467
4468 pub fn array_atomic_rmw_or(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4470 self.sink.push(0xFE);
4471 self.sink.push(0x6E);
4472 ordering.encode(self.sink);
4473 array_type_index.encode(self.sink);
4474 self
4475 }
4476
4477 pub fn array_atomic_rmw_xor(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4479 self.sink.push(0xFE);
4480 self.sink.push(0x6F);
4481 ordering.encode(self.sink);
4482 array_type_index.encode(self.sink);
4483 self
4484 }
4485
4486 pub fn array_atomic_rmw_xchg(
4488 &mut self,
4489 ordering: Ordering,
4490 array_type_index: u32,
4491 ) -> &mut Self {
4492 self.sink.push(0xFE);
4493 self.sink.push(0x70);
4494 ordering.encode(self.sink);
4495 array_type_index.encode(self.sink);
4496 self
4497 }
4498
4499 pub fn array_atomic_rmw_cmpxchg(
4501 &mut self,
4502 ordering: Ordering,
4503 array_type_index: u32,
4504 ) -> &mut Self {
4505 self.sink.push(0xFE);
4506 self.sink.push(0x71);
4507 ordering.encode(self.sink);
4508 array_type_index.encode(self.sink);
4509 self
4510 }
4511
4512 pub fn ref_i31_shared(&mut self) -> &mut Self {
4514 self.sink.push(0xFE);
4515 self.sink.push(0x72);
4516 self
4517 }
4518
    /// Encode a `cont.new` instruction with the given continuation type index.
    pub fn cont_new(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xE0);
        type_index.encode(self.sink);
        self
    }

    /// Encode a `cont.bind` instruction, binding from the continuation type
    /// at `argument_index` to the one at `result_index`.
    pub fn cont_bind(&mut self, argument_index: u32, result_index: u32) -> &mut Self {
        self.sink.push(0xE1);
        argument_index.encode(self.sink);
        result_index.encode(self.sink);
        self
    }

    /// Encode a `suspend` instruction for the given tag.
    pub fn suspend(&mut self, tag_index: u32) -> &mut Self {
        self.sink.push(0xE2);
        tag_index.encode(self.sink);
        self
    }

    /// Encode a `resume` instruction.
    ///
    /// `resume_table` is the vector of `Handle` clauses written after the
    /// continuation type index (length-prefixed via `encode_vec`).
    pub fn resume<V: IntoIterator<Item = Handle>>(
        &mut self,
        cont_type_index: u32,
        resume_table: V,
    ) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0xE3);
        cont_type_index.encode(self.sink);
        encode_vec(resume_table, self.sink);
        self
    }

    /// Encode a `resume_throw` instruction, which resumes the continuation by
    /// raising the exception identified by `tag_index`.
    pub fn resume_throw<V: IntoIterator<Item = Handle>>(
        &mut self,
        cont_type_index: u32,
        tag_index: u32,
        resume_table: V,
    ) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0xE4);
        cont_type_index.encode(self.sink);
        tag_index.encode(self.sink);
        encode_vec(resume_table, self.sink);
        self
    }

    /// Encode a `switch` instruction.
    pub fn switch(&mut self, cont_type_index: u32, tag_index: u32) -> &mut Self {
        self.sink.push(0xE5);
        cont_type_index.encode(self.sink);
        tag_index.encode(self.sink);
        self
    }
4582
    /// Encode an `i64.add128` instruction (0xFC-prefixed, sub-opcode 19).
    pub fn i64_add128(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        19u32.encode(self.sink);
        self
    }

    /// Encode an `i64.sub128` instruction (0xFC-prefixed, sub-opcode 20).
    pub fn i64_sub128(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        20u32.encode(self.sink);
        self
    }

    /// Encode an `i64.mul_wide_s` instruction (0xFC-prefixed, sub-opcode 21).
    pub fn i64_mul_wide_s(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        21u32.encode(self.sink);
        self
    }

    /// Encode an `i64.mul_wide_u` instruction (0xFC-prefixed, sub-opcode 22).
    pub fn i64_mul_wide_u(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        22u32.encode(self.sink);
        self
    }
4612}